// Generated by the protocol buffer compiler.  DO NOT EDIT!
// source: DatanodeProtocol.proto

// Protobuf Java Version: 3.25.5
package org.apache.hadoop.hdfs.protocol.proto;

public final class DatanodeProtocolProtos {
  private DatanodeProtocolProtos() {}
  public static void registerAllExtensions(
      org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite registry) {
  }

  public static void registerAllExtensions(
      org.apache.hadoop.thirdparty.protobuf.ExtensionRegistry registry) {
    registerAllExtensions(
        (org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite) registry);
  }
  public interface DatanodeRegistrationProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.datanode.DatanodeRegistrationProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

    /**
     * <pre>
     * Datanode information
     * </pre>
     *
     * <code>required .hadoop.hdfs.DatanodeIDProto datanodeID = 1;</code>
     * @return Whether the datanodeID field is set.
     */
    boolean hasDatanodeID();
    /**
     * <pre>
     * Datanode information
     * </pre>
     *
     * <code>required .hadoop.hdfs.DatanodeIDProto datanodeID = 1;</code>
     * @return The datanodeID.
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto getDatanodeID();
    /**
     * <pre>
     * Datanode information
     * </pre>
     *
     * <code>required .hadoop.hdfs.DatanodeIDProto datanodeID = 1;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder getDatanodeIDOrBuilder();

    /**
     * <pre>
     * Node information
     * </pre>
     *
     * <code>required .hadoop.hdfs.StorageInfoProto storageInfo = 2;</code>
     * @return Whether the storageInfo field is set.
     */
    boolean hasStorageInfo();
    /**
     * <pre>
     * Node information
     * </pre>
     *
     * <code>required .hadoop.hdfs.StorageInfoProto storageInfo = 2;</code>
     * @return The storageInfo.
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto getStorageInfo();
    /**
     * <pre>
     * Node information
     * </pre>
     *
     * <code>required .hadoop.hdfs.StorageInfoProto storageInfo = 2;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProtoOrBuilder getStorageInfoOrBuilder();

    /**
     * <pre>
     * Block keys
     * </pre>
     *
     * <code>required .hadoop.hdfs.ExportedBlockKeysProto keys = 3;</code>
     * @return Whether the keys field is set.
     */
    boolean hasKeys();
    /**
     * <pre>
     * Block keys
     * </pre>
     *
     * <code>required .hadoop.hdfs.ExportedBlockKeysProto keys = 3;</code>
     * @return The keys.
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto getKeys();
    /**
     * <pre>
     * Block keys
     * </pre>
     *
     * <code>required .hadoop.hdfs.ExportedBlockKeysProto keys = 3;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProtoOrBuilder getKeysOrBuilder();

    /**
     * <pre>
     * Software version of the DN, e.g. "2.0.0"
     * </pre>
     *
     * <code>required string softwareVersion = 4;</code>
     * @return Whether the softwareVersion field is set.
     */
    boolean hasSoftwareVersion();
    /**
     * <pre>
     * Software version of the DN, e.g. "2.0.0"
     * </pre>
     *
     * <code>required string softwareVersion = 4;</code>
     * @return The softwareVersion.
     */
    java.lang.String getSoftwareVersion();
    /**
     * <pre>
     * Software version of the DN, e.g. "2.0.0"
     * </pre>
     *
     * <code>required string softwareVersion = 4;</code>
     * @return The bytes for softwareVersion.
     */
    org.apache.hadoop.thirdparty.protobuf.ByteString
        getSoftwareVersionBytes();
  }
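  // Illustrative usage sketch (not part of the generated output): a registration
  // message is normally assembled through the nested Builder defined below. The
  // variables datanodeId, storageInfo and blockKeys are placeholders for already
  // built HdfsProtos.DatanodeIDProto, HdfsServerProtos.StorageInfoProto and
  // HdfsServerProtos.ExportedBlockKeysProto instances.
  //
  //   DatanodeProtocolProtos.DatanodeRegistrationProto reg =
  //       DatanodeProtocolProtos.DatanodeRegistrationProto.newBuilder()
  //           .setDatanodeID(datanodeId)
  //           .setStorageInfo(storageInfo)
  //           .setKeys(blockKeys)
  //           .setSoftwareVersion("2.0.0")
  //           .build();
  //
  // All four fields are declared required, so build() fails (via
  // newUninitializedMessageException) if any of them is left unset.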
  /**
   * <pre>
   **
   * Information to identify a datanode to a namenode
   * </pre>
   *
   * Protobuf type {@code hadoop.hdfs.datanode.DatanodeRegistrationProto}
   */
  public static final class DatanodeRegistrationProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.hdfs.datanode.DatanodeRegistrationProto)
      DatanodeRegistrationProtoOrBuilder {
    private static final long serialVersionUID = 0L;
    // Use DatanodeRegistrationProto.newBuilder() to construct.
    private DatanodeRegistrationProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private DatanodeRegistrationProto() {
      softwareVersion_ = "";
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new DatanodeRegistrationProto();
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_DatanodeRegistrationProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_DatanodeRegistrationProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.class, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder.class);
    }

    private int bitField0_;
    public static final int DATANODEID_FIELD_NUMBER = 1;
    private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto datanodeID_;
    /**
     * <pre>
     * Datanode information
     * </pre>
     *
     * <code>required .hadoop.hdfs.DatanodeIDProto datanodeID = 1;</code>
     * @return Whether the datanodeID field is set.
     */
    @java.lang.Override
    public boolean hasDatanodeID() {
      return ((bitField0_ & 0x00000001) != 0);
    }
    /**
     * <pre>
     * Datanode information
     * </pre>
     *
     * <code>required .hadoop.hdfs.DatanodeIDProto datanodeID = 1;</code>
     * @return The datanodeID.
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto getDatanodeID() {
      return datanodeID_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.getDefaultInstance() : datanodeID_;
    }
    /**
     * <pre>
     * Datanode information
     * </pre>
     *
     * <code>required .hadoop.hdfs.DatanodeIDProto datanodeID = 1;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder getDatanodeIDOrBuilder() {
      return datanodeID_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.getDefaultInstance() : datanodeID_;
    }

    public static final int STORAGEINFO_FIELD_NUMBER = 2;
    private org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto storageInfo_;
    /**
     * <pre>
     * Node information
     * </pre>
     *
     * <code>required .hadoop.hdfs.StorageInfoProto storageInfo = 2;</code>
     * @return Whether the storageInfo field is set.
     */
    @java.lang.Override
    public boolean hasStorageInfo() {
      return ((bitField0_ & 0x00000002) != 0);
    }
    /**
     * <pre>
     * Node information
     * </pre>
     *
     * <code>required .hadoop.hdfs.StorageInfoProto storageInfo = 2;</code>
     * @return The storageInfo.
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto getStorageInfo() {
      return storageInfo_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto.getDefaultInstance() : storageInfo_;
    }
    /**
     * <pre>
     * Node information
     * </pre>
     *
     * <code>required .hadoop.hdfs.StorageInfoProto storageInfo = 2;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProtoOrBuilder getStorageInfoOrBuilder() {
      return storageInfo_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto.getDefaultInstance() : storageInfo_;
    }

    public static final int KEYS_FIELD_NUMBER = 3;
    private org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto keys_;
    /**
     * <pre>
     * Block keys
     * </pre>
     *
     * <code>required .hadoop.hdfs.ExportedBlockKeysProto keys = 3;</code>
     * @return Whether the keys field is set.
     */
    @java.lang.Override
    public boolean hasKeys() {
      return ((bitField0_ & 0x00000004) != 0);
    }
    /**
     * <pre>
     * Block keys
     * </pre>
     *
     * <code>required .hadoop.hdfs.ExportedBlockKeysProto keys = 3;</code>
     * @return The keys.
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto getKeys() {
      return keys_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto.getDefaultInstance() : keys_;
    }
    /**
     * <pre>
     * Block keys
     * </pre>
     *
     * <code>required .hadoop.hdfs.ExportedBlockKeysProto keys = 3;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProtoOrBuilder getKeysOrBuilder() {
      return keys_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto.getDefaultInstance() : keys_;
    }

    public static final int SOFTWAREVERSION_FIELD_NUMBER = 4;
    @SuppressWarnings("serial")
    private volatile java.lang.Object softwareVersion_ = "";
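    // softwareVersion_ holds either a java.lang.String or a ByteString;
    // getSoftwareVersion() lazily decodes a ByteString and caches the String when
    // it is valid UTF-8, while getSoftwareVersionBytes() caches the reverse
    // conversion.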
    /**
     * <pre>
     * Software version of the DN, e.g. "2.0.0"
     * </pre>
     *
     * <code>required string softwareVersion = 4;</code>
     * @return Whether the softwareVersion field is set.
     */
    @java.lang.Override
    public boolean hasSoftwareVersion() {
      return ((bitField0_ & 0x00000008) != 0);
    }
    /**
     * <pre>
     * Software version of the DN, e.g. "2.0.0"
     * </pre>
     *
     * <code>required string softwareVersion = 4;</code>
     * @return The softwareVersion.
     */
    @java.lang.Override
    public java.lang.String getSoftwareVersion() {
      java.lang.Object ref = softwareVersion_;
      if (ref instanceof java.lang.String) {
        return (java.lang.String) ref;
      } else {
        org.apache.hadoop.thirdparty.protobuf.ByteString bs = 
            (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        java.lang.String s = bs.toStringUtf8();
        if (bs.isValidUtf8()) {
          softwareVersion_ = s;
        }
        return s;
      }
    }
    /**
     * <pre>
     * Software version of the DN, e.g. "2.0.0"
     * </pre>
     *
     * <code>required string softwareVersion = 4;</code>
     * @return The bytes for softwareVersion.
     */
    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.ByteString
        getSoftwareVersionBytes() {
      java.lang.Object ref = softwareVersion_;
      if (ref instanceof java.lang.String) {
        org.apache.hadoop.thirdparty.protobuf.ByteString b = 
            org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                (java.lang.String) ref);
        softwareVersion_ = b;
        return b;
      } else {
        return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
      }
    }

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      if (!hasDatanodeID()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasStorageInfo()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasKeys()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasSoftwareVersion()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!getDatanodeID().isInitialized()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!getStorageInfo().isInitialized()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!getKeys().isInitialized()) {
        memoizedIsInitialized = 0;
        return false;
      }
      memoizedIsInitialized = 1;
      return true;
    }

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      if (((bitField0_ & 0x00000001) != 0)) {
        output.writeMessage(1, getDatanodeID());
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        output.writeMessage(2, getStorageInfo());
      }
      if (((bitField0_ & 0x00000004) != 0)) {
        output.writeMessage(3, getKeys());
      }
      if (((bitField0_ & 0x00000008) != 0)) {
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 4, softwareVersion_);
      }
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeMessageSize(1, getDatanodeID());
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeMessageSize(2, getStorageInfo());
      }
      if (((bitField0_ & 0x00000004) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeMessageSize(3, getKeys());
      }
      if (((bitField0_ & 0x00000008) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(4, softwareVersion_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto other = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto) obj;

      if (hasDatanodeID() != other.hasDatanodeID()) return false;
      if (hasDatanodeID()) {
        if (!getDatanodeID()
            .equals(other.getDatanodeID())) return false;
      }
      if (hasStorageInfo() != other.hasStorageInfo()) return false;
      if (hasStorageInfo()) {
        if (!getStorageInfo()
            .equals(other.getStorageInfo())) return false;
      }
      if (hasKeys() != other.hasKeys()) return false;
      if (hasKeys()) {
        if (!getKeys()
            .equals(other.getKeys())) return false;
      }
      if (hasSoftwareVersion() != other.hasSoftwareVersion()) return false;
      if (hasSoftwareVersion()) {
        if (!getSoftwareVersion()
            .equals(other.getSoftwareVersion())) return false;
      }
      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      if (hasDatanodeID()) {
        hash = (37 * hash) + DATANODEID_FIELD_NUMBER;
        hash = (53 * hash) + getDatanodeID().hashCode();
      }
      if (hasStorageInfo()) {
        hash = (37 * hash) + STORAGEINFO_FIELD_NUMBER;
        hash = (53 * hash) + getStorageInfo().hashCode();
      }
      if (hasKeys()) {
        hash = (37 * hash) + KEYS_FIELD_NUMBER;
        hash = (53 * hash) + getKeys().hashCode();
      }
      if (hasSoftwareVersion()) {
        hash = (37 * hash) + SOFTWAREVERSION_FIELD_NUMBER;
        hash = (53 * hash) + getSoftwareVersion().hashCode();
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }
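    // Illustrative parsing sketch (not part of the generated output): a serialized
    // registration can be decoded with any of the parseFrom overloads above, e.g.
    //
    //   DatanodeProtocolProtos.DatanodeRegistrationProto reg =
    //       DatanodeProtocolProtos.DatanodeRegistrationProto.parseFrom(bytes);
    //
    // where bytes is a placeholder byte[] holding a wire-format message; malformed
    // input, including missing required fields, surfaces as an
    // InvalidProtocolBufferException.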

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * <pre>
     **
     * Information to identify a datanode to a namenode
     * </pre>
     *
     * Protobuf type {@code hadoop.hdfs.datanode.DatanodeRegistrationProto}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.hdfs.datanode.DatanodeRegistrationProto)
        org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_DatanodeRegistrationProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_DatanodeRegistrationProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.class, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      private void maybeForceBuilderInitialization() {
        if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
                .alwaysUseFieldBuilders) {
          getDatanodeIDFieldBuilder();
          getStorageInfoFieldBuilder();
          getKeysFieldBuilder();
        }
      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        bitField0_ = 0;
        datanodeID_ = null;
        if (datanodeIDBuilder_ != null) {
          datanodeIDBuilder_.dispose();
          datanodeIDBuilder_ = null;
        }
        storageInfo_ = null;
        if (storageInfoBuilder_ != null) {
          storageInfoBuilder_.dispose();
          storageInfoBuilder_ = null;
        }
        keys_ = null;
        if (keysBuilder_ != null) {
          keysBuilder_.dispose();
          keysBuilder_ = null;
        }
        softwareVersion_ = "";
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_DatanodeRegistrationProto_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto build() {
        org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto buildPartial() {
        org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto result = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto(this);
        if (bitField0_ != 0) { buildPartial0(result); }
        onBuilt();
        return result;
      }

      private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto result) {
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) != 0)) {
          result.datanodeID_ = datanodeIDBuilder_ == null
              ? datanodeID_
              : datanodeIDBuilder_.build();
          to_bitField0_ |= 0x00000001;
        }
        if (((from_bitField0_ & 0x00000002) != 0)) {
          result.storageInfo_ = storageInfoBuilder_ == null
              ? storageInfo_
              : storageInfoBuilder_.build();
          to_bitField0_ |= 0x00000002;
        }
        if (((from_bitField0_ & 0x00000004) != 0)) {
          result.keys_ = keysBuilder_ == null
              ? keys_
              : keysBuilder_.build();
          to_bitField0_ |= 0x00000004;
        }
        if (((from_bitField0_ & 0x00000008) != 0)) {
          result.softwareVersion_ = softwareVersion_;
          to_bitField0_ |= 0x00000008;
        }
        result.bitField0_ |= to_bitField0_;
      }

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto) {
          return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto other) {
        if (other == org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance()) return this;
        if (other.hasDatanodeID()) {
          mergeDatanodeID(other.getDatanodeID());
        }
        if (other.hasStorageInfo()) {
          mergeStorageInfo(other.getStorageInfo());
        }
        if (other.hasKeys()) {
          mergeKeys(other.getKeys());
        }
        if (other.hasSoftwareVersion()) {
          softwareVersion_ = other.softwareVersion_;
          bitField0_ |= 0x00000008;
          onChanged();
        }
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        if (!hasDatanodeID()) {
          return false;
        }
        if (!hasStorageInfo()) {
          return false;
        }
        if (!hasKeys()) {
          return false;
        }
        if (!hasSoftwareVersion()) {
          return false;
        }
        if (!getDatanodeID().isInitialized()) {
          return false;
        }
        if (!getStorageInfo().isInitialized()) {
          return false;
        }
        if (!getKeys().isInitialized()) {
          return false;
        }
        return true;
      }

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
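            // Each case below matches a wire tag, i.e. (field_number << 3) | wire_type:
            // 10, 18 and 26 are the length-delimited message fields 1-3 (datanodeID,
            // storageInfo, keys) and 34 is the length-delimited string field 4
            // (softwareVersion).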
            switch (tag) {
              case 0:
                done = true;
                break;
              case 10: {
                input.readMessage(
                    getDatanodeIDFieldBuilder().getBuilder(),
                    extensionRegistry);
                bitField0_ |= 0x00000001;
                break;
              } // case 10
              case 18: {
                input.readMessage(
                    getStorageInfoFieldBuilder().getBuilder(),
                    extensionRegistry);
                bitField0_ |= 0x00000002;
                break;
              } // case 18
              case 26: {
                input.readMessage(
                    getKeysFieldBuilder().getBuilder(),
                    extensionRegistry);
                bitField0_ |= 0x00000004;
                break;
              } // case 26
              case 34: {
                softwareVersion_ = input.readBytes();
                bitField0_ |= 0x00000008;
                break;
              } // case 34
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
      private int bitField0_;

      private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto datanodeID_;
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder> datanodeIDBuilder_;
      /**
       * <pre>
       * Datanode information
       * </pre>
       *
       * <code>required .hadoop.hdfs.DatanodeIDProto datanodeID = 1;</code>
       * @return Whether the datanodeID field is set.
       */
      public boolean hasDatanodeID() {
        return ((bitField0_ & 0x00000001) != 0);
      }
      /**
       * <pre>
       * Datanode information
       * </pre>
       *
       * <code>required .hadoop.hdfs.DatanodeIDProto datanodeID = 1;</code>
       * @return The datanodeID.
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto getDatanodeID() {
        if (datanodeIDBuilder_ == null) {
          return datanodeID_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.getDefaultInstance() : datanodeID_;
        } else {
          return datanodeIDBuilder_.getMessage();
        }
      }
      /**
       * <pre>
       * Datanode information
       * </pre>
       *
       * <code>required .hadoop.hdfs.DatanodeIDProto datanodeID = 1;</code>
       */
      public Builder setDatanodeID(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto value) {
        if (datanodeIDBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          datanodeID_ = value;
        } else {
          datanodeIDBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * <pre>
       * Datanode information
       * </pre>
       *
       * <code>required .hadoop.hdfs.DatanodeIDProto datanodeID = 1;</code>
       */
      public Builder setDatanodeID(
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder builderForValue) {
        if (datanodeIDBuilder_ == null) {
          datanodeID_ = builderForValue.build();
        } else {
          datanodeIDBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * <pre>
       * Datanode information
       * </pre>
       *
       * <code>required .hadoop.hdfs.DatanodeIDProto datanodeID = 1;</code>
       */
      public Builder mergeDatanodeID(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto value) {
        if (datanodeIDBuilder_ == null) {
          if (((bitField0_ & 0x00000001) != 0) &&
            datanodeID_ != null &&
            datanodeID_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.getDefaultInstance()) {
            getDatanodeIDBuilder().mergeFrom(value);
          } else {
            datanodeID_ = value;
          }
        } else {
          datanodeIDBuilder_.mergeFrom(value);
        }
        if (datanodeID_ != null) {
          bitField0_ |= 0x00000001;
          onChanged();
        }
        return this;
      }
      /**
       * <pre>
       * Datanode information
       * </pre>
       *
       * <code>required .hadoop.hdfs.DatanodeIDProto datanodeID = 1;</code>
       */
      public Builder clearDatanodeID() {
        bitField0_ = (bitField0_ & ~0x00000001);
        datanodeID_ = null;
        if (datanodeIDBuilder_ != null) {
          datanodeIDBuilder_.dispose();
          datanodeIDBuilder_ = null;
        }
        onChanged();
        return this;
      }
      /**
       * <pre>
       * Datanode information
       * </pre>
       *
       * <code>required .hadoop.hdfs.DatanodeIDProto datanodeID = 1;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder getDatanodeIDBuilder() {
        bitField0_ |= 0x00000001;
        onChanged();
        return getDatanodeIDFieldBuilder().getBuilder();
      }
      /**
       * <pre>
       * Datanode information
       * </pre>
       *
       * <code>required .hadoop.hdfs.DatanodeIDProto datanodeID = 1;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder getDatanodeIDOrBuilder() {
        if (datanodeIDBuilder_ != null) {
          return datanodeIDBuilder_.getMessageOrBuilder();
        } else {
          return datanodeID_ == null ?
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.getDefaultInstance() : datanodeID_;
        }
      }
      /**
       * <pre>
       * Datanode information
       * </pre>
       *
       * <code>required .hadoop.hdfs.DatanodeIDProto datanodeID = 1;</code>
       */
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder> 
          getDatanodeIDFieldBuilder() {
        if (datanodeIDBuilder_ == null) {
          datanodeIDBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder>(
                  getDatanodeID(),
                  getParentForChildren(),
                  isClean());
          datanodeID_ = null;
        }
        return datanodeIDBuilder_;
      }

      private org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto storageInfo_;
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProtoOrBuilder> storageInfoBuilder_;
      /**
       * <pre>
       * Node information
       * </pre>
       *
       * <code>required .hadoop.hdfs.StorageInfoProto storageInfo = 2;</code>
       * @return Whether the storageInfo field is set.
       */
      public boolean hasStorageInfo() {
        return ((bitField0_ & 0x00000002) != 0);
      }
      /**
       * <pre>
       * Node information
       * </pre>
       *
       * <code>required .hadoop.hdfs.StorageInfoProto storageInfo = 2;</code>
       * @return The storageInfo.
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto getStorageInfo() {
        if (storageInfoBuilder_ == null) {
          return storageInfo_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto.getDefaultInstance() : storageInfo_;
        } else {
          return storageInfoBuilder_.getMessage();
        }
      }
      /**
       * <pre>
       * Node information
       * </pre>
       *
       * <code>required .hadoop.hdfs.StorageInfoProto storageInfo = 2;</code>
       */
      public Builder setStorageInfo(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto value) {
        if (storageInfoBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          storageInfo_ = value;
        } else {
          storageInfoBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000002;
        onChanged();
        return this;
      }
      /**
       * <pre>
       * Node information
       * </pre>
       *
       * <code>required .hadoop.hdfs.StorageInfoProto storageInfo = 2;</code>
       */
      public Builder setStorageInfo(
          org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto.Builder builderForValue) {
        if (storageInfoBuilder_ == null) {
          storageInfo_ = builderForValue.build();
        } else {
          storageInfoBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000002;
        onChanged();
        return this;
      }
      /**
       * <pre>
       * Node information
       * </pre>
       *
       * <code>required .hadoop.hdfs.StorageInfoProto storageInfo = 2;</code>
       */
      public Builder mergeStorageInfo(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto value) {
        if (storageInfoBuilder_ == null) {
          if (((bitField0_ & 0x00000002) != 0) &&
            storageInfo_ != null &&
            storageInfo_ != org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto.getDefaultInstance()) {
            getStorageInfoBuilder().mergeFrom(value);
          } else {
            storageInfo_ = value;
          }
        } else {
          storageInfoBuilder_.mergeFrom(value);
        }
        if (storageInfo_ != null) {
          bitField0_ |= 0x00000002;
          onChanged();
        }
        return this;
      }
      /**
       * <pre>
       * Node information
       * </pre>
       *
       * <code>required .hadoop.hdfs.StorageInfoProto storageInfo = 2;</code>
       */
      public Builder clearStorageInfo() {
        bitField0_ = (bitField0_ & ~0x00000002);
        storageInfo_ = null;
        if (storageInfoBuilder_ != null) {
          storageInfoBuilder_.dispose();
          storageInfoBuilder_ = null;
        }
        onChanged();
        return this;
      }
      /**
       * <pre>
       * Node information
       * </pre>
       *
       * <code>required .hadoop.hdfs.StorageInfoProto storageInfo = 2;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto.Builder getStorageInfoBuilder() {
        bitField0_ |= 0x00000002;
        onChanged();
        return getStorageInfoFieldBuilder().getBuilder();
      }
      /**
       * <pre>
       * Node information
       * </pre>
       *
       * <code>required .hadoop.hdfs.StorageInfoProto storageInfo = 2;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProtoOrBuilder getStorageInfoOrBuilder() {
        if (storageInfoBuilder_ != null) {
          return storageInfoBuilder_.getMessageOrBuilder();
        } else {
          return storageInfo_ == null ?
              org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto.getDefaultInstance() : storageInfo_;
        }
      }
      /**
       * <pre>
       * Node information
       * </pre>
       *
       * <code>required .hadoop.hdfs.StorageInfoProto storageInfo = 2;</code>
       */
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProtoOrBuilder> 
          getStorageInfoFieldBuilder() {
        if (storageInfoBuilder_ == null) {
          storageInfoBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
              org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProtoOrBuilder>(
                  getStorageInfo(),
                  getParentForChildren(),
                  isClean());
          storageInfo_ = null;
        }
        return storageInfoBuilder_;
      }

      private org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto keys_;
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProtoOrBuilder> keysBuilder_;
      /**
       * <pre>
       * Block keys
       * </pre>
       *
       * <code>required .hadoop.hdfs.ExportedBlockKeysProto keys = 3;</code>
       * @return Whether the keys field is set.
       */
      public boolean hasKeys() {
        return ((bitField0_ & 0x00000004) != 0);
      }
      /**
       * <pre>
       * Block keys
       * </pre>
       *
       * <code>required .hadoop.hdfs.ExportedBlockKeysProto keys = 3;</code>
       * @return The keys.
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto getKeys() {
        if (keysBuilder_ == null) {
          return keys_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto.getDefaultInstance() : keys_;
        } else {
          return keysBuilder_.getMessage();
        }
      }
      /**
       * <pre>
       * Block keys
       * </pre>
       *
       * <code>required .hadoop.hdfs.ExportedBlockKeysProto keys = 3;</code>
       */
      public Builder setKeys(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto value) {
        if (keysBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          keys_ = value;
        } else {
          keysBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000004;
        onChanged();
        return this;
      }
      /**
       * <pre>
       * Block keys
       * </pre>
       *
       * <code>required .hadoop.hdfs.ExportedBlockKeysProto keys = 3;</code>
       */
      public Builder setKeys(
          org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto.Builder builderForValue) {
        if (keysBuilder_ == null) {
          keys_ = builderForValue.build();
        } else {
          keysBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000004;
        onChanged();
        return this;
      }
      /**
       * <pre>
       * Block keys
       * </pre>
       *
       * <code>required .hadoop.hdfs.ExportedBlockKeysProto keys = 3;</code>
       */
      public Builder mergeKeys(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto value) {
        if (keysBuilder_ == null) {
          if (((bitField0_ & 0x00000004) != 0) &&
            keys_ != null &&
            keys_ != org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto.getDefaultInstance()) {
            getKeysBuilder().mergeFrom(value);
          } else {
            keys_ = value;
          }
        } else {
          keysBuilder_.mergeFrom(value);
        }
        if (keys_ != null) {
          bitField0_ |= 0x00000004;
          onChanged();
        }
        return this;
      }
      /**
       * <pre>
       * Block keys
       * </pre>
       *
       * <code>required .hadoop.hdfs.ExportedBlockKeysProto keys = 3;</code>
       */
      public Builder clearKeys() {
        bitField0_ = (bitField0_ & ~0x00000004);
        keys_ = null;
        if (keysBuilder_ != null) {
          keysBuilder_.dispose();
          keysBuilder_ = null;
        }
        onChanged();
        return this;
      }
      /**
       * <pre>
       * Block keys
       * </pre>
       *
       * <code>required .hadoop.hdfs.ExportedBlockKeysProto keys = 3;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto.Builder getKeysBuilder() {
        bitField0_ |= 0x00000004;
        onChanged();
        return getKeysFieldBuilder().getBuilder();
      }
      /**
       * <pre>
       * Block keys
       * </pre>
       *
       * <code>required .hadoop.hdfs.ExportedBlockKeysProto keys = 3;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProtoOrBuilder getKeysOrBuilder() {
        if (keysBuilder_ != null) {
          return keysBuilder_.getMessageOrBuilder();
        } else {
          return keys_ == null ?
              org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto.getDefaultInstance() : keys_;
        }
      }
      /**
       * <pre>
       * Block keys
       * </pre>
       *
       * <code>required .hadoop.hdfs.ExportedBlockKeysProto keys = 3;</code>
       */
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProtoOrBuilder> 
          getKeysFieldBuilder() {
        if (keysBuilder_ == null) {
          keysBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
              org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProtoOrBuilder>(
                  getKeys(),
                  getParentForChildren(),
                  isClean());
          keys_ = null;
        }
        return keysBuilder_;
      }

      private java.lang.Object softwareVersion_ = "";
      /**
       * <pre>
       * Software version of the DN, e.g. "2.0.0"
       * </pre>
       *
       * <code>required string softwareVersion = 4;</code>
       * @return Whether the softwareVersion field is set.
       */
      public boolean hasSoftwareVersion() {
        return ((bitField0_ & 0x00000008) != 0);
      }
      /**
       * <pre>
       * Software version of the DN, e.g. "2.0.0"
       * </pre>
       *
       * <code>required string softwareVersion = 4;</code>
       * @return The softwareVersion.
       */
      public java.lang.String getSoftwareVersion() {
        java.lang.Object ref = softwareVersion_;
        if (!(ref instanceof java.lang.String)) {
          org.apache.hadoop.thirdparty.protobuf.ByteString bs =
              (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
          java.lang.String s = bs.toStringUtf8();
          if (bs.isValidUtf8()) {
            softwareVersion_ = s;
          }
          return s;
        } else {
          return (java.lang.String) ref;
        }
      }
      /**
       * <pre>
       * Software version of the DN, e.g. "2.0.0"
       * </pre>
       *
       * <code>required string softwareVersion = 4;</code>
       * @return The bytes for softwareVersion.
       */
      public org.apache.hadoop.thirdparty.protobuf.ByteString
          getSoftwareVersionBytes() {
        java.lang.Object ref = softwareVersion_;
        if (ref instanceof String) {
          org.apache.hadoop.thirdparty.protobuf.ByteString b = 
              org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                  (java.lang.String) ref);
          softwareVersion_ = b;
          return b;
        } else {
          return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        }
      }
      /**
       * <pre>
       * Software version of the DN, e.g. "2.0.0"
       * </pre>
       *
       * <code>required string softwareVersion = 4;</code>
       * @param value The softwareVersion to set.
       * @return This builder for chaining.
       */
      public Builder setSoftwareVersion(
          java.lang.String value) {
        if (value == null) { throw new NullPointerException(); }
        softwareVersion_ = value;
        bitField0_ |= 0x00000008;
        onChanged();
        return this;
      }
      /**
       * <pre>
       * Software version of the DN, e.g. "2.0.0"
       * </pre>
       *
       * <code>required string softwareVersion = 4;</code>
       * @return This builder for chaining.
       */
      public Builder clearSoftwareVersion() {
        softwareVersion_ = getDefaultInstance().getSoftwareVersion();
        bitField0_ = (bitField0_ & ~0x00000008);
        onChanged();
        return this;
      }
      /**
       * <pre>
       * Software version of the DN, e.g. "2.0.0"
       * </pre>
       *
       * <code>required string softwareVersion = 4;</code>
       * @param value The bytes for softwareVersion to set.
       * @return This builder for chaining.
       */
      public Builder setSoftwareVersionBytes(
          org.apache.hadoop.thirdparty.protobuf.ByteString value) {
        if (value == null) { throw new NullPointerException(); }
        softwareVersion_ = value;
        bitField0_ |= 0x00000008;
        onChanged();
        return this;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.datanode.DatanodeRegistrationProto)
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.datanode.DatanodeRegistrationProto)
    private static final org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto();
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<DatanodeRegistrationProto>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<DatanodeRegistrationProto>() {
      @java.lang.Override
      public DatanodeRegistrationProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<DatanodeRegistrationProto> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<DatanodeRegistrationProto> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }

  public interface DatanodeCommandProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.datanode.DatanodeCommandProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

    /**
     * <pre>
     * Type of the command
     * </pre>
     *
     * <code>required .hadoop.hdfs.datanode.DatanodeCommandProto.Type cmdType = 1;</code>
     * @return Whether the cmdType field is set.
     */
    boolean hasCmdType();
    /**
     * <pre>
     * Type of the command
     * </pre>
     *
     * <code>required .hadoop.hdfs.datanode.DatanodeCommandProto.Type cmdType = 1;</code>
     * @return The cmdType.
     */
    org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Type getCmdType();

    /**
     * <pre>
     * One of the following commands is available when the corresponding
     * cmdType is set
     * </pre>
     *
     * <code>optional .hadoop.hdfs.datanode.BalancerBandwidthCommandProto balancerCmd = 2;</code>
     * @return Whether the balancerCmd field is set.
     */
    boolean hasBalancerCmd();
    /**
     * <pre>
     * One of the following commands is available when the corresponding
     * cmdType is set
     * </pre>
     *
     * <code>optional .hadoop.hdfs.datanode.BalancerBandwidthCommandProto balancerCmd = 2;</code>
     * @return The balancerCmd.
     */
    org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto getBalancerCmd();
    /**
     * <pre>
     * One of the following commands is available when the corresponding
     * cmdType is set
     * </pre>
     *
     * <code>optional .hadoop.hdfs.datanode.BalancerBandwidthCommandProto balancerCmd = 2;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProtoOrBuilder getBalancerCmdOrBuilder();

    /**
     * <code>optional .hadoop.hdfs.datanode.BlockCommandProto blkCmd = 3;</code>
     * @return Whether the blkCmd field is set.
     */
    boolean hasBlkCmd();
    /**
     * <code>optional .hadoop.hdfs.datanode.BlockCommandProto blkCmd = 3;</code>
     * @return The blkCmd.
     */
    org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto getBlkCmd();
    /**
     * <code>optional .hadoop.hdfs.datanode.BlockCommandProto blkCmd = 3;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProtoOrBuilder getBlkCmdOrBuilder();

    /**
     * <code>optional .hadoop.hdfs.datanode.BlockRecoveryCommandProto recoveryCmd = 4;</code>
     * @return Whether the recoveryCmd field is set.
     */
    boolean hasRecoveryCmd();
    /**
     * <code>optional .hadoop.hdfs.datanode.BlockRecoveryCommandProto recoveryCmd = 4;</code>
     * @return The recoveryCmd.
     */
    org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto getRecoveryCmd();
    /**
     * <code>optional .hadoop.hdfs.datanode.BlockRecoveryCommandProto recoveryCmd = 4;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProtoOrBuilder getRecoveryCmdOrBuilder();

    /**
     * <code>optional .hadoop.hdfs.datanode.FinalizeCommandProto finalizeCmd = 5;</code>
     * @return Whether the finalizeCmd field is set.
     */
    boolean hasFinalizeCmd();
    /**
     * <code>optional .hadoop.hdfs.datanode.FinalizeCommandProto finalizeCmd = 5;</code>
     * @return The finalizeCmd.
     */
    org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto getFinalizeCmd();
    /**
     * <code>optional .hadoop.hdfs.datanode.FinalizeCommandProto finalizeCmd = 5;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProtoOrBuilder getFinalizeCmdOrBuilder();

    /**
     * <code>optional .hadoop.hdfs.datanode.KeyUpdateCommandProto keyUpdateCmd = 6;</code>
     * @return Whether the keyUpdateCmd field is set.
     */
    boolean hasKeyUpdateCmd();
    /**
     * <code>optional .hadoop.hdfs.datanode.KeyUpdateCommandProto keyUpdateCmd = 6;</code>
     * @return The keyUpdateCmd.
     */
    org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto getKeyUpdateCmd();
    /**
     * <code>optional .hadoop.hdfs.datanode.KeyUpdateCommandProto keyUpdateCmd = 6;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProtoOrBuilder getKeyUpdateCmdOrBuilder();

    /**
     * <code>optional .hadoop.hdfs.datanode.RegisterCommandProto registerCmd = 7;</code>
     * @return Whether the registerCmd field is set.
     */
    boolean hasRegisterCmd();
    /**
     * <code>optional .hadoop.hdfs.datanode.RegisterCommandProto registerCmd = 7;</code>
     * @return The registerCmd.
     */
    org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto getRegisterCmd();
    /**
     * <code>optional .hadoop.hdfs.datanode.RegisterCommandProto registerCmd = 7;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProtoOrBuilder getRegisterCmdOrBuilder();

    /**
     * <code>optional .hadoop.hdfs.datanode.BlockIdCommandProto blkIdCmd = 8;</code>
     * @return Whether the blkIdCmd field is set.
     */
    boolean hasBlkIdCmd();
    /**
     * <code>optional .hadoop.hdfs.datanode.BlockIdCommandProto blkIdCmd = 8;</code>
     * @return The blkIdCmd.
     */
    org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto getBlkIdCmd();
    /**
     * <code>optional .hadoop.hdfs.datanode.BlockIdCommandProto blkIdCmd = 8;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProtoOrBuilder getBlkIdCmdOrBuilder();

    /**
     * <code>optional .hadoop.hdfs.datanode.BlockECReconstructionCommandProto blkECReconstructionCmd = 9;</code>
     * @return Whether the blkECReconstructionCmd field is set.
     */
    boolean hasBlkECReconstructionCmd();
    /**
     * <code>optional .hadoop.hdfs.datanode.BlockECReconstructionCommandProto blkECReconstructionCmd = 9;</code>
     * @return The blkECReconstructionCmd.
     */
    org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockECReconstructionCommandProto getBlkECReconstructionCmd();
    /**
     * <code>optional .hadoop.hdfs.datanode.BlockECReconstructionCommandProto blkECReconstructionCmd = 9;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockECReconstructionCommandProtoOrBuilder getBlkECReconstructionCmdOrBuilder();
  }
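  // Illustrative sketch (not part of the generated output): consumers of this
  // message usually branch on cmdType and read only the matching optional
  // sub-command; "cmd" is an assumed, already-parsed instance.
  //
  //   DatanodeProtocolProtos.DatanodeCommandProto cmd = receiveCommand();  // hypothetical source
  //   if (cmd.getCmdType() == DatanodeProtocolProtos.DatanodeCommandProto.Type.BlockCommand
  //       && cmd.hasBlkCmd()) {
  //     DatanodeProtocolProtos.BlockCommandProto blk = cmd.getBlkCmd();
  //     // act on the block command
  //   }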
  /**
   * <pre>
   * Commands sent from the namenode to the datanodes
   * </pre>
   *
   * Protobuf type {@code hadoop.hdfs.datanode.DatanodeCommandProto}
   */
  public static final class DatanodeCommandProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.hdfs.datanode.DatanodeCommandProto)
      DatanodeCommandProtoOrBuilder {
    private static final long serialVersionUID = 0L;
    // Use DatanodeCommandProto.newBuilder() to construct.
    private DatanodeCommandProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private DatanodeCommandProto() {
      cmdType_ = 0;
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new DatanodeCommandProto();
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_DatanodeCommandProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_DatanodeCommandProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.class, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Builder.class);
    }

    /**
     * Protobuf enum {@code hadoop.hdfs.datanode.DatanodeCommandProto.Type}
     */
    public enum Type
        implements org.apache.hadoop.thirdparty.protobuf.ProtocolMessageEnum {
      /**
       * <code>BalancerBandwidthCommand = 0;</code>
       */
      BalancerBandwidthCommand(0),
      /**
       * <code>BlockCommand = 1;</code>
       */
      BlockCommand(1),
      /**
       * <code>BlockRecoveryCommand = 2;</code>
       */
      BlockRecoveryCommand(2),
      /**
       * <code>FinalizeCommand = 3;</code>
       */
      FinalizeCommand(3),
      /**
       * <code>KeyUpdateCommand = 4;</code>
       */
      KeyUpdateCommand(4),
      /**
       * <code>RegisterCommand = 5;</code>
       */
      RegisterCommand(5),
      /**
       * <code>UnusedUpgradeCommand = 6;</code>
       */
      UnusedUpgradeCommand(6),
      /**
       * <code>NullDatanodeCommand = 7;</code>
       */
      NullDatanodeCommand(7),
      /**
       * <code>BlockIdCommand = 8;</code>
       */
      BlockIdCommand(8),
      /**
       * <code>BlockECReconstructionCommand = 9;</code>
       */
      BlockECReconstructionCommand(9),
      ;

      /**
       * <code>BalancerBandwidthCommand = 0;</code>
       */
      public static final int BalancerBandwidthCommand_VALUE = 0;
      /**
       * <code>BlockCommand = 1;</code>
       */
      public static final int BlockCommand_VALUE = 1;
      /**
       * <code>BlockRecoveryCommand = 2;</code>
       */
      public static final int BlockRecoveryCommand_VALUE = 2;
      /**
       * <code>FinalizeCommand = 3;</code>
       */
      public static final int FinalizeCommand_VALUE = 3;
      /**
       * <code>KeyUpdateCommand = 4;</code>
       */
      public static final int KeyUpdateCommand_VALUE = 4;
      /**
       * <code>RegisterCommand = 5;</code>
       */
      public static final int RegisterCommand_VALUE = 5;
      /**
       * <code>UnusedUpgradeCommand = 6;</code>
       */
      public static final int UnusedUpgradeCommand_VALUE = 6;
      /**
       * <code>NullDatanodeCommand = 7;</code>
       */
      public static final int NullDatanodeCommand_VALUE = 7;
      /**
       * <code>BlockIdCommand = 8;</code>
       */
      public static final int BlockIdCommand_VALUE = 8;
      /**
       * <code>BlockECReconstructionCommand = 9;</code>
       */
      public static final int BlockECReconstructionCommand_VALUE = 9;


      public final int getNumber() {
        return value;
      }

      /**
       * @param value The numeric wire value of the corresponding enum entry.
       * @return The enum associated with the given numeric wire value.
       * @deprecated Use {@link #forNumber(int)} instead.
       */
      @java.lang.Deprecated
      public static Type valueOf(int value) {
        return forNumber(value);
      }

      /**
       * @param value The numeric wire value of the corresponding enum entry.
       * @return The enum associated with the given numeric wire value.
       */
      public static Type forNumber(int value) {
        switch (value) {
          case 0: return BalancerBandwidthCommand;
          case 1: return BlockCommand;
          case 2: return BlockRecoveryCommand;
          case 3: return FinalizeCommand;
          case 4: return KeyUpdateCommand;
          case 5: return RegisterCommand;
          case 6: return UnusedUpgradeCommand;
          case 7: return NullDatanodeCommand;
          case 8: return BlockIdCommand;
          case 9: return BlockECReconstructionCommand;
          default: return null;
        }
      }

      public static org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap<Type>
          internalGetValueMap() {
        return internalValueMap;
      }
      private static final org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap<
          Type> internalValueMap =
            new org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap<Type>() {
              public Type findValueByNumber(int number) {
                return Type.forNumber(number);
              }
            };

      public final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumValueDescriptor
          getValueDescriptor() {
        return getDescriptor().getValues().get(ordinal());
      }
      public final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumDescriptor
          getDescriptorForType() {
        return getDescriptor();
      }
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumDescriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.getDescriptor().getEnumTypes().get(0);
      }

      private static final Type[] VALUES = values();

      public static Type valueOf(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumValueDescriptor desc) {
        if (desc.getType() != getDescriptor()) {
          throw new java.lang.IllegalArgumentException(
            "EnumValueDescriptor is not for this type.");
        }
        return VALUES[desc.getIndex()];
      }

      private final int value;

      private Type(int value) {
        this.value = value;
      }

      // @@protoc_insertion_point(enum_scope:hadoop.hdfs.datanode.DatanodeCommandProto.Type)
    }
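    // Illustrative sketch (not part of the generated output): forNumber(int)
    // is the null-safe mapping from a raw wire value, returning null for
    // numbers this enum does not define, so callers should handle that case.
    //
    //   Type known = Type.forNumber(8);     // BlockIdCommand
    //   Type unknown = Type.forNumber(42);  // null, not an exception
    //   if (unknown == null) {
    //     // treat as an unrecognized command type
    //   }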

    private int bitField0_;
    public static final int CMDTYPE_FIELD_NUMBER = 1;
    private int cmdType_ = 0;
    /**
     * <pre>
     * Type of the command
     * </pre>
     *
     * <code>required .hadoop.hdfs.datanode.DatanodeCommandProto.Type cmdType = 1;</code>
     * @return Whether the cmdType field is set.
     */
    @java.lang.Override public boolean hasCmdType() {
      return ((bitField0_ & 0x00000001) != 0);
    }
    /**
     * <pre>
     * Type of the command
     * </pre>
     *
     * <code>required .hadoop.hdfs.datanode.DatanodeCommandProto.Type cmdType = 1;</code>
     * @return The cmdType.
     */
    @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Type getCmdType() {
      org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Type result = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Type.forNumber(cmdType_);
      return result == null ? org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Type.BalancerBandwidthCommand : result;
    }

    public static final int BALANCERCMD_FIELD_NUMBER = 2;
    private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto balancerCmd_;
    /**
     * <pre>
     * One of the following commands is available when the corresponding
     * cmdType is set
     * </pre>
     *
     * <code>optional .hadoop.hdfs.datanode.BalancerBandwidthCommandProto balancerCmd = 2;</code>
     * @return Whether the balancerCmd field is set.
     */
    @java.lang.Override
    public boolean hasBalancerCmd() {
      return ((bitField0_ & 0x00000002) != 0);
    }
    /**
     * <pre>
     * One of the following commands is available when the corresponding
     * cmdType is set
     * </pre>
     *
     * <code>optional .hadoop.hdfs.datanode.BalancerBandwidthCommandProto balancerCmd = 2;</code>
     * @return The balancerCmd.
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto getBalancerCmd() {
      return balancerCmd_ == null ? org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto.getDefaultInstance() : balancerCmd_;
    }
    /**
     * <pre>
     * One of the following commands is available when the corresponding
     * cmdType is set
     * </pre>
     *
     * <code>optional .hadoop.hdfs.datanode.BalancerBandwidthCommandProto balancerCmd = 2;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProtoOrBuilder getBalancerCmdOrBuilder() {
      return balancerCmd_ == null ? org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto.getDefaultInstance() : balancerCmd_;
    }

    public static final int BLKCMD_FIELD_NUMBER = 3;
    private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto blkCmd_;
    /**
     * <code>optional .hadoop.hdfs.datanode.BlockCommandProto blkCmd = 3;</code>
     * @return Whether the blkCmd field is set.
     */
    @java.lang.Override
    public boolean hasBlkCmd() {
      return ((bitField0_ & 0x00000004) != 0);
    }
    /**
     * <code>optional .hadoop.hdfs.datanode.BlockCommandProto blkCmd = 3;</code>
     * @return The blkCmd.
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto getBlkCmd() {
      return blkCmd_ == null ? org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.getDefaultInstance() : blkCmd_;
    }
    /**
     * <code>optional .hadoop.hdfs.datanode.BlockCommandProto blkCmd = 3;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProtoOrBuilder getBlkCmdOrBuilder() {
      return blkCmd_ == null ? org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.getDefaultInstance() : blkCmd_;
    }

    public static final int RECOVERYCMD_FIELD_NUMBER = 4;
    private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto recoveryCmd_;
    /**
     * <code>optional .hadoop.hdfs.datanode.BlockRecoveryCommandProto recoveryCmd = 4;</code>
     * @return Whether the recoveryCmd field is set.
     */
    @java.lang.Override
    public boolean hasRecoveryCmd() {
      return ((bitField0_ & 0x00000008) != 0);
    }
    /**
     * <code>optional .hadoop.hdfs.datanode.BlockRecoveryCommandProto recoveryCmd = 4;</code>
     * @return The recoveryCmd.
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto getRecoveryCmd() {
      return recoveryCmd_ == null ? org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto.getDefaultInstance() : recoveryCmd_;
    }
    /**
     * <code>optional .hadoop.hdfs.datanode.BlockRecoveryCommandProto recoveryCmd = 4;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProtoOrBuilder getRecoveryCmdOrBuilder() {
      return recoveryCmd_ == null ? org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto.getDefaultInstance() : recoveryCmd_;
    }

    public static final int FINALIZECMD_FIELD_NUMBER = 5;
    private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto finalizeCmd_;
    /**
     * <code>optional .hadoop.hdfs.datanode.FinalizeCommandProto finalizeCmd = 5;</code>
     * @return Whether the finalizeCmd field is set.
     */
    @java.lang.Override
    public boolean hasFinalizeCmd() {
      return ((bitField0_ & 0x00000010) != 0);
    }
    /**
     * <code>optional .hadoop.hdfs.datanode.FinalizeCommandProto finalizeCmd = 5;</code>
     * @return The finalizeCmd.
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto getFinalizeCmd() {
      return finalizeCmd_ == null ? org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto.getDefaultInstance() : finalizeCmd_;
    }
    /**
     * <code>optional .hadoop.hdfs.datanode.FinalizeCommandProto finalizeCmd = 5;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProtoOrBuilder getFinalizeCmdOrBuilder() {
      return finalizeCmd_ == null ? org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto.getDefaultInstance() : finalizeCmd_;
    }

    public static final int KEYUPDATECMD_FIELD_NUMBER = 6;
    private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto keyUpdateCmd_;
    /**
     * <code>optional .hadoop.hdfs.datanode.KeyUpdateCommandProto keyUpdateCmd = 6;</code>
     * @return Whether the keyUpdateCmd field is set.
     */
    @java.lang.Override
    public boolean hasKeyUpdateCmd() {
      return ((bitField0_ & 0x00000020) != 0);
    }
    /**
     * <code>optional .hadoop.hdfs.datanode.KeyUpdateCommandProto keyUpdateCmd = 6;</code>
     * @return The keyUpdateCmd.
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto getKeyUpdateCmd() {
      return keyUpdateCmd_ == null ? org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto.getDefaultInstance() : keyUpdateCmd_;
    }
    /**
     * <code>optional .hadoop.hdfs.datanode.KeyUpdateCommandProto keyUpdateCmd = 6;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProtoOrBuilder getKeyUpdateCmdOrBuilder() {
      return keyUpdateCmd_ == null ? org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto.getDefaultInstance() : keyUpdateCmd_;
    }

    public static final int REGISTERCMD_FIELD_NUMBER = 7;
    private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto registerCmd_;
    /**
     * <code>optional .hadoop.hdfs.datanode.RegisterCommandProto registerCmd = 7;</code>
     * @return Whether the registerCmd field is set.
     */
    @java.lang.Override
    public boolean hasRegisterCmd() {
      return ((bitField0_ & 0x00000040) != 0);
    }
    /**
     * <code>optional .hadoop.hdfs.datanode.RegisterCommandProto registerCmd = 7;</code>
     * @return The registerCmd.
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto getRegisterCmd() {
      return registerCmd_ == null ? org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto.getDefaultInstance() : registerCmd_;
    }
    /**
     * <code>optional .hadoop.hdfs.datanode.RegisterCommandProto registerCmd = 7;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProtoOrBuilder getRegisterCmdOrBuilder() {
      return registerCmd_ == null ? org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto.getDefaultInstance() : registerCmd_;
    }

    public static final int BLKIDCMD_FIELD_NUMBER = 8;
    private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto blkIdCmd_;
    /**
     * <code>optional .hadoop.hdfs.datanode.BlockIdCommandProto blkIdCmd = 8;</code>
     * @return Whether the blkIdCmd field is set.
     */
    @java.lang.Override
    public boolean hasBlkIdCmd() {
      return ((bitField0_ & 0x00000080) != 0);
    }
    /**
     * <code>optional .hadoop.hdfs.datanode.BlockIdCommandProto blkIdCmd = 8;</code>
     * @return The blkIdCmd.
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto getBlkIdCmd() {
      return blkIdCmd_ == null ? org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto.getDefaultInstance() : blkIdCmd_;
    }
    /**
     * <code>optional .hadoop.hdfs.datanode.BlockIdCommandProto blkIdCmd = 8;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProtoOrBuilder getBlkIdCmdOrBuilder() {
      return blkIdCmd_ == null ? org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto.getDefaultInstance() : blkIdCmd_;
    }

    public static final int BLKECRECONSTRUCTIONCMD_FIELD_NUMBER = 9;
    private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockECReconstructionCommandProto blkECReconstructionCmd_;
    /**
     * <code>optional .hadoop.hdfs.datanode.BlockECReconstructionCommandProto blkECReconstructionCmd = 9;</code>
     * @return Whether the blkECReconstructionCmd field is set.
     */
    @java.lang.Override
    public boolean hasBlkECReconstructionCmd() {
      return ((bitField0_ & 0x00000100) != 0);
    }
    /**
     * <code>optional .hadoop.hdfs.datanode.BlockECReconstructionCommandProto blkECReconstructionCmd = 9;</code>
     * @return The blkECReconstructionCmd.
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockECReconstructionCommandProto getBlkECReconstructionCmd() {
      return blkECReconstructionCmd_ == null ? org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockECReconstructionCommandProto.getDefaultInstance() : blkECReconstructionCmd_;
    }
    /**
     * <code>optional .hadoop.hdfs.datanode.BlockECReconstructionCommandProto blkECReconstructionCmd = 9;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockECReconstructionCommandProtoOrBuilder getBlkECReconstructionCmdOrBuilder() {
      return blkECReconstructionCmd_ == null ? org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockECReconstructionCommandProto.getDefaultInstance() : blkECReconstructionCmd_;
    }

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      if (!hasCmdType()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (hasBalancerCmd()) {
        if (!getBalancerCmd().isInitialized()) {
          memoizedIsInitialized = 0;
          return false;
        }
      }
      if (hasBlkCmd()) {
        if (!getBlkCmd().isInitialized()) {
          memoizedIsInitialized = 0;
          return false;
        }
      }
      if (hasRecoveryCmd()) {
        if (!getRecoveryCmd().isInitialized()) {
          memoizedIsInitialized = 0;
          return false;
        }
      }
      if (hasFinalizeCmd()) {
        if (!getFinalizeCmd().isInitialized()) {
          memoizedIsInitialized = 0;
          return false;
        }
      }
      if (hasKeyUpdateCmd()) {
        if (!getKeyUpdateCmd().isInitialized()) {
          memoizedIsInitialized = 0;
          return false;
        }
      }
      if (hasBlkIdCmd()) {
        if (!getBlkIdCmd().isInitialized()) {
          memoizedIsInitialized = 0;
          return false;
        }
      }
      if (hasBlkECReconstructionCmd()) {
        if (!getBlkECReconstructionCmd().isInitialized()) {
          memoizedIsInitialized = 0;
          return false;
        }
      }
      memoizedIsInitialized = 1;
      return true;
    }
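    // Illustrative sketch (not part of the generated output): cmdType is a
    // required field, so a bare instance reports uninitialized; sub-commands
    // are only validated when present.
    //
    //   DatanodeCommandProto partial = DatanodeCommandProto.newBuilder().buildPartial();
    //   boolean ok = partial.isInitialized();  // false: cmdType is unset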

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      if (((bitField0_ & 0x00000001) != 0)) {
        output.writeEnum(1, cmdType_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        output.writeMessage(2, getBalancerCmd());
      }
      if (((bitField0_ & 0x00000004) != 0)) {
        output.writeMessage(3, getBlkCmd());
      }
      if (((bitField0_ & 0x00000008) != 0)) {
        output.writeMessage(4, getRecoveryCmd());
      }
      if (((bitField0_ & 0x00000010) != 0)) {
        output.writeMessage(5, getFinalizeCmd());
      }
      if (((bitField0_ & 0x00000020) != 0)) {
        output.writeMessage(6, getKeyUpdateCmd());
      }
      if (((bitField0_ & 0x00000040) != 0)) {
        output.writeMessage(7, getRegisterCmd());
      }
      if (((bitField0_ & 0x00000080) != 0)) {
        output.writeMessage(8, getBlkIdCmd());
      }
      if (((bitField0_ & 0x00000100) != 0)) {
        output.writeMessage(9, getBlkECReconstructionCmd());
      }
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeEnumSize(1, cmdType_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeMessageSize(2, getBalancerCmd());
      }
      if (((bitField0_ & 0x00000004) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeMessageSize(3, getBlkCmd());
      }
      if (((bitField0_ & 0x00000008) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeMessageSize(4, getRecoveryCmd());
      }
      if (((bitField0_ & 0x00000010) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeMessageSize(5, getFinalizeCmd());
      }
      if (((bitField0_ & 0x00000020) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeMessageSize(6, getKeyUpdateCmd());
      }
      if (((bitField0_ & 0x00000040) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeMessageSize(7, getRegisterCmd());
      }
      if (((bitField0_ & 0x00000080) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeMessageSize(8, getBlkIdCmd());
      }
      if (((bitField0_ & 0x00000100) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeMessageSize(9, getBlkECReconstructionCmd());
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }
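    // Illustrative sketch (not part of the generated output): the memoized size
    // matches the bytes actually emitted by writeTo()/toByteArray(). The
    // RegisterCommand case is convenient here because RegisterCommandProto
    // carries no required fields of its own.
    //
    //   DatanodeCommandProto cmd = DatanodeCommandProto.newBuilder()
    //       .setCmdType(DatanodeCommandProto.Type.RegisterCommand)
    //       .setRegisterCmd(RegisterCommandProto.getDefaultInstance())
    //       .build();
    //   byte[] bytes = cmd.toByteArray();
    //   boolean sameSize = bytes.length == cmd.getSerializedSize();  // true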

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto other = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto) obj;

      if (hasCmdType() != other.hasCmdType()) return false;
      if (hasCmdType()) {
        if (cmdType_ != other.cmdType_) return false;
      }
      if (hasBalancerCmd() != other.hasBalancerCmd()) return false;
      if (hasBalancerCmd()) {
        if (!getBalancerCmd()
            .equals(other.getBalancerCmd())) return false;
      }
      if (hasBlkCmd() != other.hasBlkCmd()) return false;
      if (hasBlkCmd()) {
        if (!getBlkCmd()
            .equals(other.getBlkCmd())) return false;
      }
      if (hasRecoveryCmd() != other.hasRecoveryCmd()) return false;
      if (hasRecoveryCmd()) {
        if (!getRecoveryCmd()
            .equals(other.getRecoveryCmd())) return false;
      }
      if (hasFinalizeCmd() != other.hasFinalizeCmd()) return false;
      if (hasFinalizeCmd()) {
        if (!getFinalizeCmd()
            .equals(other.getFinalizeCmd())) return false;
      }
      if (hasKeyUpdateCmd() != other.hasKeyUpdateCmd()) return false;
      if (hasKeyUpdateCmd()) {
        if (!getKeyUpdateCmd()
            .equals(other.getKeyUpdateCmd())) return false;
      }
      if (hasRegisterCmd() != other.hasRegisterCmd()) return false;
      if (hasRegisterCmd()) {
        if (!getRegisterCmd()
            .equals(other.getRegisterCmd())) return false;
      }
      if (hasBlkIdCmd() != other.hasBlkIdCmd()) return false;
      if (hasBlkIdCmd()) {
        if (!getBlkIdCmd()
            .equals(other.getBlkIdCmd())) return false;
      }
      if (hasBlkECReconstructionCmd() != other.hasBlkECReconstructionCmd()) return false;
      if (hasBlkECReconstructionCmd()) {
        if (!getBlkECReconstructionCmd()
            .equals(other.getBlkECReconstructionCmd())) return false;
      }
      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      if (hasCmdType()) {
        hash = (37 * hash) + CMDTYPE_FIELD_NUMBER;
        hash = (53 * hash) + cmdType_;
      }
      if (hasBalancerCmd()) {
        hash = (37 * hash) + BALANCERCMD_FIELD_NUMBER;
        hash = (53 * hash) + getBalancerCmd().hashCode();
      }
      if (hasBlkCmd()) {
        hash = (37 * hash) + BLKCMD_FIELD_NUMBER;
        hash = (53 * hash) + getBlkCmd().hashCode();
      }
      if (hasRecoveryCmd()) {
        hash = (37 * hash) + RECOVERYCMD_FIELD_NUMBER;
        hash = (53 * hash) + getRecoveryCmd().hashCode();
      }
      if (hasFinalizeCmd()) {
        hash = (37 * hash) + FINALIZECMD_FIELD_NUMBER;
        hash = (53 * hash) + getFinalizeCmd().hashCode();
      }
      if (hasKeyUpdateCmd()) {
        hash = (37 * hash) + KEYUPDATECMD_FIELD_NUMBER;
        hash = (53 * hash) + getKeyUpdateCmd().hashCode();
      }
      if (hasRegisterCmd()) {
        hash = (37 * hash) + REGISTERCMD_FIELD_NUMBER;
        hash = (53 * hash) + getRegisterCmd().hashCode();
      }
      if (hasBlkIdCmd()) {
        hash = (37 * hash) + BLKIDCMD_FIELD_NUMBER;
        hash = (53 * hash) + getBlkIdCmd().hashCode();
      }
      if (hasBlkECReconstructionCmd()) {
        hash = (37 * hash) + BLKECRECONSTRUCTIONCMD_FIELD_NUMBER;
        hash = (53 * hash) + getBlkECReconstructionCmd().hashCode();
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }
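    // Illustrative sketch (not part of the generated output): the delimited
    // variants pair with writeDelimitedTo(OutputStream) and length-prefix each
    // message, so several commands can share one stream; plain parseFrom
    // expects the input to contain exactly one message.
    //
    //   java.io.ByteArrayOutputStream out = new java.io.ByteArrayOutputStream();
    //   cmd.writeDelimitedTo(out);  // "cmd" is an assumed, fully built command
    //   DatanodeCommandProto roundTripped = DatanodeCommandProto.parseDelimitedFrom(
    //       new java.io.ByteArrayInputStream(out.toByteArray()));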

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * <pre>
     * Commands sent from the namenode to the datanodes
     * </pre>
     *
     * Protobuf type {@code hadoop.hdfs.datanode.DatanodeCommandProto}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.hdfs.datanode.DatanodeCommandProto)
        org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProtoOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_DatanodeCommandProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_DatanodeCommandProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.class, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      private void maybeForceBuilderInitialization() {
        if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
                .alwaysUseFieldBuilders) {
          getBalancerCmdFieldBuilder();
          getBlkCmdFieldBuilder();
          getRecoveryCmdFieldBuilder();
          getFinalizeCmdFieldBuilder();
          getKeyUpdateCmdFieldBuilder();
          getRegisterCmdFieldBuilder();
          getBlkIdCmdFieldBuilder();
          getBlkECReconstructionCmdFieldBuilder();
        }
      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        bitField0_ = 0;
        cmdType_ = 0;
        balancerCmd_ = null;
        if (balancerCmdBuilder_ != null) {
          balancerCmdBuilder_.dispose();
          balancerCmdBuilder_ = null;
        }
        blkCmd_ = null;
        if (blkCmdBuilder_ != null) {
          blkCmdBuilder_.dispose();
          blkCmdBuilder_ = null;
        }
        recoveryCmd_ = null;
        if (recoveryCmdBuilder_ != null) {
          recoveryCmdBuilder_.dispose();
          recoveryCmdBuilder_ = null;
        }
        finalizeCmd_ = null;
        if (finalizeCmdBuilder_ != null) {
          finalizeCmdBuilder_.dispose();
          finalizeCmdBuilder_ = null;
        }
        keyUpdateCmd_ = null;
        if (keyUpdateCmdBuilder_ != null) {
          keyUpdateCmdBuilder_.dispose();
          keyUpdateCmdBuilder_ = null;
        }
        registerCmd_ = null;
        if (registerCmdBuilder_ != null) {
          registerCmdBuilder_.dispose();
          registerCmdBuilder_ = null;
        }
        blkIdCmd_ = null;
        if (blkIdCmdBuilder_ != null) {
          blkIdCmdBuilder_.dispose();
          blkIdCmdBuilder_ = null;
        }
        blkECReconstructionCmd_ = null;
        if (blkECReconstructionCmdBuilder_ != null) {
          blkECReconstructionCmdBuilder_.dispose();
          blkECReconstructionCmdBuilder_ = null;
        }
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_DatanodeCommandProto_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto build() {
        org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }
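      // Illustrative sketch (not part of the generated output): build() enforces
      // the required cmdType, while buildPartial() below returns the incomplete
      // message instead of throwing.
      //
      //   try {
      //     DatanodeCommandProto.newBuilder().build();  // cmdType unset
      //   } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException expected) {
      //     // expected for a message with missing required fields
      //   }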

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto buildPartial() {
        org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto result = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto(this);
        if (bitField0_ != 0) { buildPartial0(result); }
        onBuilt();
        return result;
      }

      private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto result) {
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) != 0)) {
          result.cmdType_ = cmdType_;
          to_bitField0_ |= 0x00000001;
        }
        if (((from_bitField0_ & 0x00000002) != 0)) {
          result.balancerCmd_ = balancerCmdBuilder_ == null
              ? balancerCmd_
              : balancerCmdBuilder_.build();
          to_bitField0_ |= 0x00000002;
        }
        if (((from_bitField0_ & 0x00000004) != 0)) {
          result.blkCmd_ = blkCmdBuilder_ == null
              ? blkCmd_
              : blkCmdBuilder_.build();
          to_bitField0_ |= 0x00000004;
        }
        if (((from_bitField0_ & 0x00000008) != 0)) {
          result.recoveryCmd_ = recoveryCmdBuilder_ == null
              ? recoveryCmd_
              : recoveryCmdBuilder_.build();
          to_bitField0_ |= 0x00000008;
        }
        if (((from_bitField0_ & 0x00000010) != 0)) {
          result.finalizeCmd_ = finalizeCmdBuilder_ == null
              ? finalizeCmd_
              : finalizeCmdBuilder_.build();
          to_bitField0_ |= 0x00000010;
        }
        if (((from_bitField0_ & 0x00000020) != 0)) {
          result.keyUpdateCmd_ = keyUpdateCmdBuilder_ == null
              ? keyUpdateCmd_
              : keyUpdateCmdBuilder_.build();
          to_bitField0_ |= 0x00000020;
        }
        if (((from_bitField0_ & 0x00000040) != 0)) {
          result.registerCmd_ = registerCmdBuilder_ == null
              ? registerCmd_
              : registerCmdBuilder_.build();
          to_bitField0_ |= 0x00000040;
        }
        if (((from_bitField0_ & 0x00000080) != 0)) {
          result.blkIdCmd_ = blkIdCmdBuilder_ == null
              ? blkIdCmd_
              : blkIdCmdBuilder_.build();
          to_bitField0_ |= 0x00000080;
        }
        if (((from_bitField0_ & 0x00000100) != 0)) {
          result.blkECReconstructionCmd_ = blkECReconstructionCmdBuilder_ == null
              ? blkECReconstructionCmd_
              : blkECReconstructionCmdBuilder_.build();
          to_bitField0_ |= 0x00000100;
        }
        result.bitField0_ |= to_bitField0_;
      }

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto) {
          return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto other) {
        if (other == org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.getDefaultInstance()) return this;
        if (other.hasCmdType()) {
          setCmdType(other.getCmdType());
        }
        if (other.hasBalancerCmd()) {
          mergeBalancerCmd(other.getBalancerCmd());
        }
        if (other.hasBlkCmd()) {
          mergeBlkCmd(other.getBlkCmd());
        }
        if (other.hasRecoveryCmd()) {
          mergeRecoveryCmd(other.getRecoveryCmd());
        }
        if (other.hasFinalizeCmd()) {
          mergeFinalizeCmd(other.getFinalizeCmd());
        }
        if (other.hasKeyUpdateCmd()) {
          mergeKeyUpdateCmd(other.getKeyUpdateCmd());
        }
        if (other.hasRegisterCmd()) {
          mergeRegisterCmd(other.getRegisterCmd());
        }
        if (other.hasBlkIdCmd()) {
          mergeBlkIdCmd(other.getBlkIdCmd());
        }
        if (other.hasBlkECReconstructionCmd()) {
          mergeBlkECReconstructionCmd(other.getBlkECReconstructionCmd());
        }
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }
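      // Illustrative sketch (not part of the generated output): mergeFrom(other)
      // copies only fields that other has set; a set enum value overwrites this
      // builder's value, while set sub-commands are merged into any existing one.
      //
      //   DatanodeCommandProto.Builder b = DatanodeCommandProto.newBuilder()
      //       .setCmdType(DatanodeCommandProto.Type.FinalizeCommand);
      //   b.mergeFrom(DatanodeCommandProto.newBuilder()
      //       .setCmdType(DatanodeCommandProto.Type.RegisterCommand)
      //       .buildPartial());
      //   // b.getCmdType() is now RegisterCommand; unset fields are untouched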

      @java.lang.Override
      public final boolean isInitialized() {
        if (!hasCmdType()) {
          return false;
        }
        if (hasBalancerCmd()) {
          if (!getBalancerCmd().isInitialized()) {
            return false;
          }
        }
        if (hasBlkCmd()) {
          if (!getBlkCmd().isInitialized()) {
            return false;
          }
        }
        if (hasRecoveryCmd()) {
          if (!getRecoveryCmd().isInitialized()) {
            return false;
          }
        }
        if (hasFinalizeCmd()) {
          if (!getFinalizeCmd().isInitialized()) {
            return false;
          }
        }
        if (hasKeyUpdateCmd()) {
          if (!getKeyUpdateCmd().isInitialized()) {
            return false;
          }
        }
        if (hasBlkIdCmd()) {
          if (!getBlkIdCmd().isInitialized()) {
            return false;
          }
        }
        if (hasBlkECReconstructionCmd()) {
          if (!getBlkECReconstructionCmd().isInitialized()) {
            return false;
          }
        }
        return true;
      }

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              case 8: {
                int tmpRaw = input.readEnum();
                org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Type tmpValue =
                    org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Type.forNumber(tmpRaw);
                if (tmpValue == null) {
                  mergeUnknownVarintField(1, tmpRaw);
                } else {
                  cmdType_ = tmpRaw;
                  bitField0_ |= 0x00000001;
                }
                break;
              } // case 8
              case 18: {
                input.readMessage(
                    getBalancerCmdFieldBuilder().getBuilder(),
                    extensionRegistry);
                bitField0_ |= 0x00000002;
                break;
              } // case 18
              case 26: {
                input.readMessage(
                    getBlkCmdFieldBuilder().getBuilder(),
                    extensionRegistry);
                bitField0_ |= 0x00000004;
                break;
              } // case 26
              case 34: {
                input.readMessage(
                    getRecoveryCmdFieldBuilder().getBuilder(),
                    extensionRegistry);
                bitField0_ |= 0x00000008;
                break;
              } // case 34
              case 42: {
                input.readMessage(
                    getFinalizeCmdFieldBuilder().getBuilder(),
                    extensionRegistry);
                bitField0_ |= 0x00000010;
                break;
              } // case 42
              case 50: {
                input.readMessage(
                    getKeyUpdateCmdFieldBuilder().getBuilder(),
                    extensionRegistry);
                bitField0_ |= 0x00000020;
                break;
              } // case 50
              case 58: {
                input.readMessage(
                    getRegisterCmdFieldBuilder().getBuilder(),
                    extensionRegistry);
                bitField0_ |= 0x00000040;
                break;
              } // case 58
              case 66: {
                input.readMessage(
                    getBlkIdCmdFieldBuilder().getBuilder(),
                    extensionRegistry);
                bitField0_ |= 0x00000080;
                break;
              } // case 66
              case 74: {
                input.readMessage(
                    getBlkECReconstructionCmdFieldBuilder().getBuilder(),
                    extensionRegistry);
                bitField0_ |= 0x00000100;
                break;
              } // case 74
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
      private int bitField0_;

      private int cmdType_ = 0;
      /**
       * <pre>
       * Type of the command
       * </pre>
       *
       * <code>required .hadoop.hdfs.datanode.DatanodeCommandProto.Type cmdType = 1;</code>
       * @return Whether the cmdType field is set.
       */
      @java.lang.Override public boolean hasCmdType() {
        return ((bitField0_ & 0x00000001) != 0);
      }
      /**
       * <pre>
       * Type of the command
       * </pre>
       *
       * <code>required .hadoop.hdfs.datanode.DatanodeCommandProto.Type cmdType = 1;</code>
       * @return The cmdType.
       */
      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Type getCmdType() {
        org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Type result = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Type.forNumber(cmdType_);
        return result == null ? org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Type.BalancerBandwidthCommand : result;
      }
      /**
       * <pre>
       * Type of the command
       * </pre>
       *
       * <code>required .hadoop.hdfs.datanode.DatanodeCommandProto.Type cmdType = 1;</code>
       * @param value The cmdType to set.
       * @return This builder for chaining.
       */
      public Builder setCmdType(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Type value) {
        if (value == null) {
          throw new NullPointerException();
        }
        bitField0_ |= 0x00000001;
        cmdType_ = value.getNumber();
        onChanged();
        return this;
      }
      /**
       * <pre>
       * Type of the command
       * </pre>
       *
       * <code>required .hadoop.hdfs.datanode.DatanodeCommandProto.Type cmdType = 1;</code>
       * @return This builder for chaining.
       */
      public Builder clearCmdType() {
        bitField0_ = (bitField0_ & ~0x00000001);
        cmdType_ = 0;
        onChanged();
        return this;
      }
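      // Illustrative sketch (not part of the generated output): setCmdType and
      // clearCmdType drive the presence bit that hasCmdType() reports.
      //
      //   DatanodeCommandProto.Builder b = DatanodeCommandProto.newBuilder();
      //   b.setCmdType(DatanodeCommandProto.Type.FinalizeCommand);
      //   boolean present = b.hasCmdType();  // true
      //   b.clearCmdType();
      //   boolean cleared = b.hasCmdType();  // false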

      private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto balancerCmd_;
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProtoOrBuilder> balancerCmdBuilder_;
      /**
       * <pre>
       * One of the following commands is available when the corresponding
       * cmdType is set
       * </pre>
       *
       * <code>optional .hadoop.hdfs.datanode.BalancerBandwidthCommandProto balancerCmd = 2;</code>
       * @return Whether the balancerCmd field is set.
       */
      public boolean hasBalancerCmd() {
        return ((bitField0_ & 0x00000002) != 0);
      }
      /**
       * <pre>
       * One of the following commands is available when the corresponding
       * cmdType is set
       * </pre>
       *
       * <code>optional .hadoop.hdfs.datanode.BalancerBandwidthCommandProto balancerCmd = 2;</code>
       * @return The balancerCmd.
       */
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto getBalancerCmd() {
        if (balancerCmdBuilder_ == null) {
          return balancerCmd_ == null ? org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto.getDefaultInstance() : balancerCmd_;
        } else {
          return balancerCmdBuilder_.getMessage();
        }
      }
      /**
       * <pre>
       * One of the following commands is available when the corresponding
       * cmdType is set
       * </pre>
       *
       * <code>optional .hadoop.hdfs.datanode.BalancerBandwidthCommandProto balancerCmd = 2;</code>
       */
      public Builder setBalancerCmd(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto value) {
        if (balancerCmdBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          balancerCmd_ = value;
        } else {
          balancerCmdBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000002;
        onChanged();
        return this;
      }
      /**
       * <pre>
       * One of the following commands is available when the corresponding
       * cmdType is set
       * </pre>
       *
       * <code>optional .hadoop.hdfs.datanode.BalancerBandwidthCommandProto balancerCmd = 2;</code>
       */
      public Builder setBalancerCmd(
          org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto.Builder builderForValue) {
        if (balancerCmdBuilder_ == null) {
          balancerCmd_ = builderForValue.build();
        } else {
          balancerCmdBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000002;
        onChanged();
        return this;
      }
      /**
       * <pre>
       * One of the following commands is available when the corresponding
       * cmdType is set
       * </pre>
       *
       * <code>optional .hadoop.hdfs.datanode.BalancerBandwidthCommandProto balancerCmd = 2;</code>
       */
      public Builder mergeBalancerCmd(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto value) {
        if (balancerCmdBuilder_ == null) {
          if (((bitField0_ & 0x00000002) != 0) &&
            balancerCmd_ != null &&
            balancerCmd_ != org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto.getDefaultInstance()) {
            getBalancerCmdBuilder().mergeFrom(value);
          } else {
            balancerCmd_ = value;
          }
        } else {
          balancerCmdBuilder_.mergeFrom(value);
        }
        if (balancerCmd_ != null) {
          bitField0_ |= 0x00000002;
          onChanged();
        }
        return this;
      }
      /**
       * <pre>
       * One of the following commands is available when the corresponding
       * cmdType is set
       * </pre>
       *
       * <code>optional .hadoop.hdfs.datanode.BalancerBandwidthCommandProto balancerCmd = 2;</code>
       */
      public Builder clearBalancerCmd() {
        bitField0_ = (bitField0_ & ~0x00000002);
        balancerCmd_ = null;
        if (balancerCmdBuilder_ != null) {
          balancerCmdBuilder_.dispose();
          balancerCmdBuilder_ = null;
        }
        onChanged();
        return this;
      }
      /**
       * <pre>
       * One of the following commands is available when the corresponding
       * cmdType is set
       * </pre>
       *
       * <code>optional .hadoop.hdfs.datanode.BalancerBandwidthCommandProto balancerCmd = 2;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto.Builder getBalancerCmdBuilder() {
        bitField0_ |= 0x00000002;
        onChanged();
        return getBalancerCmdFieldBuilder().getBuilder();
      }
      /**
       * <pre>
       * One of the following commands is available when the corresponding
       * cmdType is set
       * </pre>
       *
       * <code>optional .hadoop.hdfs.datanode.BalancerBandwidthCommandProto balancerCmd = 2;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProtoOrBuilder getBalancerCmdOrBuilder() {
        if (balancerCmdBuilder_ != null) {
          return balancerCmdBuilder_.getMessageOrBuilder();
        } else {
          return balancerCmd_ == null ?
              org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto.getDefaultInstance() : balancerCmd_;
        }
      }
      /**
       * <pre>
       * One of the following commands is available when the corresponding
       * cmdType is set
       * </pre>
       *
       * <code>optional .hadoop.hdfs.datanode.BalancerBandwidthCommandProto balancerCmd = 2;</code>
       */
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProtoOrBuilder> 
          getBalancerCmdFieldBuilder() {
        if (balancerCmdBuilder_ == null) {
          balancerCmdBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
              org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProtoOrBuilder>(
                  getBalancerCmd(),
                  getParentForChildren(),
                  isClean());
          balancerCmd_ = null;
        }
        return balancerCmdBuilder_;
      }

      private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto blkCmd_;
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProtoOrBuilder> blkCmdBuilder_;
      /**
       * <code>optional .hadoop.hdfs.datanode.BlockCommandProto blkCmd = 3;</code>
       * @return Whether the blkCmd field is set.
       */
      public boolean hasBlkCmd() {
        return ((bitField0_ & 0x00000004) != 0);
      }
      /**
       * <code>optional .hadoop.hdfs.datanode.BlockCommandProto blkCmd = 3;</code>
       * @return The blkCmd.
       */
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto getBlkCmd() {
        if (blkCmdBuilder_ == null) {
          return blkCmd_ == null ? org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.getDefaultInstance() : blkCmd_;
        } else {
          return blkCmdBuilder_.getMessage();
        }
      }
      /**
       * <code>optional .hadoop.hdfs.datanode.BlockCommandProto blkCmd = 3;</code>
       */
      public Builder setBlkCmd(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto value) {
        if (blkCmdBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          blkCmd_ = value;
        } else {
          blkCmdBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000004;
        onChanged();
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.datanode.BlockCommandProto blkCmd = 3;</code>
       */
      public Builder setBlkCmd(
          org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.Builder builderForValue) {
        if (blkCmdBuilder_ == null) {
          blkCmd_ = builderForValue.build();
        } else {
          blkCmdBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000004;
        onChanged();
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.datanode.BlockCommandProto blkCmd = 3;</code>
       */
      public Builder mergeBlkCmd(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto value) {
        if (blkCmdBuilder_ == null) {
          if (((bitField0_ & 0x00000004) != 0) &&
            blkCmd_ != null &&
            blkCmd_ != org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.getDefaultInstance()) {
            getBlkCmdBuilder().mergeFrom(value);
          } else {
            blkCmd_ = value;
          }
        } else {
          blkCmdBuilder_.mergeFrom(value);
        }
        if (blkCmd_ != null) {
          bitField0_ |= 0x00000004;
          onChanged();
        }
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.datanode.BlockCommandProto blkCmd = 3;</code>
       */
      public Builder clearBlkCmd() {
        bitField0_ = (bitField0_ & ~0x00000004);
        blkCmd_ = null;
        if (blkCmdBuilder_ != null) {
          blkCmdBuilder_.dispose();
          blkCmdBuilder_ = null;
        }
        onChanged();
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.datanode.BlockCommandProto blkCmd = 3;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.Builder getBlkCmdBuilder() {
        bitField0_ |= 0x00000004;
        onChanged();
        return getBlkCmdFieldBuilder().getBuilder();
      }
      /**
       * <code>optional .hadoop.hdfs.datanode.BlockCommandProto blkCmd = 3;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProtoOrBuilder getBlkCmdOrBuilder() {
        if (blkCmdBuilder_ != null) {
          return blkCmdBuilder_.getMessageOrBuilder();
        } else {
          return blkCmd_ == null ?
              org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.getDefaultInstance() : blkCmd_;
        }
      }
      /**
       * <code>optional .hadoop.hdfs.datanode.BlockCommandProto blkCmd = 3;</code>
       */
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProtoOrBuilder> 
          getBlkCmdFieldBuilder() {
        if (blkCmdBuilder_ == null) {
          blkCmdBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
              org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProtoOrBuilder>(
                  getBlkCmd(),
                  getParentForChildren(),
                  isClean());
          blkCmd_ = null;
        }
        return blkCmdBuilder_;
      }

      private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto recoveryCmd_;
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProtoOrBuilder> recoveryCmdBuilder_;
      /**
       * <code>optional .hadoop.hdfs.datanode.BlockRecoveryCommandProto recoveryCmd = 4;</code>
       * @return Whether the recoveryCmd field is set.
       */
      public boolean hasRecoveryCmd() {
        return ((bitField0_ & 0x00000008) != 0);
      }
      /**
       * <code>optional .hadoop.hdfs.datanode.BlockRecoveryCommandProto recoveryCmd = 4;</code>
       * @return The recoveryCmd.
       */
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto getRecoveryCmd() {
        if (recoveryCmdBuilder_ == null) {
          return recoveryCmd_ == null ? org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto.getDefaultInstance() : recoveryCmd_;
        } else {
          return recoveryCmdBuilder_.getMessage();
        }
      }
      /**
       * <code>optional .hadoop.hdfs.datanode.BlockRecoveryCommandProto recoveryCmd = 4;</code>
       */
      public Builder setRecoveryCmd(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto value) {
        if (recoveryCmdBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          recoveryCmd_ = value;
        } else {
          recoveryCmdBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000008;
        onChanged();
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.datanode.BlockRecoveryCommandProto recoveryCmd = 4;</code>
       */
      public Builder setRecoveryCmd(
          org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto.Builder builderForValue) {
        if (recoveryCmdBuilder_ == null) {
          recoveryCmd_ = builderForValue.build();
        } else {
          recoveryCmdBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000008;
        onChanged();
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.datanode.BlockRecoveryCommandProto recoveryCmd = 4;</code>
       */
      public Builder mergeRecoveryCmd(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto value) {
        if (recoveryCmdBuilder_ == null) {
          if (((bitField0_ & 0x00000008) != 0) &&
            recoveryCmd_ != null &&
            recoveryCmd_ != org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto.getDefaultInstance()) {
            getRecoveryCmdBuilder().mergeFrom(value);
          } else {
            recoveryCmd_ = value;
          }
        } else {
          recoveryCmdBuilder_.mergeFrom(value);
        }
        if (recoveryCmd_ != null) {
          bitField0_ |= 0x00000008;
          onChanged();
        }
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.datanode.BlockRecoveryCommandProto recoveryCmd = 4;</code>
       */
      public Builder clearRecoveryCmd() {
        bitField0_ = (bitField0_ & ~0x00000008);
        recoveryCmd_ = null;
        if (recoveryCmdBuilder_ != null) {
          recoveryCmdBuilder_.dispose();
          recoveryCmdBuilder_ = null;
        }
        onChanged();
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.datanode.BlockRecoveryCommandProto recoveryCmd = 4;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto.Builder getRecoveryCmdBuilder() {
        bitField0_ |= 0x00000008;
        onChanged();
        return getRecoveryCmdFieldBuilder().getBuilder();
      }
      /**
       * <code>optional .hadoop.hdfs.datanode.BlockRecoveryCommandProto recoveryCmd = 4;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProtoOrBuilder getRecoveryCmdOrBuilder() {
        if (recoveryCmdBuilder_ != null) {
          return recoveryCmdBuilder_.getMessageOrBuilder();
        } else {
          return recoveryCmd_ == null ?
              org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto.getDefaultInstance() : recoveryCmd_;
        }
      }
      /**
       * <code>optional .hadoop.hdfs.datanode.BlockRecoveryCommandProto recoveryCmd = 4;</code>
       */
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProtoOrBuilder> 
          getRecoveryCmdFieldBuilder() {
        if (recoveryCmdBuilder_ == null) {
          recoveryCmdBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
              org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProtoOrBuilder>(
                  getRecoveryCmd(),
                  getParentForChildren(),
                  isClean());
          recoveryCmd_ = null;
        }
        return recoveryCmdBuilder_;
      }

      private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto finalizeCmd_;
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProtoOrBuilder> finalizeCmdBuilder_;
      /**
       * <code>optional .hadoop.hdfs.datanode.FinalizeCommandProto finalizeCmd = 5;</code>
       * @return Whether the finalizeCmd field is set.
       */
      public boolean hasFinalizeCmd() {
        return ((bitField0_ & 0x00000010) != 0);
      }
      /**
       * <code>optional .hadoop.hdfs.datanode.FinalizeCommandProto finalizeCmd = 5;</code>
       * @return The finalizeCmd.
       */
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto getFinalizeCmd() {
        if (finalizeCmdBuilder_ == null) {
          return finalizeCmd_ == null ? org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto.getDefaultInstance() : finalizeCmd_;
        } else {
          return finalizeCmdBuilder_.getMessage();
        }
      }
      /**
       * <code>optional .hadoop.hdfs.datanode.FinalizeCommandProto finalizeCmd = 5;</code>
       */
      public Builder setFinalizeCmd(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto value) {
        if (finalizeCmdBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          finalizeCmd_ = value;
        } else {
          finalizeCmdBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000010;
        onChanged();
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.datanode.FinalizeCommandProto finalizeCmd = 5;</code>
       */
      public Builder setFinalizeCmd(
          org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto.Builder builderForValue) {
        if (finalizeCmdBuilder_ == null) {
          finalizeCmd_ = builderForValue.build();
        } else {
          finalizeCmdBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000010;
        onChanged();
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.datanode.FinalizeCommandProto finalizeCmd = 5;</code>
       */
      public Builder mergeFinalizeCmd(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto value) {
        if (finalizeCmdBuilder_ == null) {
          if (((bitField0_ & 0x00000010) != 0) &&
            finalizeCmd_ != null &&
            finalizeCmd_ != org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto.getDefaultInstance()) {
            getFinalizeCmdBuilder().mergeFrom(value);
          } else {
            finalizeCmd_ = value;
          }
        } else {
          finalizeCmdBuilder_.mergeFrom(value);
        }
        if (finalizeCmd_ != null) {
          bitField0_ |= 0x00000010;
          onChanged();
        }
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.datanode.FinalizeCommandProto finalizeCmd = 5;</code>
       */
      public Builder clearFinalizeCmd() {
        bitField0_ = (bitField0_ & ~0x00000010);
        finalizeCmd_ = null;
        if (finalizeCmdBuilder_ != null) {
          finalizeCmdBuilder_.dispose();
          finalizeCmdBuilder_ = null;
        }
        onChanged();
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.datanode.FinalizeCommandProto finalizeCmd = 5;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto.Builder getFinalizeCmdBuilder() {
        bitField0_ |= 0x00000010;
        onChanged();
        return getFinalizeCmdFieldBuilder().getBuilder();
      }
      /**
       * <code>optional .hadoop.hdfs.datanode.FinalizeCommandProto finalizeCmd = 5;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProtoOrBuilder getFinalizeCmdOrBuilder() {
        if (finalizeCmdBuilder_ != null) {
          return finalizeCmdBuilder_.getMessageOrBuilder();
        } else {
          return finalizeCmd_ == null ?
              org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto.getDefaultInstance() : finalizeCmd_;
        }
      }
      /**
       * <code>optional .hadoop.hdfs.datanode.FinalizeCommandProto finalizeCmd = 5;</code>
       */
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProtoOrBuilder> 
          getFinalizeCmdFieldBuilder() {
        if (finalizeCmdBuilder_ == null) {
          finalizeCmdBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
              org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProtoOrBuilder>(
                  getFinalizeCmd(),
                  getParentForChildren(),
                  isClean());
          finalizeCmd_ = null;
        }
        return finalizeCmdBuilder_;
      }

      private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto keyUpdateCmd_;
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProtoOrBuilder> keyUpdateCmdBuilder_;
      /**
       * <code>optional .hadoop.hdfs.datanode.KeyUpdateCommandProto keyUpdateCmd = 6;</code>
       * @return Whether the keyUpdateCmd field is set.
       */
      public boolean hasKeyUpdateCmd() {
        return ((bitField0_ & 0x00000020) != 0);
      }
      /**
       * <code>optional .hadoop.hdfs.datanode.KeyUpdateCommandProto keyUpdateCmd = 6;</code>
       * @return The keyUpdateCmd.
       */
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto getKeyUpdateCmd() {
        if (keyUpdateCmdBuilder_ == null) {
          return keyUpdateCmd_ == null ? org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto.getDefaultInstance() : keyUpdateCmd_;
        } else {
          return keyUpdateCmdBuilder_.getMessage();
        }
      }
      /**
       * <code>optional .hadoop.hdfs.datanode.KeyUpdateCommandProto keyUpdateCmd = 6;</code>
       */
      public Builder setKeyUpdateCmd(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto value) {
        if (keyUpdateCmdBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          keyUpdateCmd_ = value;
        } else {
          keyUpdateCmdBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000020;
        onChanged();
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.datanode.KeyUpdateCommandProto keyUpdateCmd = 6;</code>
       */
      public Builder setKeyUpdateCmd(
          org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto.Builder builderForValue) {
        if (keyUpdateCmdBuilder_ == null) {
          keyUpdateCmd_ = builderForValue.build();
        } else {
          keyUpdateCmdBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000020;
        onChanged();
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.datanode.KeyUpdateCommandProto keyUpdateCmd = 6;</code>
       */
      public Builder mergeKeyUpdateCmd(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto value) {
        if (keyUpdateCmdBuilder_ == null) {
          if (((bitField0_ & 0x00000020) != 0) &&
            keyUpdateCmd_ != null &&
            keyUpdateCmd_ != org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto.getDefaultInstance()) {
            getKeyUpdateCmdBuilder().mergeFrom(value);
          } else {
            keyUpdateCmd_ = value;
          }
        } else {
          keyUpdateCmdBuilder_.mergeFrom(value);
        }
        if (keyUpdateCmd_ != null) {
          bitField0_ |= 0x00000020;
          onChanged();
        }
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.datanode.KeyUpdateCommandProto keyUpdateCmd = 6;</code>
       */
      public Builder clearKeyUpdateCmd() {
        bitField0_ = (bitField0_ & ~0x00000020);
        keyUpdateCmd_ = null;
        if (keyUpdateCmdBuilder_ != null) {
          keyUpdateCmdBuilder_.dispose();
          keyUpdateCmdBuilder_ = null;
        }
        onChanged();
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.datanode.KeyUpdateCommandProto keyUpdateCmd = 6;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto.Builder getKeyUpdateCmdBuilder() {
        bitField0_ |= 0x00000020;
        onChanged();
        return getKeyUpdateCmdFieldBuilder().getBuilder();
      }
      /**
       * <code>optional .hadoop.hdfs.datanode.KeyUpdateCommandProto keyUpdateCmd = 6;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProtoOrBuilder getKeyUpdateCmdOrBuilder() {
        if (keyUpdateCmdBuilder_ != null) {
          return keyUpdateCmdBuilder_.getMessageOrBuilder();
        } else {
          return keyUpdateCmd_ == null ?
              org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto.getDefaultInstance() : keyUpdateCmd_;
        }
      }
      /**
       * <code>optional .hadoop.hdfs.datanode.KeyUpdateCommandProto keyUpdateCmd = 6;</code>
       */
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProtoOrBuilder> 
          getKeyUpdateCmdFieldBuilder() {
        if (keyUpdateCmdBuilder_ == null) {
          keyUpdateCmdBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
              org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProtoOrBuilder>(
                  getKeyUpdateCmd(),
                  getParentForChildren(),
                  isClean());
          keyUpdateCmd_ = null;
        }
        return keyUpdateCmdBuilder_;
      }

      private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto registerCmd_;
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProtoOrBuilder> registerCmdBuilder_;
      /**
       * <code>optional .hadoop.hdfs.datanode.RegisterCommandProto registerCmd = 7;</code>
       * @return Whether the registerCmd field is set.
       */
      public boolean hasRegisterCmd() {
        return ((bitField0_ & 0x00000040) != 0);
      }
      /**
       * <code>optional .hadoop.hdfs.datanode.RegisterCommandProto registerCmd = 7;</code>
       * @return The registerCmd.
       */
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto getRegisterCmd() {
        if (registerCmdBuilder_ == null) {
          return registerCmd_ == null ? org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto.getDefaultInstance() : registerCmd_;
        } else {
          return registerCmdBuilder_.getMessage();
        }
      }
      /**
       * <code>optional .hadoop.hdfs.datanode.RegisterCommandProto registerCmd = 7;</code>
       */
      public Builder setRegisterCmd(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto value) {
        if (registerCmdBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          registerCmd_ = value;
        } else {
          registerCmdBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000040;
        onChanged();
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.datanode.RegisterCommandProto registerCmd = 7;</code>
       */
      public Builder setRegisterCmd(
          org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto.Builder builderForValue) {
        if (registerCmdBuilder_ == null) {
          registerCmd_ = builderForValue.build();
        } else {
          registerCmdBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000040;
        onChanged();
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.datanode.RegisterCommandProto registerCmd = 7;</code>
       */
      public Builder mergeRegisterCmd(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto value) {
        if (registerCmdBuilder_ == null) {
          if (((bitField0_ & 0x00000040) != 0) &&
            registerCmd_ != null &&
            registerCmd_ != org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto.getDefaultInstance()) {
            getRegisterCmdBuilder().mergeFrom(value);
          } else {
            registerCmd_ = value;
          }
        } else {
          registerCmdBuilder_.mergeFrom(value);
        }
        if (registerCmd_ != null) {
          bitField0_ |= 0x00000040;
          onChanged();
        }
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.datanode.RegisterCommandProto registerCmd = 7;</code>
       */
      public Builder clearRegisterCmd() {
        bitField0_ = (bitField0_ & ~0x00000040);
        registerCmd_ = null;
        if (registerCmdBuilder_ != null) {
          registerCmdBuilder_.dispose();
          registerCmdBuilder_ = null;
        }
        onChanged();
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.datanode.RegisterCommandProto registerCmd = 7;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto.Builder getRegisterCmdBuilder() {
        bitField0_ |= 0x00000040;
        onChanged();
        return getRegisterCmdFieldBuilder().getBuilder();
      }
      /**
       * <code>optional .hadoop.hdfs.datanode.RegisterCommandProto registerCmd = 7;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProtoOrBuilder getRegisterCmdOrBuilder() {
        if (registerCmdBuilder_ != null) {
          return registerCmdBuilder_.getMessageOrBuilder();
        } else {
          return registerCmd_ == null ?
              org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto.getDefaultInstance() : registerCmd_;
        }
      }
      /**
       * <code>optional .hadoop.hdfs.datanode.RegisterCommandProto registerCmd = 7;</code>
       */
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProtoOrBuilder> 
          getRegisterCmdFieldBuilder() {
        if (registerCmdBuilder_ == null) {
          registerCmdBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
              org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProtoOrBuilder>(
                  getRegisterCmd(),
                  getParentForChildren(),
                  isClean());
          registerCmd_ = null;
        }
        return registerCmdBuilder_;
      }

      private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto blkIdCmd_;
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProtoOrBuilder> blkIdCmdBuilder_;
      /**
       * <code>optional .hadoop.hdfs.datanode.BlockIdCommandProto blkIdCmd = 8;</code>
       * @return Whether the blkIdCmd field is set.
       */
      public boolean hasBlkIdCmd() {
        return ((bitField0_ & 0x00000080) != 0);
      }
      /**
       * <code>optional .hadoop.hdfs.datanode.BlockIdCommandProto blkIdCmd = 8;</code>
       * @return The blkIdCmd.
       */
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto getBlkIdCmd() {
        if (blkIdCmdBuilder_ == null) {
          return blkIdCmd_ == null ? org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto.getDefaultInstance() : blkIdCmd_;
        } else {
          return blkIdCmdBuilder_.getMessage();
        }
      }
      /**
       * <code>optional .hadoop.hdfs.datanode.BlockIdCommandProto blkIdCmd = 8;</code>
       */
      public Builder setBlkIdCmd(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto value) {
        if (blkIdCmdBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          blkIdCmd_ = value;
        } else {
          blkIdCmdBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000080;
        onChanged();
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.datanode.BlockIdCommandProto blkIdCmd = 8;</code>
       */
      public Builder setBlkIdCmd(
          org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto.Builder builderForValue) {
        if (blkIdCmdBuilder_ == null) {
          blkIdCmd_ = builderForValue.build();
        } else {
          blkIdCmdBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000080;
        onChanged();
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.datanode.BlockIdCommandProto blkIdCmd = 8;</code>
       */
      public Builder mergeBlkIdCmd(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto value) {
        if (blkIdCmdBuilder_ == null) {
          if (((bitField0_ & 0x00000080) != 0) &&
            blkIdCmd_ != null &&
            blkIdCmd_ != org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto.getDefaultInstance()) {
            getBlkIdCmdBuilder().mergeFrom(value);
          } else {
            blkIdCmd_ = value;
          }
        } else {
          blkIdCmdBuilder_.mergeFrom(value);
        }
        if (blkIdCmd_ != null) {
          bitField0_ |= 0x00000080;
          onChanged();
        }
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.datanode.BlockIdCommandProto blkIdCmd = 8;</code>
       */
      public Builder clearBlkIdCmd() {
        bitField0_ = (bitField0_ & ~0x00000080);
        blkIdCmd_ = null;
        if (blkIdCmdBuilder_ != null) {
          blkIdCmdBuilder_.dispose();
          blkIdCmdBuilder_ = null;
        }
        onChanged();
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.datanode.BlockIdCommandProto blkIdCmd = 8;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto.Builder getBlkIdCmdBuilder() {
        bitField0_ |= 0x00000080;
        onChanged();
        return getBlkIdCmdFieldBuilder().getBuilder();
      }
      /**
       * <code>optional .hadoop.hdfs.datanode.BlockIdCommandProto blkIdCmd = 8;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProtoOrBuilder getBlkIdCmdOrBuilder() {
        if (blkIdCmdBuilder_ != null) {
          return blkIdCmdBuilder_.getMessageOrBuilder();
        } else {
          return blkIdCmd_ == null ?
              org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto.getDefaultInstance() : blkIdCmd_;
        }
      }
      /**
       * <code>optional .hadoop.hdfs.datanode.BlockIdCommandProto blkIdCmd = 8;</code>
       */
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProtoOrBuilder> 
          getBlkIdCmdFieldBuilder() {
        if (blkIdCmdBuilder_ == null) {
          blkIdCmdBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
              org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProtoOrBuilder>(
                  getBlkIdCmd(),
                  getParentForChildren(),
                  isClean());
          blkIdCmd_ = null;
        }
        return blkIdCmdBuilder_;
      }

      private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockECReconstructionCommandProto blkECReconstructionCmd_;
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockECReconstructionCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockECReconstructionCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockECReconstructionCommandProtoOrBuilder> blkECReconstructionCmdBuilder_;
      /**
       * <code>optional .hadoop.hdfs.datanode.BlockECReconstructionCommandProto blkECReconstructionCmd = 9;</code>
       * @return Whether the blkECReconstructionCmd field is set.
       */
      public boolean hasBlkECReconstructionCmd() {
        return ((bitField0_ & 0x00000100) != 0);
      }
      /**
       * <code>optional .hadoop.hdfs.datanode.BlockECReconstructionCommandProto blkECReconstructionCmd = 9;</code>
       * @return The blkECReconstructionCmd.
       */
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockECReconstructionCommandProto getBlkECReconstructionCmd() {
        if (blkECReconstructionCmdBuilder_ == null) {
          return blkECReconstructionCmd_ == null ? org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockECReconstructionCommandProto.getDefaultInstance() : blkECReconstructionCmd_;
        } else {
          return blkECReconstructionCmdBuilder_.getMessage();
        }
      }
      /**
       * <code>optional .hadoop.hdfs.datanode.BlockECReconstructionCommandProto blkECReconstructionCmd = 9;</code>
       */
      public Builder setBlkECReconstructionCmd(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockECReconstructionCommandProto value) {
        if (blkECReconstructionCmdBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          blkECReconstructionCmd_ = value;
        } else {
          blkECReconstructionCmdBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000100;
        onChanged();
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.datanode.BlockECReconstructionCommandProto blkECReconstructionCmd = 9;</code>
       */
      public Builder setBlkECReconstructionCmd(
          org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockECReconstructionCommandProto.Builder builderForValue) {
        if (blkECReconstructionCmdBuilder_ == null) {
          blkECReconstructionCmd_ = builderForValue.build();
        } else {
          blkECReconstructionCmdBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000100;
        onChanged();
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.datanode.BlockECReconstructionCommandProto blkECReconstructionCmd = 9;</code>
       */
      public Builder mergeBlkECReconstructionCmd(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockECReconstructionCommandProto value) {
        if (blkECReconstructionCmdBuilder_ == null) {
          if (((bitField0_ & 0x00000100) != 0) &&
            blkECReconstructionCmd_ != null &&
            blkECReconstructionCmd_ != org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockECReconstructionCommandProto.getDefaultInstance()) {
            getBlkECReconstructionCmdBuilder().mergeFrom(value);
          } else {
            blkECReconstructionCmd_ = value;
          }
        } else {
          blkECReconstructionCmdBuilder_.mergeFrom(value);
        }
        if (blkECReconstructionCmd_ != null) {
          bitField0_ |= 0x00000100;
          onChanged();
        }
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.datanode.BlockECReconstructionCommandProto blkECReconstructionCmd = 9;</code>
       */
      public Builder clearBlkECReconstructionCmd() {
        bitField0_ = (bitField0_ & ~0x00000100);
        blkECReconstructionCmd_ = null;
        if (blkECReconstructionCmdBuilder_ != null) {
          blkECReconstructionCmdBuilder_.dispose();
          blkECReconstructionCmdBuilder_ = null;
        }
        onChanged();
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.datanode.BlockECReconstructionCommandProto blkECReconstructionCmd = 9;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockECReconstructionCommandProto.Builder getBlkECReconstructionCmdBuilder() {
        bitField0_ |= 0x00000100;
        onChanged();
        return getBlkECReconstructionCmdFieldBuilder().getBuilder();
      }
      /**
       * <code>optional .hadoop.hdfs.datanode.BlockECReconstructionCommandProto blkECReconstructionCmd = 9;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockECReconstructionCommandProtoOrBuilder getBlkECReconstructionCmdOrBuilder() {
        if (blkECReconstructionCmdBuilder_ != null) {
          return blkECReconstructionCmdBuilder_.getMessageOrBuilder();
        } else {
          return blkECReconstructionCmd_ == null ?
              org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockECReconstructionCommandProto.getDefaultInstance() : blkECReconstructionCmd_;
        }
      }
      /**
       * <code>optional .hadoop.hdfs.datanode.BlockECReconstructionCommandProto blkECReconstructionCmd = 9;</code>
       */
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockECReconstructionCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockECReconstructionCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockECReconstructionCommandProtoOrBuilder> 
          getBlkECReconstructionCmdFieldBuilder() {
        if (blkECReconstructionCmdBuilder_ == null) {
          blkECReconstructionCmdBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
              org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockECReconstructionCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockECReconstructionCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockECReconstructionCommandProtoOrBuilder>(
                  getBlkECReconstructionCmd(),
                  getParentForChildren(),
                  isClean());
          blkECReconstructionCmd_ = null;
        }
        return blkECReconstructionCmdBuilder_;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.datanode.DatanodeCommandProto)
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.datanode.DatanodeCommandProto)
    private static final org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto();
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<DatanodeCommandProto>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<DatanodeCommandProto>() {
      @java.lang.Override
      public DatanodeCommandProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<DatanodeCommandProto> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<DatanodeCommandProto> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }
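
  // Illustrative sketch (editorial addition, not part of the protoc output): how a caller on
  // the NameNode side might assemble a DatanodeCommandProto that carries a balancer bandwidth
  // command, using the builder methods generated above. Variable names and the bandwidth value
  // are hypothetical; only the generated types, Type.BalancerBandwidthCommand, setCmdType(...)
  // and setBalancerCmd(...) appear in this file, and setBandwidth(long) is assumed to be the
  // standard generated setter for the required uint64 field defined below.
  //
  //   DatanodeProtocolProtos.DatanodeCommandProto cmd =
  //       DatanodeProtocolProtos.DatanodeCommandProto.newBuilder()
  //           .setCmdType(DatanodeProtocolProtos.DatanodeCommandProto.Type.BalancerBandwidthCommand)
  //           .setBalancerCmd(
  //               DatanodeProtocolProtos.BalancerBandwidthCommandProto.newBuilder()
  //                   .setBandwidth(10L * 1024 * 1024)   // hypothetical value, bytes per second
  //                   .build())
  //           .build();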

  public interface BalancerBandwidthCommandProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.datanode.BalancerBandwidthCommandProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

    /**
     * <pre>
     * Maximum bandwidth to be used by datanode for balancing
     * </pre>
     *
     * <code>required uint64 bandwidth = 1;</code>
     * @return Whether the bandwidth field is set.
     */
    boolean hasBandwidth();
    /**
     * <pre>
     * Maximum bandwidth to be used by datanode for balancing
     * </pre>
     *
     * <code>required uint64 bandwidth = 1;</code>
     * @return The bandwidth.
     */
    long getBandwidth();
  }
  /**
   * <pre>
   **
   * Command sent from namenode to datanode to set the
   * maximum bandwidth to be used for balancing.
   * </pre>
   *
   * Protobuf type {@code hadoop.hdfs.datanode.BalancerBandwidthCommandProto}
   */
  public static final class BalancerBandwidthCommandProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.hdfs.datanode.BalancerBandwidthCommandProto)
      BalancerBandwidthCommandProtoOrBuilder {
    private static final long serialVersionUID = 0L;
    // Use BalancerBandwidthCommandProto.newBuilder() to construct.
    private BalancerBandwidthCommandProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private BalancerBandwidthCommandProto() {
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new BalancerBandwidthCommandProto();
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_BalancerBandwidthCommandProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_BalancerBandwidthCommandProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto.class, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto.Builder.class);
    }

    private int bitField0_;
    public static final int BANDWIDTH_FIELD_NUMBER = 1;
    private long bandwidth_ = 0L;
    /**
     * <pre>
     * Maximum bandwidth to be used by datanode for balancing
     * </pre>
     *
     * <code>required uint64 bandwidth = 1;</code>
     * @return Whether the bandwidth field is set.
     */
    @java.lang.Override
    public boolean hasBandwidth() {
      return ((bitField0_ & 0x00000001) != 0);
    }
    /**
     * <pre>
     * Maximum bandwidth to be used by datanode for balancing
     * </pre>
     *
     * <code>required uint64 bandwidth = 1;</code>
     * @return The bandwidth.
     */
    @java.lang.Override
    public long getBandwidth() {
      return bandwidth_;
    }

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      if (!hasBandwidth()) {
        memoizedIsInitialized = 0;
        return false;
      }
      memoizedIsInitialized = 1;
      return true;
    }
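
    // Editorial note (illustrative, not part of the protoc output): because bandwidth is a
    // "required" field, isInitialized() above reports false until the field has been set. With
    // the standard generated builder (defined later in this file), build() on such an
    // incomplete message throws UninitializedMessageException, while buildPartial() skips the
    // check:
    //
    //   BalancerBandwidthCommandProto.newBuilder().build();          // would throw
    //   BalancerBandwidthCommandProto.newBuilder().buildPartial();   // returns an uninitialized message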

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      if (((bitField0_ & 0x00000001) != 0)) {
        output.writeUInt64(1, bandwidth_);
      }
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt64Size(1, bandwidth_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto other = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto) obj;

      if (hasBandwidth() != other.hasBandwidth()) return false;
      if (hasBandwidth()) {
        if (getBandwidth()
            != other.getBandwidth()) return false;
      }
      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      if (hasBandwidth()) {
        hash = (37 * hash) + BANDWIDTH_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
            getBandwidth());
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }
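    // The overloads above differ only in input source; the delimited variants read a
    // message preceded by a varint length prefix (the framing written by
    // writeDelimitedTo). A brief illustrative sketch, with the stream name assumed:
    //
    //   try (java.io.InputStream in = new java.io.FileInputStream("commands.bin")) {
    //     BalancerBandwidthCommandProto cmd =
    //         BalancerBandwidthCommandProto.parseDelimitedFrom(in);
    //     // cmd is null once the stream is exhausted
    //   }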

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * <pre>
     * Command sent from namenode to datanode to set the
     * maximum bandwidth to be used for balancing.
     * </pre>
     *
     * Protobuf type {@code hadoop.hdfs.datanode.BalancerBandwidthCommandProto}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.hdfs.datanode.BalancerBandwidthCommandProto)
        org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProtoOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_BalancerBandwidthCommandProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_BalancerBandwidthCommandProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto.class, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto.newBuilder()
      private Builder() {

      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);

      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        bitField0_ = 0;
        bandwidth_ = 0L;
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_BalancerBandwidthCommandProto_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto build() {
        org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto buildPartial() {
        org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto result = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto(this);
        if (bitField0_ != 0) { buildPartial0(result); }
        onBuilt();
        return result;
      }

      private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto result) {
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) != 0)) {
          result.bandwidth_ = bandwidth_;
          to_bitField0_ |= 0x00000001;
        }
        result.bitField0_ |= to_bitField0_;
      }

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto) {
          return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto other) {
        if (other == org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto.getDefaultInstance()) return this;
        if (other.hasBandwidth()) {
          setBandwidth(other.getBandwidth());
        }
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        if (!hasBandwidth()) {
          return false;
        }
        return true;
      }

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              case 8: {
                bandwidth_ = input.readUInt64();
                bitField0_ |= 0x00000001;
                break;
              } // case 8
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
      private int bitField0_;

      private long bandwidth_ ;
      /**
       * <pre>
       * Maximum bandwidth to be used by datanode for balancing
       * </pre>
       *
       * <code>required uint64 bandwidth = 1;</code>
       * @return Whether the bandwidth field is set.
       */
      @java.lang.Override
      public boolean hasBandwidth() {
        return ((bitField0_ & 0x00000001) != 0);
      }
      /**
       * <pre>
       * Maximum bandwidth to be used by datanode for balancing
       * </pre>
       *
       * <code>required uint64 bandwidth = 1;</code>
       * @return The bandwidth.
       */
      @java.lang.Override
      public long getBandwidth() {
        return bandwidth_;
      }
      /**
       * <pre>
       * Maximum bandwidth to be used by datanode for balancing
       * </pre>
       *
       * <code>required uint64 bandwidth = 1;</code>
       * @param value The bandwidth to set.
       * @return This builder for chaining.
       */
      public Builder setBandwidth(long value) {
        bandwidth_ = value;
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * <pre>
       * Maximum bandwidth to be used by datanode for balancing
       * </pre>
       *
       * <code>required uint64 bandwidth = 1;</code>
       * @return This builder for chaining.
       */
      public Builder clearBandwidth() {
        bitField0_ = (bitField0_ & ~0x00000001);
        bandwidth_ = 0L;
        onChanged();
        return this;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.datanode.BalancerBandwidthCommandProto)
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.datanode.BalancerBandwidthCommandProto)
    private static final org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto();
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<BalancerBandwidthCommandProto>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<BalancerBandwidthCommandProto>() {
      @java.lang.Override
      public BalancerBandwidthCommandProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<BalancerBandwidthCommandProto> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<BalancerBandwidthCommandProto> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }
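  // A minimal usage sketch for the message above, kept as a comment so the generated
  // source stays valid. It relies on the builder and parseFrom methods declared above
  // plus the inherited toByteArray(); the bandwidth value is illustrative only:
  //
  //   BalancerBandwidthCommandProto cmd = BalancerBandwidthCommandProto.newBuilder()
  //       .setBandwidth(10L * 1024 * 1024)   // sets the required 'bandwidth' field
  //       .build();                          // build() throws if 'bandwidth' is unset
  //   byte[] wire = cmd.toByteArray();
  //   BalancerBandwidthCommandProto decoded = BalancerBandwidthCommandProto.parseFrom(wire);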

  public interface BlockCommandProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.datanode.BlockCommandProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

    /**
     * <code>required .hadoop.hdfs.datanode.BlockCommandProto.Action action = 1;</code>
     * @return Whether the action field is set.
     */
    boolean hasAction();
    /**
     * <code>required .hadoop.hdfs.datanode.BlockCommandProto.Action action = 1;</code>
     * @return The action.
     */
    org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.Action getAction();

    /**
     * <code>required string blockPoolId = 2;</code>
     * @return Whether the blockPoolId field is set.
     */
    boolean hasBlockPoolId();
    /**
     * <code>required string blockPoolId = 2;</code>
     * @return The blockPoolId.
     */
    java.lang.String getBlockPoolId();
    /**
     * <code>required string blockPoolId = 2;</code>
     * @return The bytes for blockPoolId.
     */
    org.apache.hadoop.thirdparty.protobuf.ByteString
        getBlockPoolIdBytes();

    /**
     * <code>repeated .hadoop.hdfs.BlockProto blocks = 3;</code>
     */
    java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto> 
        getBlocksList();
    /**
     * <code>repeated .hadoop.hdfs.BlockProto blocks = 3;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto getBlocks(int index);
    /**
     * <code>repeated .hadoop.hdfs.BlockProto blocks = 3;</code>
     */
    int getBlocksCount();
    /**
     * <code>repeated .hadoop.hdfs.BlockProto blocks = 3;</code>
     */
    java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder> 
        getBlocksOrBuilderList();
    /**
     * <code>repeated .hadoop.hdfs.BlockProto blocks = 3;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder getBlocksOrBuilder(
        int index);

    /**
     * <code>repeated .hadoop.hdfs.DatanodeInfosProto targets = 4;</code>
     */
    java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto> 
        getTargetsList();
    /**
     * <code>repeated .hadoop.hdfs.DatanodeInfosProto targets = 4;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto getTargets(int index);
    /**
     * <code>repeated .hadoop.hdfs.DatanodeInfosProto targets = 4;</code>
     */
    int getTargetsCount();
    /**
     * <code>repeated .hadoop.hdfs.DatanodeInfosProto targets = 4;</code>
     */
    java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProtoOrBuilder> 
        getTargetsOrBuilderList();
    /**
     * <code>repeated .hadoop.hdfs.DatanodeInfosProto targets = 4;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProtoOrBuilder getTargetsOrBuilder(
        int index);

    /**
     * <code>repeated .hadoop.hdfs.StorageUuidsProto targetStorageUuids = 5;</code>
     */
    java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto> 
        getTargetStorageUuidsList();
    /**
     * <code>repeated .hadoop.hdfs.StorageUuidsProto targetStorageUuids = 5;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto getTargetStorageUuids(int index);
    /**
     * <code>repeated .hadoop.hdfs.StorageUuidsProto targetStorageUuids = 5;</code>
     */
    int getTargetStorageUuidsCount();
    /**
     * <code>repeated .hadoop.hdfs.StorageUuidsProto targetStorageUuids = 5;</code>
     */
    java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProtoOrBuilder> 
        getTargetStorageUuidsOrBuilderList();
    /**
     * <code>repeated .hadoop.hdfs.StorageUuidsProto targetStorageUuids = 5;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProtoOrBuilder getTargetStorageUuidsOrBuilder(
        int index);

    /**
     * <code>repeated .hadoop.hdfs.StorageTypesProto targetStorageTypes = 6;</code>
     */
    java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto> 
        getTargetStorageTypesList();
    /**
     * <code>repeated .hadoop.hdfs.StorageTypesProto targetStorageTypes = 6;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto getTargetStorageTypes(int index);
    /**
     * <code>repeated .hadoop.hdfs.StorageTypesProto targetStorageTypes = 6;</code>
     */
    int getTargetStorageTypesCount();
    /**
     * <code>repeated .hadoop.hdfs.StorageTypesProto targetStorageTypes = 6;</code>
     */
    java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProtoOrBuilder> 
        getTargetStorageTypesOrBuilderList();
    /**
     * <code>repeated .hadoop.hdfs.StorageTypesProto targetStorageTypes = 6;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProtoOrBuilder getTargetStorageTypesOrBuilder(
        int index);
  }
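  // The repeated-field accessors declared above follow the standard generated pattern:
  // getXxxList() for bulk access and a getXxxCount()/getXxx(int) pair for indexed
  // access. An illustrative sketch of walking the blocks carried by a command
  // ('cmd' is an assumed BlockCommandProto instance):
  //
  //   for (int i = 0; i < cmd.getBlocksCount(); i++) {
  //     org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto block = cmd.getBlocks(i);
  //     // act on 'block' according to cmd.getAction(), e.g. TRANSFER or INVALIDATE
  //   }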
  /**
   * <pre>
   * Command to instruct datanodes to perform a certain action
   * on the given set of blocks.
   * </pre>
   *
   * Protobuf type {@code hadoop.hdfs.datanode.BlockCommandProto}
   */
  public static final class BlockCommandProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.hdfs.datanode.BlockCommandProto)
      BlockCommandProtoOrBuilder {
    private static final long serialVersionUID = 0L;
    // Use BlockCommandProto.newBuilder() to construct.
    private BlockCommandProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private BlockCommandProto() {
      action_ = 1;
      blockPoolId_ = "";
      blocks_ = java.util.Collections.emptyList();
      targets_ = java.util.Collections.emptyList();
      targetStorageUuids_ = java.util.Collections.emptyList();
      targetStorageTypes_ = java.util.Collections.emptyList();
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new BlockCommandProto();
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_BlockCommandProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_BlockCommandProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.class, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.Builder.class);
    }

    /**
     * Protobuf enum {@code hadoop.hdfs.datanode.BlockCommandProto.Action}
     */
    public enum Action
        implements org.apache.hadoop.thirdparty.protobuf.ProtocolMessageEnum {
      /**
       * <pre>
       * Transfer blocks to another datanode
       * </pre>
       *
       * <code>TRANSFER = 1;</code>
       */
      TRANSFER(1),
      /**
       * <pre>
       * Invalidate blocks
       * </pre>
       *
       * <code>INVALIDATE = 2;</code>
       */
      INVALIDATE(2),
      /**
       * <pre>
       * Shutdown the datanode
       * </pre>
       *
       * <code>SHUTDOWN = 3;</code>
       */
      SHUTDOWN(3),
      ;

      /**
       * <pre>
       * Transfer blocks to another datanode
       * </pre>
       *
       * <code>TRANSFER = 1;</code>
       */
      public static final int TRANSFER_VALUE = 1;
      /**
       * <pre>
       * Invalidate blocks
       * </pre>
       *
       * <code>INVALIDATE = 2;</code>
       */
      public static final int INVALIDATE_VALUE = 2;
      /**
       * <pre>
       * Shutdown the datanode
       * </pre>
       *
       * <code>SHUTDOWN = 3;</code>
       */
      public static final int SHUTDOWN_VALUE = 3;


      public final int getNumber() {
        return value;
      }

      /**
       * @param value The numeric wire value of the corresponding enum entry.
       * @return The enum associated with the given numeric wire value.
       * @deprecated Use {@link #forNumber(int)} instead.
       */
      @java.lang.Deprecated
      public static Action valueOf(int value) {
        return forNumber(value);
      }

      /**
       * @param value The numeric wire value of the corresponding enum entry.
       * @return The enum associated with the given numeric wire value.
       */
      public static Action forNumber(int value) {
        switch (value) {
          case 1: return TRANSFER;
          case 2: return INVALIDATE;
          case 3: return SHUTDOWN;
          default: return null;
        }
      }

      public static org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap<Action>
          internalGetValueMap() {
        return internalValueMap;
      }
      private static final org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap<
          Action> internalValueMap =
            new org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap<Action>() {
              public Action findValueByNumber(int number) {
                return Action.forNumber(number);
              }
            };

      public final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumValueDescriptor
          getValueDescriptor() {
        return getDescriptor().getValues().get(ordinal());
      }
      public final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumDescriptor
          getDescriptorForType() {
        return getDescriptor();
      }
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumDescriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.getDescriptor().getEnumTypes().get(0);
      }

      private static final Action[] VALUES = values();

      public static Action valueOf(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumValueDescriptor desc) {
        if (desc.getType() != getDescriptor()) {
          throw new java.lang.IllegalArgumentException(
            "EnumValueDescriptor is not for this type.");
        }
        return VALUES[desc.getIndex()];
      }

      private final int value;

      private Action(int value) {
        this.value = value;
      }

      // @@protoc_insertion_point(enum_scope:hadoop.hdfs.datanode.BlockCommandProto.Action)
    }
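    // Action.forNumber(int) returns null for any wire value outside 1..3, and the
    // deprecated valueOf(int) simply delegates to it. An illustrative sketch of
    // decoding a raw value defensively ('rawValue' is an assumed int from the wire):
    //
    //   Action action = Action.forNumber(rawValue);
    //   if (action == null) {
    //     // value written by a newer peer; treat it as unknown rather than failing
    //   }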

    private int bitField0_;
    public static final int ACTION_FIELD_NUMBER = 1;
    private int action_ = 1;
    /**
     * <code>required .hadoop.hdfs.datanode.BlockCommandProto.Action action = 1;</code>
     * @return Whether the action field is set.
     */
    @java.lang.Override public boolean hasAction() {
      return ((bitField0_ & 0x00000001) != 0);
    }
    /**
     * <code>required .hadoop.hdfs.datanode.BlockCommandProto.Action action = 1;</code>
     * @return The action.
     */
    @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.Action getAction() {
      org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.Action result = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.Action.forNumber(action_);
      return result == null ? org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.Action.TRANSFER : result;
    }

    public static final int BLOCKPOOLID_FIELD_NUMBER = 2;
    @SuppressWarnings("serial")
    private volatile java.lang.Object blockPoolId_ = "";
    /**
     * <code>required string blockPoolId = 2;</code>
     * @return Whether the blockPoolId field is set.
     */
    @java.lang.Override
    public boolean hasBlockPoolId() {
      return ((bitField0_ & 0x00000002) != 0);
    }
    /**
     * <code>required string blockPoolId = 2;</code>
     * @return The blockPoolId.
     */
    @java.lang.Override
    public java.lang.String getBlockPoolId() {
      java.lang.Object ref = blockPoolId_;
      if (ref instanceof java.lang.String) {
        return (java.lang.String) ref;
      } else {
        org.apache.hadoop.thirdparty.protobuf.ByteString bs = 
            (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        java.lang.String s = bs.toStringUtf8();
        if (bs.isValidUtf8()) {
          blockPoolId_ = s;
        }
        return s;
      }
    }
    /**
     * <code>required string blockPoolId = 2;</code>
     * @return The bytes for blockPoolId.
     */
    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.ByteString
        getBlockPoolIdBytes() {
      java.lang.Object ref = blockPoolId_;
      if (ref instanceof java.lang.String) {
        org.apache.hadoop.thirdparty.protobuf.ByteString b = 
            org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                (java.lang.String) ref);
        blockPoolId_ = b;
        return b;
      } else {
        return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
      }
    }

    public static final int BLOCKS_FIELD_NUMBER = 3;
    @SuppressWarnings("serial")
    private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto> blocks_;
    /**
     * <code>repeated .hadoop.hdfs.BlockProto blocks = 3;</code>
     */
    @java.lang.Override
    public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto> getBlocksList() {
      return blocks_;
    }
    /**
     * <code>repeated .hadoop.hdfs.BlockProto blocks = 3;</code>
     */
    @java.lang.Override
    public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder> 
        getBlocksOrBuilderList() {
      return blocks_;
    }
    /**
     * <code>repeated .hadoop.hdfs.BlockProto blocks = 3;</code>
     */
    @java.lang.Override
    public int getBlocksCount() {
      return blocks_.size();
    }
    /**
     * <code>repeated .hadoop.hdfs.BlockProto blocks = 3;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto getBlocks(int index) {
      return blocks_.get(index);
    }
    /**
     * <code>repeated .hadoop.hdfs.BlockProto blocks = 3;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder getBlocksOrBuilder(
        int index) {
      return blocks_.get(index);
    }

    public static final int TARGETS_FIELD_NUMBER = 4;
    @SuppressWarnings("serial")
    private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto> targets_;
    /**
     * <code>repeated .hadoop.hdfs.DatanodeInfosProto targets = 4;</code>
     */
    @java.lang.Override
    public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto> getTargetsList() {
      return targets_;
    }
    /**
     * <code>repeated .hadoop.hdfs.DatanodeInfosProto targets = 4;</code>
     */
    @java.lang.Override
    public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProtoOrBuilder> 
        getTargetsOrBuilderList() {
      return targets_;
    }
    /**
     * <code>repeated .hadoop.hdfs.DatanodeInfosProto targets = 4;</code>
     */
    @java.lang.Override
    public int getTargetsCount() {
      return targets_.size();
    }
    /**
     * <code>repeated .hadoop.hdfs.DatanodeInfosProto targets = 4;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto getTargets(int index) {
      return targets_.get(index);
    }
    /**
     * <code>repeated .hadoop.hdfs.DatanodeInfosProto targets = 4;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProtoOrBuilder getTargetsOrBuilder(
        int index) {
      return targets_.get(index);
    }

    public static final int TARGETSTORAGEUUIDS_FIELD_NUMBER = 5;
    @SuppressWarnings("serial")
    private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto> targetStorageUuids_;
    /**
     * <code>repeated .hadoop.hdfs.StorageUuidsProto targetStorageUuids = 5;</code>
     */
    @java.lang.Override
    public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto> getTargetStorageUuidsList() {
      return targetStorageUuids_;
    }
    /**
     * <code>repeated .hadoop.hdfs.StorageUuidsProto targetStorageUuids = 5;</code>
     */
    @java.lang.Override
    public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProtoOrBuilder> 
        getTargetStorageUuidsOrBuilderList() {
      return targetStorageUuids_;
    }
    /**
     * <code>repeated .hadoop.hdfs.StorageUuidsProto targetStorageUuids = 5;</code>
     */
    @java.lang.Override
    public int getTargetStorageUuidsCount() {
      return targetStorageUuids_.size();
    }
    /**
     * <code>repeated .hadoop.hdfs.StorageUuidsProto targetStorageUuids = 5;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto getTargetStorageUuids(int index) {
      return targetStorageUuids_.get(index);
    }
    /**
     * <code>repeated .hadoop.hdfs.StorageUuidsProto targetStorageUuids = 5;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProtoOrBuilder getTargetStorageUuidsOrBuilder(
        int index) {
      return targetStorageUuids_.get(index);
    }

    public static final int TARGETSTORAGETYPES_FIELD_NUMBER = 6;
    @SuppressWarnings("serial")
    private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto> targetStorageTypes_;
    /**
     * <code>repeated .hadoop.hdfs.StorageTypesProto targetStorageTypes = 6;</code>
     */
    @java.lang.Override
    public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto> getTargetStorageTypesList() {
      return targetStorageTypes_;
    }
    /**
     * <code>repeated .hadoop.hdfs.StorageTypesProto targetStorageTypes = 6;</code>
     */
    @java.lang.Override
    public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProtoOrBuilder> 
        getTargetStorageTypesOrBuilderList() {
      return targetStorageTypes_;
    }
    /**
     * <code>repeated .hadoop.hdfs.StorageTypesProto targetStorageTypes = 6;</code>
     */
    @java.lang.Override
    public int getTargetStorageTypesCount() {
      return targetStorageTypes_.size();
    }
    /**
     * <code>repeated .hadoop.hdfs.StorageTypesProto targetStorageTypes = 6;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto getTargetStorageTypes(int index) {
      return targetStorageTypes_.get(index);
    }
    /**
     * <code>repeated .hadoop.hdfs.StorageTypesProto targetStorageTypes = 6;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProtoOrBuilder getTargetStorageTypesOrBuilder(
        int index) {
      return targetStorageTypes_.get(index);
    }

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      if (!hasAction()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasBlockPoolId()) {
        memoizedIsInitialized = 0;
        return false;
      }
      for (int i = 0; i < getBlocksCount(); i++) {
        if (!getBlocks(i).isInitialized()) {
          memoizedIsInitialized = 0;
          return false;
        }
      }
      for (int i = 0; i < getTargetsCount(); i++) {
        if (!getTargets(i).isInitialized()) {
          memoizedIsInitialized = 0;
          return false;
        }
      }
      memoizedIsInitialized = 1;
      return true;
    }

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      if (((bitField0_ & 0x00000001) != 0)) {
        output.writeEnum(1, action_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 2, blockPoolId_);
      }
      for (int i = 0; i < blocks_.size(); i++) {
        output.writeMessage(3, blocks_.get(i));
      }
      for (int i = 0; i < targets_.size(); i++) {
        output.writeMessage(4, targets_.get(i));
      }
      for (int i = 0; i < targetStorageUuids_.size(); i++) {
        output.writeMessage(5, targetStorageUuids_.get(i));
      }
      for (int i = 0; i < targetStorageTypes_.size(); i++) {
        output.writeMessage(6, targetStorageTypes_.get(i));
      }
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeEnumSize(1, action_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(2, blockPoolId_);
      }
      for (int i = 0; i < blocks_.size(); i++) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeMessageSize(3, blocks_.get(i));
      }
      for (int i = 0; i < targets_.size(); i++) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeMessageSize(4, targets_.get(i));
      }
      for (int i = 0; i < targetStorageUuids_.size(); i++) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeMessageSize(5, targetStorageUuids_.get(i));
      }
      for (int i = 0; i < targetStorageTypes_.size(); i++) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeMessageSize(6, targetStorageTypes_.get(i));
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto other = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto) obj;

      if (hasAction() != other.hasAction()) return false;
      if (hasAction()) {
        if (action_ != other.action_) return false;
      }
      if (hasBlockPoolId() != other.hasBlockPoolId()) return false;
      if (hasBlockPoolId()) {
        if (!getBlockPoolId()
            .equals(other.getBlockPoolId())) return false;
      }
      if (!getBlocksList()
          .equals(other.getBlocksList())) return false;
      if (!getTargetsList()
          .equals(other.getTargetsList())) return false;
      if (!getTargetStorageUuidsList()
          .equals(other.getTargetStorageUuidsList())) return false;
      if (!getTargetStorageTypesList()
          .equals(other.getTargetStorageTypesList())) return false;
      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      if (hasAction()) {
        hash = (37 * hash) + ACTION_FIELD_NUMBER;
        hash = (53 * hash) + action_;
      }
      if (hasBlockPoolId()) {
        hash = (37 * hash) + BLOCKPOOLID_FIELD_NUMBER;
        hash = (53 * hash) + getBlockPoolId().hashCode();
      }
      if (getBlocksCount() > 0) {
        hash = (37 * hash) + BLOCKS_FIELD_NUMBER;
        hash = (53 * hash) + getBlocksList().hashCode();
      }
      if (getTargetsCount() > 0) {
        hash = (37 * hash) + TARGETS_FIELD_NUMBER;
        hash = (53 * hash) + getTargetsList().hashCode();
      }
      if (getTargetStorageUuidsCount() > 0) {
        hash = (37 * hash) + TARGETSTORAGEUUIDS_FIELD_NUMBER;
        hash = (53 * hash) + getTargetStorageUuidsList().hashCode();
      }
      if (getTargetStorageTypesCount() > 0) {
        hash = (37 * hash) + TARGETSTORAGETYPES_FIELD_NUMBER;
        hash = (53 * hash) + getTargetStorageTypesList().hashCode();
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * <pre>
     * Command to instruct datanodes to perform a certain action
     * on the given set of blocks.
     * </pre>
     *
     * Protobuf type {@code hadoop.hdfs.datanode.BlockCommandProto}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.hdfs.datanode.BlockCommandProto)
        org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProtoOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_BlockCommandProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_BlockCommandProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.class, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.newBuilder()
      private Builder() {

      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);

      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        bitField0_ = 0;
        action_ = 1;
        blockPoolId_ = "";
        if (blocksBuilder_ == null) {
          blocks_ = java.util.Collections.emptyList();
        } else {
          blocks_ = null;
          blocksBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000004);
        if (targetsBuilder_ == null) {
          targets_ = java.util.Collections.emptyList();
        } else {
          targets_ = null;
          targetsBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000008);
        if (targetStorageUuidsBuilder_ == null) {
          targetStorageUuids_ = java.util.Collections.emptyList();
        } else {
          targetStorageUuids_ = null;
          targetStorageUuidsBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000010);
        if (targetStorageTypesBuilder_ == null) {
          targetStorageTypes_ = java.util.Collections.emptyList();
        } else {
          targetStorageTypes_ = null;
          targetStorageTypesBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000020);
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_BlockCommandProto_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto build() {
        org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto buildPartial() {
        org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto result = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto(this);
        buildPartialRepeatedFields(result);
        if (bitField0_ != 0) { buildPartial0(result); }
        onBuilt();
        return result;
      }

      private void buildPartialRepeatedFields(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto result) {
        if (blocksBuilder_ == null) {
          if (((bitField0_ & 0x00000004) != 0)) {
            blocks_ = java.util.Collections.unmodifiableList(blocks_);
            bitField0_ = (bitField0_ & ~0x00000004);
          }
          result.blocks_ = blocks_;
        } else {
          result.blocks_ = blocksBuilder_.build();
        }
        if (targetsBuilder_ == null) {
          if (((bitField0_ & 0x00000008) != 0)) {
            targets_ = java.util.Collections.unmodifiableList(targets_);
            bitField0_ = (bitField0_ & ~0x00000008);
          }
          result.targets_ = targets_;
        } else {
          result.targets_ = targetsBuilder_.build();
        }
        if (targetStorageUuidsBuilder_ == null) {
          if (((bitField0_ & 0x00000010) != 0)) {
            targetStorageUuids_ = java.util.Collections.unmodifiableList(targetStorageUuids_);
            bitField0_ = (bitField0_ & ~0x00000010);
          }
          result.targetStorageUuids_ = targetStorageUuids_;
        } else {
          result.targetStorageUuids_ = targetStorageUuidsBuilder_.build();
        }
        if (targetStorageTypesBuilder_ == null) {
          if (((bitField0_ & 0x00000020) != 0)) {
            targetStorageTypes_ = java.util.Collections.unmodifiableList(targetStorageTypes_);
            bitField0_ = (bitField0_ & ~0x00000020);
          }
          result.targetStorageTypes_ = targetStorageTypes_;
        } else {
          result.targetStorageTypes_ = targetStorageTypesBuilder_.build();
        }
      }

      private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto result) {
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) != 0)) {
          result.action_ = action_;
          to_bitField0_ |= 0x00000001;
        }
        if (((from_bitField0_ & 0x00000002) != 0)) {
          result.blockPoolId_ = blockPoolId_;
          to_bitField0_ |= 0x00000002;
        }
        result.bitField0_ |= to_bitField0_;
      }

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto) {
          return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto other) {
        if (other == org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.getDefaultInstance()) return this;
        if (other.hasAction()) {
          setAction(other.getAction());
        }
        if (other.hasBlockPoolId()) {
          blockPoolId_ = other.blockPoolId_;
          bitField0_ |= 0x00000002;
          onChanged();
        }
        if (blocksBuilder_ == null) {
          if (!other.blocks_.isEmpty()) {
            if (blocks_.isEmpty()) {
              blocks_ = other.blocks_;
              bitField0_ = (bitField0_ & ~0x00000004);
            } else {
              ensureBlocksIsMutable();
              blocks_.addAll(other.blocks_);
            }
            onChanged();
          }
        } else {
          if (!other.blocks_.isEmpty()) {
            if (blocksBuilder_.isEmpty()) {
              blocksBuilder_.dispose();
              blocksBuilder_ = null;
              blocks_ = other.blocks_;
              bitField0_ = (bitField0_ & ~0x00000004);
              blocksBuilder_ = 
                org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ?
                   getBlocksFieldBuilder() : null;
            } else {
              blocksBuilder_.addAllMessages(other.blocks_);
            }
          }
        }
        if (targetsBuilder_ == null) {
          if (!other.targets_.isEmpty()) {
            if (targets_.isEmpty()) {
              targets_ = other.targets_;
              bitField0_ = (bitField0_ & ~0x00000008);
            } else {
              ensureTargetsIsMutable();
              targets_.addAll(other.targets_);
            }
            onChanged();
          }
        } else {
          if (!other.targets_.isEmpty()) {
            if (targetsBuilder_.isEmpty()) {
              targetsBuilder_.dispose();
              targetsBuilder_ = null;
              targets_ = other.targets_;
              bitField0_ = (bitField0_ & ~0x00000008);
              targetsBuilder_ = 
                org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ?
                   getTargetsFieldBuilder() : null;
            } else {
              targetsBuilder_.addAllMessages(other.targets_);
            }
          }
        }
        if (targetStorageUuidsBuilder_ == null) {
          if (!other.targetStorageUuids_.isEmpty()) {
            if (targetStorageUuids_.isEmpty()) {
              targetStorageUuids_ = other.targetStorageUuids_;
              bitField0_ = (bitField0_ & ~0x00000010);
            } else {
              ensureTargetStorageUuidsIsMutable();
              targetStorageUuids_.addAll(other.targetStorageUuids_);
            }
            onChanged();
          }
        } else {
          if (!other.targetStorageUuids_.isEmpty()) {
            if (targetStorageUuidsBuilder_.isEmpty()) {
              targetStorageUuidsBuilder_.dispose();
              targetStorageUuidsBuilder_ = null;
              targetStorageUuids_ = other.targetStorageUuids_;
              bitField0_ = (bitField0_ & ~0x00000010);
              targetStorageUuidsBuilder_ = 
                org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ?
                   getTargetStorageUuidsFieldBuilder() : null;
            } else {
              targetStorageUuidsBuilder_.addAllMessages(other.targetStorageUuids_);
            }
          }
        }
        if (targetStorageTypesBuilder_ == null) {
          if (!other.targetStorageTypes_.isEmpty()) {
            if (targetStorageTypes_.isEmpty()) {
              targetStorageTypes_ = other.targetStorageTypes_;
              bitField0_ = (bitField0_ & ~0x00000020);
            } else {
              ensureTargetStorageTypesIsMutable();
              targetStorageTypes_.addAll(other.targetStorageTypes_);
            }
            onChanged();
          }
        } else {
          if (!other.targetStorageTypes_.isEmpty()) {
            if (targetStorageTypesBuilder_.isEmpty()) {
              targetStorageTypesBuilder_.dispose();
              targetStorageTypesBuilder_ = null;
              targetStorageTypes_ = other.targetStorageTypes_;
              bitField0_ = (bitField0_ & ~0x00000020);
              targetStorageTypesBuilder_ = 
                org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ?
                   getTargetStorageTypesFieldBuilder() : null;
            } else {
              targetStorageTypesBuilder_.addAllMessages(other.targetStorageTypes_);
            }
          }
        }
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }
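
      // Hand-written note (not produced by protoc): the merge above follows the
      // usual proto2 semantics -- singular fields set on `other` overwrite the
      // local value, while every repeated field is concatenated. Each repeated
      // field lives either in a plain List (before a nested builder is
      // requested) or in a RepeatedFieldBuilderV3, hence the two branches per
      // field. Illustrative sketch with placeholder messages:
      //
      //   BlockCommandProto merged = BlockCommandProto.newBuilder(first)
      //       .mergeFrom(second)   // blocks, targets, etc. are appended
      //       .build();            // assumes `first` already has the required fields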

      @java.lang.Override
      public final boolean isInitialized() {
        if (!hasAction()) {
          return false;
        }
        if (!hasBlockPoolId()) {
          return false;
        }
        for (int i = 0; i < getBlocksCount(); i++) {
          if (!getBlocks(i).isInitialized()) {
            return false;
          }
        }
        for (int i = 0; i < getTargetsCount(); i++) {
          if (!getTargets(i).isInitialized()) {
            return false;
          }
        }
        return true;
      }
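
      // isInitialized() enforces the proto2 `required` constraints: action and
      // blockPoolId must be set, and every nested blocks/targets element must
      // itself be initialized before build() can succeed.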

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              case 8: {
                int tmpRaw = input.readEnum();
                org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.Action tmpValue =
                    org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.Action.forNumber(tmpRaw);
                if (tmpValue == null) {
                  mergeUnknownVarintField(1, tmpRaw);
                } else {
                  action_ = tmpRaw;
                  bitField0_ |= 0x00000001;
                }
                break;
              } // case 8
              case 18: {
                blockPoolId_ = input.readBytes();
                bitField0_ |= 0x00000002;
                break;
              } // case 18
              case 26: {
                org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto m =
                    input.readMessage(
                        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.PARSER,
                        extensionRegistry);
                if (blocksBuilder_ == null) {
                  ensureBlocksIsMutable();
                  blocks_.add(m);
                } else {
                  blocksBuilder_.addMessage(m);
                }
                break;
              } // case 26
              case 34: {
                org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto m =
                    input.readMessage(
                        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto.PARSER,
                        extensionRegistry);
                if (targetsBuilder_ == null) {
                  ensureTargetsIsMutable();
                  targets_.add(m);
                } else {
                  targetsBuilder_.addMessage(m);
                }
                break;
              } // case 34
              case 42: {
                org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto m =
                    input.readMessage(
                        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto.PARSER,
                        extensionRegistry);
                if (targetStorageUuidsBuilder_ == null) {
                  ensureTargetStorageUuidsIsMutable();
                  targetStorageUuids_.add(m);
                } else {
                  targetStorageUuidsBuilder_.addMessage(m);
                }
                break;
              } // case 42
              case 50: {
                org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto m =
                    input.readMessage(
                        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.PARSER,
                        extensionRegistry);
                if (targetStorageTypesBuilder_ == null) {
                  ensureTargetStorageTypesIsMutable();
                  targetStorageTypes_.add(m);
                } else {
                  targetStorageTypesBuilder_.addMessage(m);
                }
                break;
              } // case 50
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
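
      // Hand-written note: each case label above is a protobuf tag, computed as
      // (field_number << 3) | wire_type. So 8 is field 1 as a varint (the
      // Action enum), 18 is field 2 length-delimited (blockPoolId), and
      // 26/34/42/50 are the length-delimited repeated fields 3 through 6.
      // Unrecognized Action numbers are preserved as unknown varint fields
      // rather than being dropped.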
      private int bitField0_;
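      // Presence bits backing the hasXxx() accessors below:
      //   0x01 action, 0x02 blockPoolId, 0x04 blocks, 0x08 targets,
      //   0x10 targetStorageUuids, 0x20 targetStorageTypes.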

      private int action_ = 1;
      /**
       * <code>required .hadoop.hdfs.datanode.BlockCommandProto.Action action = 1;</code>
       * @return Whether the action field is set.
       */
      @java.lang.Override public boolean hasAction() {
        return ((bitField0_ & 0x00000001) != 0);
      }
      /**
       * <code>required .hadoop.hdfs.datanode.BlockCommandProto.Action action = 1;</code>
       * @return The action.
       */
      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.Action getAction() {
        org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.Action result = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.Action.forNumber(action_);
        return result == null ? org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.Action.TRANSFER : result;
      }
      /**
       * <code>required .hadoop.hdfs.datanode.BlockCommandProto.Action action = 1;</code>
       * @param value The action to set.
       * @return This builder for chaining.
       */
      public Builder setAction(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.Action value) {
        if (value == null) {
          throw new NullPointerException();
        }
        bitField0_ |= 0x00000001;
        action_ = value.getNumber();
        onChanged();
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.datanode.BlockCommandProto.Action action = 1;</code>
       * @return This builder for chaining.
       */
      public Builder clearAction() {
        bitField0_ = (bitField0_ & ~0x00000001);
        action_ = 1;
        onChanged();
        return this;
      }

      private java.lang.Object blockPoolId_ = "";
      /**
       * <code>required string blockPoolId = 2;</code>
       * @return Whether the blockPoolId field is set.
       */
      public boolean hasBlockPoolId() {
        return ((bitField0_ & 0x00000002) != 0);
      }
      /**
       * <code>required string blockPoolId = 2;</code>
       * @return The blockPoolId.
       */
      public java.lang.String getBlockPoolId() {
        java.lang.Object ref = blockPoolId_;
        if (!(ref instanceof java.lang.String)) {
          org.apache.hadoop.thirdparty.protobuf.ByteString bs =
              (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
          java.lang.String s = bs.toStringUtf8();
          if (bs.isValidUtf8()) {
            blockPoolId_ = s;
          }
          return s;
        } else {
          return (java.lang.String) ref;
        }
      }
      /**
       * <code>required string blockPoolId = 2;</code>
       * @return The bytes for blockPoolId.
       */
      public org.apache.hadoop.thirdparty.protobuf.ByteString
          getBlockPoolIdBytes() {
        java.lang.Object ref = blockPoolId_;
        if (ref instanceof String) {
          org.apache.hadoop.thirdparty.protobuf.ByteString b = 
              org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                  (java.lang.String) ref);
          blockPoolId_ = b;
          return b;
        } else {
          return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        }
      }
      /**
       * <code>required string blockPoolId = 2;</code>
       * @param value The blockPoolId to set.
       * @return This builder for chaining.
       */
      public Builder setBlockPoolId(
          java.lang.String value) {
        if (value == null) { throw new NullPointerException(); }
        blockPoolId_ = value;
        bitField0_ |= 0x00000002;
        onChanged();
        return this;
      }
      /**
       * <code>required string blockPoolId = 2;</code>
       * @return This builder for chaining.
       */
      public Builder clearBlockPoolId() {
        blockPoolId_ = getDefaultInstance().getBlockPoolId();
        bitField0_ = (bitField0_ & ~0x00000002);
        onChanged();
        return this;
      }
      /**
       * <code>required string blockPoolId = 2;</code>
       * @param value The bytes for blockPoolId to set.
       * @return This builder for chaining.
       */
      public Builder setBlockPoolIdBytes(
          org.apache.hadoop.thirdparty.protobuf.ByteString value) {
        if (value == null) { throw new NullPointerException(); }
        blockPoolId_ = value;
        bitField0_ |= 0x00000002;
        onChanged();
        return this;
      }
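
      // blockPoolId_ holds either a String or a ByteString: getBlockPoolId()
      // caches the decoded String (only when the bytes are valid UTF-8), while
      // getBlockPoolIdBytes() caches the UTF-8 encoding, so repeated calls in
      // either direction avoid re-converting.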

      private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto> blocks_ =
        java.util.Collections.emptyList();
      private void ensureBlocksIsMutable() {
        if (!((bitField0_ & 0x00000004) != 0)) {
          blocks_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto>(blocks_);
          bitField0_ |= 0x00000004;
        }
      }

      private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder> blocksBuilder_;

      /**
       * <code>repeated .hadoop.hdfs.BlockProto blocks = 3;</code>
       */
      public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto> getBlocksList() {
        if (blocksBuilder_ == null) {
          return java.util.Collections.unmodifiableList(blocks_);
        } else {
          return blocksBuilder_.getMessageList();
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.BlockProto blocks = 3;</code>
       */
      public int getBlocksCount() {
        if (blocksBuilder_ == null) {
          return blocks_.size();
        } else {
          return blocksBuilder_.getCount();
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.BlockProto blocks = 3;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto getBlocks(int index) {
        if (blocksBuilder_ == null) {
          return blocks_.get(index);
        } else {
          return blocksBuilder_.getMessage(index);
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.BlockProto blocks = 3;</code>
       */
      public Builder setBlocks(
          int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto value) {
        if (blocksBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          ensureBlocksIsMutable();
          blocks_.set(index, value);
          onChanged();
        } else {
          blocksBuilder_.setMessage(index, value);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.BlockProto blocks = 3;</code>
       */
      public Builder setBlocks(
          int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder builderForValue) {
        if (blocksBuilder_ == null) {
          ensureBlocksIsMutable();
          blocks_.set(index, builderForValue.build());
          onChanged();
        } else {
          blocksBuilder_.setMessage(index, builderForValue.build());
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.BlockProto blocks = 3;</code>
       */
      public Builder addBlocks(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto value) {
        if (blocksBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          ensureBlocksIsMutable();
          blocks_.add(value);
          onChanged();
        } else {
          blocksBuilder_.addMessage(value);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.BlockProto blocks = 3;</code>
       */
      public Builder addBlocks(
          int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto value) {
        if (blocksBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          ensureBlocksIsMutable();
          blocks_.add(index, value);
          onChanged();
        } else {
          blocksBuilder_.addMessage(index, value);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.BlockProto blocks = 3;</code>
       */
      public Builder addBlocks(
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder builderForValue) {
        if (blocksBuilder_ == null) {
          ensureBlocksIsMutable();
          blocks_.add(builderForValue.build());
          onChanged();
        } else {
          blocksBuilder_.addMessage(builderForValue.build());
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.BlockProto blocks = 3;</code>
       */
      public Builder addBlocks(
          int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder builderForValue) {
        if (blocksBuilder_ == null) {
          ensureBlocksIsMutable();
          blocks_.add(index, builderForValue.build());
          onChanged();
        } else {
          blocksBuilder_.addMessage(index, builderForValue.build());
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.BlockProto blocks = 3;</code>
       */
      public Builder addAllBlocks(
          java.lang.Iterable<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto> values) {
        if (blocksBuilder_ == null) {
          ensureBlocksIsMutable();
          org.apache.hadoop.thirdparty.protobuf.AbstractMessageLite.Builder.addAll(
              values, blocks_);
          onChanged();
        } else {
          blocksBuilder_.addAllMessages(values);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.BlockProto blocks = 3;</code>
       */
      public Builder clearBlocks() {
        if (blocksBuilder_ == null) {
          blocks_ = java.util.Collections.emptyList();
          bitField0_ = (bitField0_ & ~0x00000004);
          onChanged();
        } else {
          blocksBuilder_.clear();
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.BlockProto blocks = 3;</code>
       */
      public Builder removeBlocks(int index) {
        if (blocksBuilder_ == null) {
          ensureBlocksIsMutable();
          blocks_.remove(index);
          onChanged();
        } else {
          blocksBuilder_.remove(index);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.BlockProto blocks = 3;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder getBlocksBuilder(
          int index) {
        return getBlocksFieldBuilder().getBuilder(index);
      }
      /**
       * <code>repeated .hadoop.hdfs.BlockProto blocks = 3;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder getBlocksOrBuilder(
          int index) {
        if (blocksBuilder_ == null) {
          return blocks_.get(index);
        } else {
          return blocksBuilder_.getMessageOrBuilder(index);
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.BlockProto blocks = 3;</code>
       */
      public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder> 
           getBlocksOrBuilderList() {
        if (blocksBuilder_ != null) {
          return blocksBuilder_.getMessageOrBuilderList();
        } else {
          return java.util.Collections.unmodifiableList(blocks_);
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.BlockProto blocks = 3;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder addBlocksBuilder() {
        return getBlocksFieldBuilder().addBuilder(
            org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.getDefaultInstance());
      }
      /**
       * <code>repeated .hadoop.hdfs.BlockProto blocks = 3;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder addBlocksBuilder(
          int index) {
        return getBlocksFieldBuilder().addBuilder(
            index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.getDefaultInstance());
      }
      /**
       * <code>repeated .hadoop.hdfs.BlockProto blocks = 3;</code>
       */
      public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder> 
           getBlocksBuilderList() {
        return getBlocksFieldBuilder().getBuilderList();
      }
      private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder> 
          getBlocksFieldBuilder() {
        if (blocksBuilder_ == null) {
          blocksBuilder_ = new org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder>(
                  blocks_,
                  ((bitField0_ & 0x00000004) != 0),
                  getParentForChildren(),
                  isClean());
          blocks_ = null;
        }
        return blocksBuilder_;
      }
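
      // Hand-written note: blocks_ is used only until getBlocksFieldBuilder()
      // is first invoked; afterwards the RepeatedFieldBuilderV3 owns the
      // elements and blocks_ is nulled, which is why every accessor above
      // branches on blocksBuilder_ == null. Minimal sketch of the nested
      // builder path (field names assume the BlockProto layout in hdfs.proto):
      //
      //   BlockCommandProto.Builder b = BlockCommandProto.newBuilder();
      //   b.addBlocksBuilder().setBlockId(1L).setGenStamp(1000L).setNumBytes(0L);
      //   // the returned BlockProto.Builder stays attached to b, so later
      //   // edits are reflected when b.build() is eventually called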

      private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto> targets_ =
        java.util.Collections.emptyList();
      private void ensureTargetsIsMutable() {
        if (!((bitField0_ & 0x00000008) != 0)) {
          targets_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto>(targets_);
          bitField0_ |= 0x00000008;
        }
      }

      private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProtoOrBuilder> targetsBuilder_;

      /**
       * <code>repeated .hadoop.hdfs.DatanodeInfosProto targets = 4;</code>
       */
      public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto> getTargetsList() {
        if (targetsBuilder_ == null) {
          return java.util.Collections.unmodifiableList(targets_);
        } else {
          return targetsBuilder_.getMessageList();
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.DatanodeInfosProto targets = 4;</code>
       */
      public int getTargetsCount() {
        if (targetsBuilder_ == null) {
          return targets_.size();
        } else {
          return targetsBuilder_.getCount();
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.DatanodeInfosProto targets = 4;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto getTargets(int index) {
        if (targetsBuilder_ == null) {
          return targets_.get(index);
        } else {
          return targetsBuilder_.getMessage(index);
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.DatanodeInfosProto targets = 4;</code>
       */
      public Builder setTargets(
          int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto value) {
        if (targetsBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          ensureTargetsIsMutable();
          targets_.set(index, value);
          onChanged();
        } else {
          targetsBuilder_.setMessage(index, value);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.DatanodeInfosProto targets = 4;</code>
       */
      public Builder setTargets(
          int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto.Builder builderForValue) {
        if (targetsBuilder_ == null) {
          ensureTargetsIsMutable();
          targets_.set(index, builderForValue.build());
          onChanged();
        } else {
          targetsBuilder_.setMessage(index, builderForValue.build());
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.DatanodeInfosProto targets = 4;</code>
       */
      public Builder addTargets(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto value) {
        if (targetsBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          ensureTargetsIsMutable();
          targets_.add(value);
          onChanged();
        } else {
          targetsBuilder_.addMessage(value);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.DatanodeInfosProto targets = 4;</code>
       */
      public Builder addTargets(
          int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto value) {
        if (targetsBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          ensureTargetsIsMutable();
          targets_.add(index, value);
          onChanged();
        } else {
          targetsBuilder_.addMessage(index, value);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.DatanodeInfosProto targets = 4;</code>
       */
      public Builder addTargets(
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto.Builder builderForValue) {
        if (targetsBuilder_ == null) {
          ensureTargetsIsMutable();
          targets_.add(builderForValue.build());
          onChanged();
        } else {
          targetsBuilder_.addMessage(builderForValue.build());
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.DatanodeInfosProto targets = 4;</code>
       */
      public Builder addTargets(
          int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto.Builder builderForValue) {
        if (targetsBuilder_ == null) {
          ensureTargetsIsMutable();
          targets_.add(index, builderForValue.build());
          onChanged();
        } else {
          targetsBuilder_.addMessage(index, builderForValue.build());
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.DatanodeInfosProto targets = 4;</code>
       */
      public Builder addAllTargets(
          java.lang.Iterable<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto> values) {
        if (targetsBuilder_ == null) {
          ensureTargetsIsMutable();
          org.apache.hadoop.thirdparty.protobuf.AbstractMessageLite.Builder.addAll(
              values, targets_);
          onChanged();
        } else {
          targetsBuilder_.addAllMessages(values);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.DatanodeInfosProto targets = 4;</code>
       */
      public Builder clearTargets() {
        if (targetsBuilder_ == null) {
          targets_ = java.util.Collections.emptyList();
          bitField0_ = (bitField0_ & ~0x00000008);
          onChanged();
        } else {
          targetsBuilder_.clear();
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.DatanodeInfosProto targets = 4;</code>
       */
      public Builder removeTargets(int index) {
        if (targetsBuilder_ == null) {
          ensureTargetsIsMutable();
          targets_.remove(index);
          onChanged();
        } else {
          targetsBuilder_.remove(index);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.DatanodeInfosProto targets = 4;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto.Builder getTargetsBuilder(
          int index) {
        return getTargetsFieldBuilder().getBuilder(index);
      }
      /**
       * <code>repeated .hadoop.hdfs.DatanodeInfosProto targets = 4;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProtoOrBuilder getTargetsOrBuilder(
          int index) {
        if (targetsBuilder_ == null) {
          return targets_.get(index);
        } else {
          return targetsBuilder_.getMessageOrBuilder(index);
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.DatanodeInfosProto targets = 4;</code>
       */
      public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProtoOrBuilder> 
           getTargetsOrBuilderList() {
        if (targetsBuilder_ != null) {
          return targetsBuilder_.getMessageOrBuilderList();
        } else {
          return java.util.Collections.unmodifiableList(targets_);
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.DatanodeInfosProto targets = 4;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto.Builder addTargetsBuilder() {
        return getTargetsFieldBuilder().addBuilder(
            org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto.getDefaultInstance());
      }
      /**
       * <code>repeated .hadoop.hdfs.DatanodeInfosProto targets = 4;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto.Builder addTargetsBuilder(
          int index) {
        return getTargetsFieldBuilder().addBuilder(
            index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto.getDefaultInstance());
      }
      /**
       * <code>repeated .hadoop.hdfs.DatanodeInfosProto targets = 4;</code>
       */
      public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto.Builder> 
           getTargetsBuilderList() {
        return getTargetsFieldBuilder().getBuilderList();
      }
      private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProtoOrBuilder> 
          getTargetsFieldBuilder() {
        if (targetsBuilder_ == null) {
          targetsBuilder_ = new org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProtoOrBuilder>(
                  targets_,
                  ((bitField0_ & 0x00000008) != 0),
                  getParentForChildren(),
                  isClean());
          targets_ = null;
        }
        return targetsBuilder_;
      }

      private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto> targetStorageUuids_ =
        java.util.Collections.emptyList();
      private void ensureTargetStorageUuidsIsMutable() {
        if (!((bitField0_ & 0x00000010) != 0)) {
          targetStorageUuids_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto>(targetStorageUuids_);
          bitField0_ |= 0x00000010;
        }
      }

      private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProtoOrBuilder> targetStorageUuidsBuilder_;

      /**
       * <code>repeated .hadoop.hdfs.StorageUuidsProto targetStorageUuids = 5;</code>
       */
      public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto> getTargetStorageUuidsList() {
        if (targetStorageUuidsBuilder_ == null) {
          return java.util.Collections.unmodifiableList(targetStorageUuids_);
        } else {
          return targetStorageUuidsBuilder_.getMessageList();
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.StorageUuidsProto targetStorageUuids = 5;</code>
       */
      public int getTargetStorageUuidsCount() {
        if (targetStorageUuidsBuilder_ == null) {
          return targetStorageUuids_.size();
        } else {
          return targetStorageUuidsBuilder_.getCount();
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.StorageUuidsProto targetStorageUuids = 5;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto getTargetStorageUuids(int index) {
        if (targetStorageUuidsBuilder_ == null) {
          return targetStorageUuids_.get(index);
        } else {
          return targetStorageUuidsBuilder_.getMessage(index);
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.StorageUuidsProto targetStorageUuids = 5;</code>
       */
      public Builder setTargetStorageUuids(
          int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto value) {
        if (targetStorageUuidsBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          ensureTargetStorageUuidsIsMutable();
          targetStorageUuids_.set(index, value);
          onChanged();
        } else {
          targetStorageUuidsBuilder_.setMessage(index, value);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.StorageUuidsProto targetStorageUuids = 5;</code>
       */
      public Builder setTargetStorageUuids(
          int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto.Builder builderForValue) {
        if (targetStorageUuidsBuilder_ == null) {
          ensureTargetStorageUuidsIsMutable();
          targetStorageUuids_.set(index, builderForValue.build());
          onChanged();
        } else {
          targetStorageUuidsBuilder_.setMessage(index, builderForValue.build());
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.StorageUuidsProto targetStorageUuids = 5;</code>
       */
      public Builder addTargetStorageUuids(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto value) {
        if (targetStorageUuidsBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          ensureTargetStorageUuidsIsMutable();
          targetStorageUuids_.add(value);
          onChanged();
        } else {
          targetStorageUuidsBuilder_.addMessage(value);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.StorageUuidsProto targetStorageUuids = 5;</code>
       */
      public Builder addTargetStorageUuids(
          int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto value) {
        if (targetStorageUuidsBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          ensureTargetStorageUuidsIsMutable();
          targetStorageUuids_.add(index, value);
          onChanged();
        } else {
          targetStorageUuidsBuilder_.addMessage(index, value);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.StorageUuidsProto targetStorageUuids = 5;</code>
       */
      public Builder addTargetStorageUuids(
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto.Builder builderForValue) {
        if (targetStorageUuidsBuilder_ == null) {
          ensureTargetStorageUuidsIsMutable();
          targetStorageUuids_.add(builderForValue.build());
          onChanged();
        } else {
          targetStorageUuidsBuilder_.addMessage(builderForValue.build());
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.StorageUuidsProto targetStorageUuids = 5;</code>
       */
      public Builder addTargetStorageUuids(
          int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto.Builder builderForValue) {
        if (targetStorageUuidsBuilder_ == null) {
          ensureTargetStorageUuidsIsMutable();
          targetStorageUuids_.add(index, builderForValue.build());
          onChanged();
        } else {
          targetStorageUuidsBuilder_.addMessage(index, builderForValue.build());
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.StorageUuidsProto targetStorageUuids = 5;</code>
       */
      public Builder addAllTargetStorageUuids(
          java.lang.Iterable<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto> values) {
        if (targetStorageUuidsBuilder_ == null) {
          ensureTargetStorageUuidsIsMutable();
          org.apache.hadoop.thirdparty.protobuf.AbstractMessageLite.Builder.addAll(
              values, targetStorageUuids_);
          onChanged();
        } else {
          targetStorageUuidsBuilder_.addAllMessages(values);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.StorageUuidsProto targetStorageUuids = 5;</code>
       */
      public Builder clearTargetStorageUuids() {
        if (targetStorageUuidsBuilder_ == null) {
          targetStorageUuids_ = java.util.Collections.emptyList();
          bitField0_ = (bitField0_ & ~0x00000010);
          onChanged();
        } else {
          targetStorageUuidsBuilder_.clear();
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.StorageUuidsProto targetStorageUuids = 5;</code>
       */
      public Builder removeTargetStorageUuids(int index) {
        if (targetStorageUuidsBuilder_ == null) {
          ensureTargetStorageUuidsIsMutable();
          targetStorageUuids_.remove(index);
          onChanged();
        } else {
          targetStorageUuidsBuilder_.remove(index);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.StorageUuidsProto targetStorageUuids = 5;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto.Builder getTargetStorageUuidsBuilder(
          int index) {
        return getTargetStorageUuidsFieldBuilder().getBuilder(index);
      }
      /**
       * <code>repeated .hadoop.hdfs.StorageUuidsProto targetStorageUuids = 5;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProtoOrBuilder getTargetStorageUuidsOrBuilder(
          int index) {
        if (targetStorageUuidsBuilder_ == null) {
          return targetStorageUuids_.get(index);
        } else {
          return targetStorageUuidsBuilder_.getMessageOrBuilder(index);
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.StorageUuidsProto targetStorageUuids = 5;</code>
       */
      public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProtoOrBuilder> 
           getTargetStorageUuidsOrBuilderList() {
        if (targetStorageUuidsBuilder_ != null) {
          return targetStorageUuidsBuilder_.getMessageOrBuilderList();
        } else {
          return java.util.Collections.unmodifiableList(targetStorageUuids_);
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.StorageUuidsProto targetStorageUuids = 5;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto.Builder addTargetStorageUuidsBuilder() {
        return getTargetStorageUuidsFieldBuilder().addBuilder(
            org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto.getDefaultInstance());
      }
      /**
       * <code>repeated .hadoop.hdfs.StorageUuidsProto targetStorageUuids = 5;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto.Builder addTargetStorageUuidsBuilder(
          int index) {
        return getTargetStorageUuidsFieldBuilder().addBuilder(
            index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto.getDefaultInstance());
      }
      /**
       * <code>repeated .hadoop.hdfs.StorageUuidsProto targetStorageUuids = 5;</code>
       */
      public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto.Builder> 
           getTargetStorageUuidsBuilderList() {
        return getTargetStorageUuidsFieldBuilder().getBuilderList();
      }
      private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProtoOrBuilder> 
          getTargetStorageUuidsFieldBuilder() {
        if (targetStorageUuidsBuilder_ == null) {
          targetStorageUuidsBuilder_ = new org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProtoOrBuilder>(
                  targetStorageUuids_,
                  ((bitField0_ & 0x00000010) != 0),
                  getParentForChildren(),
                  isClean());
          targetStorageUuids_ = null;
        }
        return targetStorageUuidsBuilder_;
      }

      private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto> targetStorageTypes_ =
        java.util.Collections.emptyList();
      private void ensureTargetStorageTypesIsMutable() {
        if (!((bitField0_ & 0x00000020) != 0)) {
          targetStorageTypes_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto>(targetStorageTypes_);
          bitField0_ |= 0x00000020;
        }
      }

      private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProtoOrBuilder> targetStorageTypesBuilder_;

      /**
       * <code>repeated .hadoop.hdfs.StorageTypesProto targetStorageTypes = 6;</code>
       */
      public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto> getTargetStorageTypesList() {
        if (targetStorageTypesBuilder_ == null) {
          return java.util.Collections.unmodifiableList(targetStorageTypes_);
        } else {
          return targetStorageTypesBuilder_.getMessageList();
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.StorageTypesProto targetStorageTypes = 6;</code>
       */
      public int getTargetStorageTypesCount() {
        if (targetStorageTypesBuilder_ == null) {
          return targetStorageTypes_.size();
        } else {
          return targetStorageTypesBuilder_.getCount();
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.StorageTypesProto targetStorageTypes = 6;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto getTargetStorageTypes(int index) {
        if (targetStorageTypesBuilder_ == null) {
          return targetStorageTypes_.get(index);
        } else {
          return targetStorageTypesBuilder_.getMessage(index);
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.StorageTypesProto targetStorageTypes = 6;</code>
       */
      public Builder setTargetStorageTypes(
          int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto value) {
        if (targetStorageTypesBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          ensureTargetStorageTypesIsMutable();
          targetStorageTypes_.set(index, value);
          onChanged();
        } else {
          targetStorageTypesBuilder_.setMessage(index, value);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.StorageTypesProto targetStorageTypes = 6;</code>
       */
      public Builder setTargetStorageTypes(
          int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.Builder builderForValue) {
        if (targetStorageTypesBuilder_ == null) {
          ensureTargetStorageTypesIsMutable();
          targetStorageTypes_.set(index, builderForValue.build());
          onChanged();
        } else {
          targetStorageTypesBuilder_.setMessage(index, builderForValue.build());
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.StorageTypesProto targetStorageTypes = 6;</code>
       */
      public Builder addTargetStorageTypes(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto value) {
        if (targetStorageTypesBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          ensureTargetStorageTypesIsMutable();
          targetStorageTypes_.add(value);
          onChanged();
        } else {
          targetStorageTypesBuilder_.addMessage(value);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.StorageTypesProto targetStorageTypes = 6;</code>
       */
      public Builder addTargetStorageTypes(
          int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto value) {
        if (targetStorageTypesBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          ensureTargetStorageTypesIsMutable();
          targetStorageTypes_.add(index, value);
          onChanged();
        } else {
          targetStorageTypesBuilder_.addMessage(index, value);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.StorageTypesProto targetStorageTypes = 6;</code>
       */
      public Builder addTargetStorageTypes(
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.Builder builderForValue) {
        if (targetStorageTypesBuilder_ == null) {
          ensureTargetStorageTypesIsMutable();
          targetStorageTypes_.add(builderForValue.build());
          onChanged();
        } else {
          targetStorageTypesBuilder_.addMessage(builderForValue.build());
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.StorageTypesProto targetStorageTypes = 6;</code>
       */
      public Builder addTargetStorageTypes(
          int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.Builder builderForValue) {
        if (targetStorageTypesBuilder_ == null) {
          ensureTargetStorageTypesIsMutable();
          targetStorageTypes_.add(index, builderForValue.build());
          onChanged();
        } else {
          targetStorageTypesBuilder_.addMessage(index, builderForValue.build());
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.StorageTypesProto targetStorageTypes = 6;</code>
       */
      public Builder addAllTargetStorageTypes(
          java.lang.Iterable<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto> values) {
        if (targetStorageTypesBuilder_ == null) {
          ensureTargetStorageTypesIsMutable();
          org.apache.hadoop.thirdparty.protobuf.AbstractMessageLite.Builder.addAll(
              values, targetStorageTypes_);
          onChanged();
        } else {
          targetStorageTypesBuilder_.addAllMessages(values);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.StorageTypesProto targetStorageTypes = 6;</code>
       */
      public Builder clearTargetStorageTypes() {
        if (targetStorageTypesBuilder_ == null) {
          targetStorageTypes_ = java.util.Collections.emptyList();
          bitField0_ = (bitField0_ & ~0x00000020);
          onChanged();
        } else {
          targetStorageTypesBuilder_.clear();
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.StorageTypesProto targetStorageTypes = 6;</code>
       */
      public Builder removeTargetStorageTypes(int index) {
        if (targetStorageTypesBuilder_ == null) {
          ensureTargetStorageTypesIsMutable();
          targetStorageTypes_.remove(index);
          onChanged();
        } else {
          targetStorageTypesBuilder_.remove(index);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.StorageTypesProto targetStorageTypes = 6;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.Builder getTargetStorageTypesBuilder(
          int index) {
        return getTargetStorageTypesFieldBuilder().getBuilder(index);
      }
      /**
       * <code>repeated .hadoop.hdfs.StorageTypesProto targetStorageTypes = 6;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProtoOrBuilder getTargetStorageTypesOrBuilder(
          int index) {
        if (targetStorageTypesBuilder_ == null) {
          return targetStorageTypes_.get(index);
        } else {
          return targetStorageTypesBuilder_.getMessageOrBuilder(index);
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.StorageTypesProto targetStorageTypes = 6;</code>
       */
      public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProtoOrBuilder> 
           getTargetStorageTypesOrBuilderList() {
        if (targetStorageTypesBuilder_ != null) {
          return targetStorageTypesBuilder_.getMessageOrBuilderList();
        } else {
          return java.util.Collections.unmodifiableList(targetStorageTypes_);
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.StorageTypesProto targetStorageTypes = 6;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.Builder addTargetStorageTypesBuilder() {
        return getTargetStorageTypesFieldBuilder().addBuilder(
            org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.getDefaultInstance());
      }
      /**
       * <code>repeated .hadoop.hdfs.StorageTypesProto targetStorageTypes = 6;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.Builder addTargetStorageTypesBuilder(
          int index) {
        return getTargetStorageTypesFieldBuilder().addBuilder(
            index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.getDefaultInstance());
      }
      /**
       * <code>repeated .hadoop.hdfs.StorageTypesProto targetStorageTypes = 6;</code>
       */
      public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.Builder> 
           getTargetStorageTypesBuilderList() {
        return getTargetStorageTypesFieldBuilder().getBuilderList();
      }
      private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProtoOrBuilder> 
          getTargetStorageTypesFieldBuilder() {
        if (targetStorageTypesBuilder_ == null) {
          targetStorageTypesBuilder_ = new org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProtoOrBuilder>(
                  targetStorageTypes_,
                  ((bitField0_ & 0x00000020) != 0),
                  getParentForChildren(),
                  isClean());
          targetStorageTypes_ = null;
        }
        return targetStorageTypesBuilder_;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.datanode.BlockCommandProto)
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.datanode.BlockCommandProto)
    private static final org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto();
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<BlockCommandProto>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<BlockCommandProto>() {
      @java.lang.Override
      public BlockCommandProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<BlockCommandProto> parser() {
      return PARSER;
    }
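
    // Note (hand-written): the public PARSER field above is retained only for
    // backward compatibility and is @Deprecated; parser() and
    // getParserForType() are the supported accessors, e.g. for length-prefixed
    // streams:
    //
    //   BlockCommandProto cmd =
    //       BlockCommandProto.parser().parseDelimitedFrom(inputStream);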

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<BlockCommandProto> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }
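
  // Hand-written round-trip sketch for BlockCommandProto; the action and
  // blockPoolId values below are placeholders, not taken from a real cluster:
  //
  //   BlockCommandProto cmd = BlockCommandProto.newBuilder()
  //       .setAction(BlockCommandProto.Action.TRANSFER)
  //       .setBlockPoolId("BP-example-1")
  //       .build();
  //   byte[] wire = cmd.toByteArray();
  //   BlockCommandProto parsed = BlockCommandProto.parseFrom(wire);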

  public interface BlockIdCommandProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.datanode.BlockIdCommandProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

    /**
     * <code>required .hadoop.hdfs.datanode.BlockIdCommandProto.Action action = 1;</code>
     * @return Whether the action field is set.
     */
    boolean hasAction();
    /**
     * <code>required .hadoop.hdfs.datanode.BlockIdCommandProto.Action action = 1;</code>
     * @return The action.
     */
    org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto.Action getAction();

    /**
     * <code>required string blockPoolId = 2;</code>
     * @return Whether the blockPoolId field is set.
     */
    boolean hasBlockPoolId();
    /**
     * <code>required string blockPoolId = 2;</code>
     * @return The blockPoolId.
     */
    java.lang.String getBlockPoolId();
    /**
     * <code>required string blockPoolId = 2;</code>
     * @return The bytes for blockPoolId.
     */
    org.apache.hadoop.thirdparty.protobuf.ByteString
        getBlockPoolIdBytes();

    /**
     * <code>repeated uint64 blockIds = 3 [packed = true];</code>
     * @return A list containing the blockIds.
     */
    java.util.List<java.lang.Long> getBlockIdsList();
    /**
     * <code>repeated uint64 blockIds = 3 [packed = true];</code>
     * @return The count of blockIds.
     */
    int getBlockIdsCount();
    /**
     * <code>repeated uint64 blockIds = 3 [packed = true];</code>
     * @param index The index of the element to return.
     * @return The blockIds at the given index.
     */
    long getBlockIds(int index);
  }
  /**
   * <pre>
   **
   * Command to instruct datanodes to perform a certain action
   * on the given set of block IDs.
   * </pre>
   *
   * Protobuf type {@code hadoop.hdfs.datanode.BlockIdCommandProto}
   */
  public static final class BlockIdCommandProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.hdfs.datanode.BlockIdCommandProto)
      BlockIdCommandProtoOrBuilder {
    private static final long serialVersionUID = 0L;
    // Use BlockIdCommandProto.newBuilder() to construct.
    private BlockIdCommandProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private BlockIdCommandProto() {
      action_ = 1;
      blockPoolId_ = "";
      blockIds_ = emptyLongList();
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new BlockIdCommandProto();
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_BlockIdCommandProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_BlockIdCommandProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto.class, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto.Builder.class);
    }

    /**
     * Protobuf enum {@code hadoop.hdfs.datanode.BlockIdCommandProto.Action}
     */
    public enum Action
        implements org.apache.hadoop.thirdparty.protobuf.ProtocolMessageEnum {
      /**
       * <code>CACHE = 1;</code>
       */
      CACHE(1),
      /**
       * <code>UNCACHE = 2;</code>
       */
      UNCACHE(2),
      ;

      /**
       * <code>CACHE = 1;</code>
       */
      public static final int CACHE_VALUE = 1;
      /**
       * <code>UNCACHE = 2;</code>
       */
      public static final int UNCACHE_VALUE = 2;


      public final int getNumber() {
        return value;
      }

      /**
       * @param value The numeric wire value of the corresponding enum entry.
       * @return The enum associated with the given numeric wire value.
       * @deprecated Use {@link #forNumber(int)} instead.
       */
      @java.lang.Deprecated
      public static Action valueOf(int value) {
        return forNumber(value);
      }

      /**
       * @param value The numeric wire value of the corresponding enum entry.
       * @return The enum associated with the given numeric wire value.
       */
      public static Action forNumber(int value) {
        switch (value) {
          case 1: return CACHE;
          case 2: return UNCACHE;
          default: return null;
        }
      }
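      // Illustrative note: forNumber returns null for an unrecognized wire value,
      // so callers usually fall back to a default, e.g.
      //   Action a = Action.forNumber(raw);
      //   if (a == null) { a = Action.CACHE; }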

      public static org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap<Action>
          internalGetValueMap() {
        return internalValueMap;
      }
      private static final org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap<
          Action> internalValueMap =
            new org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap<Action>() {
              public Action findValueByNumber(int number) {
                return Action.forNumber(number);
              }
            };

      public final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumValueDescriptor
          getValueDescriptor() {
        return getDescriptor().getValues().get(ordinal());
      }
      public final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumDescriptor
          getDescriptorForType() {
        return getDescriptor();
      }
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumDescriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto.getDescriptor().getEnumTypes().get(0);
      }

      private static final Action[] VALUES = values();

      public static Action valueOf(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumValueDescriptor desc) {
        if (desc.getType() != getDescriptor()) {
          throw new java.lang.IllegalArgumentException(
            "EnumValueDescriptor is not for this type.");
        }
        return VALUES[desc.getIndex()];
      }

      private final int value;

      private Action(int value) {
        this.value = value;
      }

      // @@protoc_insertion_point(enum_scope:hadoop.hdfs.datanode.BlockIdCommandProto.Action)
    }

    private int bitField0_;
    public static final int ACTION_FIELD_NUMBER = 1;
    private int action_ = 1;
    /**
     * <code>required .hadoop.hdfs.datanode.BlockIdCommandProto.Action action = 1;</code>
     * @return Whether the action field is set.
     */
    @java.lang.Override public boolean hasAction() {
      return ((bitField0_ & 0x00000001) != 0);
    }
    /**
     * <code>required .hadoop.hdfs.datanode.BlockIdCommandProto.Action action = 1;</code>
     * @return The action.
     */
    @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto.Action getAction() {
      org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto.Action result = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto.Action.forNumber(action_);
      return result == null ? org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto.Action.CACHE : result;
    }

    public static final int BLOCKPOOLID_FIELD_NUMBER = 2;
    @SuppressWarnings("serial")
    private volatile java.lang.Object blockPoolId_ = "";
    /**
     * <code>required string blockPoolId = 2;</code>
     * @return Whether the blockPoolId field is set.
     */
    @java.lang.Override
    public boolean hasBlockPoolId() {
      return ((bitField0_ & 0x00000002) != 0);
    }
    /**
     * <code>required string blockPoolId = 2;</code>
     * @return The blockPoolId.
     */
    @java.lang.Override
    public java.lang.String getBlockPoolId() {
      java.lang.Object ref = blockPoolId_;
      if (ref instanceof java.lang.String) {
        return (java.lang.String) ref;
      } else {
        org.apache.hadoop.thirdparty.protobuf.ByteString bs = 
            (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        java.lang.String s = bs.toStringUtf8();
        if (bs.isValidUtf8()) {
          blockPoolId_ = s;
        }
        return s;
      }
    }
    /**
     * <code>required string blockPoolId = 2;</code>
     * @return The bytes for blockPoolId.
     */
    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.ByteString
        getBlockPoolIdBytes() {
      java.lang.Object ref = blockPoolId_;
      if (ref instanceof java.lang.String) {
        org.apache.hadoop.thirdparty.protobuf.ByteString b = 
            org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                (java.lang.String) ref);
        blockPoolId_ = b;
        return b;
      } else {
        return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
      }
    }

    public static final int BLOCKIDS_FIELD_NUMBER = 3;
    @SuppressWarnings("serial")
    private org.apache.hadoop.thirdparty.protobuf.Internal.LongList blockIds_ =
        emptyLongList();
    /**
     * <code>repeated uint64 blockIds = 3 [packed = true];</code>
     * @return A list containing the blockIds.
     */
    @java.lang.Override
    public java.util.List<java.lang.Long>
        getBlockIdsList() {
      return blockIds_;
    }
    /**
     * <code>repeated uint64 blockIds = 3 [packed = true];</code>
     * @return The count of blockIds.
     */
    public int getBlockIdsCount() {
      return blockIds_.size();
    }
    /**
     * <code>repeated uint64 blockIds = 3 [packed = true];</code>
     * @param index The index of the element to return.
     * @return The blockIds at the given index.
     */
    public long getBlockIds(int index) {
      return blockIds_.getLong(index);
    }
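    // Byte size of the packed blockIds payload; memoized by getSerializedSize()
    // and reused by writeTo() when it emits the single length-delimited field-3
    // record (tag byte 26) followed by the varint-encoded block ids.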
    private int blockIdsMemoizedSerializedSize = -1;

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      if (!hasAction()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasBlockPoolId()) {
        memoizedIsInitialized = 0;
        return false;
      }
      memoizedIsInitialized = 1;
      return true;
    }

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      getSerializedSize();
      if (((bitField0_ & 0x00000001) != 0)) {
        output.writeEnum(1, action_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 2, blockPoolId_);
      }
      if (getBlockIdsList().size() > 0) {
        output.writeUInt32NoTag(26);
        output.writeUInt32NoTag(blockIdsMemoizedSerializedSize);
      }
      for (int i = 0; i < blockIds_.size(); i++) {
        output.writeUInt64NoTag(blockIds_.getLong(i));
      }
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeEnumSize(1, action_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(2, blockPoolId_);
      }
      {
        int dataSize = 0;
        for (int i = 0; i < blockIds_.size(); i++) {
          dataSize += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
            .computeUInt64SizeNoTag(blockIds_.getLong(i));
        }
        size += dataSize;
        if (!getBlockIdsList().isEmpty()) {
          size += 1;
          size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
              .computeInt32SizeNoTag(dataSize);
        }
        blockIdsMemoizedSerializedSize = dataSize;
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
       return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto other = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto) obj;

      if (hasAction() != other.hasAction()) return false;
      if (hasAction()) {
        if (action_ != other.action_) return false;
      }
      if (hasBlockPoolId() != other.hasBlockPoolId()) return false;
      if (hasBlockPoolId()) {
        if (!getBlockPoolId()
            .equals(other.getBlockPoolId())) return false;
      }
      if (!getBlockIdsList()
          .equals(other.getBlockIdsList())) return false;
      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      if (hasAction()) {
        hash = (37 * hash) + ACTION_FIELD_NUMBER;
        hash = (53 * hash) + action_;
      }
      if (hasBlockPoolId()) {
        hash = (37 * hash) + BLOCKPOOLID_FIELD_NUMBER;
        hash = (53 * hash) + getBlockPoolId().hashCode();
      }
      if (getBlockIdsCount() > 0) {
        hash = (37 * hash) + BLOCKIDS_FIELD_NUMBER;
        hash = (53 * hash) + getBlockIdsList().hashCode();
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * <pre>
     **
     * Command to instruct datanodes to perform a certain action
     * on the given set of block IDs.
     * </pre>
     *
     * Protobuf type {@code hadoop.hdfs.datanode.BlockIdCommandProto}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.hdfs.datanode.BlockIdCommandProto)
        org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProtoOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_BlockIdCommandProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_BlockIdCommandProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto.class, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto.newBuilder()
      private Builder() {

      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);

      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        bitField0_ = 0;
        action_ = 1;
        blockPoolId_ = "";
        blockIds_ = emptyLongList();
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_BlockIdCommandProto_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto build() {
        org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto buildPartial() {
        org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto result = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto(this);
        if (bitField0_ != 0) { buildPartial0(result); }
        onBuilt();
        return result;
      }

      private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto result) {
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) != 0)) {
          result.action_ = action_;
          to_bitField0_ |= 0x00000001;
        }
        if (((from_bitField0_ & 0x00000002) != 0)) {
          result.blockPoolId_ = blockPoolId_;
          to_bitField0_ |= 0x00000002;
        }
        if (((from_bitField0_ & 0x00000004) != 0)) {
          blockIds_.makeImmutable();
          result.blockIds_ = blockIds_;
        }
        result.bitField0_ |= to_bitField0_;
      }

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto) {
          return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto other) {
        if (other == org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto.getDefaultInstance()) return this;
        if (other.hasAction()) {
          setAction(other.getAction());
        }
        if (other.hasBlockPoolId()) {
          blockPoolId_ = other.blockPoolId_;
          bitField0_ |= 0x00000002;
          onChanged();
        }
        if (!other.blockIds_.isEmpty()) {
          if (blockIds_.isEmpty()) {
            blockIds_ = other.blockIds_;
            blockIds_.makeImmutable();
            bitField0_ |= 0x00000004;
          } else {
            ensureBlockIdsIsMutable();
            blockIds_.addAll(other.blockIds_);
          }
          onChanged();
        }
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        if (!hasAction()) {
          return false;
        }
        if (!hasBlockPoolId()) {
          return false;
        }
        return true;
      }

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              case 8: {
                int tmpRaw = input.readEnum();
                org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto.Action tmpValue =
                    org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto.Action.forNumber(tmpRaw);
                if (tmpValue == null) {
                  mergeUnknownVarintField(1, tmpRaw);
                } else {
                  action_ = tmpRaw;
                  bitField0_ |= 0x00000001;
                }
                break;
              } // case 8
              case 18: {
                blockPoolId_ = input.readBytes();
                bitField0_ |= 0x00000002;
                break;
              } // case 18
              case 24: {
                long v = input.readUInt64();
                ensureBlockIdsIsMutable();
                blockIds_.addLong(v);
                break;
              } // case 24
              case 26: {
                int length = input.readRawVarint32();
                int limit = input.pushLimit(length);
                ensureBlockIdsIsMutable();
                while (input.getBytesUntilLimit() > 0) {
                  blockIds_.addLong(input.readUInt64());
                }
                input.popLimit(limit);
                break;
              } // case 26
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
      private int bitField0_;

      private int action_ = 1;
      /**
       * <code>required .hadoop.hdfs.datanode.BlockIdCommandProto.Action action = 1;</code>
       * @return Whether the action field is set.
       */
      @java.lang.Override public boolean hasAction() {
        return ((bitField0_ & 0x00000001) != 0);
      }
      /**
       * <code>required .hadoop.hdfs.datanode.BlockIdCommandProto.Action action = 1;</code>
       * @return The action.
       */
      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto.Action getAction() {
        org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto.Action result = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto.Action.forNumber(action_);
        return result == null ? org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto.Action.CACHE : result;
      }
      /**
       * <code>required .hadoop.hdfs.datanode.BlockIdCommandProto.Action action = 1;</code>
       * @param value The action to set.
       * @return This builder for chaining.
       */
      public Builder setAction(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto.Action value) {
        if (value == null) {
          throw new NullPointerException();
        }
        bitField0_ |= 0x00000001;
        action_ = value.getNumber();
        onChanged();
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.datanode.BlockIdCommandProto.Action action = 1;</code>
       * @return This builder for chaining.
       */
      public Builder clearAction() {
        bitField0_ = (bitField0_ & ~0x00000001);
        action_ = 1;
        onChanged();
        return this;
      }

      private java.lang.Object blockPoolId_ = "";
      /**
       * <code>required string blockPoolId = 2;</code>
       * @return Whether the blockPoolId field is set.
       */
      public boolean hasBlockPoolId() {
        return ((bitField0_ & 0x00000002) != 0);
      }
      /**
       * <code>required string blockPoolId = 2;</code>
       * @return The blockPoolId.
       */
      public java.lang.String getBlockPoolId() {
        java.lang.Object ref = blockPoolId_;
        if (!(ref instanceof java.lang.String)) {
          org.apache.hadoop.thirdparty.protobuf.ByteString bs =
              (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
          java.lang.String s = bs.toStringUtf8();
          if (bs.isValidUtf8()) {
            blockPoolId_ = s;
          }
          return s;
        } else {
          return (java.lang.String) ref;
        }
      }
      /**
       * <code>required string blockPoolId = 2;</code>
       * @return The bytes for blockPoolId.
       */
      public org.apache.hadoop.thirdparty.protobuf.ByteString
          getBlockPoolIdBytes() {
        java.lang.Object ref = blockPoolId_;
        if (ref instanceof String) {
          org.apache.hadoop.thirdparty.protobuf.ByteString b = 
              org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                  (java.lang.String) ref);
          blockPoolId_ = b;
          return b;
        } else {
          return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        }
      }
      /**
       * <code>required string blockPoolId = 2;</code>
       * @param value The blockPoolId to set.
       * @return This builder for chaining.
       */
      public Builder setBlockPoolId(
          java.lang.String value) {
        if (value == null) { throw new NullPointerException(); }
        blockPoolId_ = value;
        bitField0_ |= 0x00000002;
        onChanged();
        return this;
      }
      /**
       * <code>required string blockPoolId = 2;</code>
       * @return This builder for chaining.
       */
      public Builder clearBlockPoolId() {
        blockPoolId_ = getDefaultInstance().getBlockPoolId();
        bitField0_ = (bitField0_ & ~0x00000002);
        onChanged();
        return this;
      }
      /**
       * <code>required string blockPoolId = 2;</code>
       * @param value The bytes for blockPoolId to set.
       * @return This builder for chaining.
       */
      public Builder setBlockPoolIdBytes(
          org.apache.hadoop.thirdparty.protobuf.ByteString value) {
        if (value == null) { throw new NullPointerException(); }
        blockPoolId_ = value;
        bitField0_ |= 0x00000002;
        onChanged();
        return this;
      }

      private org.apache.hadoop.thirdparty.protobuf.Internal.LongList blockIds_ = emptyLongList();
      private void ensureBlockIdsIsMutable() {
        if (!blockIds_.isModifiable()) {
          blockIds_ = makeMutableCopy(blockIds_);
        }
        bitField0_ |= 0x00000004;
      }
      /**
       * <code>repeated uint64 blockIds = 3 [packed = true];</code>
       * @return A list containing the blockIds.
       */
      public java.util.List<java.lang.Long>
          getBlockIdsList() {
        blockIds_.makeImmutable();
        return blockIds_;
      }
      /**
       * <code>repeated uint64 blockIds = 3 [packed = true];</code>
       * @return The count of blockIds.
       */
      public int getBlockIdsCount() {
        return blockIds_.size();
      }
      /**
       * <code>repeated uint64 blockIds = 3 [packed = true];</code>
       * @param index The index of the element to return.
       * @return The blockIds at the given index.
       */
      public long getBlockIds(int index) {
        return blockIds_.getLong(index);
      }
      /**
       * <code>repeated uint64 blockIds = 3 [packed = true];</code>
       * @param index The index to set the value at.
       * @param value The blockIds to set.
       * @return This builder for chaining.
       */
      public Builder setBlockIds(
          int index, long value) {

        ensureBlockIdsIsMutable();
        blockIds_.setLong(index, value);
        bitField0_ |= 0x00000004;
        onChanged();
        return this;
      }
      /**
       * <code>repeated uint64 blockIds = 3 [packed = true];</code>
       * @param value The blockIds to add.
       * @return This builder for chaining.
       */
      public Builder addBlockIds(long value) {

        ensureBlockIdsIsMutable();
        blockIds_.addLong(value);
        bitField0_ |= 0x00000004;
        onChanged();
        return this;
      }
      /**
       * <code>repeated uint64 blockIds = 3 [packed = true];</code>
       * @param values The blockIds to add.
       * @return This builder for chaining.
       */
      public Builder addAllBlockIds(
          java.lang.Iterable<? extends java.lang.Long> values) {
        ensureBlockIdsIsMutable();
        org.apache.hadoop.thirdparty.protobuf.AbstractMessageLite.Builder.addAll(
            values, blockIds_);
        bitField0_ |= 0x00000004;
        onChanged();
        return this;
      }
      /**
       * <code>repeated uint64 blockIds = 3 [packed = true];</code>
       * @return This builder for chaining.
       */
      public Builder clearBlockIds() {
        blockIds_ = emptyLongList();
        bitField0_ = (bitField0_ & ~0x00000004);
        onChanged();
        return this;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.datanode.BlockIdCommandProto)
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.datanode.BlockIdCommandProto)
    private static final org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto();
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<BlockIdCommandProto>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<BlockIdCommandProto>() {
      @java.lang.Override
      public BlockIdCommandProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<BlockIdCommandProto> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<BlockIdCommandProto> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }
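  // Illustrative sketch of typical client-side use; the block-pool id and block
  // ids below are placeholder values, not taken from a real cluster:
  //
  //   BlockIdCommandProto cmd = BlockIdCommandProto.newBuilder()
  //       .setAction(BlockIdCommandProto.Action.CACHE)
  //       .setBlockPoolId("BP-example")
  //       .addBlockIds(1073741825L)
  //       .addBlockIds(1073741826L)
  //       .build();                      // throws if a required field is missing
  //   byte[] bytes = cmd.toByteArray();
  //   BlockIdCommandProto parsed = BlockIdCommandProto.parseFrom(bytes);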

  public interface BlockRecoveryCommandProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.datanode.BlockRecoveryCommandProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

    /**
     * <code>repeated .hadoop.hdfs.RecoveringBlockProto blocks = 1;</code>
     */
    java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RecoveringBlockProto> 
        getBlocksList();
    /**
     * <code>repeated .hadoop.hdfs.RecoveringBlockProto blocks = 1;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RecoveringBlockProto getBlocks(int index);
    /**
     * <code>repeated .hadoop.hdfs.RecoveringBlockProto blocks = 1;</code>
     */
    int getBlocksCount();
    /**
     * <code>repeated .hadoop.hdfs.RecoveringBlockProto blocks = 1;</code>
     */
    java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RecoveringBlockProtoOrBuilder> 
        getBlocksOrBuilderList();
    /**
     * <code>repeated .hadoop.hdfs.RecoveringBlockProto blocks = 1;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RecoveringBlockProtoOrBuilder getBlocksOrBuilder(
        int index);
  }
  /**
   * <pre>
   **
   * List of blocks to be recovered by the datanode
   * </pre>
   *
   * Protobuf type {@code hadoop.hdfs.datanode.BlockRecoveryCommandProto}
   */
  public static final class BlockRecoveryCommandProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.hdfs.datanode.BlockRecoveryCommandProto)
      BlockRecoveryCommandProtoOrBuilder {
    private static final long serialVersionUID = 0L;
    // Use BlockRecoveryCommandProto.newBuilder() to construct.
    private BlockRecoveryCommandProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private BlockRecoveryCommandProto() {
      blocks_ = java.util.Collections.emptyList();
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new BlockRecoveryCommandProto();
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_BlockRecoveryCommandProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_BlockRecoveryCommandProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto.class, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto.Builder.class);
    }

    public static final int BLOCKS_FIELD_NUMBER = 1;
    @SuppressWarnings("serial")
    private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RecoveringBlockProto> blocks_;
    /**
     * <code>repeated .hadoop.hdfs.RecoveringBlockProto blocks = 1;</code>
     */
    @java.lang.Override
    public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RecoveringBlockProto> getBlocksList() {
      return blocks_;
    }
    /**
     * <code>repeated .hadoop.hdfs.RecoveringBlockProto blocks = 1;</code>
     */
    @java.lang.Override
    public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RecoveringBlockProtoOrBuilder> 
        getBlocksOrBuilderList() {
      return blocks_;
    }
    /**
     * <code>repeated .hadoop.hdfs.RecoveringBlockProto blocks = 1;</code>
     */
    @java.lang.Override
    public int getBlocksCount() {
      return blocks_.size();
    }
    /**
     * <code>repeated .hadoop.hdfs.RecoveringBlockProto blocks = 1;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RecoveringBlockProto getBlocks(int index) {
      return blocks_.get(index);
    }
    /**
     * <code>repeated .hadoop.hdfs.RecoveringBlockProto blocks = 1;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RecoveringBlockProtoOrBuilder getBlocksOrBuilder(
        int index) {
      return blocks_.get(index);
    }

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      for (int i = 0; i < getBlocksCount(); i++) {
        if (!getBlocks(i).isInitialized()) {
          memoizedIsInitialized = 0;
          return false;
        }
      }
      memoizedIsInitialized = 1;
      return true;
    }

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      for (int i = 0; i < blocks_.size(); i++) {
        output.writeMessage(1, blocks_.get(i));
      }
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      for (int i = 0; i < blocks_.size(); i++) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeMessageSize(1, blocks_.get(i));
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
       return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto other = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto) obj;

      if (!getBlocksList()
          .equals(other.getBlocksList())) return false;
      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      if (getBlocksCount() > 0) {
        hash = (37 * hash) + BLOCKS_FIELD_NUMBER;
        hash = (53 * hash) + getBlocksList().hashCode();
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * <pre>
     **
     * List of blocks to be recovered by the datanode
     * </pre>
     *
     * Protobuf type {@code hadoop.hdfs.datanode.BlockRecoveryCommandProto}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.hdfs.datanode.BlockRecoveryCommandProto)
        org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProtoOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_BlockRecoveryCommandProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_BlockRecoveryCommandProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto.class, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto.newBuilder()
      private Builder() {

      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);

      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        bitField0_ = 0;
        if (blocksBuilder_ == null) {
          blocks_ = java.util.Collections.emptyList();
        } else {
          blocks_ = null;
          blocksBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000001);
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_BlockRecoveryCommandProto_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto build() {
        org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto buildPartial() {
        org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto result = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto(this);
        buildPartialRepeatedFields(result);
        if (bitField0_ != 0) { buildPartial0(result); }
        onBuilt();
        return result;
      }

      private void buildPartialRepeatedFields(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto result) {
        if (blocksBuilder_ == null) {
          if (((bitField0_ & 0x00000001) != 0)) {
            blocks_ = java.util.Collections.unmodifiableList(blocks_);
            bitField0_ = (bitField0_ & ~0x00000001);
          }
          result.blocks_ = blocks_;
        } else {
          result.blocks_ = blocksBuilder_.build();
        }
      }

      private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto result) {
        int from_bitField0_ = bitField0_;
      }

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto) {
          return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto other) {
        if (other == org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto.getDefaultInstance()) return this;
        if (blocksBuilder_ == null) {
          if (!other.blocks_.isEmpty()) {
            if (blocks_.isEmpty()) {
              blocks_ = other.blocks_;
              bitField0_ = (bitField0_ & ~0x00000001);
            } else {
              ensureBlocksIsMutable();
              blocks_.addAll(other.blocks_);
            }
            onChanged();
          }
        } else {
          if (!other.blocks_.isEmpty()) {
            if (blocksBuilder_.isEmpty()) {
              blocksBuilder_.dispose();
              blocksBuilder_ = null;
              blocks_ = other.blocks_;
              bitField0_ = (bitField0_ & ~0x00000001);
              blocksBuilder_ = 
                org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ?
                   getBlocksFieldBuilder() : null;
            } else {
              blocksBuilder_.addAllMessages(other.blocks_);
            }
          }
        }
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        for (int i = 0; i < getBlocksCount(); i++) {
          if (!getBlocks(i).isInitialized()) {
            return false;
          }
        }
        return true;
      }

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              case 10: {
                org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RecoveringBlockProto m =
                    input.readMessage(
                        org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RecoveringBlockProto.PARSER,
                        extensionRegistry);
                if (blocksBuilder_ == null) {
                  ensureBlocksIsMutable();
                  blocks_.add(m);
                } else {
                  blocksBuilder_.addMessage(m);
                }
                break;
              } // case 10
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
      private int bitField0_;

      private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RecoveringBlockProto> blocks_ =
        java.util.Collections.emptyList();
      private void ensureBlocksIsMutable() {
        if (!((bitField0_ & 0x00000001) != 0)) {
          blocks_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RecoveringBlockProto>(blocks_);
          bitField0_ |= 0x00000001;
         }
      }
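      // The repeated blocks field lives either in the plain list blocks_ or, once
      // a nested field builder is active, in blocksBuilder_; each accessor below
      // branches on whichever representation is currently in use.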

      private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RecoveringBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RecoveringBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RecoveringBlockProtoOrBuilder> blocksBuilder_;

      /**
       * <code>repeated .hadoop.hdfs.RecoveringBlockProto blocks = 1;</code>
       */
      public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RecoveringBlockProto> getBlocksList() {
        if (blocksBuilder_ == null) {
          return java.util.Collections.unmodifiableList(blocks_);
        } else {
          return blocksBuilder_.getMessageList();
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.RecoveringBlockProto blocks = 1;</code>
       */
      public int getBlocksCount() {
        if (blocksBuilder_ == null) {
          return blocks_.size();
        } else {
          return blocksBuilder_.getCount();
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.RecoveringBlockProto blocks = 1;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RecoveringBlockProto getBlocks(int index) {
        if (blocksBuilder_ == null) {
          return blocks_.get(index);
        } else {
          return blocksBuilder_.getMessage(index);
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.RecoveringBlockProto blocks = 1;</code>
       */
      public Builder setBlocks(
          int index, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RecoveringBlockProto value) {
        if (blocksBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          ensureBlocksIsMutable();
          blocks_.set(index, value);
          onChanged();
        } else {
          blocksBuilder_.setMessage(index, value);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.RecoveringBlockProto blocks = 1;</code>
       */
      public Builder setBlocks(
          int index, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RecoveringBlockProto.Builder builderForValue) {
        if (blocksBuilder_ == null) {
          ensureBlocksIsMutable();
          blocks_.set(index, builderForValue.build());
          onChanged();
        } else {
          blocksBuilder_.setMessage(index, builderForValue.build());
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.RecoveringBlockProto blocks = 1;</code>
       */
      public Builder addBlocks(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RecoveringBlockProto value) {
        if (blocksBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          ensureBlocksIsMutable();
          blocks_.add(value);
          onChanged();
        } else {
          blocksBuilder_.addMessage(value);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.RecoveringBlockProto blocks = 1;</code>
       */
      public Builder addBlocks(
          int index, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RecoveringBlockProto value) {
        if (blocksBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          ensureBlocksIsMutable();
          blocks_.add(index, value);
          onChanged();
        } else {
          blocksBuilder_.addMessage(index, value);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.RecoveringBlockProto blocks = 1;</code>
       */
      public Builder addBlocks(
          org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RecoveringBlockProto.Builder builderForValue) {
        if (blocksBuilder_ == null) {
          ensureBlocksIsMutable();
          blocks_.add(builderForValue.build());
          onChanged();
        } else {
          blocksBuilder_.addMessage(builderForValue.build());
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.RecoveringBlockProto blocks = 1;</code>
       */
      public Builder addBlocks(
          int index, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RecoveringBlockProto.Builder builderForValue) {
        if (blocksBuilder_ == null) {
          ensureBlocksIsMutable();
          blocks_.add(index, builderForValue.build());
          onChanged();
        } else {
          blocksBuilder_.addMessage(index, builderForValue.build());
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.RecoveringBlockProto blocks = 1;</code>
       */
      public Builder addAllBlocks(
          java.lang.Iterable<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RecoveringBlockProto> values) {
        if (blocksBuilder_ == null) {
          ensureBlocksIsMutable();
          org.apache.hadoop.thirdparty.protobuf.AbstractMessageLite.Builder.addAll(
              values, blocks_);
          onChanged();
        } else {
          blocksBuilder_.addAllMessages(values);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.RecoveringBlockProto blocks = 1;</code>
       */
      public Builder clearBlocks() {
        if (blocksBuilder_ == null) {
          blocks_ = java.util.Collections.emptyList();
          bitField0_ = (bitField0_ & ~0x00000001);
          onChanged();
        } else {
          blocksBuilder_.clear();
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.RecoveringBlockProto blocks = 1;</code>
       */
      public Builder removeBlocks(int index) {
        if (blocksBuilder_ == null) {
          ensureBlocksIsMutable();
          blocks_.remove(index);
          onChanged();
        } else {
          blocksBuilder_.remove(index);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.RecoveringBlockProto blocks = 1;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RecoveringBlockProto.Builder getBlocksBuilder(
          int index) {
        return getBlocksFieldBuilder().getBuilder(index);
      }
      /**
       * <code>repeated .hadoop.hdfs.RecoveringBlockProto blocks = 1;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RecoveringBlockProtoOrBuilder getBlocksOrBuilder(
          int index) {
        if (blocksBuilder_ == null) {
          return blocks_.get(index);
        } else {
          return blocksBuilder_.getMessageOrBuilder(index);
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.RecoveringBlockProto blocks = 1;</code>
       */
      public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RecoveringBlockProtoOrBuilder> 
           getBlocksOrBuilderList() {
        if (blocksBuilder_ != null) {
          return blocksBuilder_.getMessageOrBuilderList();
        } else {
          return java.util.Collections.unmodifiableList(blocks_);
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.RecoveringBlockProto blocks = 1;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RecoveringBlockProto.Builder addBlocksBuilder() {
        return getBlocksFieldBuilder().addBuilder(
            org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RecoveringBlockProto.getDefaultInstance());
      }
      /**
       * <code>repeated .hadoop.hdfs.RecoveringBlockProto blocks = 1;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RecoveringBlockProto.Builder addBlocksBuilder(
          int index) {
        return getBlocksFieldBuilder().addBuilder(
            index, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RecoveringBlockProto.getDefaultInstance());
      }
      /**
       * <code>repeated .hadoop.hdfs.RecoveringBlockProto blocks = 1;</code>
       */
      public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RecoveringBlockProto.Builder> 
           getBlocksBuilderList() {
        return getBlocksFieldBuilder().getBuilderList();
      }
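      // Lazily created helper: the first call to getBlocksFieldBuilder() hands the
      // current blocks_ list off to blocksBuilder_ and nulls out blocks_; from then
      // on the accessors above delegate to the RepeatedFieldBuilderV3 instead of the list.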
      private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RecoveringBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RecoveringBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RecoveringBlockProtoOrBuilder> 
          getBlocksFieldBuilder() {
        if (blocksBuilder_ == null) {
          blocksBuilder_ = new org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
              org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RecoveringBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RecoveringBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RecoveringBlockProtoOrBuilder>(
                  blocks_,
                  ((bitField0_ & 0x00000001) != 0),
                  getParentForChildren(),
                  isClean());
          blocks_ = null;
        }
        return blocksBuilder_;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.datanode.BlockRecoveryCommandProto)
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.datanode.BlockRecoveryCommandProto)
    private static final org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto();
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<BlockRecoveryCommandProto>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<BlockRecoveryCommandProto>() {
      @java.lang.Override
      public BlockRecoveryCommandProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<BlockRecoveryCommandProto> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<BlockRecoveryCommandProto> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }
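
  /*
   * A minimal usage sketch for BlockRecoveryCommandProto. The local names
   * ("recoveringBlock", "cmd", "wire", "parsed") are illustrative only, and
   * "recoveringBlock" stands for an already populated
   * HdfsServerProtos.RecoveringBlockProto; an unpopulated one would fail the
   * required-field check in build(), since RecoveringBlockProto declares
   * required fields of its own.
   *
   * <pre>
   * HdfsServerProtos.RecoveringBlockProto recoveringBlock = ...; // assumed fully initialized
   * DatanodeProtocolProtos.BlockRecoveryCommandProto cmd =
   *     DatanodeProtocolProtos.BlockRecoveryCommandProto.newBuilder()
   *         .addBlocks(recoveringBlock)
   *         .build();
   * byte[] wire = cmd.toByteArray();
   * DatanodeProtocolProtos.BlockRecoveryCommandProto parsed =
   *     DatanodeProtocolProtos.BlockRecoveryCommandProto.parseFrom(wire);
   * </pre>
   */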

  public interface FinalizeCommandProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.datanode.FinalizeCommandProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

    /**
     * <pre>
     * Block pool to be finalized
     * </pre>
     *
     * <code>required string blockPoolId = 1;</code>
     * @return Whether the blockPoolId field is set.
     */
    boolean hasBlockPoolId();
    /**
     * <pre>
     * Block pool to be finalized
     * </pre>
     *
     * <code>required string blockPoolId = 1;</code>
     * @return The blockPoolId.
     */
    java.lang.String getBlockPoolId();
    /**
     * <pre>
     * Block pool to be finalized
     * </pre>
     *
     * <code>required string blockPoolId = 1;</code>
     * @return The bytes for blockPoolId.
     */
    org.apache.hadoop.thirdparty.protobuf.ByteString
        getBlockPoolIdBytes();
  }
  /**
   * <pre>
   **
   * Finalize the upgrade at the datanode
   * </pre>
   *
   * Protobuf type {@code hadoop.hdfs.datanode.FinalizeCommandProto}
   */
  public static final class FinalizeCommandProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.hdfs.datanode.FinalizeCommandProto)
      FinalizeCommandProtoOrBuilder {
    private static final long serialVersionUID = 0L;
    // Use FinalizeCommandProto.newBuilder() to construct.
    private FinalizeCommandProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private FinalizeCommandProto() {
      blockPoolId_ = "";
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new FinalizeCommandProto();
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_FinalizeCommandProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_FinalizeCommandProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto.class, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto.Builder.class);
    }

    private int bitField0_;
    public static final int BLOCKPOOLID_FIELD_NUMBER = 1;
    @SuppressWarnings("serial")
    private volatile java.lang.Object blockPoolId_ = "";
    /**
     * <pre>
     * Block pool to be finalized
     * </pre>
     *
     * <code>required string blockPoolId = 1;</code>
     * @return Whether the blockPoolId field is set.
     */
    @java.lang.Override
    public boolean hasBlockPoolId() {
      return ((bitField0_ & 0x00000001) != 0);
    }
    /**
     * <pre>
     * Block pool to be finalized
     * </pre>
     *
     * <code>required string blockPoolId = 1;</code>
     * @return The blockPoolId.
     */
    @java.lang.Override
    public java.lang.String getBlockPoolId() {
      java.lang.Object ref = blockPoolId_;
      if (ref instanceof java.lang.String) {
        return (java.lang.String) ref;
      } else {
        org.apache.hadoop.thirdparty.protobuf.ByteString bs = 
            (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        java.lang.String s = bs.toStringUtf8();
        if (bs.isValidUtf8()) {
          blockPoolId_ = s;
        }
        return s;
      }
    }
    /**
     * <pre>
     * Block pool to be finalized
     * </pre>
     *
     * <code>required string blockPoolId = 1;</code>
     * @return The bytes for blockPoolId.
     */
    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.ByteString
        getBlockPoolIdBytes() {
      java.lang.Object ref = blockPoolId_;
      if (ref instanceof java.lang.String) {
        org.apache.hadoop.thirdparty.protobuf.ByteString b = 
            org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                (java.lang.String) ref);
        blockPoolId_ = b;
        return b;
      } else {
        return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
      }
    }

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      if (!hasBlockPoolId()) {
        memoizedIsInitialized = 0;
        return false;
      }
      memoizedIsInitialized = 1;
      return true;
    }

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      if (((bitField0_ & 0x00000001) != 0)) {
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 1, blockPoolId_);
      }
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(1, blockPoolId_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto other = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto) obj;

      if (hasBlockPoolId() != other.hasBlockPoolId()) return false;
      if (hasBlockPoolId()) {
        if (!getBlockPoolId()
            .equals(other.getBlockPoolId())) return false;
      }
      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      if (hasBlockPoolId()) {
        hash = (37 * hash) + BLOCKPOOLID_FIELD_NUMBER;
        hash = (53 * hash) + getBlockPoolId().hashCode();
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * <pre>
     **
     * Finalize the upgrade at the datanode
     * </pre>
     *
     * Protobuf type {@code hadoop.hdfs.datanode.FinalizeCommandProto}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.hdfs.datanode.FinalizeCommandProto)
        org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProtoOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_FinalizeCommandProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_FinalizeCommandProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto.class, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto.newBuilder()
      private Builder() {

      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);

      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        bitField0_ = 0;
        blockPoolId_ = "";
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_FinalizeCommandProto_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto build() {
        org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto buildPartial() {
        org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto result = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto(this);
        if (bitField0_ != 0) { buildPartial0(result); }
        onBuilt();
        return result;
      }

      private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto result) {
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) != 0)) {
          result.blockPoolId_ = blockPoolId_;
          to_bitField0_ |= 0x00000001;
        }
        result.bitField0_ |= to_bitField0_;
      }

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto) {
          return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto other) {
        if (other == org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto.getDefaultInstance()) return this;
        if (other.hasBlockPoolId()) {
          blockPoolId_ = other.blockPoolId_;
          bitField0_ |= 0x00000001;
          onChanged();
        }
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        if (!hasBlockPoolId()) {
          return false;
        }
        return true;
      }

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              case 10: {
                blockPoolId_ = input.readBytes();
                bitField0_ |= 0x00000001;
                break;
              } // case 10
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
      private int bitField0_;

      private java.lang.Object blockPoolId_ = "";
      /**
       * <pre>
       * Block pool to be finalized
       * </pre>
       *
       * <code>required string blockPoolId = 1;</code>
       * @return Whether the blockPoolId field is set.
       */
      public boolean hasBlockPoolId() {
        return ((bitField0_ & 0x00000001) != 0);
      }
      /**
       * <pre>
       * Block pool to be finalized
       * </pre>
       *
       * <code>required string blockPoolId = 1;</code>
       * @return The blockPoolId.
       */
      public java.lang.String getBlockPoolId() {
        java.lang.Object ref = blockPoolId_;
        if (!(ref instanceof java.lang.String)) {
          org.apache.hadoop.thirdparty.protobuf.ByteString bs =
              (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
          java.lang.String s = bs.toStringUtf8();
          if (bs.isValidUtf8()) {
            blockPoolId_ = s;
          }
          return s;
        } else {
          return (java.lang.String) ref;
        }
      }
      /**
       * <pre>
       * Block pool to be finalized
       * </pre>
       *
       * <code>required string blockPoolId = 1;</code>
       * @return The bytes for blockPoolId.
       */
      public org.apache.hadoop.thirdparty.protobuf.ByteString
          getBlockPoolIdBytes() {
        java.lang.Object ref = blockPoolId_;
        if (ref instanceof java.lang.String) {
          org.apache.hadoop.thirdparty.protobuf.ByteString b = 
              org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                  (java.lang.String) ref);
          blockPoolId_ = b;
          return b;
        } else {
          return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        }
      }
      /**
       * <pre>
       * Block pool to be finalized
       * </pre>
       *
       * <code>required string blockPoolId = 1;</code>
       * @param value The blockPoolId to set.
       * @return This builder for chaining.
       */
      public Builder setBlockPoolId(
          java.lang.String value) {
        if (value == null) { throw new NullPointerException(); }
        blockPoolId_ = value;
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * <pre>
       * Block pool to be finalized
       * </pre>
       *
       * <code>required string blockPoolId = 1;</code>
       * @return This builder for chaining.
       */
      public Builder clearBlockPoolId() {
        blockPoolId_ = getDefaultInstance().getBlockPoolId();
        bitField0_ = (bitField0_ & ~0x00000001);
        onChanged();
        return this;
      }
      /**
       * <pre>
       * Block pool to be finalized
       * </pre>
       *
       * <code>required string blockPoolId = 1;</code>
       * @param value The bytes for blockPoolId to set.
       * @return This builder for chaining.
       */
      public Builder setBlockPoolIdBytes(
          org.apache.hadoop.thirdparty.protobuf.ByteString value) {
        if (value == null) { throw new NullPointerException(); }
        blockPoolId_ = value;
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.datanode.FinalizeCommandProto)
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.datanode.FinalizeCommandProto)
    private static final org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto();
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<FinalizeCommandProto>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<FinalizeCommandProto>() {
      @java.lang.Override
      public FinalizeCommandProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<FinalizeCommandProto> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<FinalizeCommandProto> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }
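
  /*
   * A minimal round-trip sketch for FinalizeCommandProto. The block pool id and
   * the local names ("cmd", "wire", "parsed") are made-up example values.
   * Because blockPoolId is a required field, build() throws an
   * uninitialized-message exception if it was never set.
   *
   * <pre>
   * DatanodeProtocolProtos.FinalizeCommandProto cmd =
   *     DatanodeProtocolProtos.FinalizeCommandProto.newBuilder()
   *         .setBlockPoolId("BP-1234567890-192.0.2.1-1700000000000") // example id only
   *         .build();
   * byte[] wire = cmd.toByteArray();
   * DatanodeProtocolProtos.FinalizeCommandProto parsed =
   *     DatanodeProtocolProtos.FinalizeCommandProto.parseFrom(wire);
   * assert parsed.hasBlockPoolId() && parsed.getBlockPoolId().equals(cmd.getBlockPoolId());
   * </pre>
   */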

  public interface KeyUpdateCommandProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.datanode.KeyUpdateCommandProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

    /**
     * <code>required .hadoop.hdfs.ExportedBlockKeysProto keys = 1;</code>
     * @return Whether the keys field is set.
     */
    boolean hasKeys();
    /**
     * <code>required .hadoop.hdfs.ExportedBlockKeysProto keys = 1;</code>
     * @return The keys.
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto getKeys();
    /**
     * <code>required .hadoop.hdfs.ExportedBlockKeysProto keys = 1;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProtoOrBuilder getKeysOrBuilder();
  }
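
  /*
   * A minimal usage sketch for KeyUpdateCommandProto. "exportedKeys" is a
   * placeholder for an already populated
   * HdfsServerProtos.ExportedBlockKeysProto; per the isInitialized() checks in
   * this class, both the keys field and the nested message it carries must be
   * fully set before build() succeeds.
   *
   * <pre>
   * HdfsServerProtos.ExportedBlockKeysProto exportedKeys = ...; // assumed fully initialized
   * DatanodeProtocolProtos.KeyUpdateCommandProto cmd =
   *     DatanodeProtocolProtos.KeyUpdateCommandProto.newBuilder()
   *         .setKeys(exportedKeys)
   *         .build();
   * byte[] wire = cmd.toByteArray();
   * DatanodeProtocolProtos.KeyUpdateCommandProto parsed =
   *     DatanodeProtocolProtos.KeyUpdateCommandProto.parseFrom(wire);
   * </pre>
   */
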
  /**
   * <pre>
   **
   * Update the block keys at the datanode
   * </pre>
   *
   * Protobuf type {@code hadoop.hdfs.datanode.KeyUpdateCommandProto}
   */
  public static final class KeyUpdateCommandProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.hdfs.datanode.KeyUpdateCommandProto)
      KeyUpdateCommandProtoOrBuilder {
    private static final long serialVersionUID = 0L;
    // Use KeyUpdateCommandProto.newBuilder() to construct.
    private KeyUpdateCommandProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private KeyUpdateCommandProto() {
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new KeyUpdateCommandProto();
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_KeyUpdateCommandProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_KeyUpdateCommandProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto.class, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto.Builder.class);
    }

    private int bitField0_;
    public static final int KEYS_FIELD_NUMBER = 1;
    private org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto keys_;
    /**
     * <code>required .hadoop.hdfs.ExportedBlockKeysProto keys = 1;</code>
     * @return Whether the keys field is set.
     */
    @java.lang.Override
    public boolean hasKeys() {
      return ((bitField0_ & 0x00000001) != 0);
    }
    /**
     * <code>required .hadoop.hdfs.ExportedBlockKeysProto keys = 1;</code>
     * @return The keys.
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto getKeys() {
      return keys_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto.getDefaultInstance() : keys_;
    }
    /**
     * <code>required .hadoop.hdfs.ExportedBlockKeysProto keys = 1;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProtoOrBuilder getKeysOrBuilder() {
      return keys_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto.getDefaultInstance() : keys_;
    }

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      if (!hasKeys()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!getKeys().isInitialized()) {
        memoizedIsInitialized = 0;
        return false;
      }
      memoizedIsInitialized = 1;
      return true;
    }

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      if (((bitField0_ & 0x00000001) != 0)) {
        output.writeMessage(1, getKeys());
      }
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeMessageSize(1, getKeys());
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto other = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto) obj;

      if (hasKeys() != other.hasKeys()) return false;
      if (hasKeys()) {
        if (!getKeys()
            .equals(other.getKeys())) return false;
      }
      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      if (hasKeys()) {
        hash = (37 * hash) + KEYS_FIELD_NUMBER;
        hash = (53 * hash) + getKeys().hashCode();
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * <pre>
     **
     * Update the block keys at the datanode
     * </pre>
     *
     * Protobuf type {@code hadoop.hdfs.datanode.KeyUpdateCommandProto}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.hdfs.datanode.KeyUpdateCommandProto)
        org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProtoOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_KeyUpdateCommandProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_KeyUpdateCommandProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto.class, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      private void maybeForceBuilderInitialization() {
        if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
                .alwaysUseFieldBuilders) {
          getKeysFieldBuilder();
        }
      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        bitField0_ = 0;
        keys_ = null;
        if (keysBuilder_ != null) {
          keysBuilder_.dispose();
          keysBuilder_ = null;
        }
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_KeyUpdateCommandProto_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto build() {
        org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto buildPartial() {
        org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto result = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto(this);
        if (bitField0_ != 0) { buildPartial0(result); }
        onBuilt();
        return result;
      }

      private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto result) {
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) != 0)) {
          result.keys_ = keysBuilder_ == null
              ? keys_
              : keysBuilder_.build();
          to_bitField0_ |= 0x00000001;
        }
        result.bitField0_ |= to_bitField0_;
      }

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto) {
          return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto other) {
        if (other == org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto.getDefaultInstance()) return this;
        if (other.hasKeys()) {
          mergeKeys(other.getKeys());
        }
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        if (!hasKeys()) {
          return false;
        }
        if (!getKeys().isInitialized()) {
          return false;
        }
        return true;
      }

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              case 10: {
                input.readMessage(
                    getKeysFieldBuilder().getBuilder(),
                    extensionRegistry);
                bitField0_ |= 0x00000001;
                break;
              } // case 10
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
      private int bitField0_;

      private org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto keys_;
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProtoOrBuilder> keysBuilder_;
      /**
       * <code>required .hadoop.hdfs.ExportedBlockKeysProto keys = 1;</code>
       * @return Whether the keys field is set.
       */
      public boolean hasKeys() {
        return ((bitField0_ & 0x00000001) != 0);
      }
      /**
       * <code>required .hadoop.hdfs.ExportedBlockKeysProto keys = 1;</code>
       * @return The keys.
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto getKeys() {
        if (keysBuilder_ == null) {
          return keys_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto.getDefaultInstance() : keys_;
        } else {
          return keysBuilder_.getMessage();
        }
      }
      /**
       * <code>required .hadoop.hdfs.ExportedBlockKeysProto keys = 1;</code>
       */
      public Builder setKeys(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto value) {
        if (keysBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          keys_ = value;
        } else {
          keysBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.ExportedBlockKeysProto keys = 1;</code>
       */
      public Builder setKeys(
          org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto.Builder builderForValue) {
        if (keysBuilder_ == null) {
          keys_ = builderForValue.build();
        } else {
          keysBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.ExportedBlockKeysProto keys = 1;</code>
       */
      public Builder mergeKeys(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto value) {
        if (keysBuilder_ == null) {
          if (((bitField0_ & 0x00000001) != 0) &&
            keys_ != null &&
            keys_ != org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto.getDefaultInstance()) {
            getKeysBuilder().mergeFrom(value);
          } else {
            keys_ = value;
          }
        } else {
          keysBuilder_.mergeFrom(value);
        }
        if (keys_ != null) {
          bitField0_ |= 0x00000001;
          onChanged();
        }
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.ExportedBlockKeysProto keys = 1;</code>
       */
      public Builder clearKeys() {
        bitField0_ = (bitField0_ & ~0x00000001);
        keys_ = null;
        if (keysBuilder_ != null) {
          keysBuilder_.dispose();
          keysBuilder_ = null;
        }
        onChanged();
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.ExportedBlockKeysProto keys = 1;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto.Builder getKeysBuilder() {
        bitField0_ |= 0x00000001;
        onChanged();
        return getKeysFieldBuilder().getBuilder();
      }
      /**
       * <code>required .hadoop.hdfs.ExportedBlockKeysProto keys = 1;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProtoOrBuilder getKeysOrBuilder() {
        if (keysBuilder_ != null) {
          return keysBuilder_.getMessageOrBuilder();
        } else {
          return keys_ == null ?
              org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto.getDefaultInstance() : keys_;
        }
      }
      /**
       * <code>required .hadoop.hdfs.ExportedBlockKeysProto keys = 1;</code>
       */
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProtoOrBuilder> 
          getKeysFieldBuilder() {
        if (keysBuilder_ == null) {
          keysBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
              org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProtoOrBuilder>(
                  getKeys(),
                  getParentForChildren(),
                  isClean());
          keys_ = null;
        }
        return keysBuilder_;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.datanode.KeyUpdateCommandProto)
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.datanode.KeyUpdateCommandProto)
    private static final org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto();
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<KeyUpdateCommandProto>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<KeyUpdateCommandProto>() {
      @java.lang.Override
      public KeyUpdateCommandProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<KeyUpdateCommandProto> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<KeyUpdateCommandProto> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }
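
  // Usage sketch (illustrative only; not part of the generated code): a
  // KeyUpdateCommandProto is built with its required 'keys' field set to a fully
  // initialized ExportedBlockKeysProto, then round-tripped through the wire form.
  //
  //   HdfsServerProtos.ExportedBlockKeysProto blockKeys = ...; // fully populated keys
  //   KeyUpdateCommandProto cmd = KeyUpdateCommandProto.newBuilder()
  //       .setKeys(blockKeys)
  //       .build();                      // fails if 'keys' is missing or uninitialized
  //   byte[] wire = cmd.toByteArray();
  //   KeyUpdateCommandProto parsed = KeyUpdateCommandProto.parseFrom(wire);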

  public interface RegisterCommandProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.datanode.RegisterCommandProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {
  }
  /**
   * <pre>
   **
   * Instruct datanode to register with the namenode
   * </pre>
   *
   * Protobuf type {@code hadoop.hdfs.datanode.RegisterCommandProto}
   */
  public static final class RegisterCommandProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.hdfs.datanode.RegisterCommandProto)
      RegisterCommandProtoOrBuilder {
    private static final long serialVersionUID = 0L;
    // Use RegisterCommandProto.newBuilder() to construct.
    private RegisterCommandProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private RegisterCommandProto() {
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new RegisterCommandProto();
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_RegisterCommandProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_RegisterCommandProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto.class, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto.Builder.class);
    }

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      memoizedIsInitialized = 1;
      return true;
    }

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto other = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto) obj;

      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * <pre>
     **
     * Instruct datanode to register with the namenode
     * </pre>
     *
     * Protobuf type {@code hadoop.hdfs.datanode.RegisterCommandProto}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.hdfs.datanode.RegisterCommandProto)
        org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProtoOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_RegisterCommandProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_RegisterCommandProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto.class, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto.newBuilder()
      private Builder() {

      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);

      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_RegisterCommandProto_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto build() {
        org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto buildPartial() {
        org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto result = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto(this);
        onBuilt();
        return result;
      }

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto) {
          return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto other) {
        if (other == org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto.getDefaultInstance()) return this;
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        return true;
      }

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.datanode.RegisterCommandProto)
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.datanode.RegisterCommandProto)
    private static final org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto();
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<RegisterCommandProto>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<RegisterCommandProto>() {
      @java.lang.Override
      public RegisterCommandProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<RegisterCommandProto> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<RegisterCommandProto> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }
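
  // Usage sketch (illustrative only; not part of the generated code): the command
  // carries no fields, so the shared default instance is all a sender needs.
  //
  //   RegisterCommandProto cmd = RegisterCommandProto.getDefaultInstance();
  //   byte[] wire = cmd.toByteArray();                     // zero-length payload
  //   RegisterCommandProto parsed = RegisterCommandProto.parseFrom(wire);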

  public interface BlockECReconstructionCommandProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.datanode.BlockECReconstructionCommandProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

    /**
     * <code>repeated .hadoop.hdfs.BlockECReconstructionInfoProto blockECReconstructioninfo = 1;</code>
     */
    java.util.List<org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.BlockECReconstructionInfoProto> 
        getBlockECReconstructioninfoList();
    /**
     * <code>repeated .hadoop.hdfs.BlockECReconstructionInfoProto blockECReconstructioninfo = 1;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.BlockECReconstructionInfoProto getBlockECReconstructioninfo(int index);
    /**
     * <code>repeated .hadoop.hdfs.BlockECReconstructionInfoProto blockECReconstructioninfo = 1;</code>
     */
    int getBlockECReconstructioninfoCount();
    /**
     * <code>repeated .hadoop.hdfs.BlockECReconstructionInfoProto blockECReconstructioninfo = 1;</code>
     */
    java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.BlockECReconstructionInfoProtoOrBuilder> 
        getBlockECReconstructioninfoOrBuilderList();
    /**
     * <code>repeated .hadoop.hdfs.BlockECReconstructionInfoProto blockECReconstructioninfo = 1;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.BlockECReconstructionInfoProtoOrBuilder getBlockECReconstructioninfoOrBuilder(
        int index);
  }
  /**
   * <pre>
   **
   * Block Erasure coding reconstruction command
   * </pre>
   *
   * Protobuf type {@code hadoop.hdfs.datanode.BlockECReconstructionCommandProto}
   */
  public static final class BlockECReconstructionCommandProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.hdfs.datanode.BlockECReconstructionCommandProto)
      BlockECReconstructionCommandProtoOrBuilder {
    private static final long serialVersionUID = 0L;
    // Use BlockECReconstructionCommandProto.newBuilder() to construct.
    private BlockECReconstructionCommandProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private BlockECReconstructionCommandProto() {
      blockECReconstructioninfo_ = java.util.Collections.emptyList();
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new BlockECReconstructionCommandProto();
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_BlockECReconstructionCommandProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_BlockECReconstructionCommandProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockECReconstructionCommandProto.class, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockECReconstructionCommandProto.Builder.class);
    }

    public static final int BLOCKECRECONSTRUCTIONINFO_FIELD_NUMBER = 1;
    @SuppressWarnings("serial")
    private java.util.List<org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.BlockECReconstructionInfoProto> blockECReconstructioninfo_;
    /**
     * <code>repeated .hadoop.hdfs.BlockECReconstructionInfoProto blockECReconstructioninfo = 1;</code>
     */
    @java.lang.Override
    public java.util.List<org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.BlockECReconstructionInfoProto> getBlockECReconstructioninfoList() {
      return blockECReconstructioninfo_;
    }
    /**
     * <code>repeated .hadoop.hdfs.BlockECReconstructionInfoProto blockECReconstructioninfo = 1;</code>
     */
    @java.lang.Override
    public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.BlockECReconstructionInfoProtoOrBuilder> 
        getBlockECReconstructioninfoOrBuilderList() {
      return blockECReconstructioninfo_;
    }
    /**
     * <code>repeated .hadoop.hdfs.BlockECReconstructionInfoProto blockECReconstructioninfo = 1;</code>
     */
    @java.lang.Override
    public int getBlockECReconstructioninfoCount() {
      return blockECReconstructioninfo_.size();
    }
    /**
     * <code>repeated .hadoop.hdfs.BlockECReconstructionInfoProto blockECReconstructioninfo = 1;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.BlockECReconstructionInfoProto getBlockECReconstructioninfo(int index) {
      return blockECReconstructioninfo_.get(index);
    }
    /**
     * <code>repeated .hadoop.hdfs.BlockECReconstructionInfoProto blockECReconstructioninfo = 1;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.BlockECReconstructionInfoProtoOrBuilder getBlockECReconstructioninfoOrBuilder(
        int index) {
      return blockECReconstructioninfo_.get(index);
    }

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      for (int i = 0; i < getBlockECReconstructioninfoCount(); i++) {
        if (!getBlockECReconstructioninfo(i).isInitialized()) {
          memoizedIsInitialized = 0;
          return false;
        }
      }
      memoizedIsInitialized = 1;
      return true;
    }

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      for (int i = 0; i < blockECReconstructioninfo_.size(); i++) {
        output.writeMessage(1, blockECReconstructioninfo_.get(i));
      }
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      for (int i = 0; i < blockECReconstructioninfo_.size(); i++) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeMessageSize(1, blockECReconstructioninfo_.get(i));
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockECReconstructionCommandProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockECReconstructionCommandProto other = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockECReconstructionCommandProto) obj;

      if (!getBlockECReconstructioninfoList()
          .equals(other.getBlockECReconstructioninfoList())) return false;
      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      if (getBlockECReconstructioninfoCount() > 0) {
        hash = (37 * hash) + BLOCKECRECONSTRUCTIONINFO_FIELD_NUMBER;
        hash = (53 * hash) + getBlockECReconstructioninfoList().hashCode();
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockECReconstructionCommandProto parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockECReconstructionCommandProto parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockECReconstructionCommandProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockECReconstructionCommandProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockECReconstructionCommandProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockECReconstructionCommandProto parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockECReconstructionCommandProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockECReconstructionCommandProto parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockECReconstructionCommandProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockECReconstructionCommandProto parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockECReconstructionCommandProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockECReconstructionCommandProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockECReconstructionCommandProto prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * <pre>
     **
     * Block Erasure coding reconstruction command
     * </pre>
     *
     * Protobuf type {@code hadoop.hdfs.datanode.BlockECReconstructionCommandProto}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.hdfs.datanode.BlockECReconstructionCommandProto)
        org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockECReconstructionCommandProtoOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_BlockECReconstructionCommandProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_BlockECReconstructionCommandProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockECReconstructionCommandProto.class, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockECReconstructionCommandProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockECReconstructionCommandProto.newBuilder()
      private Builder() {

      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);

      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        bitField0_ = 0;
        if (blockECReconstructioninfoBuilder_ == null) {
          blockECReconstructioninfo_ = java.util.Collections.emptyList();
        } else {
          blockECReconstructioninfo_ = null;
          blockECReconstructioninfoBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000001);
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_BlockECReconstructionCommandProto_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockECReconstructionCommandProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockECReconstructionCommandProto.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockECReconstructionCommandProto build() {
        org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockECReconstructionCommandProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockECReconstructionCommandProto buildPartial() {
        org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockECReconstructionCommandProto result = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockECReconstructionCommandProto(this);
        buildPartialRepeatedFields(result);
        if (bitField0_ != 0) { buildPartial0(result); }
        onBuilt();
        return result;
      }

      private void buildPartialRepeatedFields(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockECReconstructionCommandProto result) {
        if (blockECReconstructioninfoBuilder_ == null) {
          if (((bitField0_ & 0x00000001) != 0)) {
            blockECReconstructioninfo_ = java.util.Collections.unmodifiableList(blockECReconstructioninfo_);
            bitField0_ = (bitField0_ & ~0x00000001);
          }
          result.blockECReconstructioninfo_ = blockECReconstructioninfo_;
        } else {
          result.blockECReconstructioninfo_ = blockECReconstructioninfoBuilder_.build();
        }
      }

      private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockECReconstructionCommandProto result) {
        int from_bitField0_ = bitField0_;
      }

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockECReconstructionCommandProto) {
          return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockECReconstructionCommandProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockECReconstructionCommandProto other) {
        if (other == org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockECReconstructionCommandProto.getDefaultInstance()) return this;
        if (blockECReconstructioninfoBuilder_ == null) {
          if (!other.blockECReconstructioninfo_.isEmpty()) {
            if (blockECReconstructioninfo_.isEmpty()) {
              blockECReconstructioninfo_ = other.blockECReconstructioninfo_;
              bitField0_ = (bitField0_ & ~0x00000001);
            } else {
              ensureBlockECReconstructioninfoIsMutable();
              blockECReconstructioninfo_.addAll(other.blockECReconstructioninfo_);
            }
            onChanged();
          }
        } else {
          if (!other.blockECReconstructioninfo_.isEmpty()) {
            if (blockECReconstructioninfoBuilder_.isEmpty()) {
              blockECReconstructioninfoBuilder_.dispose();
              blockECReconstructioninfoBuilder_ = null;
              blockECReconstructioninfo_ = other.blockECReconstructioninfo_;
              bitField0_ = (bitField0_ & ~0x00000001);
              blockECReconstructioninfoBuilder_ = 
                org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ?
                   getBlockECReconstructioninfoFieldBuilder() : null;
            } else {
              blockECReconstructioninfoBuilder_.addAllMessages(other.blockECReconstructioninfo_);
            }
          }
        }
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        for (int i = 0; i < getBlockECReconstructioninfoCount(); i++) {
          if (!getBlockECReconstructioninfo(i).isInitialized()) {
            return false;
          }
        }
        return true;
      }

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              case 10: {
                org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.BlockECReconstructionInfoProto m =
                    input.readMessage(
                        org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.BlockECReconstructionInfoProto.PARSER,
                        extensionRegistry);
                if (blockECReconstructioninfoBuilder_ == null) {
                  ensureBlockECReconstructioninfoIsMutable();
                  blockECReconstructioninfo_.add(m);
                } else {
                  blockECReconstructioninfoBuilder_.addMessage(m);
                }
                break;
              } // case 10
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
      private int bitField0_;

      private java.util.List<org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.BlockECReconstructionInfoProto> blockECReconstructioninfo_ =
        java.util.Collections.emptyList();
      private void ensureBlockECReconstructioninfoIsMutable() {
        if (!((bitField0_ & 0x00000001) != 0)) {
          blockECReconstructioninfo_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.BlockECReconstructionInfoProto>(blockECReconstructioninfo_);
          bitField0_ |= 0x00000001;
        }
      }

      private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.BlockECReconstructionInfoProto, org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.BlockECReconstructionInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.BlockECReconstructionInfoProtoOrBuilder> blockECReconstructioninfoBuilder_;

      /**
       * <code>repeated .hadoop.hdfs.BlockECReconstructionInfoProto blockECReconstructioninfo = 1;</code>
       */
      public java.util.List<org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.BlockECReconstructionInfoProto> getBlockECReconstructioninfoList() {
        if (blockECReconstructioninfoBuilder_ == null) {
          return java.util.Collections.unmodifiableList(blockECReconstructioninfo_);
        } else {
          return blockECReconstructioninfoBuilder_.getMessageList();
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.BlockECReconstructionInfoProto blockECReconstructioninfo = 1;</code>
       */
      public int getBlockECReconstructioninfoCount() {
        if (blockECReconstructioninfoBuilder_ == null) {
          return blockECReconstructioninfo_.size();
        } else {
          return blockECReconstructioninfoBuilder_.getCount();
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.BlockECReconstructionInfoProto blockECReconstructioninfo = 1;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.BlockECReconstructionInfoProto getBlockECReconstructioninfo(int index) {
        if (blockECReconstructioninfoBuilder_ == null) {
          return blockECReconstructioninfo_.get(index);
        } else {
          return blockECReconstructioninfoBuilder_.getMessage(index);
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.BlockECReconstructionInfoProto blockECReconstructioninfo = 1;</code>
       */
      public Builder setBlockECReconstructioninfo(
          int index, org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.BlockECReconstructionInfoProto value) {
        if (blockECReconstructioninfoBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          ensureBlockECReconstructioninfoIsMutable();
          blockECReconstructioninfo_.set(index, value);
          onChanged();
        } else {
          blockECReconstructioninfoBuilder_.setMessage(index, value);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.BlockECReconstructionInfoProto blockECReconstructioninfo = 1;</code>
       */
      public Builder setBlockECReconstructioninfo(
          int index, org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.BlockECReconstructionInfoProto.Builder builderForValue) {
        if (blockECReconstructioninfoBuilder_ == null) {
          ensureBlockECReconstructioninfoIsMutable();
          blockECReconstructioninfo_.set(index, builderForValue.build());
          onChanged();
        } else {
          blockECReconstructioninfoBuilder_.setMessage(index, builderForValue.build());
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.BlockECReconstructionInfoProto blockECReconstructioninfo = 1;</code>
       */
      public Builder addBlockECReconstructioninfo(org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.BlockECReconstructionInfoProto value) {
        if (blockECReconstructioninfoBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          ensureBlockECReconstructioninfoIsMutable();
          blockECReconstructioninfo_.add(value);
          onChanged();
        } else {
          blockECReconstructioninfoBuilder_.addMessage(value);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.BlockECReconstructionInfoProto blockECReconstructioninfo = 1;</code>
       */
      public Builder addBlockECReconstructioninfo(
          int index, org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.BlockECReconstructionInfoProto value) {
        if (blockECReconstructioninfoBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          ensureBlockECReconstructioninfoIsMutable();
          blockECReconstructioninfo_.add(index, value);
          onChanged();
        } else {
          blockECReconstructioninfoBuilder_.addMessage(index, value);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.BlockECReconstructionInfoProto blockECReconstructioninfo = 1;</code>
       */
      public Builder addBlockECReconstructioninfo(
          org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.BlockECReconstructionInfoProto.Builder builderForValue) {
        if (blockECReconstructioninfoBuilder_ == null) {
          ensureBlockECReconstructioninfoIsMutable();
          blockECReconstructioninfo_.add(builderForValue.build());
          onChanged();
        } else {
          blockECReconstructioninfoBuilder_.addMessage(builderForValue.build());
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.BlockECReconstructionInfoProto blockECReconstructioninfo = 1;</code>
       */
      public Builder addBlockECReconstructioninfo(
          int index, org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.BlockECReconstructionInfoProto.Builder builderForValue) {
        if (blockECReconstructioninfoBuilder_ == null) {
          ensureBlockECReconstructioninfoIsMutable();
          blockECReconstructioninfo_.add(index, builderForValue.build());
          onChanged();
        } else {
          blockECReconstructioninfoBuilder_.addMessage(index, builderForValue.build());
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.BlockECReconstructionInfoProto blockECReconstructioninfo = 1;</code>
       */
      public Builder addAllBlockECReconstructioninfo(
          java.lang.Iterable<? extends org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.BlockECReconstructionInfoProto> values) {
        if (blockECReconstructioninfoBuilder_ == null) {
          ensureBlockECReconstructioninfoIsMutable();
          org.apache.hadoop.thirdparty.protobuf.AbstractMessageLite.Builder.addAll(
              values, blockECReconstructioninfo_);
          onChanged();
        } else {
          blockECReconstructioninfoBuilder_.addAllMessages(values);
        }
        return this;
      }
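
      // Usage sketch (illustrative only; not part of the generated code): callers
      // typically populate the repeated field through the add/addAll methods above.
      //
      //   java.util.List<ErasureCodingProtos.BlockECReconstructionInfoProto> infos = ...;
      //   BlockECReconstructionCommandProto cmd =
      //       BlockECReconstructionCommandProto.newBuilder()
      //           .addAllBlockECReconstructioninfo(infos)
      //           .build();   // each info must be fully initialized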
      /**
       * <code>repeated .hadoop.hdfs.BlockECReconstructionInfoProto blockECReconstructioninfo = 1;</code>
       */
      public Builder clearBlockECReconstructioninfo() {
        if (blockECReconstructioninfoBuilder_ == null) {
          blockECReconstructioninfo_ = java.util.Collections.emptyList();
          bitField0_ = (bitField0_ & ~0x00000001);
          onChanged();
        } else {
          blockECReconstructioninfoBuilder_.clear();
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.BlockECReconstructionInfoProto blockECReconstructioninfo = 1;</code>
       */
      public Builder removeBlockECReconstructioninfo(int index) {
        if (blockECReconstructioninfoBuilder_ == null) {
          ensureBlockECReconstructioninfoIsMutable();
          blockECReconstructioninfo_.remove(index);
          onChanged();
        } else {
          blockECReconstructioninfoBuilder_.remove(index);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.BlockECReconstructionInfoProto blockECReconstructioninfo = 1;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.BlockECReconstructionInfoProto.Builder getBlockECReconstructioninfoBuilder(
          int index) {
        return getBlockECReconstructioninfoFieldBuilder().getBuilder(index);
      }
      /**
       * <code>repeated .hadoop.hdfs.BlockECReconstructionInfoProto blockECReconstructioninfo = 1;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.BlockECReconstructionInfoProtoOrBuilder getBlockECReconstructioninfoOrBuilder(
          int index) {
        if (blockECReconstructioninfoBuilder_ == null) {
          return blockECReconstructioninfo_.get(index);
        } else {
          return blockECReconstructioninfoBuilder_.getMessageOrBuilder(index);
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.BlockECReconstructionInfoProto blockECReconstructioninfo = 1;</code>
       */
      public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.BlockECReconstructionInfoProtoOrBuilder> 
           getBlockECReconstructioninfoOrBuilderList() {
        if (blockECReconstructioninfoBuilder_ != null) {
          return blockECReconstructioninfoBuilder_.getMessageOrBuilderList();
        } else {
          return java.util.Collections.unmodifiableList(blockECReconstructioninfo_);
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.BlockECReconstructionInfoProto blockECReconstructioninfo = 1;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.BlockECReconstructionInfoProto.Builder addBlockECReconstructioninfoBuilder() {
        return getBlockECReconstructioninfoFieldBuilder().addBuilder(
            org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.BlockECReconstructionInfoProto.getDefaultInstance());
      }
      /**
       * <code>repeated .hadoop.hdfs.BlockECReconstructionInfoProto blockECReconstructioninfo = 1;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.BlockECReconstructionInfoProto.Builder addBlockECReconstructioninfoBuilder(
          int index) {
        return getBlockECReconstructioninfoFieldBuilder().addBuilder(
            index, org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.BlockECReconstructionInfoProto.getDefaultInstance());
      }
      /**
       * <code>repeated .hadoop.hdfs.BlockECReconstructionInfoProto blockECReconstructioninfo = 1;</code>
       */
      public java.util.List<org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.BlockECReconstructionInfoProto.Builder> 
           getBlockECReconstructioninfoBuilderList() {
        return getBlockECReconstructioninfoFieldBuilder().getBuilderList();
      }
      private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.BlockECReconstructionInfoProto, org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.BlockECReconstructionInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.BlockECReconstructionInfoProtoOrBuilder> 
          getBlockECReconstructioninfoFieldBuilder() {
        if (blockECReconstructioninfoBuilder_ == null) {
          blockECReconstructioninfoBuilder_ = new org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
              org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.BlockECReconstructionInfoProto, org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.BlockECReconstructionInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.BlockECReconstructionInfoProtoOrBuilder>(
                  blockECReconstructioninfo_,
                  ((bitField0_ & 0x00000001) != 0),
                  getParentForChildren(),
                  isClean());
          blockECReconstructioninfo_ = null;
        }
        return blockECReconstructioninfoBuilder_;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.datanode.BlockECReconstructionCommandProto)
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.datanode.BlockECReconstructionCommandProto)
    private static final org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockECReconstructionCommandProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockECReconstructionCommandProto();
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockECReconstructionCommandProto getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<BlockECReconstructionCommandProto>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<BlockECReconstructionCommandProto>() {
      @java.lang.Override
      public BlockECReconstructionCommandProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<BlockECReconstructionCommandProto> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<BlockECReconstructionCommandProto> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockECReconstructionCommandProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }
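
  // Hedged usage sketch (illustration only, not part of the generated API surface): a
  // BlockECReconstructionCommandProto is typically assembled through the repeated-field builder
  // methods shown above. Here `infos` is an assumed, pre-built
  // Iterable<ErasureCodingProtos.BlockECReconstructionInfoProto>:
  //
  //   BlockECReconstructionCommandProto cmd =
  //       BlockECReconstructionCommandProto.newBuilder()
  //           .addAllBlockECReconstructioninfo(infos)
  //           .build();
  //
  // The builder also exposes clearBlockECReconstructioninfo() and
  // removeBlockECReconstructioninfo(int) for adjusting the list before build().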

  public interface RegisterDatanodeRequestProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.datanode.RegisterDatanodeRequestProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

    /**
     * <pre>
     * Datanode info
     * </pre>
     *
     * <code>required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;</code>
     * @return Whether the registration field is set.
     */
    boolean hasRegistration();
    /**
     * <pre>
     * Datanode info
     * </pre>
     *
     * <code>required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;</code>
     * @return The registration.
     */
    org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto getRegistration();
    /**
     * <pre>
     * Datanode info
     * </pre>
     *
     * <code>required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder getRegistrationOrBuilder();
  }
  /**
   * <pre>
   **
   * registration - Information of the datanode registering with the namenode
   * </pre>
   *
   * Protobuf type {@code hadoop.hdfs.datanode.RegisterDatanodeRequestProto}
   */
  public static final class RegisterDatanodeRequestProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.hdfs.datanode.RegisterDatanodeRequestProto)
      RegisterDatanodeRequestProtoOrBuilder {
    private static final long serialVersionUID = 0L;
    // Use RegisterDatanodeRequestProto.newBuilder() to construct.
    private RegisterDatanodeRequestProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private RegisterDatanodeRequestProto() {
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new RegisterDatanodeRequestProto();
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_RegisterDatanodeRequestProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_RegisterDatanodeRequestProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto.Builder.class);
    }

    private int bitField0_;
    public static final int REGISTRATION_FIELD_NUMBER = 1;
    private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto registration_;
    /**
     * <pre>
     * Datanode info
     * </pre>
     *
     * <code>required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;</code>
     * @return Whether the registration field is set.
     */
    @java.lang.Override
    public boolean hasRegistration() {
      return ((bitField0_ & 0x00000001) != 0);
    }
    /**
     * <pre>
     * Datanode info
     * </pre>
     *
     * <code>required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;</code>
     * @return The registration.
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto getRegistration() {
      return registration_ == null ? org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance() : registration_;
    }
    /**
     * <pre>
     * Datanode info
     * </pre>
     *
     * <code>required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder getRegistrationOrBuilder() {
      return registration_ == null ? org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance() : registration_;
    }

    private byte memoizedIsInitialized = -1;
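    // memoizedIsInitialized caches the outcome of the required-field check below:
    // -1 = not yet computed, 0 = a required field is missing or invalid, 1 = fully initialized.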
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      if (!hasRegistration()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!getRegistration().isInitialized()) {
        memoizedIsInitialized = 0;
        return false;
      }
      memoizedIsInitialized = 1;
      return true;
    }

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      if (((bitField0_ & 0x00000001) != 0)) {
        output.writeMessage(1, getRegistration());
      }
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeMessageSize(1, getRegistration());
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto) obj;

      if (hasRegistration() != other.hasRegistration()) return false;
      if (hasRegistration()) {
        if (!getRegistration()
            .equals(other.getRegistration())) return false;
      }
      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      if (hasRegistration()) {
        hash = (37 * hash) + REGISTRATION_FIELD_NUMBER;
        hash = (53 * hash) + getRegistration().hashCode();
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * <pre>
     **
     * registration - Information of the datanode registering with the namenode
     * </pre>
     *
     * Protobuf type {@code hadoop.hdfs.datanode.RegisterDatanodeRequestProto}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.hdfs.datanode.RegisterDatanodeRequestProto)
        org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProtoOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_RegisterDatanodeRequestProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_RegisterDatanodeRequestProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      private void maybeForceBuilderInitialization() {
        if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
                .alwaysUseFieldBuilders) {
          getRegistrationFieldBuilder();
        }
      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        bitField0_ = 0;
        registration_ = null;
        if (registrationBuilder_ != null) {
          registrationBuilder_.dispose();
          registrationBuilder_ = null;
        }
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_RegisterDatanodeRequestProto_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto build() {
        org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto buildPartial() {
        org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto(this);
        if (bitField0_ != 0) { buildPartial0(result); }
        onBuilt();
        return result;
      }

      private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto result) {
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) != 0)) {
          result.registration_ = registrationBuilder_ == null
              ? registration_
              : registrationBuilder_.build();
          to_bitField0_ |= 0x00000001;
        }
        result.bitField0_ |= to_bitField0_;
      }

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto) {
          return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto other) {
        if (other == org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto.getDefaultInstance()) return this;
        if (other.hasRegistration()) {
          mergeRegistration(other.getRegistration());
        }
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        if (!hasRegistration()) {
          return false;
        }
        if (!getRegistration().isInitialized()) {
          return false;
        }
        return true;
      }

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              case 10: {
                input.readMessage(
                    getRegistrationFieldBuilder().getBuilder(),
                    extensionRegistry);
                bitField0_ |= 0x00000001;
                break;
              } // case 10
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
      private int bitField0_;

      private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto registration_;
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder> registrationBuilder_;
      /**
       * <pre>
       * Datanode info
       * </pre>
       *
       * <code>required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;</code>
       * @return Whether the registration field is set.
       */
      public boolean hasRegistration() {
        return ((bitField0_ & 0x00000001) != 0);
      }
      /**
       * <pre>
       * Datanode info
       * </pre>
       *
       * <code>required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;</code>
       * @return The registration.
       */
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto getRegistration() {
        if (registrationBuilder_ == null) {
          return registration_ == null ? org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance() : registration_;
        } else {
          return registrationBuilder_.getMessage();
        }
      }
      /**
       * <pre>
       * Datanode info
       * </pre>
       *
       * <code>required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;</code>
       */
      public Builder setRegistration(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto value) {
        if (registrationBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          registration_ = value;
        } else {
          registrationBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * <pre>
       * Datanode info
       * </pre>
       *
       * <code>required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;</code>
       */
      public Builder setRegistration(
          org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder builderForValue) {
        if (registrationBuilder_ == null) {
          registration_ = builderForValue.build();
        } else {
          registrationBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * <pre>
       * Datanode info
       * </pre>
       *
       * <code>required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;</code>
       */
      public Builder mergeRegistration(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto value) {
        if (registrationBuilder_ == null) {
          if (((bitField0_ & 0x00000001) != 0) &&
            registration_ != null &&
            registration_ != org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance()) {
            getRegistrationBuilder().mergeFrom(value);
          } else {
            registration_ = value;
          }
        } else {
          registrationBuilder_.mergeFrom(value);
        }
        if (registration_ != null) {
          bitField0_ |= 0x00000001;
          onChanged();
        }
        return this;
      }
      /**
       * <pre>
       * Datanode info
       * </pre>
       *
       * <code>required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;</code>
       */
      public Builder clearRegistration() {
        bitField0_ = (bitField0_ & ~0x00000001);
        registration_ = null;
        if (registrationBuilder_ != null) {
          registrationBuilder_.dispose();
          registrationBuilder_ = null;
        }
        onChanged();
        return this;
      }
      /**
       * <pre>
       * Datanode info
       * </pre>
       *
       * <code>required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder getRegistrationBuilder() {
        bitField0_ |= 0x00000001;
        onChanged();
        return getRegistrationFieldBuilder().getBuilder();
      }
      /**
       * <pre>
       * Datanode info
       * </pre>
       *
       * <code>required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder getRegistrationOrBuilder() {
        if (registrationBuilder_ != null) {
          return registrationBuilder_.getMessageOrBuilder();
        } else {
          return registration_ == null ?
              org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance() : registration_;
        }
      }
      /**
       * <pre>
       * Datanode info
       * </pre>
       *
       * <code>required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;</code>
       */
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder> 
          getRegistrationFieldBuilder() {
        if (registrationBuilder_ == null) {
          registrationBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
              org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder>(
                  getRegistration(),
                  getParentForChildren(),
                  isClean());
          registration_ = null;
        }
        return registrationBuilder_;
      }
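      // The SingleFieldBuilderV3 above is created lazily on first use: it is seeded with the
      // current registration_ value and then takes over ownership of the field, so registration_
      // is nulled out once the field builder exists.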
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.datanode.RegisterDatanodeRequestProto)
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.datanode.RegisterDatanodeRequestProto)
    private static final org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto();
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<RegisterDatanodeRequestProto>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<RegisterDatanodeRequestProto>() {
      @java.lang.Override
      public RegisterDatanodeRequestProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<RegisterDatanodeRequestProto> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<RegisterDatanodeRequestProto> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }
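
  // Hedged usage sketch (illustration only): constructing and round-tripping a
  // RegisterDatanodeRequestProto. `reg` is an assumed, fully-populated DatanodeRegistrationProto;
  // because `registration` is a required field, build() throws if it has not been set.
  //
  //   RegisterDatanodeRequestProto request =
  //       RegisterDatanodeRequestProto.newBuilder()
  //           .setRegistration(reg)
  //           .build();
  //   byte[] wire = request.toByteArray();
  //   RegisterDatanodeRequestProto parsed = RegisterDatanodeRequestProto.parseFrom(wire);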

  public interface RegisterDatanodeResponseProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.datanode.RegisterDatanodeResponseProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

    /**
     * <pre>
     * Datanode info
     * </pre>
     *
     * <code>required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;</code>
     * @return Whether the registration field is set.
     */
    boolean hasRegistration();
    /**
     * <pre>
     * Datanode info
     * </pre>
     *
     * <code>required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;</code>
     * @return The registration.
     */
    org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto getRegistration();
    /**
     * <pre>
     * Datanode info
     * </pre>
     *
     * <code>required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder getRegistrationOrBuilder();
  }
  /**
   * <pre>
   **
   * registration - Update registration of the datanode that successfully 
   *                registered. StorageInfo will be updated to include new 
   *                storage ID if the datanode did not have one in the request.
   * </pre>
   *
   * Protobuf type {@code hadoop.hdfs.datanode.RegisterDatanodeResponseProto}
   */
  public static final class RegisterDatanodeResponseProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.hdfs.datanode.RegisterDatanodeResponseProto)
      RegisterDatanodeResponseProtoOrBuilder {
    private static final long serialVersionUID = 0L;
    // Use RegisterDatanodeResponseProto.newBuilder() to construct.
    private RegisterDatanodeResponseProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private RegisterDatanodeResponseProto() {
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new RegisterDatanodeResponseProto();
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_RegisterDatanodeResponseProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_RegisterDatanodeResponseProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto.Builder.class);
    }

    private int bitField0_;
    public static final int REGISTRATION_FIELD_NUMBER = 1;
    private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto registration_;
    /**
     * <pre>
     * Datanode info
     * </pre>
     *
     * <code>required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;</code>
     * @return Whether the registration field is set.
     */
    @java.lang.Override
    public boolean hasRegistration() {
      return ((bitField0_ & 0x00000001) != 0);
    }
    /**
     * <pre>
     * Datanode info
     * </pre>
     *
     * <code>required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;</code>
     * @return The registration.
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto getRegistration() {
      return registration_ == null ? org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance() : registration_;
    }
    /**
     * <pre>
     * Datanode info
     * </pre>
     *
     * <code>required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder getRegistrationOrBuilder() {
      return registration_ == null ? org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance() : registration_;
    }
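
    // Hedged illustration: after a register call, the datanode side would typically read back the
    // (possibly updated) registration from the parsed response. `bytes` is an assumed serialized
    // RegisterDatanodeResponseProto:
    //
    //   RegisterDatanodeResponseProto resp = RegisterDatanodeResponseProto.parseFrom(bytes);
    //   if (resp.hasRegistration()) {
    //     DatanodeRegistrationProto updated = resp.getRegistration();
    //   }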

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      if (!hasRegistration()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!getRegistration().isInitialized()) {
        memoizedIsInitialized = 0;
        return false;
      }
      memoizedIsInitialized = 1;
      return true;
    }

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      if (((bitField0_ & 0x00000001) != 0)) {
        output.writeMessage(1, getRegistration());
      }
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeMessageSize(1, getRegistration());
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto) obj;

      if (hasRegistration() != other.hasRegistration()) return false;
      if (hasRegistration()) {
        if (!getRegistration()
            .equals(other.getRegistration())) return false;
      }
      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      if (hasRegistration()) {
        hash = (37 * hash) + REGISTRATION_FIELD_NUMBER;
        hash = (53 * hash) + getRegistration().hashCode();
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * <pre>
     **
     * registration - Update registration of the datanode that successfully 
     *                registered. StorageInfo will be updated to include new 
     *                storage ID if the datanode did not have one in the request.
     * </pre>
     *
     * Protobuf type {@code hadoop.hdfs.datanode.RegisterDatanodeResponseProto}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.hdfs.datanode.RegisterDatanodeResponseProto)
        org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProtoOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_RegisterDatanodeResponseProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_RegisterDatanodeResponseProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      private void maybeForceBuilderInitialization() {
        if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
                .alwaysUseFieldBuilders) {
          getRegistrationFieldBuilder();
        }
      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        bitField0_ = 0;
        registration_ = null;
        if (registrationBuilder_ != null) {
          registrationBuilder_.dispose();
          registrationBuilder_ = null;
        }
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_RegisterDatanodeResponseProto_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto build() {
        org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto buildPartial() {
        org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto(this);
        if (bitField0_ != 0) { buildPartial0(result); }
        onBuilt();
        return result;
      }

      private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto result) {
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) != 0)) {
          result.registration_ = registrationBuilder_ == null
              ? registration_
              : registrationBuilder_.build();
          to_bitField0_ |= 0x00000001;
        }
        result.bitField0_ |= to_bitField0_;
      }

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto) {
          return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto other) {
        if (other == org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto.getDefaultInstance()) return this;
        if (other.hasRegistration()) {
          mergeRegistration(other.getRegistration());
        }
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        if (!hasRegistration()) {
          return false;
        }
        if (!getRegistration().isInitialized()) {
          return false;
        }
        return true;
      }

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              case 10: {
                input.readMessage(
                    getRegistrationFieldBuilder().getBuilder(),
                    extensionRegistry);
                bitField0_ |= 0x00000001;
                break;
              } // case 10
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
      private int bitField0_;

      private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto registration_;
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder> registrationBuilder_;
      /**
       * <pre>
       * Datanode info
       * </pre>
       *
       * <code>required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;</code>
       * @return Whether the registration field is set.
       */
      public boolean hasRegistration() {
        return ((bitField0_ & 0x00000001) != 0);
      }
      /**
       * <pre>
       * Datanode info
       * </pre>
       *
       * <code>required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;</code>
       * @return The registration.
       */
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto getRegistration() {
        if (registrationBuilder_ == null) {
          return registration_ == null ? org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance() : registration_;
        } else {
          return registrationBuilder_.getMessage();
        }
      }
      /**
       * <pre>
       * Datanode info
       * </pre>
       *
       * <code>required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;</code>
       */
      public Builder setRegistration(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto value) {
        if (registrationBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          registration_ = value;
        } else {
          registrationBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * <pre>
       * Datanode info
       * </pre>
       *
       * <code>required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;</code>
       */
      public Builder setRegistration(
          org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder builderForValue) {
        if (registrationBuilder_ == null) {
          registration_ = builderForValue.build();
        } else {
          registrationBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * <pre>
       * Datanode info
       * </pre>
       *
       * <code>required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;</code>
       */
      public Builder mergeRegistration(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto value) {
        if (registrationBuilder_ == null) {
          if (((bitField0_ & 0x00000001) != 0) &&
            registration_ != null &&
            registration_ != org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance()) {
            getRegistrationBuilder().mergeFrom(value);
          } else {
            registration_ = value;
          }
        } else {
          registrationBuilder_.mergeFrom(value);
        }
        if (registration_ != null) {
          bitField0_ |= 0x00000001;
          onChanged();
        }
        return this;
      }
      /**
       * <pre>
       * Datanode info
       * </pre>
       *
       * <code>required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;</code>
       */
      public Builder clearRegistration() {
        bitField0_ = (bitField0_ & ~0x00000001);
        registration_ = null;
        if (registrationBuilder_ != null) {
          registrationBuilder_.dispose();
          registrationBuilder_ = null;
        }
        onChanged();
        return this;
      }
      /**
       * <pre>
       * Datanode info
       * </pre>
       *
       * <code>required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder getRegistrationBuilder() {
        bitField0_ |= 0x00000001;
        onChanged();
        return getRegistrationFieldBuilder().getBuilder();
      }
      /**
       * <pre>
       * Datanode info
       * </pre>
       *
       * <code>required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder getRegistrationOrBuilder() {
        if (registrationBuilder_ != null) {
          return registrationBuilder_.getMessageOrBuilder();
        } else {
          return registration_ == null ?
              org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance() : registration_;
        }
      }
      /**
       * <pre>
       * Datanode info
       * </pre>
       *
       * <code>required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;</code>
       */
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder> 
          getRegistrationFieldBuilder() {
        if (registrationBuilder_ == null) {
          registrationBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
              org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder>(
                  getRegistration(),
                  getParentForChildren(),
                  isClean());
          registration_ = null;
        }
        return registrationBuilder_;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.datanode.RegisterDatanodeResponseProto)
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.datanode.RegisterDatanodeResponseProto)
    private static final org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto();
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<RegisterDatanodeResponseProto>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<RegisterDatanodeResponseProto>() {
      @java.lang.Override
      public RegisterDatanodeResponseProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<RegisterDatanodeResponseProto> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<RegisterDatanodeResponseProto> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }
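
  // Illustrative sketch (not part of the generated code): the response a NameNode-side
  // handler would typically build from the registration it echoes back to the DataNode.
  // The method name and the 'registration' argument are hypothetical; only the builder
  // calls shown above (newBuilder, setRegistration, build) are assumed from this file.
  private static RegisterDatanodeResponseProto registerResponseExample(
      DatanodeRegistrationProto registration) {
    return RegisterDatanodeResponseProto.newBuilder()
        .setRegistration(registration)   // required field 1; build() throws if it is unset
        .build();
  }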

  public interface VolumeFailureSummaryProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.datanode.VolumeFailureSummaryProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

    /**
     * <code>repeated string failedStorageLocations = 1;</code>
     * @return A list containing the failedStorageLocations.
     */
    java.util.List<java.lang.String>
        getFailedStorageLocationsList();
    /**
     * <code>repeated string failedStorageLocations = 1;</code>
     * @return The count of failedStorageLocations.
     */
    int getFailedStorageLocationsCount();
    /**
     * <code>repeated string failedStorageLocations = 1;</code>
     * @param index The index of the element to return.
     * @return The failedStorageLocations at the given index.
     */
    java.lang.String getFailedStorageLocations(int index);
    /**
     * <code>repeated string failedStorageLocations = 1;</code>
     * @param index The index of the value to return.
     * @return The bytes of the failedStorageLocations at the given index.
     */
    org.apache.hadoop.thirdparty.protobuf.ByteString
        getFailedStorageLocationsBytes(int index);

    /**
     * <code>required uint64 lastVolumeFailureDate = 2;</code>
     * @return Whether the lastVolumeFailureDate field is set.
     */
    boolean hasLastVolumeFailureDate();
    /**
     * <code>required uint64 lastVolumeFailureDate = 2;</code>
     * @return The lastVolumeFailureDate.
     */
    long getLastVolumeFailureDate();

    /**
     * <code>required uint64 estimatedCapacityLostTotal = 3;</code>
     * @return Whether the estimatedCapacityLostTotal field is set.
     */
    boolean hasEstimatedCapacityLostTotal();
    /**
     * <code>required uint64 estimatedCapacityLostTotal = 3;</code>
     * @return The estimatedCapacityLostTotal.
     */
    long getEstimatedCapacityLostTotal();
  }
  /**
   * <pre>
   **
   * failedStorageLocations - storage locations that have failed
   * lastVolumeFailureDate - date/time of last volume failure
   * estimatedCapacityLostTotal - estimate of total capacity lost due to volume failures
   * </pre>
   *
   * Protobuf type {@code hadoop.hdfs.datanode.VolumeFailureSummaryProto}
   */
  public static final class VolumeFailureSummaryProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.hdfs.datanode.VolumeFailureSummaryProto)
      VolumeFailureSummaryProtoOrBuilder {
    private static final long serialVersionUID = 0L;
    // Use VolumeFailureSummaryProto.newBuilder() to construct.
    private VolumeFailureSummaryProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private VolumeFailureSummaryProto() {
      failedStorageLocations_ =
          org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList.emptyList();
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new VolumeFailureSummaryProto();
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_VolumeFailureSummaryProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_VolumeFailureSummaryProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.VolumeFailureSummaryProto.class, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.VolumeFailureSummaryProto.Builder.class);
    }

    private int bitField0_;
    public static final int FAILEDSTORAGELOCATIONS_FIELD_NUMBER = 1;
    @SuppressWarnings("serial")
    private org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList failedStorageLocations_ =
        org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList.emptyList();
    /**
     * <code>repeated string failedStorageLocations = 1;</code>
     * @return A list containing the failedStorageLocations.
     */
    public org.apache.hadoop.thirdparty.protobuf.ProtocolStringList
        getFailedStorageLocationsList() {
      return failedStorageLocations_;
    }
    /**
     * <code>repeated string failedStorageLocations = 1;</code>
     * @return The count of failedStorageLocations.
     */
    public int getFailedStorageLocationsCount() {
      return failedStorageLocations_.size();
    }
    /**
     * <code>repeated string failedStorageLocations = 1;</code>
     * @param index The index of the element to return.
     * @return The failedStorageLocations at the given index.
     */
    public java.lang.String getFailedStorageLocations(int index) {
      return failedStorageLocations_.get(index);
    }
    /**
     * <code>repeated string failedStorageLocations = 1;</code>
     * @param index The index of the value to return.
     * @return The bytes of the failedStorageLocations at the given index.
     */
    public org.apache.hadoop.thirdparty.protobuf.ByteString
        getFailedStorageLocationsBytes(int index) {
      return failedStorageLocations_.getByteString(index);
    }

    public static final int LASTVOLUMEFAILUREDATE_FIELD_NUMBER = 2;
    private long lastVolumeFailureDate_ = 0L;
    /**
     * <code>required uint64 lastVolumeFailureDate = 2;</code>
     * @return Whether the lastVolumeFailureDate field is set.
     */
    @java.lang.Override
    public boolean hasLastVolumeFailureDate() {
      return ((bitField0_ & 0x00000001) != 0);
    }
    /**
     * <code>required uint64 lastVolumeFailureDate = 2;</code>
     * @return The lastVolumeFailureDate.
     */
    @java.lang.Override
    public long getLastVolumeFailureDate() {
      return lastVolumeFailureDate_;
    }

    public static final int ESTIMATEDCAPACITYLOSTTOTAL_FIELD_NUMBER = 3;
    private long estimatedCapacityLostTotal_ = 0L;
    /**
     * <code>required uint64 estimatedCapacityLostTotal = 3;</code>
     * @return Whether the estimatedCapacityLostTotal field is set.
     */
    @java.lang.Override
    public boolean hasEstimatedCapacityLostTotal() {
      return ((bitField0_ & 0x00000002) != 0);
    }
    /**
     * <code>required uint64 estimatedCapacityLostTotal = 3;</code>
     * @return The estimatedCapacityLostTotal.
     */
    @java.lang.Override
    public long getEstimatedCapacityLostTotal() {
      return estimatedCapacityLostTotal_;
    }

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      if (!hasLastVolumeFailureDate()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasEstimatedCapacityLostTotal()) {
        memoizedIsInitialized = 0;
        return false;
      }
      memoizedIsInitialized = 1;
      return true;
    }

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      for (int i = 0; i < failedStorageLocations_.size(); i++) {
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 1, failedStorageLocations_.getRaw(i));
      }
      if (((bitField0_ & 0x00000001) != 0)) {
        output.writeUInt64(2, lastVolumeFailureDate_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        output.writeUInt64(3, estimatedCapacityLostTotal_);
      }
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      {
        int dataSize = 0;
        for (int i = 0; i < failedStorageLocations_.size(); i++) {
          dataSize += computeStringSizeNoTag(failedStorageLocations_.getRaw(i));
        }
        size += dataSize;
        size += 1 * getFailedStorageLocationsList().size();
      }
      if (((bitField0_ & 0x00000001) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt64Size(2, lastVolumeFailureDate_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt64Size(3, estimatedCapacityLostTotal_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.VolumeFailureSummaryProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.VolumeFailureSummaryProto other = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.VolumeFailureSummaryProto) obj;

      if (!getFailedStorageLocationsList()
          .equals(other.getFailedStorageLocationsList())) return false;
      if (hasLastVolumeFailureDate() != other.hasLastVolumeFailureDate()) return false;
      if (hasLastVolumeFailureDate()) {
        if (getLastVolumeFailureDate()
            != other.getLastVolumeFailureDate()) return false;
      }
      if (hasEstimatedCapacityLostTotal() != other.hasEstimatedCapacityLostTotal()) return false;
      if (hasEstimatedCapacityLostTotal()) {
        if (getEstimatedCapacityLostTotal()
            != other.getEstimatedCapacityLostTotal()) return false;
      }
      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      if (getFailedStorageLocationsCount() > 0) {
        hash = (37 * hash) + FAILEDSTORAGELOCATIONS_FIELD_NUMBER;
        hash = (53 * hash) + getFailedStorageLocationsList().hashCode();
      }
      if (hasLastVolumeFailureDate()) {
        hash = (37 * hash) + LASTVOLUMEFAILUREDATE_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
            getLastVolumeFailureDate());
      }
      if (hasEstimatedCapacityLostTotal()) {
        hash = (37 * hash) + ESTIMATEDCAPACITYLOSTTOTAL_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
            getEstimatedCapacityLostTotal());
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.VolumeFailureSummaryProto parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.VolumeFailureSummaryProto parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.VolumeFailureSummaryProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.VolumeFailureSummaryProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.VolumeFailureSummaryProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.VolumeFailureSummaryProto parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.VolumeFailureSummaryProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.VolumeFailureSummaryProto parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.VolumeFailureSummaryProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.VolumeFailureSummaryProto parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.VolumeFailureSummaryProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.VolumeFailureSummaryProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.VolumeFailureSummaryProto prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * <pre>
     **
     * failedStorageLocations - storage locations that have failed
     * lastVolumeFailureDate - date/time of last volume failure
     * estimatedCapacityLostTotal - estimate of total capacity lost due to volume failures
     * </pre>
     *
     * Protobuf type {@code hadoop.hdfs.datanode.VolumeFailureSummaryProto}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.hdfs.datanode.VolumeFailureSummaryProto)
        org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.VolumeFailureSummaryProtoOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_VolumeFailureSummaryProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_VolumeFailureSummaryProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.VolumeFailureSummaryProto.class, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.VolumeFailureSummaryProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.VolumeFailureSummaryProto.newBuilder()
      private Builder() {

      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);

      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        bitField0_ = 0;
        failedStorageLocations_ =
            org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList.emptyList();
        lastVolumeFailureDate_ = 0L;
        estimatedCapacityLostTotal_ = 0L;
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_VolumeFailureSummaryProto_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.VolumeFailureSummaryProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.VolumeFailureSummaryProto.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.VolumeFailureSummaryProto build() {
        org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.VolumeFailureSummaryProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.VolumeFailureSummaryProto buildPartial() {
        org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.VolumeFailureSummaryProto result = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.VolumeFailureSummaryProto(this);
        if (bitField0_ != 0) { buildPartial0(result); }
        onBuilt();
        return result;
      }

      private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.VolumeFailureSummaryProto result) {
        int from_bitField0_ = bitField0_;
        if (((from_bitField0_ & 0x00000001) != 0)) {
          failedStorageLocations_.makeImmutable();
          result.failedStorageLocations_ = failedStorageLocations_;
        }
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000002) != 0)) {
          result.lastVolumeFailureDate_ = lastVolumeFailureDate_;
          to_bitField0_ |= 0x00000001;
        }
        if (((from_bitField0_ & 0x00000004) != 0)) {
          result.estimatedCapacityLostTotal_ = estimatedCapacityLostTotal_;
          to_bitField0_ |= 0x00000002;
        }
        result.bitField0_ |= to_bitField0_;
      }

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.VolumeFailureSummaryProto) {
          return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.VolumeFailureSummaryProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.VolumeFailureSummaryProto other) {
        if (other == org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.VolumeFailureSummaryProto.getDefaultInstance()) return this;
        if (!other.failedStorageLocations_.isEmpty()) {
          if (failedStorageLocations_.isEmpty()) {
            failedStorageLocations_ = other.failedStorageLocations_;
            bitField0_ |= 0x00000001;
          } else {
            ensureFailedStorageLocationsIsMutable();
            failedStorageLocations_.addAll(other.failedStorageLocations_);
          }
          onChanged();
        }
        if (other.hasLastVolumeFailureDate()) {
          setLastVolumeFailureDate(other.getLastVolumeFailureDate());
        }
        if (other.hasEstimatedCapacityLostTotal()) {
          setEstimatedCapacityLostTotal(other.getEstimatedCapacityLostTotal());
        }
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        if (!hasLastVolumeFailureDate()) {
          return false;
        }
        if (!hasEstimatedCapacityLostTotal()) {
          return false;
        }
        return true;
      }

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              case 10: {
                org.apache.hadoop.thirdparty.protobuf.ByteString bs = input.readBytes();
                ensureFailedStorageLocationsIsMutable();
                failedStorageLocations_.add(bs);
                break;
              } // case 10
              case 16: {
                lastVolumeFailureDate_ = input.readUInt64();
                bitField0_ |= 0x00000002;
                break;
              } // case 16
              case 24: {
                estimatedCapacityLostTotal_ = input.readUInt64();
                bitField0_ |= 0x00000004;
                break;
              } // case 24
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
      private int bitField0_;

      private org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList failedStorageLocations_ =
          org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList.emptyList();
      private void ensureFailedStorageLocationsIsMutable() {
        if (!failedStorageLocations_.isModifiable()) {
          failedStorageLocations_ = new org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList(failedStorageLocations_);
        }
        bitField0_ |= 0x00000001;
      }
      /**
       * <code>repeated string failedStorageLocations = 1;</code>
       * @return A list containing the failedStorageLocations.
       */
      public org.apache.hadoop.thirdparty.protobuf.ProtocolStringList
          getFailedStorageLocationsList() {
        failedStorageLocations_.makeImmutable();
        return failedStorageLocations_;
      }
      /**
       * <code>repeated string failedStorageLocations = 1;</code>
       * @return The count of failedStorageLocations.
       */
      public int getFailedStorageLocationsCount() {
        return failedStorageLocations_.size();
      }
      /**
       * <code>repeated string failedStorageLocations = 1;</code>
       * @param index The index of the element to return.
       * @return The failedStorageLocations at the given index.
       */
      public java.lang.String getFailedStorageLocations(int index) {
        return failedStorageLocations_.get(index);
      }
      /**
       * <code>repeated string failedStorageLocations = 1;</code>
       * @param index The index of the value to return.
       * @return The bytes of the failedStorageLocations at the given index.
       */
      public org.apache.hadoop.thirdparty.protobuf.ByteString
          getFailedStorageLocationsBytes(int index) {
        return failedStorageLocations_.getByteString(index);
      }
      /**
       * <code>repeated string failedStorageLocations = 1;</code>
       * @param index The index to set the value at.
       * @param value The failedStorageLocations to set.
       * @return This builder for chaining.
       */
      public Builder setFailedStorageLocations(
          int index, java.lang.String value) {
        if (value == null) { throw new NullPointerException(); }
        ensureFailedStorageLocationsIsMutable();
        failedStorageLocations_.set(index, value);
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * <code>repeated string failedStorageLocations = 1;</code>
       * @param value The failedStorageLocations to add.
       * @return This builder for chaining.
       */
      public Builder addFailedStorageLocations(
          java.lang.String value) {
        if (value == null) { throw new NullPointerException(); }
        ensureFailedStorageLocationsIsMutable();
        failedStorageLocations_.add(value);
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * <code>repeated string failedStorageLocations = 1;</code>
       * @param values The failedStorageLocations to add.
       * @return This builder for chaining.
       */
      public Builder addAllFailedStorageLocations(
          java.lang.Iterable<java.lang.String> values) {
        ensureFailedStorageLocationsIsMutable();
        org.apache.hadoop.thirdparty.protobuf.AbstractMessageLite.Builder.addAll(
            values, failedStorageLocations_);
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * <code>repeated string failedStorageLocations = 1;</code>
       * @return This builder for chaining.
       */
      public Builder clearFailedStorageLocations() {
        failedStorageLocations_ =
          org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList.emptyList();
        bitField0_ = (bitField0_ & ~0x00000001);
        onChanged();
        return this;
      }
      /**
       * <code>repeated string failedStorageLocations = 1;</code>
       * @param value The bytes of the failedStorageLocations to add.
       * @return This builder for chaining.
       */
      public Builder addFailedStorageLocationsBytes(
          org.apache.hadoop.thirdparty.protobuf.ByteString value) {
        if (value == null) { throw new NullPointerException(); }
        ensureFailedStorageLocationsIsMutable();
        failedStorageLocations_.add(value);
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }

      private long lastVolumeFailureDate_;
      /**
       * <code>required uint64 lastVolumeFailureDate = 2;</code>
       * @return Whether the lastVolumeFailureDate field is set.
       */
      @java.lang.Override
      public boolean hasLastVolumeFailureDate() {
        return ((bitField0_ & 0x00000002) != 0);
      }
      /**
       * <code>required uint64 lastVolumeFailureDate = 2;</code>
       * @return The lastVolumeFailureDate.
       */
      @java.lang.Override
      public long getLastVolumeFailureDate() {
        return lastVolumeFailureDate_;
      }
      /**
       * <code>required uint64 lastVolumeFailureDate = 2;</code>
       * @param value The lastVolumeFailureDate to set.
       * @return This builder for chaining.
       */
      public Builder setLastVolumeFailureDate(long value) {

        lastVolumeFailureDate_ = value;
        bitField0_ |= 0x00000002;
        onChanged();
        return this;
      }
      /**
       * <code>required uint64 lastVolumeFailureDate = 2;</code>
       * @return This builder for chaining.
       */
      public Builder clearLastVolumeFailureDate() {
        bitField0_ = (bitField0_ & ~0x00000002);
        lastVolumeFailureDate_ = 0L;
        onChanged();
        return this;
      }

      private long estimatedCapacityLostTotal_;
      /**
       * <code>required uint64 estimatedCapacityLostTotal = 3;</code>
       * @return Whether the estimatedCapacityLostTotal field is set.
       */
      @java.lang.Override
      public boolean hasEstimatedCapacityLostTotal() {
        return ((bitField0_ & 0x00000004) != 0);
      }
      /**
       * <code>required uint64 estimatedCapacityLostTotal = 3;</code>
       * @return The estimatedCapacityLostTotal.
       */
      @java.lang.Override
      public long getEstimatedCapacityLostTotal() {
        return estimatedCapacityLostTotal_;
      }
      /**
       * <code>required uint64 estimatedCapacityLostTotal = 3;</code>
       * @param value The estimatedCapacityLostTotal to set.
       * @return This builder for chaining.
       */
      public Builder setEstimatedCapacityLostTotal(long value) {

        estimatedCapacityLostTotal_ = value;
        bitField0_ |= 0x00000004;
        onChanged();
        return this;
      }
      /**
       * <code>required uint64 estimatedCapacityLostTotal = 3;</code>
       * @return This builder for chaining.
       */
      public Builder clearEstimatedCapacityLostTotal() {
        bitField0_ = (bitField0_ & ~0x00000004);
        estimatedCapacityLostTotal_ = 0L;
        onChanged();
        return this;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.datanode.VolumeFailureSummaryProto)
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.datanode.VolumeFailureSummaryProto)
    private static final org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.VolumeFailureSummaryProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.VolumeFailureSummaryProto();
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.VolumeFailureSummaryProto getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<VolumeFailureSummaryProto>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<VolumeFailureSummaryProto>() {
      @java.lang.Override
      public VolumeFailureSummaryProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<VolumeFailureSummaryProto> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<VolumeFailureSummaryProto> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.VolumeFailureSummaryProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }
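
  // Illustrative sketch (not part of the generated code): builds a VolumeFailureSummaryProto
  // for a DataNode that lost one volume and round-trips it through the wire format using the
  // parseFrom(byte[]) overload above. Path and capacity values are hypothetical placeholders.
  private static VolumeFailureSummaryProto volumeFailureSummaryExample()
      throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
    VolumeFailureSummaryProto summary = VolumeFailureSummaryProto.newBuilder()
        .addFailedStorageLocations("/data/1/dfs/dn")              // repeated string, field 1
        .setLastVolumeFailureDate(System.currentTimeMillis())     // required uint64, field 2
        .setEstimatedCapacityLostTotal(4L * 1024 * 1024 * 1024)   // required uint64, field 3
        .build();
    // Both required fields must be set, otherwise parsing the bytes fails the initialization check.
    return VolumeFailureSummaryProto.parseFrom(summary.toByteArray());
  }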

  public interface HeartbeatRequestProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.datanode.HeartbeatRequestProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

    /**
     * <pre>
     * Datanode info
     * </pre>
     *
     * <code>required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;</code>
     * @return Whether the registration field is set.
     */
    boolean hasRegistration();
    /**
     * <pre>
     * Datanode info
     * </pre>
     *
     * <code>required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;</code>
     * @return The registration.
     */
    org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto getRegistration();
    /**
     * <pre>
     * Datanode info
     * </pre>
     *
     * <code>required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder getRegistrationOrBuilder();

    /**
     * <code>repeated .hadoop.hdfs.StorageReportProto reports = 2;</code>
     */
    java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto> 
        getReportsList();
    /**
     * <code>repeated .hadoop.hdfs.StorageReportProto reports = 2;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto getReports(int index);
    /**
     * <code>repeated .hadoop.hdfs.StorageReportProto reports = 2;</code>
     */
    int getReportsCount();
    /**
     * <code>repeated .hadoop.hdfs.StorageReportProto reports = 2;</code>
     */
    java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProtoOrBuilder> 
        getReportsOrBuilderList();
    /**
     * <code>repeated .hadoop.hdfs.StorageReportProto reports = 2;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProtoOrBuilder getReportsOrBuilder(
        int index);

    /**
     * <code>optional uint32 xmitsInProgress = 3 [default = 0];</code>
     * @return Whether the xmitsInProgress field is set.
     */
    boolean hasXmitsInProgress();
    /**
     * <code>optional uint32 xmitsInProgress = 3 [default = 0];</code>
     * @return The xmitsInProgress.
     */
    int getXmitsInProgress();

    /**
     * <code>optional uint32 xceiverCount = 4 [default = 0];</code>
     * @return Whether the xceiverCount field is set.
     */
    boolean hasXceiverCount();
    /**
     * <code>optional uint32 xceiverCount = 4 [default = 0];</code>
     * @return The xceiverCount.
     */
    int getXceiverCount();

    /**
     * <code>optional uint32 failedVolumes = 5 [default = 0];</code>
     * @return Whether the failedVolumes field is set.
     */
    boolean hasFailedVolumes();
    /**
     * <code>optional uint32 failedVolumes = 5 [default = 0];</code>
     * @return The failedVolumes.
     */
    int getFailedVolumes();

    /**
     * <code>optional uint64 cacheCapacity = 6 [default = 0];</code>
     * @return Whether the cacheCapacity field is set.
     */
    boolean hasCacheCapacity();
    /**
     * <code>optional uint64 cacheCapacity = 6 [default = 0];</code>
     * @return The cacheCapacity.
     */
    long getCacheCapacity();

    /**
     * <code>optional uint64 cacheUsed = 7 [default = 0];</code>
     * @return Whether the cacheUsed field is set.
     */
    boolean hasCacheUsed();
    /**
     * <code>optional uint64 cacheUsed = 7 [default = 0];</code>
     * @return The cacheUsed.
     */
    long getCacheUsed();

    /**
     * <code>optional .hadoop.hdfs.datanode.VolumeFailureSummaryProto volumeFailureSummary = 8;</code>
     * @return Whether the volumeFailureSummary field is set.
     */
    boolean hasVolumeFailureSummary();
    /**
     * <code>optional .hadoop.hdfs.datanode.VolumeFailureSummaryProto volumeFailureSummary = 8;</code>
     * @return The volumeFailureSummary.
     */
    org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.VolumeFailureSummaryProto getVolumeFailureSummary();
    /**
     * <code>optional .hadoop.hdfs.datanode.VolumeFailureSummaryProto volumeFailureSummary = 8;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.VolumeFailureSummaryProtoOrBuilder getVolumeFailureSummaryOrBuilder();

    /**
     * <code>optional bool requestFullBlockReportLease = 9 [default = false];</code>
     * @return Whether the requestFullBlockReportLease field is set.
     */
    boolean hasRequestFullBlockReportLease();
    /**
     * <code>optional bool requestFullBlockReportLease = 9 [default = false];</code>
     * @return The requestFullBlockReportLease.
     */
    boolean getRequestFullBlockReportLease();

    /**
     * <code>repeated .hadoop.hdfs.datanode.SlowPeerReportProto slowPeers = 10;</code>
     */
    java.util.List<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowPeerReportProto> 
        getSlowPeersList();
    /**
     * <code>repeated .hadoop.hdfs.datanode.SlowPeerReportProto slowPeers = 10;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowPeerReportProto getSlowPeers(int index);
    /**
     * <code>repeated .hadoop.hdfs.datanode.SlowPeerReportProto slowPeers = 10;</code>
     */
    int getSlowPeersCount();
    /**
     * <code>repeated .hadoop.hdfs.datanode.SlowPeerReportProto slowPeers = 10;</code>
     */
    java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowPeerReportProtoOrBuilder> 
        getSlowPeersOrBuilderList();
    /**
     * <code>repeated .hadoop.hdfs.datanode.SlowPeerReportProto slowPeers = 10;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowPeerReportProtoOrBuilder getSlowPeersOrBuilder(
        int index);

    /**
     * <code>repeated .hadoop.hdfs.datanode.SlowDiskReportProto slowDisks = 11;</code>
     */
    java.util.List<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowDiskReportProto> 
        getSlowDisksList();
    /**
     * <code>repeated .hadoop.hdfs.datanode.SlowDiskReportProto slowDisks = 11;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowDiskReportProto getSlowDisks(int index);
    /**
     * <code>repeated .hadoop.hdfs.datanode.SlowDiskReportProto slowDisks = 11;</code>
     */
    int getSlowDisksCount();
    /**
     * <code>repeated .hadoop.hdfs.datanode.SlowDiskReportProto slowDisks = 11;</code>
     */
    java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowDiskReportProtoOrBuilder> 
        getSlowDisksOrBuilderList();
    /**
     * <code>repeated .hadoop.hdfs.datanode.SlowDiskReportProto slowDisks = 11;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowDiskReportProtoOrBuilder getSlowDisksOrBuilder(
        int index);
  }
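
  // Illustrative sketch (not part of the generated code): assembles a minimal
  // HeartbeatRequestProto as a DataNode might before sending a heartbeat. The builder
  // method names follow the standard generated pattern for the fields declared in the
  // interface above; the registration, storage report and volume-failure summary
  // arguments are assumed to have been built elsewhere.
  private static HeartbeatRequestProto heartbeatRequestExample(
      DatanodeRegistrationProto registration,
      org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto report,
      VolumeFailureSummaryProto volumeFailureSummary) {
    return HeartbeatRequestProto.newBuilder()
        .setRegistration(registration)                 // required field 1
        .addReports(report)                            // repeated field 2, one entry per storage
        .setXmitsInProgress(0)                         // optional counters, fields 3-5
        .setXceiverCount(1)
        .setFailedVolumes(volumeFailureSummary.getFailedStorageLocationsCount())
        .setVolumeFailureSummary(volumeFailureSummary) // optional field 8
        .setRequestFullBlockReportLease(false)         // optional field 9
        .build();
  }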
  /**
   * <pre>
   **
   * registration - datanode registration information
   * capacity - total storage capacity available at the datanode
   * dfsUsed - storage used by HDFS
   * remaining - remaining storage available for HDFS
   * blockPoolUsed - storage used by the block pool
   * xmitsInProgress - number of transfers from this datanode to others
   * xceiverCount - number of active transceiver threads
   * failedVolumes - number of failed volumes.  This is redundant with the
   *     information included in volumeFailureSummary, but the field is retained
   *     for backwards compatibility.
   * cacheCapacity - total cache capacity available at the datanode
   * cacheUsed - amount of cache used
   * volumeFailureSummary - info about volume failures
   * slowPeers - info about peer DataNodes that are suspected to be slow.
   * slowDisks - info about DataNode disks that are suspected to be slow.
   * blksMovementResults - status of the scheduled blocks movements
   * </pre>
   *
   * Protobuf type {@code hadoop.hdfs.datanode.HeartbeatRequestProto}
   */
  public static final class HeartbeatRequestProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.hdfs.datanode.HeartbeatRequestProto)
      HeartbeatRequestProtoOrBuilder {
    private static final long serialVersionUID = 0L;
    // Use HeartbeatRequestProto.newBuilder() to construct.
    private HeartbeatRequestProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private HeartbeatRequestProto() {
      reports_ = java.util.Collections.emptyList();
      slowPeers_ = java.util.Collections.emptyList();
      slowDisks_ = java.util.Collections.emptyList();
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new HeartbeatRequestProto();
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_HeartbeatRequestProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_HeartbeatRequestProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto.Builder.class);
    }

    private int bitField0_;
    public static final int REGISTRATION_FIELD_NUMBER = 1;
    private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto registration_;
    /**
     * <pre>
     * Datanode info
     * </pre>
     *
     * <code>required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;</code>
     * @return Whether the registration field is set.
     */
    @java.lang.Override
    public boolean hasRegistration() {
      return ((bitField0_ & 0x00000001) != 0);
    }
    /**
     * <pre>
     * Datanode info
     * </pre>
     *
     * <code>required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;</code>
     * @return The registration.
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto getRegistration() {
      return registration_ == null ? org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance() : registration_;
    }
    /**
     * <pre>
     * Datanode info
     * </pre>
     *
     * <code>required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder getRegistrationOrBuilder() {
      return registration_ == null ? org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance() : registration_;
    }

    public static final int REPORTS_FIELD_NUMBER = 2;
    @SuppressWarnings("serial")
    private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto> reports_;
    /**
     * <code>repeated .hadoop.hdfs.StorageReportProto reports = 2;</code>
     */
    @java.lang.Override
    public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto> getReportsList() {
      return reports_;
    }
    /**
     * <code>repeated .hadoop.hdfs.StorageReportProto reports = 2;</code>
     */
    @java.lang.Override
    public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProtoOrBuilder> 
        getReportsOrBuilderList() {
      return reports_;
    }
    /**
     * <code>repeated .hadoop.hdfs.StorageReportProto reports = 2;</code>
     */
    @java.lang.Override
    public int getReportsCount() {
      return reports_.size();
    }
    /**
     * <code>repeated .hadoop.hdfs.StorageReportProto reports = 2;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto getReports(int index) {
      return reports_.get(index);
    }
    /**
     * <code>repeated .hadoop.hdfs.StorageReportProto reports = 2;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProtoOrBuilder getReportsOrBuilder(
        int index) {
      return reports_.get(index);
    }

    public static final int XMITSINPROGRESS_FIELD_NUMBER = 3;
    private int xmitsInProgress_ = 0;
    /**
     * <code>optional uint32 xmitsInProgress = 3 [default = 0];</code>
     * @return Whether the xmitsInProgress field is set.
     */
    @java.lang.Override
    public boolean hasXmitsInProgress() {
      return ((bitField0_ & 0x00000002) != 0);
    }
    /**
     * <code>optional uint32 xmitsInProgress = 3 [default = 0];</code>
     * @return The xmitsInProgress.
     */
    @java.lang.Override
    public int getXmitsInProgress() {
      return xmitsInProgress_;
    }

    public static final int XCEIVERCOUNT_FIELD_NUMBER = 4;
    private int xceiverCount_ = 0;
    /**
     * <code>optional uint32 xceiverCount = 4 [default = 0];</code>
     * @return Whether the xceiverCount field is set.
     */
    @java.lang.Override
    public boolean hasXceiverCount() {
      return ((bitField0_ & 0x00000004) != 0);
    }
    /**
     * <code>optional uint32 xceiverCount = 4 [default = 0];</code>
     * @return The xceiverCount.
     */
    @java.lang.Override
    public int getXceiverCount() {
      return xceiverCount_;
    }

    public static final int FAILEDVOLUMES_FIELD_NUMBER = 5;
    private int failedVolumes_ = 0;
    /**
     * <code>optional uint32 failedVolumes = 5 [default = 0];</code>
     * @return Whether the failedVolumes field is set.
     */
    @java.lang.Override
    public boolean hasFailedVolumes() {
      return ((bitField0_ & 0x00000008) != 0);
    }
    /**
     * <code>optional uint32 failedVolumes = 5 [default = 0];</code>
     * @return The failedVolumes.
     */
    @java.lang.Override
    public int getFailedVolumes() {
      return failedVolumes_;
    }

    public static final int CACHECAPACITY_FIELD_NUMBER = 6;
    private long cacheCapacity_ = 0L;
    /**
     * <code>optional uint64 cacheCapacity = 6 [default = 0];</code>
     * @return Whether the cacheCapacity field is set.
     */
    @java.lang.Override
    public boolean hasCacheCapacity() {
      return ((bitField0_ & 0x00000010) != 0);
    }
    /**
     * <code>optional uint64 cacheCapacity = 6 [default = 0];</code>
     * @return The cacheCapacity.
     */
    @java.lang.Override
    public long getCacheCapacity() {
      return cacheCapacity_;
    }

    public static final int CACHEUSED_FIELD_NUMBER = 7;
    private long cacheUsed_ = 0L;
    /**
     * <code>optional uint64 cacheUsed = 7 [default = 0];</code>
     * @return Whether the cacheUsed field is set.
     */
    @java.lang.Override
    public boolean hasCacheUsed() {
      return ((bitField0_ & 0x00000020) != 0);
    }
    /**
     * <code>optional uint64 cacheUsed = 7 [default = 0];</code>
     * @return The cacheUsed.
     */
    @java.lang.Override
    public long getCacheUsed() {
      return cacheUsed_;
    }

    public static final int VOLUMEFAILURESUMMARY_FIELD_NUMBER = 8;
    private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.VolumeFailureSummaryProto volumeFailureSummary_;
    /**
     * <code>optional .hadoop.hdfs.datanode.VolumeFailureSummaryProto volumeFailureSummary = 8;</code>
     * @return Whether the volumeFailureSummary field is set.
     */
    @java.lang.Override
    public boolean hasVolumeFailureSummary() {
      return ((bitField0_ & 0x00000040) != 0);
    }
    /**
     * <code>optional .hadoop.hdfs.datanode.VolumeFailureSummaryProto volumeFailureSummary = 8;</code>
     * @return The volumeFailureSummary.
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.VolumeFailureSummaryProto getVolumeFailureSummary() {
      return volumeFailureSummary_ == null ? org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.VolumeFailureSummaryProto.getDefaultInstance() : volumeFailureSummary_;
    }
    /**
     * <code>optional .hadoop.hdfs.datanode.VolumeFailureSummaryProto volumeFailureSummary = 8;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.VolumeFailureSummaryProtoOrBuilder getVolumeFailureSummaryOrBuilder() {
      return volumeFailureSummary_ == null ? org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.VolumeFailureSummaryProto.getDefaultInstance() : volumeFailureSummary_;
    }

    public static final int REQUESTFULLBLOCKREPORTLEASE_FIELD_NUMBER = 9;
    private boolean requestFullBlockReportLease_ = false;
    /**
     * <code>optional bool requestFullBlockReportLease = 9 [default = false];</code>
     * @return Whether the requestFullBlockReportLease field is set.
     */
    @java.lang.Override
    public boolean hasRequestFullBlockReportLease() {
      return ((bitField0_ & 0x00000080) != 0);
    }
    /**
     * <code>optional bool requestFullBlockReportLease = 9 [default = false];</code>
     * @return The requestFullBlockReportLease.
     */
    @java.lang.Override
    public boolean getRequestFullBlockReportLease() {
      return requestFullBlockReportLease_;
    }

    public static final int SLOWPEERS_FIELD_NUMBER = 10;
    @SuppressWarnings("serial")
    private java.util.List<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowPeerReportProto> slowPeers_;
    /**
     * <code>repeated .hadoop.hdfs.datanode.SlowPeerReportProto slowPeers = 10;</code>
     */
    @java.lang.Override
    public java.util.List<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowPeerReportProto> getSlowPeersList() {
      return slowPeers_;
    }
    /**
     * <code>repeated .hadoop.hdfs.datanode.SlowPeerReportProto slowPeers = 10;</code>
     */
    @java.lang.Override
    public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowPeerReportProtoOrBuilder> 
        getSlowPeersOrBuilderList() {
      return slowPeers_;
    }
    /**
     * <code>repeated .hadoop.hdfs.datanode.SlowPeerReportProto slowPeers = 10;</code>
     */
    @java.lang.Override
    public int getSlowPeersCount() {
      return slowPeers_.size();
    }
    /**
     * <code>repeated .hadoop.hdfs.datanode.SlowPeerReportProto slowPeers = 10;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowPeerReportProto getSlowPeers(int index) {
      return slowPeers_.get(index);
    }
    /**
     * <code>repeated .hadoop.hdfs.datanode.SlowPeerReportProto slowPeers = 10;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowPeerReportProtoOrBuilder getSlowPeersOrBuilder(
        int index) {
      return slowPeers_.get(index);
    }

    public static final int SLOWDISKS_FIELD_NUMBER = 11;
    @SuppressWarnings("serial")
    private java.util.List<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowDiskReportProto> slowDisks_;
    /**
     * <code>repeated .hadoop.hdfs.datanode.SlowDiskReportProto slowDisks = 11;</code>
     */
    @java.lang.Override
    public java.util.List<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowDiskReportProto> getSlowDisksList() {
      return slowDisks_;
    }
    /**
     * <code>repeated .hadoop.hdfs.datanode.SlowDiskReportProto slowDisks = 11;</code>
     */
    @java.lang.Override
    public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowDiskReportProtoOrBuilder> 
        getSlowDisksOrBuilderList() {
      return slowDisks_;
    }
    /**
     * <code>repeated .hadoop.hdfs.datanode.SlowDiskReportProto slowDisks = 11;</code>
     */
    @java.lang.Override
    public int getSlowDisksCount() {
      return slowDisks_.size();
    }
    /**
     * <code>repeated .hadoop.hdfs.datanode.SlowDiskReportProto slowDisks = 11;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowDiskReportProto getSlowDisks(int index) {
      return slowDisks_.get(index);
    }
    /**
     * <code>repeated .hadoop.hdfs.datanode.SlowDiskReportProto slowDisks = 11;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowDiskReportProtoOrBuilder getSlowDisksOrBuilder(
        int index) {
      return slowDisks_.get(index);
    }

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      if (!hasRegistration()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!getRegistration().isInitialized()) {
        memoizedIsInitialized = 0;
        return false;
      }
      for (int i = 0; i < getReportsCount(); i++) {
        if (!getReports(i).isInitialized()) {
          memoizedIsInitialized = 0;
          return false;
        }
      }
      if (hasVolumeFailureSummary()) {
        if (!getVolumeFailureSummary().isInitialized()) {
          memoizedIsInitialized = 0;
          return false;
        }
      }
      memoizedIsInitialized = 1;
      return true;
    }
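
    /*
     * Illustrative only, not generated code: isInitialized() verifies that every
     * required field of this message and of its nested messages is present, so a
     * partially populated instance can be validated before it is serialized.
     * A minimal sketch, assuming nothing has been set on the builder yet:
     *
     *   HeartbeatRequestProto partial =
     *       HeartbeatRequestProto.newBuilder().buildPartial();
     *   assert !partial.isInitialized();   // required "registration" is missing
     */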

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      if (((bitField0_ & 0x00000001) != 0)) {
        output.writeMessage(1, getRegistration());
      }
      for (int i = 0; i < reports_.size(); i++) {
        output.writeMessage(2, reports_.get(i));
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        output.writeUInt32(3, xmitsInProgress_);
      }
      if (((bitField0_ & 0x00000004) != 0)) {
        output.writeUInt32(4, xceiverCount_);
      }
      if (((bitField0_ & 0x00000008) != 0)) {
        output.writeUInt32(5, failedVolumes_);
      }
      if (((bitField0_ & 0x00000010) != 0)) {
        output.writeUInt64(6, cacheCapacity_);
      }
      if (((bitField0_ & 0x00000020) != 0)) {
        output.writeUInt64(7, cacheUsed_);
      }
      if (((bitField0_ & 0x00000040) != 0)) {
        output.writeMessage(8, getVolumeFailureSummary());
      }
      if (((bitField0_ & 0x00000080) != 0)) {
        output.writeBool(9, requestFullBlockReportLease_);
      }
      for (int i = 0; i < slowPeers_.size(); i++) {
        output.writeMessage(10, slowPeers_.get(i));
      }
      for (int i = 0; i < slowDisks_.size(); i++) {
        output.writeMessage(11, slowDisks_.get(i));
      }
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeMessageSize(1, getRegistration());
      }
      for (int i = 0; i < reports_.size(); i++) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeMessageSize(2, reports_.get(i));
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt32Size(3, xmitsInProgress_);
      }
      if (((bitField0_ & 0x00000004) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt32Size(4, xceiverCount_);
      }
      if (((bitField0_ & 0x00000008) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt32Size(5, failedVolumes_);
      }
      if (((bitField0_ & 0x00000010) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt64Size(6, cacheCapacity_);
      }
      if (((bitField0_ & 0x00000020) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt64Size(7, cacheUsed_);
      }
      if (((bitField0_ & 0x00000040) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeMessageSize(8, getVolumeFailureSummary());
      }
      if (((bitField0_ & 0x00000080) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeBoolSize(9, requestFullBlockReportLease_);
      }
      for (int i = 0; i < slowPeers_.size(); i++) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeMessageSize(10, slowPeers_.get(i));
      }
      for (int i = 0; i < slowDisks_.size(); i++) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeMessageSize(11, slowDisks_.get(i));
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto) obj;

      if (hasRegistration() != other.hasRegistration()) return false;
      if (hasRegistration()) {
        if (!getRegistration()
            .equals(other.getRegistration())) return false;
      }
      if (!getReportsList()
          .equals(other.getReportsList())) return false;
      if (hasXmitsInProgress() != other.hasXmitsInProgress()) return false;
      if (hasXmitsInProgress()) {
        if (getXmitsInProgress()
            != other.getXmitsInProgress()) return false;
      }
      if (hasXceiverCount() != other.hasXceiverCount()) return false;
      if (hasXceiverCount()) {
        if (getXceiverCount()
            != other.getXceiverCount()) return false;
      }
      if (hasFailedVolumes() != other.hasFailedVolumes()) return false;
      if (hasFailedVolumes()) {
        if (getFailedVolumes()
            != other.getFailedVolumes()) return false;
      }
      if (hasCacheCapacity() != other.hasCacheCapacity()) return false;
      if (hasCacheCapacity()) {
        if (getCacheCapacity()
            != other.getCacheCapacity()) return false;
      }
      if (hasCacheUsed() != other.hasCacheUsed()) return false;
      if (hasCacheUsed()) {
        if (getCacheUsed()
            != other.getCacheUsed()) return false;
      }
      if (hasVolumeFailureSummary() != other.hasVolumeFailureSummary()) return false;
      if (hasVolumeFailureSummary()) {
        if (!getVolumeFailureSummary()
            .equals(other.getVolumeFailureSummary())) return false;
      }
      if (hasRequestFullBlockReportLease() != other.hasRequestFullBlockReportLease()) return false;
      if (hasRequestFullBlockReportLease()) {
        if (getRequestFullBlockReportLease()
            != other.getRequestFullBlockReportLease()) return false;
      }
      if (!getSlowPeersList()
          .equals(other.getSlowPeersList())) return false;
      if (!getSlowDisksList()
          .equals(other.getSlowDisksList())) return false;
      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      if (hasRegistration()) {
        hash = (37 * hash) + REGISTRATION_FIELD_NUMBER;
        hash = (53 * hash) + getRegistration().hashCode();
      }
      if (getReportsCount() > 0) {
        hash = (37 * hash) + REPORTS_FIELD_NUMBER;
        hash = (53 * hash) + getReportsList().hashCode();
      }
      if (hasXmitsInProgress()) {
        hash = (37 * hash) + XMITSINPROGRESS_FIELD_NUMBER;
        hash = (53 * hash) + getXmitsInProgress();
      }
      if (hasXceiverCount()) {
        hash = (37 * hash) + XCEIVERCOUNT_FIELD_NUMBER;
        hash = (53 * hash) + getXceiverCount();
      }
      if (hasFailedVolumes()) {
        hash = (37 * hash) + FAILEDVOLUMES_FIELD_NUMBER;
        hash = (53 * hash) + getFailedVolumes();
      }
      if (hasCacheCapacity()) {
        hash = (37 * hash) + CACHECAPACITY_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
            getCacheCapacity());
      }
      if (hasCacheUsed()) {
        hash = (37 * hash) + CACHEUSED_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
            getCacheUsed());
      }
      if (hasVolumeFailureSummary()) {
        hash = (37 * hash) + VOLUMEFAILURESUMMARY_FIELD_NUMBER;
        hash = (53 * hash) + getVolumeFailureSummary().hashCode();
      }
      if (hasRequestFullBlockReportLease()) {
        hash = (37 * hash) + REQUESTFULLBLOCKREPORTLEASE_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashBoolean(
            getRequestFullBlockReportLease());
      }
      if (getSlowPeersCount() > 0) {
        hash = (37 * hash) + SLOWPEERS_FIELD_NUMBER;
        hash = (53 * hash) + getSlowPeersList().hashCode();
      }
      if (getSlowDisksCount() > 0) {
        hash = (37 * hash) + SLOWDISKS_FIELD_NUMBER;
        hash = (53 * hash) + getSlowDisksList().hashCode();
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }
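
    /*
     * Illustrative only, not generated code: a minimal round-trip sketch using the
     * parse methods above, assuming "request" is a fully built HeartbeatRequestProto
     * and "out"/"in" are open streams (checked exceptions omitted for brevity).
     *
     *   byte[] wire = request.toByteArray();
     *   HeartbeatRequestProto copy = HeartbeatRequestProto.parseFrom(wire);
     *
     *   // Length-prefixed framing when several messages share one stream:
     *   request.writeDelimitedTo(out);
     *   HeartbeatRequestProto next = HeartbeatRequestProto.parseDelimitedFrom(in);
     */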

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
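
    /*
     * Illustrative only, not generated code: a minimal sketch of assembling a
     * heartbeat request with the Builder documented below. "registration" and
     * "storageReport" are assumed to be previously built DatanodeRegistrationProto
     * and StorageReportProto instances; the numeric values are placeholders.
     *
     *   HeartbeatRequestProto request = HeartbeatRequestProto.newBuilder()
     *       .setRegistration(registration)          // required field
     *       .addReports(storageReport)              // one entry per storage
     *       .setXmitsInProgress(2)
     *       .setXceiverCount(8)
     *       .setFailedVolumes(0)
     *       .setCacheCapacity(1L << 30)
     *       .setCacheUsed(1L << 20)
     *       .setRequestFullBlockReportLease(true)
     *       .build();   // throws if a required field is still unset
     */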
    /**
     * <pre>
     **
     * registration - datanode registration information
     * reports - per-storage utilization reports; capacity, dfsUsed, remaining and
     *     blockPoolUsed are carried per storage inside these reports
     * xmitsInProgress - number of transfers from this datanode to others
     * xceiverCount - number of active transceiver threads
     * failedVolumes - number of failed volumes.  This is redundant with the
     *     information included in volumeFailureSummary, but the field is retained
     *     for backwards compatibility.
     * cacheCapacity - total cache capacity available at the datanode
     * cacheUsed - amount of cache used
     * volumeFailureSummary - info about volume failures
     * requestFullBlockReportLease - true if the datanode is requesting a lease for
     *     sending a full block report
     * slowPeers - info about peer DataNodes that are suspected to be slow.
     * slowDisks - info about DataNode disks that are suspected to be slow.
     * </pre>
     *
     * Protobuf type {@code hadoop.hdfs.datanode.HeartbeatRequestProto}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.hdfs.datanode.HeartbeatRequestProto)
        org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProtoOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_HeartbeatRequestProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_HeartbeatRequestProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      private void maybeForceBuilderInitialization() {
        if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
                .alwaysUseFieldBuilders) {
          getRegistrationFieldBuilder();
          getReportsFieldBuilder();
          getVolumeFailureSummaryFieldBuilder();
          getSlowPeersFieldBuilder();
          getSlowDisksFieldBuilder();
        }
      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        bitField0_ = 0;
        registration_ = null;
        if (registrationBuilder_ != null) {
          registrationBuilder_.dispose();
          registrationBuilder_ = null;
        }
        if (reportsBuilder_ == null) {
          reports_ = java.util.Collections.emptyList();
        } else {
          reports_ = null;
          reportsBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000002);
        xmitsInProgress_ = 0;
        xceiverCount_ = 0;
        failedVolumes_ = 0;
        cacheCapacity_ = 0L;
        cacheUsed_ = 0L;
        volumeFailureSummary_ = null;
        if (volumeFailureSummaryBuilder_ != null) {
          volumeFailureSummaryBuilder_.dispose();
          volumeFailureSummaryBuilder_ = null;
        }
        requestFullBlockReportLease_ = false;
        if (slowPeersBuilder_ == null) {
          slowPeers_ = java.util.Collections.emptyList();
        } else {
          slowPeers_ = null;
          slowPeersBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000200);
        if (slowDisksBuilder_ == null) {
          slowDisks_ = java.util.Collections.emptyList();
        } else {
          slowDisks_ = null;
          slowDisksBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000400);
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_HeartbeatRequestProto_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto build() {
        org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto buildPartial() {
        org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto(this);
        buildPartialRepeatedFields(result);
        if (bitField0_ != 0) { buildPartial0(result); }
        onBuilt();
        return result;
      }

      private void buildPartialRepeatedFields(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto result) {
        if (reportsBuilder_ == null) {
          if (((bitField0_ & 0x00000002) != 0)) {
            reports_ = java.util.Collections.unmodifiableList(reports_);
            bitField0_ = (bitField0_ & ~0x00000002);
          }
          result.reports_ = reports_;
        } else {
          result.reports_ = reportsBuilder_.build();
        }
        if (slowPeersBuilder_ == null) {
          if (((bitField0_ & 0x00000200) != 0)) {
            slowPeers_ = java.util.Collections.unmodifiableList(slowPeers_);
            bitField0_ = (bitField0_ & ~0x00000200);
          }
          result.slowPeers_ = slowPeers_;
        } else {
          result.slowPeers_ = slowPeersBuilder_.build();
        }
        if (slowDisksBuilder_ == null) {
          if (((bitField0_ & 0x00000400) != 0)) {
            slowDisks_ = java.util.Collections.unmodifiableList(slowDisks_);
            bitField0_ = (bitField0_ & ~0x00000400);
          }
          result.slowDisks_ = slowDisks_;
        } else {
          result.slowDisks_ = slowDisksBuilder_.build();
        }
      }

      private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto result) {
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) != 0)) {
          result.registration_ = registrationBuilder_ == null
              ? registration_
              : registrationBuilder_.build();
          to_bitField0_ |= 0x00000001;
        }
        if (((from_bitField0_ & 0x00000004) != 0)) {
          result.xmitsInProgress_ = xmitsInProgress_;
          to_bitField0_ |= 0x00000002;
        }
        if (((from_bitField0_ & 0x00000008) != 0)) {
          result.xceiverCount_ = xceiverCount_;
          to_bitField0_ |= 0x00000004;
        }
        if (((from_bitField0_ & 0x00000010) != 0)) {
          result.failedVolumes_ = failedVolumes_;
          to_bitField0_ |= 0x00000008;
        }
        if (((from_bitField0_ & 0x00000020) != 0)) {
          result.cacheCapacity_ = cacheCapacity_;
          to_bitField0_ |= 0x00000010;
        }
        if (((from_bitField0_ & 0x00000040) != 0)) {
          result.cacheUsed_ = cacheUsed_;
          to_bitField0_ |= 0x00000020;
        }
        if (((from_bitField0_ & 0x00000080) != 0)) {
          result.volumeFailureSummary_ = volumeFailureSummaryBuilder_ == null
              ? volumeFailureSummary_
              : volumeFailureSummaryBuilder_.build();
          to_bitField0_ |= 0x00000040;
        }
        if (((from_bitField0_ & 0x00000100) != 0)) {
          result.requestFullBlockReportLease_ = requestFullBlockReportLease_;
          to_bitField0_ |= 0x00000080;
        }
        result.bitField0_ |= to_bitField0_;
      }

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto) {
          return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto other) {
        if (other == org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto.getDefaultInstance()) return this;
        if (other.hasRegistration()) {
          mergeRegistration(other.getRegistration());
        }
        if (reportsBuilder_ == null) {
          if (!other.reports_.isEmpty()) {
            if (reports_.isEmpty()) {
              reports_ = other.reports_;
              bitField0_ = (bitField0_ & ~0x00000002);
            } else {
              ensureReportsIsMutable();
              reports_.addAll(other.reports_);
            }
            onChanged();
          }
        } else {
          if (!other.reports_.isEmpty()) {
            if (reportsBuilder_.isEmpty()) {
              reportsBuilder_.dispose();
              reportsBuilder_ = null;
              reports_ = other.reports_;
              bitField0_ = (bitField0_ & ~0x00000002);
              reportsBuilder_ = 
                org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ?
                   getReportsFieldBuilder() : null;
            } else {
              reportsBuilder_.addAllMessages(other.reports_);
            }
          }
        }
        if (other.hasXmitsInProgress()) {
          setXmitsInProgress(other.getXmitsInProgress());
        }
        if (other.hasXceiverCount()) {
          setXceiverCount(other.getXceiverCount());
        }
        if (other.hasFailedVolumes()) {
          setFailedVolumes(other.getFailedVolumes());
        }
        if (other.hasCacheCapacity()) {
          setCacheCapacity(other.getCacheCapacity());
        }
        if (other.hasCacheUsed()) {
          setCacheUsed(other.getCacheUsed());
        }
        if (other.hasVolumeFailureSummary()) {
          mergeVolumeFailureSummary(other.getVolumeFailureSummary());
        }
        if (other.hasRequestFullBlockReportLease()) {
          setRequestFullBlockReportLease(other.getRequestFullBlockReportLease());
        }
        if (slowPeersBuilder_ == null) {
          if (!other.slowPeers_.isEmpty()) {
            if (slowPeers_.isEmpty()) {
              slowPeers_ = other.slowPeers_;
              bitField0_ = (bitField0_ & ~0x00000200);
            } else {
              ensureSlowPeersIsMutable();
              slowPeers_.addAll(other.slowPeers_);
            }
            onChanged();
          }
        } else {
          if (!other.slowPeers_.isEmpty()) {
            if (slowPeersBuilder_.isEmpty()) {
              slowPeersBuilder_.dispose();
              slowPeersBuilder_ = null;
              slowPeers_ = other.slowPeers_;
              bitField0_ = (bitField0_ & ~0x00000200);
              slowPeersBuilder_ = 
                org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ?
                   getSlowPeersFieldBuilder() : null;
            } else {
              slowPeersBuilder_.addAllMessages(other.slowPeers_);
            }
          }
        }
        if (slowDisksBuilder_ == null) {
          if (!other.slowDisks_.isEmpty()) {
            if (slowDisks_.isEmpty()) {
              slowDisks_ = other.slowDisks_;
              bitField0_ = (bitField0_ & ~0x00000400);
            } else {
              ensureSlowDisksIsMutable();
              slowDisks_.addAll(other.slowDisks_);
            }
            onChanged();
          }
        } else {
          if (!other.slowDisks_.isEmpty()) {
            if (slowDisksBuilder_.isEmpty()) {
              slowDisksBuilder_.dispose();
              slowDisksBuilder_ = null;
              slowDisks_ = other.slowDisks_;
              bitField0_ = (bitField0_ & ~0x00000400);
              slowDisksBuilder_ = 
                org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ?
                   getSlowDisksFieldBuilder() : null;
            } else {
              slowDisksBuilder_.addAllMessages(other.slowDisks_);
            }
          }
        }
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        if (!hasRegistration()) {
          return false;
        }
        if (!getRegistration().isInitialized()) {
          return false;
        }
        for (int i = 0; i < getReportsCount(); i++) {
          if (!getReports(i).isInitialized()) {
            return false;
          }
        }
        if (hasVolumeFailureSummary()) {
          if (!getVolumeFailureSummary().isInitialized()) {
            return false;
          }
        }
        return true;
      }

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              case 10: {
                input.readMessage(
                    getRegistrationFieldBuilder().getBuilder(),
                    extensionRegistry);
                bitField0_ |= 0x00000001;
                break;
              } // case 10
              case 18: {
                org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto m =
                    input.readMessage(
                        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto.PARSER,
                        extensionRegistry);
                if (reportsBuilder_ == null) {
                  ensureReportsIsMutable();
                  reports_.add(m);
                } else {
                  reportsBuilder_.addMessage(m);
                }
                break;
              } // case 18
              case 24: {
                xmitsInProgress_ = input.readUInt32();
                bitField0_ |= 0x00000004;
                break;
              } // case 24
              case 32: {
                xceiverCount_ = input.readUInt32();
                bitField0_ |= 0x00000008;
                break;
              } // case 32
              case 40: {
                failedVolumes_ = input.readUInt32();
                bitField0_ |= 0x00000010;
                break;
              } // case 40
              case 48: {
                cacheCapacity_ = input.readUInt64();
                bitField0_ |= 0x00000020;
                break;
              } // case 48
              case 56: {
                cacheUsed_ = input.readUInt64();
                bitField0_ |= 0x00000040;
                break;
              } // case 56
              case 66: {
                input.readMessage(
                    getVolumeFailureSummaryFieldBuilder().getBuilder(),
                    extensionRegistry);
                bitField0_ |= 0x00000080;
                break;
              } // case 66
              case 72: {
                requestFullBlockReportLease_ = input.readBool();
                bitField0_ |= 0x00000100;
                break;
              } // case 72
              case 82: {
                org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowPeerReportProto m =
                    input.readMessage(
                        org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowPeerReportProto.PARSER,
                        extensionRegistry);
                if (slowPeersBuilder_ == null) {
                  ensureSlowPeersIsMutable();
                  slowPeers_.add(m);
                } else {
                  slowPeersBuilder_.addMessage(m);
                }
                break;
              } // case 82
              case 90: {
                org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowDiskReportProto m =
                    input.readMessage(
                        org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowDiskReportProto.PARSER,
                        extensionRegistry);
                if (slowDisksBuilder_ == null) {
                  ensureSlowDisksIsMutable();
                  slowDisks_.add(m);
                } else {
                  slowDisksBuilder_.addMessage(m);
                }
                break;
              } // case 90
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
      private int bitField0_;

      private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto registration_;
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder> registrationBuilder_;
      /**
       * <pre>
       * Datanode info
       * </pre>
       *
       * <code>required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;</code>
       * @return Whether the registration field is set.
       */
      public boolean hasRegistration() {
        return ((bitField0_ & 0x00000001) != 0);
      }
      /**
       * <pre>
       * Datanode info
       * </pre>
       *
       * <code>required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;</code>
       * @return The registration.
       */
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto getRegistration() {
        if (registrationBuilder_ == null) {
          return registration_ == null ? org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance() : registration_;
        } else {
          return registrationBuilder_.getMessage();
        }
      }
      /**
       * <pre>
       * Datanode info
       * </pre>
       *
       * <code>required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;</code>
       */
      public Builder setRegistration(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto value) {
        if (registrationBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          registration_ = value;
        } else {
          registrationBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * <pre>
       * Datanode info
       * </pre>
       *
       * <code>required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;</code>
       */
      public Builder setRegistration(
          org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder builderForValue) {
        if (registrationBuilder_ == null) {
          registration_ = builderForValue.build();
        } else {
          registrationBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * <pre>
       * Datanode info
       * </pre>
       *
       * <code>required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;</code>
       */
      public Builder mergeRegistration(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto value) {
        if (registrationBuilder_ == null) {
          if (((bitField0_ & 0x00000001) != 0) &&
            registration_ != null &&
            registration_ != org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance()) {
            getRegistrationBuilder().mergeFrom(value);
          } else {
            registration_ = value;
          }
        } else {
          registrationBuilder_.mergeFrom(value);
        }
        if (registration_ != null) {
          bitField0_ |= 0x00000001;
          onChanged();
        }
        return this;
      }
      /**
       * <pre>
       * Datanode info
       * </pre>
       *
       * <code>required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;</code>
       */
      public Builder clearRegistration() {
        bitField0_ = (bitField0_ & ~0x00000001);
        registration_ = null;
        if (registrationBuilder_ != null) {
          registrationBuilder_.dispose();
          registrationBuilder_ = null;
        }
        onChanged();
        return this;
      }
      /**
       * <pre>
       * Datanode info
       * </pre>
       *
       * <code>required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder getRegistrationBuilder() {
        bitField0_ |= 0x00000001;
        onChanged();
        return getRegistrationFieldBuilder().getBuilder();
      }
      /**
       * <pre>
       * Datanode info
       * </pre>
       *
       * <code>required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder getRegistrationOrBuilder() {
        if (registrationBuilder_ != null) {
          return registrationBuilder_.getMessageOrBuilder();
        } else {
          return registration_ == null ?
              org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance() : registration_;
        }
      }
      /**
       * <pre>
       * Datanode info
       * </pre>
       *
       * <code>required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;</code>
       */
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder> 
          getRegistrationFieldBuilder() {
        if (registrationBuilder_ == null) {
          registrationBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
              org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder>(
                  getRegistration(),
                  getParentForChildren(),
                  isClean());
          registration_ = null;
        }
        return registrationBuilder_;
      }
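
      /*
       * Illustrative only, not generated code: the nested registration message can
       * be edited in place through the lazily created single-field builder instead
       * of rebuilding it with setRegistration(). The version string is a placeholder.
       *
       *   HeartbeatRequestProto.Builder b = HeartbeatRequestProto.newBuilder();
       *   b.getRegistrationBuilder().setSoftwareVersion("3.4.0");
       *   assert b.hasRegistration();   // the nested builder marks the field as set
       */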

      private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto> reports_ =
        java.util.Collections.emptyList();
      private void ensureReportsIsMutable() {
        if (!((bitField0_ & 0x00000002) != 0)) {
          reports_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto>(reports_);
          bitField0_ |= 0x00000002;
        }
      }

      private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProtoOrBuilder> reportsBuilder_;

      /**
       * <code>repeated .hadoop.hdfs.StorageReportProto reports = 2;</code>
       */
      public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto> getReportsList() {
        if (reportsBuilder_ == null) {
          return java.util.Collections.unmodifiableList(reports_);
        } else {
          return reportsBuilder_.getMessageList();
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.StorageReportProto reports = 2;</code>
       */
      public int getReportsCount() {
        if (reportsBuilder_ == null) {
          return reports_.size();
        } else {
          return reportsBuilder_.getCount();
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.StorageReportProto reports = 2;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto getReports(int index) {
        if (reportsBuilder_ == null) {
          return reports_.get(index);
        } else {
          return reportsBuilder_.getMessage(index);
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.StorageReportProto reports = 2;</code>
       */
      public Builder setReports(
          int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto value) {
        if (reportsBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          ensureReportsIsMutable();
          reports_.set(index, value);
          onChanged();
        } else {
          reportsBuilder_.setMessage(index, value);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.StorageReportProto reports = 2;</code>
       */
      public Builder setReports(
          int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto.Builder builderForValue) {
        if (reportsBuilder_ == null) {
          ensureReportsIsMutable();
          reports_.set(index, builderForValue.build());
          onChanged();
        } else {
          reportsBuilder_.setMessage(index, builderForValue.build());
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.StorageReportProto reports = 2;</code>
       */
      public Builder addReports(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto value) {
        if (reportsBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          ensureReportsIsMutable();
          reports_.add(value);
          onChanged();
        } else {
          reportsBuilder_.addMessage(value);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.StorageReportProto reports = 2;</code>
       */
      public Builder addReports(
          int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto value) {
        if (reportsBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          ensureReportsIsMutable();
          reports_.add(index, value);
          onChanged();
        } else {
          reportsBuilder_.addMessage(index, value);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.StorageReportProto reports = 2;</code>
       */
      public Builder addReports(
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto.Builder builderForValue) {
        if (reportsBuilder_ == null) {
          ensureReportsIsMutable();
          reports_.add(builderForValue.build());
          onChanged();
        } else {
          reportsBuilder_.addMessage(builderForValue.build());
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.StorageReportProto reports = 2;</code>
       */
      public Builder addReports(
          int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto.Builder builderForValue) {
        if (reportsBuilder_ == null) {
          ensureReportsIsMutable();
          reports_.add(index, builderForValue.build());
          onChanged();
        } else {
          reportsBuilder_.addMessage(index, builderForValue.build());
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.StorageReportProto reports = 2;</code>
       */
      public Builder addAllReports(
          java.lang.Iterable<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto> values) {
        if (reportsBuilder_ == null) {
          ensureReportsIsMutable();
          org.apache.hadoop.thirdparty.protobuf.AbstractMessageLite.Builder.addAll(
              values, reports_);
          onChanged();
        } else {
          reportsBuilder_.addAllMessages(values);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.StorageReportProto reports = 2;</code>
       */
      public Builder clearReports() {
        if (reportsBuilder_ == null) {
          reports_ = java.util.Collections.emptyList();
          bitField0_ = (bitField0_ & ~0x00000002);
          onChanged();
        } else {
          reportsBuilder_.clear();
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.StorageReportProto reports = 2;</code>
       */
      public Builder removeReports(int index) {
        if (reportsBuilder_ == null) {
          ensureReportsIsMutable();
          reports_.remove(index);
          onChanged();
        } else {
          reportsBuilder_.remove(index);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.StorageReportProto reports = 2;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto.Builder getReportsBuilder(
          int index) {
        return getReportsFieldBuilder().getBuilder(index);
      }
      /**
       * <code>repeated .hadoop.hdfs.StorageReportProto reports = 2;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProtoOrBuilder getReportsOrBuilder(
          int index) {
        if (reportsBuilder_ == null) {
          return reports_.get(index);
        } else {
          return reportsBuilder_.getMessageOrBuilder(index);
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.StorageReportProto reports = 2;</code>
       */
      public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProtoOrBuilder> 
           getReportsOrBuilderList() {
        if (reportsBuilder_ != null) {
          return reportsBuilder_.getMessageOrBuilderList();
        } else {
          return java.util.Collections.unmodifiableList(reports_);
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.StorageReportProto reports = 2;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto.Builder addReportsBuilder() {
        return getReportsFieldBuilder().addBuilder(
            org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto.getDefaultInstance());
      }
      /**
       * <code>repeated .hadoop.hdfs.StorageReportProto reports = 2;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto.Builder addReportsBuilder(
          int index) {
        return getReportsFieldBuilder().addBuilder(
            index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto.getDefaultInstance());
      }
      /**
       * <code>repeated .hadoop.hdfs.StorageReportProto reports = 2;</code>
       */
      public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto.Builder> 
           getReportsBuilderList() {
        return getReportsFieldBuilder().getBuilderList();
      }
      private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProtoOrBuilder> 
          getReportsFieldBuilder() {
        if (reportsBuilder_ == null) {
          reportsBuilder_ = new org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProtoOrBuilder>(
                  reports_,
                  ((bitField0_ & 0x00000002) != 0),
                  getParentForChildren(),
                  isClean());
          reports_ = null;
        }
        return reportsBuilder_;
      }
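
      /*
       * Illustrative only, not generated code: the repeated "reports" field can be
       * populated with pre-built messages or edited element-by-element through the
       * per-index builders. "reportA" and "reportB" are assumed StorageReportProto
       * instances.
       *
       *   builder.addAllReports(java.util.Arrays.asList(reportA, reportB));
       *   builder.removeReports(0);           // drop the first report
       *   builder.addReportsBuilder()         // append an element and edit it
       *       .mergeFrom(reportA);            // via its StorageReportProto.Builder
       */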

      private int xmitsInProgress_ ;
      /**
       * <code>optional uint32 xmitsInProgress = 3 [default = 0];</code>
       * @return Whether the xmitsInProgress field is set.
       */
      @java.lang.Override
      public boolean hasXmitsInProgress() {
        return ((bitField0_ & 0x00000004) != 0);
      }
      /**
       * <code>optional uint32 xmitsInProgress = 3 [default = 0];</code>
       * @return The xmitsInProgress.
       */
      @java.lang.Override
      public int getXmitsInProgress() {
        return xmitsInProgress_;
      }
      /**
       * <code>optional uint32 xmitsInProgress = 3 [default = 0];</code>
       * @param value The xmitsInProgress to set.
       * @return This builder for chaining.
       */
      public Builder setXmitsInProgress(int value) {

        xmitsInProgress_ = value;
        bitField0_ |= 0x00000004;
        onChanged();
        return this;
      }
      /**
       * <code>optional uint32 xmitsInProgress = 3 [default = 0];</code>
       * @return This builder for chaining.
       */
      public Builder clearXmitsInProgress() {
        bitField0_ = (bitField0_ & ~0x00000004);
        xmitsInProgress_ = 0;
        onChanged();
        return this;
      }

      private int xceiverCount_ ;
      /**
       * <code>optional uint32 xceiverCount = 4 [default = 0];</code>
       * @return Whether the xceiverCount field is set.
       */
      @java.lang.Override
      public boolean hasXceiverCount() {
        return ((bitField0_ & 0x00000008) != 0);
      }
      /**
       * <code>optional uint32 xceiverCount = 4 [default = 0];</code>
       * @return The xceiverCount.
       */
      @java.lang.Override
      public int getXceiverCount() {
        return xceiverCount_;
      }
      /**
       * <code>optional uint32 xceiverCount = 4 [default = 0];</code>
       * @param value The xceiverCount to set.
       * @return This builder for chaining.
       */
      public Builder setXceiverCount(int value) {

        xceiverCount_ = value;
        bitField0_ |= 0x00000008;
        onChanged();
        return this;
      }
      /**
       * <code>optional uint32 xceiverCount = 4 [default = 0];</code>
       * @return This builder for chaining.
       */
      public Builder clearXceiverCount() {
        bitField0_ = (bitField0_ & ~0x00000008);
        xceiverCount_ = 0;
        onChanged();
        return this;
      }

      private int failedVolumes_ ;
      /**
       * <code>optional uint32 failedVolumes = 5 [default = 0];</code>
       * @return Whether the failedVolumes field is set.
       */
      @java.lang.Override
      public boolean hasFailedVolumes() {
        return ((bitField0_ & 0x00000010) != 0);
      }
      /**
       * <code>optional uint32 failedVolumes = 5 [default = 0];</code>
       * @return The failedVolumes.
       */
      @java.lang.Override
      public int getFailedVolumes() {
        return failedVolumes_;
      }
      /**
       * <code>optional uint32 failedVolumes = 5 [default = 0];</code>
       * @param value The failedVolumes to set.
       * @return This builder for chaining.
       */
      public Builder setFailedVolumes(int value) {

        failedVolumes_ = value;
        bitField0_ |= 0x00000010;
        onChanged();
        return this;
      }
      /**
       * <code>optional uint32 failedVolumes = 5 [default = 0];</code>
       * @return This builder for chaining.
       */
      public Builder clearFailedVolumes() {
        bitField0_ = (bitField0_ & ~0x00000010);
        failedVolumes_ = 0;
        onChanged();
        return this;
      }

      private long cacheCapacity_ ;
      /**
       * <code>optional uint64 cacheCapacity = 6 [default = 0];</code>
       * @return Whether the cacheCapacity field is set.
       */
      @java.lang.Override
      public boolean hasCacheCapacity() {
        return ((bitField0_ & 0x00000020) != 0);
      }
      /**
       * <code>optional uint64 cacheCapacity = 6 [default = 0];</code>
       * @return The cacheCapacity.
       */
      @java.lang.Override
      public long getCacheCapacity() {
        return cacheCapacity_;
      }
      /**
       * <code>optional uint64 cacheCapacity = 6 [default = 0];</code>
       * @param value The cacheCapacity to set.
       * @return This builder for chaining.
       */
      public Builder setCacheCapacity(long value) {

        cacheCapacity_ = value;
        bitField0_ |= 0x00000020;
        onChanged();
        return this;
      }
      /**
       * <code>optional uint64 cacheCapacity = 6 [default = 0];</code>
       * @return This builder for chaining.
       */
      public Builder clearCacheCapacity() {
        bitField0_ = (bitField0_ & ~0x00000020);
        cacheCapacity_ = 0L;
        onChanged();
        return this;
      }

      private long cacheUsed_ ;
      /**
       * <code>optional uint64 cacheUsed = 7 [default = 0];</code>
       * @return Whether the cacheUsed field is set.
       */
      @java.lang.Override
      public boolean hasCacheUsed() {
        return ((bitField0_ & 0x00000040) != 0);
      }
      /**
       * <code>optional uint64 cacheUsed = 7 [default = 0];</code>
       * @return The cacheUsed.
       */
      @java.lang.Override
      public long getCacheUsed() {
        return cacheUsed_;
      }
      /**
       * <code>optional uint64 cacheUsed = 7 [default = 0];</code>
       * @param value The cacheUsed to set.
       * @return This builder for chaining.
       */
      public Builder setCacheUsed(long value) {

        cacheUsed_ = value;
        bitField0_ |= 0x00000040;
        onChanged();
        return this;
      }
      /**
       * <code>optional uint64 cacheUsed = 7 [default = 0];</code>
       * @return This builder for chaining.
       */
      public Builder clearCacheUsed() {
        bitField0_ = (bitField0_ & ~0x00000040);
        cacheUsed_ = 0L;
        onChanged();
        return this;
      }

      private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.VolumeFailureSummaryProto volumeFailureSummary_;
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.VolumeFailureSummaryProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.VolumeFailureSummaryProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.VolumeFailureSummaryProtoOrBuilder> volumeFailureSummaryBuilder_;
      /**
       * <code>optional .hadoop.hdfs.datanode.VolumeFailureSummaryProto volumeFailureSummary = 8;</code>
       * @return Whether the volumeFailureSummary field is set.
       */
      public boolean hasVolumeFailureSummary() {
        return ((bitField0_ & 0x00000080) != 0);
      }
      /**
       * <code>optional .hadoop.hdfs.datanode.VolumeFailureSummaryProto volumeFailureSummary = 8;</code>
       * @return The volumeFailureSummary.
       */
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.VolumeFailureSummaryProto getVolumeFailureSummary() {
        if (volumeFailureSummaryBuilder_ == null) {
          return volumeFailureSummary_ == null ? org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.VolumeFailureSummaryProto.getDefaultInstance() : volumeFailureSummary_;
        } else {
          return volumeFailureSummaryBuilder_.getMessage();
        }
      }
      /**
       * <code>optional .hadoop.hdfs.datanode.VolumeFailureSummaryProto volumeFailureSummary = 8;</code>
       */
      public Builder setVolumeFailureSummary(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.VolumeFailureSummaryProto value) {
        if (volumeFailureSummaryBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          volumeFailureSummary_ = value;
        } else {
          volumeFailureSummaryBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000080;
        onChanged();
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.datanode.VolumeFailureSummaryProto volumeFailureSummary = 8;</code>
       */
      public Builder setVolumeFailureSummary(
          org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.VolumeFailureSummaryProto.Builder builderForValue) {
        if (volumeFailureSummaryBuilder_ == null) {
          volumeFailureSummary_ = builderForValue.build();
        } else {
          volumeFailureSummaryBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000080;
        onChanged();
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.datanode.VolumeFailureSummaryProto volumeFailureSummary = 8;</code>
       */
      public Builder mergeVolumeFailureSummary(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.VolumeFailureSummaryProto value) {
        if (volumeFailureSummaryBuilder_ == null) {
          if (((bitField0_ & 0x00000080) != 0) &&
            volumeFailureSummary_ != null &&
            volumeFailureSummary_ != org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.VolumeFailureSummaryProto.getDefaultInstance()) {
            getVolumeFailureSummaryBuilder().mergeFrom(value);
          } else {
            volumeFailureSummary_ = value;
          }
        } else {
          volumeFailureSummaryBuilder_.mergeFrom(value);
        }
        if (volumeFailureSummary_ != null) {
          bitField0_ |= 0x00000080;
          onChanged();
        }
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.datanode.VolumeFailureSummaryProto volumeFailureSummary = 8;</code>
       */
      public Builder clearVolumeFailureSummary() {
        bitField0_ = (bitField0_ & ~0x00000080);
        volumeFailureSummary_ = null;
        if (volumeFailureSummaryBuilder_ != null) {
          volumeFailureSummaryBuilder_.dispose();
          volumeFailureSummaryBuilder_ = null;
        }
        onChanged();
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.datanode.VolumeFailureSummaryProto volumeFailureSummary = 8;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.VolumeFailureSummaryProto.Builder getVolumeFailureSummaryBuilder() {
        bitField0_ |= 0x00000080;
        onChanged();
        return getVolumeFailureSummaryFieldBuilder().getBuilder();
      }
      /**
       * <code>optional .hadoop.hdfs.datanode.VolumeFailureSummaryProto volumeFailureSummary = 8;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.VolumeFailureSummaryProtoOrBuilder getVolumeFailureSummaryOrBuilder() {
        if (volumeFailureSummaryBuilder_ != null) {
          return volumeFailureSummaryBuilder_.getMessageOrBuilder();
        } else {
          return volumeFailureSummary_ == null ?
              org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.VolumeFailureSummaryProto.getDefaultInstance() : volumeFailureSummary_;
        }
      }
      /**
       * <code>optional .hadoop.hdfs.datanode.VolumeFailureSummaryProto volumeFailureSummary = 8;</code>
       */
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.VolumeFailureSummaryProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.VolumeFailureSummaryProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.VolumeFailureSummaryProtoOrBuilder> 
          getVolumeFailureSummaryFieldBuilder() {
        if (volumeFailureSummaryBuilder_ == null) {
          volumeFailureSummaryBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
              org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.VolumeFailureSummaryProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.VolumeFailureSummaryProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.VolumeFailureSummaryProtoOrBuilder>(
                  getVolumeFailureSummary(),
                  getParentForChildren(),
                  isClean());
          volumeFailureSummary_ = null;
        }
        return volumeFailureSummaryBuilder_;
      }
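      /*
       * Illustrative sketch (not generated code): working with the optional
       * volumeFailureSummary message field on this Builder. It only uses accessors
       * visible above; field setters on VolumeFailureSummaryProto itself are not
       * shown in this excerpt, so the sketch falls back to the default instance.
       *
       *   HeartbeatRequestProto.Builder b = HeartbeatRequestProto.newBuilder();
       *   b.setVolumeFailureSummary(
       *       org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos
       *           .VolumeFailureSummaryProto.getDefaultInstance());
       *   // Or mutate the nested message in place via its builder:
       *   b.getVolumeFailureSummaryBuilder();            // marks the field as set
       *   boolean present = b.hasVolumeFailureSummary(); // true after either call
       *   b.clearVolumeFailureSummary();                 // unsets the field again
       */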

      private boolean requestFullBlockReportLease_ ;
      /**
       * <code>optional bool requestFullBlockReportLease = 9 [default = false];</code>
       * @return Whether the requestFullBlockReportLease field is set.
       */
      @java.lang.Override
      public boolean hasRequestFullBlockReportLease() {
        return ((bitField0_ & 0x00000100) != 0);
      }
      /**
       * <code>optional bool requestFullBlockReportLease = 9 [default = false];</code>
       * @return The requestFullBlockReportLease.
       */
      @java.lang.Override
      public boolean getRequestFullBlockReportLease() {
        return requestFullBlockReportLease_;
      }
      /**
       * <code>optional bool requestFullBlockReportLease = 9 [default = false];</code>
       * @param value The requestFullBlockReportLease to set.
       * @return This builder for chaining.
       */
      public Builder setRequestFullBlockReportLease(boolean value) {

        requestFullBlockReportLease_ = value;
        bitField0_ |= 0x00000100;
        onChanged();
        return this;
      }
      /**
       * <code>optional bool requestFullBlockReportLease = 9 [default = false];</code>
       * @return This builder for chaining.
       */
      public Builder clearRequestFullBlockReportLease() {
        bitField0_ = (bitField0_ & ~0x00000100);
        requestFullBlockReportLease_ = false;
        onChanged();
        return this;
      }

      private java.util.List<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowPeerReportProto> slowPeers_ =
        java.util.Collections.emptyList();
      private void ensureSlowPeersIsMutable() {
        if (!((bitField0_ & 0x00000200) != 0)) {
          slowPeers_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowPeerReportProto>(slowPeers_);
          bitField0_ |= 0x00000200;
        }
      }

      private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowPeerReportProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowPeerReportProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowPeerReportProtoOrBuilder> slowPeersBuilder_;

      /**
       * <code>repeated .hadoop.hdfs.datanode.SlowPeerReportProto slowPeers = 10;</code>
       */
      public java.util.List<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowPeerReportProto> getSlowPeersList() {
        if (slowPeersBuilder_ == null) {
          return java.util.Collections.unmodifiableList(slowPeers_);
        } else {
          return slowPeersBuilder_.getMessageList();
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.datanode.SlowPeerReportProto slowPeers = 10;</code>
       */
      public int getSlowPeersCount() {
        if (slowPeersBuilder_ == null) {
          return slowPeers_.size();
        } else {
          return slowPeersBuilder_.getCount();
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.datanode.SlowPeerReportProto slowPeers = 10;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowPeerReportProto getSlowPeers(int index) {
        if (slowPeersBuilder_ == null) {
          return slowPeers_.get(index);
        } else {
          return slowPeersBuilder_.getMessage(index);
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.datanode.SlowPeerReportProto slowPeers = 10;</code>
       */
      public Builder setSlowPeers(
          int index, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowPeerReportProto value) {
        if (slowPeersBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          ensureSlowPeersIsMutable();
          slowPeers_.set(index, value);
          onChanged();
        } else {
          slowPeersBuilder_.setMessage(index, value);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.datanode.SlowPeerReportProto slowPeers = 10;</code>
       */
      public Builder setSlowPeers(
          int index, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowPeerReportProto.Builder builderForValue) {
        if (slowPeersBuilder_ == null) {
          ensureSlowPeersIsMutable();
          slowPeers_.set(index, builderForValue.build());
          onChanged();
        } else {
          slowPeersBuilder_.setMessage(index, builderForValue.build());
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.datanode.SlowPeerReportProto slowPeers = 10;</code>
       */
      public Builder addSlowPeers(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowPeerReportProto value) {
        if (slowPeersBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          ensureSlowPeersIsMutable();
          slowPeers_.add(value);
          onChanged();
        } else {
          slowPeersBuilder_.addMessage(value);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.datanode.SlowPeerReportProto slowPeers = 10;</code>
       */
      public Builder addSlowPeers(
          int index, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowPeerReportProto value) {
        if (slowPeersBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          ensureSlowPeersIsMutable();
          slowPeers_.add(index, value);
          onChanged();
        } else {
          slowPeersBuilder_.addMessage(index, value);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.datanode.SlowPeerReportProto slowPeers = 10;</code>
       */
      public Builder addSlowPeers(
          org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowPeerReportProto.Builder builderForValue) {
        if (slowPeersBuilder_ == null) {
          ensureSlowPeersIsMutable();
          slowPeers_.add(builderForValue.build());
          onChanged();
        } else {
          slowPeersBuilder_.addMessage(builderForValue.build());
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.datanode.SlowPeerReportProto slowPeers = 10;</code>
       */
      public Builder addSlowPeers(
          int index, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowPeerReportProto.Builder builderForValue) {
        if (slowPeersBuilder_ == null) {
          ensureSlowPeersIsMutable();
          slowPeers_.add(index, builderForValue.build());
          onChanged();
        } else {
          slowPeersBuilder_.addMessage(index, builderForValue.build());
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.datanode.SlowPeerReportProto slowPeers = 10;</code>
       */
      public Builder addAllSlowPeers(
          java.lang.Iterable<? extends org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowPeerReportProto> values) {
        if (slowPeersBuilder_ == null) {
          ensureSlowPeersIsMutable();
          org.apache.hadoop.thirdparty.protobuf.AbstractMessageLite.Builder.addAll(
              values, slowPeers_);
          onChanged();
        } else {
          slowPeersBuilder_.addAllMessages(values);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.datanode.SlowPeerReportProto slowPeers = 10;</code>
       */
      public Builder clearSlowPeers() {
        if (slowPeersBuilder_ == null) {
          slowPeers_ = java.util.Collections.emptyList();
          bitField0_ = (bitField0_ & ~0x00000200);
          onChanged();
        } else {
          slowPeersBuilder_.clear();
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.datanode.SlowPeerReportProto slowPeers = 10;</code>
       */
      public Builder removeSlowPeers(int index) {
        if (slowPeersBuilder_ == null) {
          ensureSlowPeersIsMutable();
          slowPeers_.remove(index);
          onChanged();
        } else {
          slowPeersBuilder_.remove(index);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.datanode.SlowPeerReportProto slowPeers = 10;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowPeerReportProto.Builder getSlowPeersBuilder(
          int index) {
        return getSlowPeersFieldBuilder().getBuilder(index);
      }
      /**
       * <code>repeated .hadoop.hdfs.datanode.SlowPeerReportProto slowPeers = 10;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowPeerReportProtoOrBuilder getSlowPeersOrBuilder(
          int index) {
        if (slowPeersBuilder_ == null) {
          return slowPeers_.get(index);
        } else {
          return slowPeersBuilder_.getMessageOrBuilder(index);
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.datanode.SlowPeerReportProto slowPeers = 10;</code>
       */
      public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowPeerReportProtoOrBuilder> 
           getSlowPeersOrBuilderList() {
        if (slowPeersBuilder_ != null) {
          return slowPeersBuilder_.getMessageOrBuilderList();
        } else {
          return java.util.Collections.unmodifiableList(slowPeers_);
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.datanode.SlowPeerReportProto slowPeers = 10;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowPeerReportProto.Builder addSlowPeersBuilder() {
        return getSlowPeersFieldBuilder().addBuilder(
            org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowPeerReportProto.getDefaultInstance());
      }
      /**
       * <code>repeated .hadoop.hdfs.datanode.SlowPeerReportProto slowPeers = 10;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowPeerReportProto.Builder addSlowPeersBuilder(
          int index) {
        return getSlowPeersFieldBuilder().addBuilder(
            index, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowPeerReportProto.getDefaultInstance());
      }
      /**
       * <code>repeated .hadoop.hdfs.datanode.SlowPeerReportProto slowPeers = 10;</code>
       */
      public java.util.List<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowPeerReportProto.Builder> 
           getSlowPeersBuilderList() {
        return getSlowPeersFieldBuilder().getBuilderList();
      }
      private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowPeerReportProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowPeerReportProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowPeerReportProtoOrBuilder> 
          getSlowPeersFieldBuilder() {
        if (slowPeersBuilder_ == null) {
          slowPeersBuilder_ = new org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
              org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowPeerReportProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowPeerReportProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowPeerReportProtoOrBuilder>(
                  slowPeers_,
                  ((bitField0_ & 0x00000200) != 0),
                  getParentForChildren(),
                  isClean());
          slowPeers_ = null;
        }
        return slowPeersBuilder_;
      }

      private java.util.List<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowDiskReportProto> slowDisks_ =
        java.util.Collections.emptyList();
      private void ensureSlowDisksIsMutable() {
        if (!((bitField0_ & 0x00000400) != 0)) {
          slowDisks_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowDiskReportProto>(slowDisks_);
          bitField0_ |= 0x00000400;
        }
      }

      private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowDiskReportProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowDiskReportProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowDiskReportProtoOrBuilder> slowDisksBuilder_;

      /**
       * <code>repeated .hadoop.hdfs.datanode.SlowDiskReportProto slowDisks = 11;</code>
       */
      public java.util.List<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowDiskReportProto> getSlowDisksList() {
        if (slowDisksBuilder_ == null) {
          return java.util.Collections.unmodifiableList(slowDisks_);
        } else {
          return slowDisksBuilder_.getMessageList();
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.datanode.SlowDiskReportProto slowDisks = 11;</code>
       */
      public int getSlowDisksCount() {
        if (slowDisksBuilder_ == null) {
          return slowDisks_.size();
        } else {
          return slowDisksBuilder_.getCount();
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.datanode.SlowDiskReportProto slowDisks = 11;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowDiskReportProto getSlowDisks(int index) {
        if (slowDisksBuilder_ == null) {
          return slowDisks_.get(index);
        } else {
          return slowDisksBuilder_.getMessage(index);
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.datanode.SlowDiskReportProto slowDisks = 11;</code>
       */
      public Builder setSlowDisks(
          int index, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowDiskReportProto value) {
        if (slowDisksBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          ensureSlowDisksIsMutable();
          slowDisks_.set(index, value);
          onChanged();
        } else {
          slowDisksBuilder_.setMessage(index, value);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.datanode.SlowDiskReportProto slowDisks = 11;</code>
       */
      public Builder setSlowDisks(
          int index, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowDiskReportProto.Builder builderForValue) {
        if (slowDisksBuilder_ == null) {
          ensureSlowDisksIsMutable();
          slowDisks_.set(index, builderForValue.build());
          onChanged();
        } else {
          slowDisksBuilder_.setMessage(index, builderForValue.build());
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.datanode.SlowDiskReportProto slowDisks = 11;</code>
       */
      public Builder addSlowDisks(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowDiskReportProto value) {
        if (slowDisksBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          ensureSlowDisksIsMutable();
          slowDisks_.add(value);
          onChanged();
        } else {
          slowDisksBuilder_.addMessage(value);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.datanode.SlowDiskReportProto slowDisks = 11;</code>
       */
      public Builder addSlowDisks(
          int index, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowDiskReportProto value) {
        if (slowDisksBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          ensureSlowDisksIsMutable();
          slowDisks_.add(index, value);
          onChanged();
        } else {
          slowDisksBuilder_.addMessage(index, value);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.datanode.SlowDiskReportProto slowDisks = 11;</code>
       */
      public Builder addSlowDisks(
          org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowDiskReportProto.Builder builderForValue) {
        if (slowDisksBuilder_ == null) {
          ensureSlowDisksIsMutable();
          slowDisks_.add(builderForValue.build());
          onChanged();
        } else {
          slowDisksBuilder_.addMessage(builderForValue.build());
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.datanode.SlowDiskReportProto slowDisks = 11;</code>
       */
      public Builder addSlowDisks(
          int index, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowDiskReportProto.Builder builderForValue) {
        if (slowDisksBuilder_ == null) {
          ensureSlowDisksIsMutable();
          slowDisks_.add(index, builderForValue.build());
          onChanged();
        } else {
          slowDisksBuilder_.addMessage(index, builderForValue.build());
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.datanode.SlowDiskReportProto slowDisks = 11;</code>
       */
      public Builder addAllSlowDisks(
          java.lang.Iterable<? extends org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowDiskReportProto> values) {
        if (slowDisksBuilder_ == null) {
          ensureSlowDisksIsMutable();
          org.apache.hadoop.thirdparty.protobuf.AbstractMessageLite.Builder.addAll(
              values, slowDisks_);
          onChanged();
        } else {
          slowDisksBuilder_.addAllMessages(values);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.datanode.SlowDiskReportProto slowDisks = 11;</code>
       */
      public Builder clearSlowDisks() {
        if (slowDisksBuilder_ == null) {
          slowDisks_ = java.util.Collections.emptyList();
          bitField0_ = (bitField0_ & ~0x00000400);
          onChanged();
        } else {
          slowDisksBuilder_.clear();
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.datanode.SlowDiskReportProto slowDisks = 11;</code>
       */
      public Builder removeSlowDisks(int index) {
        if (slowDisksBuilder_ == null) {
          ensureSlowDisksIsMutable();
          slowDisks_.remove(index);
          onChanged();
        } else {
          slowDisksBuilder_.remove(index);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.datanode.SlowDiskReportProto slowDisks = 11;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowDiskReportProto.Builder getSlowDisksBuilder(
          int index) {
        return getSlowDisksFieldBuilder().getBuilder(index);
      }
      /**
       * <code>repeated .hadoop.hdfs.datanode.SlowDiskReportProto slowDisks = 11;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowDiskReportProtoOrBuilder getSlowDisksOrBuilder(
          int index) {
        if (slowDisksBuilder_ == null) {
          return slowDisks_.get(index);
        } else {
          return slowDisksBuilder_.getMessageOrBuilder(index);
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.datanode.SlowDiskReportProto slowDisks = 11;</code>
       */
      public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowDiskReportProtoOrBuilder> 
           getSlowDisksOrBuilderList() {
        if (slowDisksBuilder_ != null) {
          return slowDisksBuilder_.getMessageOrBuilderList();
        } else {
          return java.util.Collections.unmodifiableList(slowDisks_);
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.datanode.SlowDiskReportProto slowDisks = 11;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowDiskReportProto.Builder addSlowDisksBuilder() {
        return getSlowDisksFieldBuilder().addBuilder(
            org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowDiskReportProto.getDefaultInstance());
      }
      /**
       * <code>repeated .hadoop.hdfs.datanode.SlowDiskReportProto slowDisks = 11;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowDiskReportProto.Builder addSlowDisksBuilder(
          int index) {
        return getSlowDisksFieldBuilder().addBuilder(
            index, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowDiskReportProto.getDefaultInstance());
      }
      /**
       * <code>repeated .hadoop.hdfs.datanode.SlowDiskReportProto slowDisks = 11;</code>
       */
      public java.util.List<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowDiskReportProto.Builder> 
           getSlowDisksBuilderList() {
        return getSlowDisksFieldBuilder().getBuilderList();
      }
      private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowDiskReportProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowDiskReportProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowDiskReportProtoOrBuilder> 
          getSlowDisksFieldBuilder() {
        if (slowDisksBuilder_ == null) {
          slowDisksBuilder_ = new org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
              org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowDiskReportProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowDiskReportProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowDiskReportProtoOrBuilder>(
                  slowDisks_,
                  ((bitField0_ & 0x00000400) != 0),
                  getParentForChildren(),
                  isClean());
          slowDisks_ = null;
        }
        return slowDisksBuilder_;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.datanode.HeartbeatRequestProto)
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.datanode.HeartbeatRequestProto)
    private static final org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto();
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<HeartbeatRequestProto>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<HeartbeatRequestProto>() {
      @java.lang.Override
      public HeartbeatRequestProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<HeartbeatRequestProto> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<HeartbeatRequestProto> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }
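  /*
   * Illustrative sketch (not generated code): how a datanode-side caller might
   * assemble a HeartbeatRequestProto with the Builder methods above. The required
   * field 1 (the datanode registration, declared earlier in this message and not
   * shown in this excerpt) would also have to be set before build(); buildPartial()
   * is used here so the sketch stays self-contained.
   *
   *   DatanodeProtocolProtos.HeartbeatRequestProto.Builder b =
   *       DatanodeProtocolProtos.HeartbeatRequestProto.newBuilder()
   *           .setXmitsInProgress(2)
   *           .setXceiverCount(16)
   *           .setFailedVolumes(0)
   *           .setCacheCapacity(1L << 30)
   *           .setCacheUsed(256L << 20)
   *           .setRequestFullBlockReportLease(false);
   *   b.addReportsBuilder();   // appends one StorageReportProto entry via its nested builder
   *   DatanodeProtocolProtos.HeartbeatRequestProto partial = b.buildPartial();
   */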

  public interface HeartbeatResponseProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.datanode.HeartbeatResponseProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

    /**
     * <pre>
     * Returned commands can be null
     * </pre>
     *
     * <code>repeated .hadoop.hdfs.datanode.DatanodeCommandProto cmds = 1;</code>
     */
    java.util.List<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto> 
        getCmdsList();
    /**
     * <pre>
     * Returned commands can be null
     * </pre>
     *
     * <code>repeated .hadoop.hdfs.datanode.DatanodeCommandProto cmds = 1;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto getCmds(int index);
    /**
     * <pre>
     * Returned commands can be null
     * </pre>
     *
     * <code>repeated .hadoop.hdfs.datanode.DatanodeCommandProto cmds = 1;</code>
     */
    int getCmdsCount();
    /**
     * <pre>
     * Returned commands can be null
     * </pre>
     *
     * <code>repeated .hadoop.hdfs.datanode.DatanodeCommandProto cmds = 1;</code>
     */
    java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProtoOrBuilder> 
        getCmdsOrBuilderList();
    /**
     * <pre>
     * Returned commands can be null
     * </pre>
     *
     * <code>repeated .hadoop.hdfs.datanode.DatanodeCommandProto cmds = 1;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProtoOrBuilder getCmdsOrBuilder(
        int index);

    /**
     * <code>required .hadoop.hdfs.NNHAStatusHeartbeatProto haStatus = 2;</code>
     * @return Whether the haStatus field is set.
     */
    boolean hasHaStatus();
    /**
     * <code>required .hadoop.hdfs.NNHAStatusHeartbeatProto haStatus = 2;</code>
     * @return The haStatus.
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NNHAStatusHeartbeatProto getHaStatus();
    /**
     * <code>required .hadoop.hdfs.NNHAStatusHeartbeatProto haStatus = 2;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NNHAStatusHeartbeatProtoOrBuilder getHaStatusOrBuilder();

    /**
     * <code>optional .hadoop.hdfs.RollingUpgradeStatusProto rollingUpgradeStatus = 3;</code>
     * @return Whether the rollingUpgradeStatus field is set.
     */
    boolean hasRollingUpgradeStatus();
    /**
     * <code>optional .hadoop.hdfs.RollingUpgradeStatusProto rollingUpgradeStatus = 3;</code>
     * @return The rollingUpgradeStatus.
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto getRollingUpgradeStatus();
    /**
     * <code>optional .hadoop.hdfs.RollingUpgradeStatusProto rollingUpgradeStatus = 3;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProtoOrBuilder getRollingUpgradeStatusOrBuilder();

    /**
     * <code>optional .hadoop.hdfs.RollingUpgradeStatusProto rollingUpgradeStatusV2 = 4;</code>
     * @return Whether the rollingUpgradeStatusV2 field is set.
     */
    boolean hasRollingUpgradeStatusV2();
    /**
     * <code>optional .hadoop.hdfs.RollingUpgradeStatusProto rollingUpgradeStatusV2 = 4;</code>
     * @return The rollingUpgradeStatusV2.
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto getRollingUpgradeStatusV2();
    /**
     * <code>optional .hadoop.hdfs.RollingUpgradeStatusProto rollingUpgradeStatusV2 = 4;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProtoOrBuilder getRollingUpgradeStatusV2OrBuilder();

    /**
     * <code>optional uint64 fullBlockReportLeaseId = 5 [default = 0];</code>
     * @return Whether the fullBlockReportLeaseId field is set.
     */
    boolean hasFullBlockReportLeaseId();
    /**
     * <code>optional uint64 fullBlockReportLeaseId = 5 [default = 0];</code>
     * @return The fullBlockReportLeaseId.
     */
    long getFullBlockReportLeaseId();

    /**
     * <code>optional bool isSlownode = 6 [default = false];</code>
     * @return Whether the isSlownode field is set.
     */
    boolean hasIsSlownode();
    /**
     * <code>optional bool isSlownode = 6 [default = false];</code>
     * @return The isSlownode.
     */
    boolean getIsSlownode();
  }
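  /*
   * Illustrative sketch (not generated code): how a caller might inspect a
   * HeartbeatResponseProto through this interface. Every accessor used below is
   * declared above; optional fields are guarded by their has*() methods, and the
   * short class names assume the usual imports from this proto package.
   *
   *   void handle(DatanodeProtocolProtos.HeartbeatResponseProtoOrBuilder resp) {
   *     for (DatanodeProtocolProtos.DatanodeCommandProto cmd : resp.getCmdsList()) {
   *       // dispatch each namenode command to the datanode
   *     }
   *     HdfsServerProtos.NNHAStatusHeartbeatProto ha = resp.getHaStatus();  // required field
   *     if (resp.hasRollingUpgradeStatus()) {
   *       HdfsProtos.RollingUpgradeStatusProto ru = resp.getRollingUpgradeStatus();
   *     }
   *     long leaseId = resp.hasFullBlockReportLeaseId() ? resp.getFullBlockReportLeaseId() : 0L;
   *     boolean slow = resp.getIsSlownode();   // defaults to false when unset
   *   }
   */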
  /**
   * <pre>
   **
   * cmds - Commands from namenode to datanode.
   * haStatus - Status (from an HA perspective) of the NN sending this response
   * </pre>
   *
   * Protobuf type {@code hadoop.hdfs.datanode.HeartbeatResponseProto}
   */
  public static final class HeartbeatResponseProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.hdfs.datanode.HeartbeatResponseProto)
      HeartbeatResponseProtoOrBuilder {
    private static final long serialVersionUID = 0L;
    // Use HeartbeatResponseProto.newBuilder() to construct.
    private HeartbeatResponseProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private HeartbeatResponseProto() {
      cmds_ = java.util.Collections.emptyList();
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new HeartbeatResponseProto();
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_HeartbeatResponseProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_HeartbeatResponseProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto.Builder.class);
    }

    private int bitField0_;
    public static final int CMDS_FIELD_NUMBER = 1;
    @SuppressWarnings("serial")
    private java.util.List<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto> cmds_;
    /**
     * <pre>
     * Returned commands can be null
     * </pre>
     *
     * <code>repeated .hadoop.hdfs.datanode.DatanodeCommandProto cmds = 1;</code>
     */
    @java.lang.Override
    public java.util.List<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto> getCmdsList() {
      return cmds_;
    }
    /**
     * <pre>
     * Returned commands can be null
     * </pre>
     *
     * <code>repeated .hadoop.hdfs.datanode.DatanodeCommandProto cmds = 1;</code>
     */
    @java.lang.Override
    public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProtoOrBuilder> 
        getCmdsOrBuilderList() {
      return cmds_;
    }
    /**
     * <pre>
     * Returned commands can be null
     * </pre>
     *
     * <code>repeated .hadoop.hdfs.datanode.DatanodeCommandProto cmds = 1;</code>
     */
    @java.lang.Override
    public int getCmdsCount() {
      return cmds_.size();
    }
    /**
     * <pre>
     * Returned commands can be null
     * </pre>
     *
     * <code>repeated .hadoop.hdfs.datanode.DatanodeCommandProto cmds = 1;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto getCmds(int index) {
      return cmds_.get(index);
    }
    /**
     * <pre>
     * Returned commands can be null
     * </pre>
     *
     * <code>repeated .hadoop.hdfs.datanode.DatanodeCommandProto cmds = 1;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProtoOrBuilder getCmdsOrBuilder(
        int index) {
      return cmds_.get(index);
    }

    public static final int HASTATUS_FIELD_NUMBER = 2;
    private org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NNHAStatusHeartbeatProto haStatus_;
    /**
     * <code>required .hadoop.hdfs.NNHAStatusHeartbeatProto haStatus = 2;</code>
     * @return Whether the haStatus field is set.
     */
    @java.lang.Override
    public boolean hasHaStatus() {
      return ((bitField0_ & 0x00000001) != 0);
    }
    /**
     * <code>required .hadoop.hdfs.NNHAStatusHeartbeatProto haStatus = 2;</code>
     * @return The haStatus.
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NNHAStatusHeartbeatProto getHaStatus() {
      return haStatus_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NNHAStatusHeartbeatProto.getDefaultInstance() : haStatus_;
    }
    /**
     * <code>required .hadoop.hdfs.NNHAStatusHeartbeatProto haStatus = 2;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NNHAStatusHeartbeatProtoOrBuilder getHaStatusOrBuilder() {
      return haStatus_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NNHAStatusHeartbeatProto.getDefaultInstance() : haStatus_;
    }

    public static final int ROLLINGUPGRADESTATUS_FIELD_NUMBER = 3;
    private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto rollingUpgradeStatus_;
    /**
     * <code>optional .hadoop.hdfs.RollingUpgradeStatusProto rollingUpgradeStatus = 3;</code>
     * @return Whether the rollingUpgradeStatus field is set.
     */
    @java.lang.Override
    public boolean hasRollingUpgradeStatus() {
      return ((bitField0_ & 0x00000002) != 0);
    }
    /**
     * <code>optional .hadoop.hdfs.RollingUpgradeStatusProto rollingUpgradeStatus = 3;</code>
     * @return The rollingUpgradeStatus.
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto getRollingUpgradeStatus() {
      return rollingUpgradeStatus_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto.getDefaultInstance() : rollingUpgradeStatus_;
    }
    /**
     * <code>optional .hadoop.hdfs.RollingUpgradeStatusProto rollingUpgradeStatus = 3;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProtoOrBuilder getRollingUpgradeStatusOrBuilder() {
      return rollingUpgradeStatus_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto.getDefaultInstance() : rollingUpgradeStatus_;
    }

    public static final int ROLLINGUPGRADESTATUSV2_FIELD_NUMBER = 4;
    private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto rollingUpgradeStatusV2_;
    /**
     * <code>optional .hadoop.hdfs.RollingUpgradeStatusProto rollingUpgradeStatusV2 = 4;</code>
     * @return Whether the rollingUpgradeStatusV2 field is set.
     */
    @java.lang.Override
    public boolean hasRollingUpgradeStatusV2() {
      return ((bitField0_ & 0x00000004) != 0);
    }
    /**
     * <code>optional .hadoop.hdfs.RollingUpgradeStatusProto rollingUpgradeStatusV2 = 4;</code>
     * @return The rollingUpgradeStatusV2.
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto getRollingUpgradeStatusV2() {
      return rollingUpgradeStatusV2_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto.getDefaultInstance() : rollingUpgradeStatusV2_;
    }
    /**
     * <code>optional .hadoop.hdfs.RollingUpgradeStatusProto rollingUpgradeStatusV2 = 4;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProtoOrBuilder getRollingUpgradeStatusV2OrBuilder() {
      return rollingUpgradeStatusV2_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto.getDefaultInstance() : rollingUpgradeStatusV2_;
    }

    public static final int FULLBLOCKREPORTLEASEID_FIELD_NUMBER = 5;
    private long fullBlockReportLeaseId_ = 0L;
    /**
     * <code>optional uint64 fullBlockReportLeaseId = 5 [default = 0];</code>
     * @return Whether the fullBlockReportLeaseId field is set.
     */
    @java.lang.Override
    public boolean hasFullBlockReportLeaseId() {
      return ((bitField0_ & 0x00000008) != 0);
    }
    /**
     * <code>optional uint64 fullBlockReportLeaseId = 5 [default = 0];</code>
     * @return The fullBlockReportLeaseId.
     */
    @java.lang.Override
    public long getFullBlockReportLeaseId() {
      return fullBlockReportLeaseId_;
    }

    public static final int ISSLOWNODE_FIELD_NUMBER = 6;
    private boolean isSlownode_ = false;
    /**
     * <code>optional bool isSlownode = 6 [default = false];</code>
     * @return Whether the isSlownode field is set.
     */
    @java.lang.Override
    public boolean hasIsSlownode() {
      return ((bitField0_ & 0x00000010) != 0);
    }
    /**
     * <code>optional bool isSlownode = 6 [default = false];</code>
     * @return The isSlownode.
     */
    @java.lang.Override
    public boolean getIsSlownode() {
      return isSlownode_;
    }

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      if (!hasHaStatus()) {
        memoizedIsInitialized = 0;
        return false;
      }
      for (int i = 0; i < getCmdsCount(); i++) {
        if (!getCmds(i).isInitialized()) {
          memoizedIsInitialized = 0;
          return false;
        }
      }
      if (!getHaStatus().isInitialized()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (hasRollingUpgradeStatus()) {
        if (!getRollingUpgradeStatus().isInitialized()) {
          memoizedIsInitialized = 0;
          return false;
        }
      }
      if (hasRollingUpgradeStatusV2()) {
        if (!getRollingUpgradeStatusV2().isInitialized()) {
          memoizedIsInitialized = 0;
          return false;
        }
      }
      memoizedIsInitialized = 1;
      return true;
    }

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      for (int i = 0; i < cmds_.size(); i++) {
        output.writeMessage(1, cmds_.get(i));
      }
      if (((bitField0_ & 0x00000001) != 0)) {
        output.writeMessage(2, getHaStatus());
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        output.writeMessage(3, getRollingUpgradeStatus());
      }
      if (((bitField0_ & 0x00000004) != 0)) {
        output.writeMessage(4, getRollingUpgradeStatusV2());
      }
      if (((bitField0_ & 0x00000008) != 0)) {
        output.writeUInt64(5, fullBlockReportLeaseId_);
      }
      if (((bitField0_ & 0x00000010) != 0)) {
        output.writeBool(6, isSlownode_);
      }
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      for (int i = 0; i < cmds_.size(); i++) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeMessageSize(1, cmds_.get(i));
      }
      if (((bitField0_ & 0x00000001) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeMessageSize(2, getHaStatus());
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeMessageSize(3, getRollingUpgradeStatus());
      }
      if (((bitField0_ & 0x00000004) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeMessageSize(4, getRollingUpgradeStatusV2());
      }
      if (((bitField0_ & 0x00000008) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt64Size(5, fullBlockReportLeaseId_);
      }
      if (((bitField0_ & 0x00000010) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeBoolSize(6, isSlownode_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto) obj;

      if (!getCmdsList()
          .equals(other.getCmdsList())) return false;
      if (hasHaStatus() != other.hasHaStatus()) return false;
      if (hasHaStatus()) {
        if (!getHaStatus()
            .equals(other.getHaStatus())) return false;
      }
      if (hasRollingUpgradeStatus() != other.hasRollingUpgradeStatus()) return false;
      if (hasRollingUpgradeStatus()) {
        if (!getRollingUpgradeStatus()
            .equals(other.getRollingUpgradeStatus())) return false;
      }
      if (hasRollingUpgradeStatusV2() != other.hasRollingUpgradeStatusV2()) return false;
      if (hasRollingUpgradeStatusV2()) {
        if (!getRollingUpgradeStatusV2()
            .equals(other.getRollingUpgradeStatusV2())) return false;
      }
      if (hasFullBlockReportLeaseId() != other.hasFullBlockReportLeaseId()) return false;
      if (hasFullBlockReportLeaseId()) {
        if (getFullBlockReportLeaseId()
            != other.getFullBlockReportLeaseId()) return false;
      }
      if (hasIsSlownode() != other.hasIsSlownode()) return false;
      if (hasIsSlownode()) {
        if (getIsSlownode()
            != other.getIsSlownode()) return false;
      }
      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      if (getCmdsCount() > 0) {
        hash = (37 * hash) + CMDS_FIELD_NUMBER;
        hash = (53 * hash) + getCmdsList().hashCode();
      }
      if (hasHaStatus()) {
        hash = (37 * hash) + HASTATUS_FIELD_NUMBER;
        hash = (53 * hash) + getHaStatus().hashCode();
      }
      if (hasRollingUpgradeStatus()) {
        hash = (37 * hash) + ROLLINGUPGRADESTATUS_FIELD_NUMBER;
        hash = (53 * hash) + getRollingUpgradeStatus().hashCode();
      }
      if (hasRollingUpgradeStatusV2()) {
        hash = (37 * hash) + ROLLINGUPGRADESTATUSV2_FIELD_NUMBER;
        hash = (53 * hash) + getRollingUpgradeStatusV2().hashCode();
      }
      if (hasFullBlockReportLeaseId()) {
        hash = (37 * hash) + FULLBLOCKREPORTLEASEID_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
            getFullBlockReportLeaseId());
      }
      if (hasIsSlownode()) {
        hash = (37 * hash) + ISSLOWNODE_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashBoolean(
            getIsSlownode());
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * <pre>
     **
     * cmds - Commands from namenode to datanode.
     * haStatus - Status (from an HA perspective) of the NN sending this response
     * </pre>
     *
     * Protobuf type {@code hadoop.hdfs.datanode.HeartbeatResponseProto}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.hdfs.datanode.HeartbeatResponseProto)
        org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProtoOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_HeartbeatResponseProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_HeartbeatResponseProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      private void maybeForceBuilderInitialization() {
        if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
                .alwaysUseFieldBuilders) {
          getCmdsFieldBuilder();
          getHaStatusFieldBuilder();
          getRollingUpgradeStatusFieldBuilder();
          getRollingUpgradeStatusV2FieldBuilder();
        }
      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        bitField0_ = 0;
        if (cmdsBuilder_ == null) {
          cmds_ = java.util.Collections.emptyList();
        } else {
          cmds_ = null;
          cmdsBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000001);
        haStatus_ = null;
        if (haStatusBuilder_ != null) {
          haStatusBuilder_.dispose();
          haStatusBuilder_ = null;
        }
        rollingUpgradeStatus_ = null;
        if (rollingUpgradeStatusBuilder_ != null) {
          rollingUpgradeStatusBuilder_.dispose();
          rollingUpgradeStatusBuilder_ = null;
        }
        rollingUpgradeStatusV2_ = null;
        if (rollingUpgradeStatusV2Builder_ != null) {
          rollingUpgradeStatusV2Builder_.dispose();
          rollingUpgradeStatusV2Builder_ = null;
        }
        fullBlockReportLeaseId_ = 0L;
        isSlownode_ = false;
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_HeartbeatResponseProto_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto build() {
        org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto buildPartial() {
        org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto(this);
        buildPartialRepeatedFields(result);
        if (bitField0_ != 0) { buildPartial0(result); }
        onBuilt();
        return result;
      }

      private void buildPartialRepeatedFields(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto result) {
        if (cmdsBuilder_ == null) {
          if (((bitField0_ & 0x00000001) != 0)) {
            cmds_ = java.util.Collections.unmodifiableList(cmds_);
            bitField0_ = (bitField0_ & ~0x00000001);
          }
          result.cmds_ = cmds_;
        } else {
          result.cmds_ = cmdsBuilder_.build();
        }
      }

      private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto result) {
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000002) != 0)) {
          result.haStatus_ = haStatusBuilder_ == null
              ? haStatus_
              : haStatusBuilder_.build();
          to_bitField0_ |= 0x00000001;
        }
        if (((from_bitField0_ & 0x00000004) != 0)) {
          result.rollingUpgradeStatus_ = rollingUpgradeStatusBuilder_ == null
              ? rollingUpgradeStatus_
              : rollingUpgradeStatusBuilder_.build();
          to_bitField0_ |= 0x00000002;
        }
        if (((from_bitField0_ & 0x00000008) != 0)) {
          result.rollingUpgradeStatusV2_ = rollingUpgradeStatusV2Builder_ == null
              ? rollingUpgradeStatusV2_
              : rollingUpgradeStatusV2Builder_.build();
          to_bitField0_ |= 0x00000004;
        }
        if (((from_bitField0_ & 0x00000010) != 0)) {
          result.fullBlockReportLeaseId_ = fullBlockReportLeaseId_;
          to_bitField0_ |= 0x00000008;
        }
        if (((from_bitField0_ & 0x00000020) != 0)) {
          result.isSlownode_ = isSlownode_;
          to_bitField0_ |= 0x00000010;
        }
        result.bitField0_ |= to_bitField0_;
      }

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto) {
          return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto other) {
        if (other == org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto.getDefaultInstance()) return this;
        if (cmdsBuilder_ == null) {
          if (!other.cmds_.isEmpty()) {
            if (cmds_.isEmpty()) {
              cmds_ = other.cmds_;
              bitField0_ = (bitField0_ & ~0x00000001);
            } else {
              ensureCmdsIsMutable();
              cmds_.addAll(other.cmds_);
            }
            onChanged();
          }
        } else {
          if (!other.cmds_.isEmpty()) {
            if (cmdsBuilder_.isEmpty()) {
              cmdsBuilder_.dispose();
              cmdsBuilder_ = null;
              cmds_ = other.cmds_;
              bitField0_ = (bitField0_ & ~0x00000001);
              cmdsBuilder_ = 
                org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ?
                   getCmdsFieldBuilder() : null;
            } else {
              cmdsBuilder_.addAllMessages(other.cmds_);
            }
          }
        }
        if (other.hasHaStatus()) {
          mergeHaStatus(other.getHaStatus());
        }
        if (other.hasRollingUpgradeStatus()) {
          mergeRollingUpgradeStatus(other.getRollingUpgradeStatus());
        }
        if (other.hasRollingUpgradeStatusV2()) {
          mergeRollingUpgradeStatusV2(other.getRollingUpgradeStatusV2());
        }
        if (other.hasFullBlockReportLeaseId()) {
          setFullBlockReportLeaseId(other.getFullBlockReportLeaseId());
        }
        if (other.hasIsSlownode()) {
          setIsSlownode(other.getIsSlownode());
        }
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        if (!hasHaStatus()) {
          return false;
        }
        for (int i = 0; i < getCmdsCount(); i++) {
          if (!getCmds(i).isInitialized()) {
            return false;
          }
        }
        if (!getHaStatus().isInitialized()) {
          return false;
        }
        if (hasRollingUpgradeStatus()) {
          if (!getRollingUpgradeStatus().isInitialized()) {
            return false;
          }
        }
        if (hasRollingUpgradeStatusV2()) {
          if (!getRollingUpgradeStatusV2().isInitialized()) {
            return false;
          }
        }
        return true;
      }

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              case 10: {
                org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto m =
                    input.readMessage(
                        org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.PARSER,
                        extensionRegistry);
                if (cmdsBuilder_ == null) {
                  ensureCmdsIsMutable();
                  cmds_.add(m);
                } else {
                  cmdsBuilder_.addMessage(m);
                }
                break;
              } // case 10
              case 18: {
                input.readMessage(
                    getHaStatusFieldBuilder().getBuilder(),
                    extensionRegistry);
                bitField0_ |= 0x00000002;
                break;
              } // case 18
              case 26: {
                input.readMessage(
                    getRollingUpgradeStatusFieldBuilder().getBuilder(),
                    extensionRegistry);
                bitField0_ |= 0x00000004;
                break;
              } // case 26
              case 34: {
                input.readMessage(
                    getRollingUpgradeStatusV2FieldBuilder().getBuilder(),
                    extensionRegistry);
                bitField0_ |= 0x00000008;
                break;
              } // case 34
              case 40: {
                fullBlockReportLeaseId_ = input.readUInt64();
                bitField0_ |= 0x00000010;
                break;
              } // case 40
              case 48: {
                isSlownode_ = input.readBool();
                bitField0_ |= 0x00000020;
                break;
              } // case 48
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
      private int bitField0_;

      private java.util.List<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto> cmds_ =
        java.util.Collections.emptyList();
      private void ensureCmdsIsMutable() {
        if (!((bitField0_ & 0x00000001) != 0)) {
          cmds_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto>(cmds_);
          bitField0_ |= 0x00000001;
        }
      }

      private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProtoOrBuilder> cmdsBuilder_;

      /**
       * <pre>
       * Returned commands can be null
       * </pre>
       *
       * <code>repeated .hadoop.hdfs.datanode.DatanodeCommandProto cmds = 1;</code>
       */
      public java.util.List<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto> getCmdsList() {
        if (cmdsBuilder_ == null) {
          return java.util.Collections.unmodifiableList(cmds_);
        } else {
          return cmdsBuilder_.getMessageList();
        }
      }
      /**
       * <pre>
       * Returned commands can be null
       * </pre>
       *
       * <code>repeated .hadoop.hdfs.datanode.DatanodeCommandProto cmds = 1;</code>
       */
      public int getCmdsCount() {
        if (cmdsBuilder_ == null) {
          return cmds_.size();
        } else {
          return cmdsBuilder_.getCount();
        }
      }
      /**
       * <pre>
       * Returned commands can be null
       * </pre>
       *
       * <code>repeated .hadoop.hdfs.datanode.DatanodeCommandProto cmds = 1;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto getCmds(int index) {
        if (cmdsBuilder_ == null) {
          return cmds_.get(index);
        } else {
          return cmdsBuilder_.getMessage(index);
        }
      }
      /**
       * <pre>
       * Returned commands can be null
       * </pre>
       *
       * <code>repeated .hadoop.hdfs.datanode.DatanodeCommandProto cmds = 1;</code>
       */
      public Builder setCmds(
          int index, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto value) {
        if (cmdsBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          ensureCmdsIsMutable();
          cmds_.set(index, value);
          onChanged();
        } else {
          cmdsBuilder_.setMessage(index, value);
        }
        return this;
      }
      /**
       * <pre>
       * Returned commands can be null
       * </pre>
       *
       * <code>repeated .hadoop.hdfs.datanode.DatanodeCommandProto cmds = 1;</code>
       */
      public Builder setCmds(
          int index, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Builder builderForValue) {
        if (cmdsBuilder_ == null) {
          ensureCmdsIsMutable();
          cmds_.set(index, builderForValue.build());
          onChanged();
        } else {
          cmdsBuilder_.setMessage(index, builderForValue.build());
        }
        return this;
      }
      /**
       * <pre>
       * Returned commands can be null
       * </pre>
       *
       * <code>repeated .hadoop.hdfs.datanode.DatanodeCommandProto cmds = 1;</code>
       */
      public Builder addCmds(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto value) {
        if (cmdsBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          ensureCmdsIsMutable();
          cmds_.add(value);
          onChanged();
        } else {
          cmdsBuilder_.addMessage(value);
        }
        return this;
      }
      /**
       * <pre>
       * Returned commands can be null
       * </pre>
       *
       * <code>repeated .hadoop.hdfs.datanode.DatanodeCommandProto cmds = 1;</code>
       */
      public Builder addCmds(
          int index, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto value) {
        if (cmdsBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          ensureCmdsIsMutable();
          cmds_.add(index, value);
          onChanged();
        } else {
          cmdsBuilder_.addMessage(index, value);
        }
        return this;
      }
      /**
       * <pre>
       * Returned commands can be null
       * </pre>
       *
       * <code>repeated .hadoop.hdfs.datanode.DatanodeCommandProto cmds = 1;</code>
       */
      public Builder addCmds(
          org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Builder builderForValue) {
        if (cmdsBuilder_ == null) {
          ensureCmdsIsMutable();
          cmds_.add(builderForValue.build());
          onChanged();
        } else {
          cmdsBuilder_.addMessage(builderForValue.build());
        }
        return this;
      }
      /**
       * <pre>
       * Returned commands can be null
       * </pre>
       *
       * <code>repeated .hadoop.hdfs.datanode.DatanodeCommandProto cmds = 1;</code>
       */
      public Builder addCmds(
          int index, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Builder builderForValue) {
        if (cmdsBuilder_ == null) {
          ensureCmdsIsMutable();
          cmds_.add(index, builderForValue.build());
          onChanged();
        } else {
          cmdsBuilder_.addMessage(index, builderForValue.build());
        }
        return this;
      }
      /**
       * <pre>
       * Returned commands can be null
       * </pre>
       *
       * <code>repeated .hadoop.hdfs.datanode.DatanodeCommandProto cmds = 1;</code>
       */
      public Builder addAllCmds(
          java.lang.Iterable<? extends org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto> values) {
        if (cmdsBuilder_ == null) {
          ensureCmdsIsMutable();
          org.apache.hadoop.thirdparty.protobuf.AbstractMessageLite.Builder.addAll(
              values, cmds_);
          onChanged();
        } else {
          cmdsBuilder_.addAllMessages(values);
        }
        return this;
      }
      /**
       * <pre>
       * Returned commands can be null
       * </pre>
       *
       * <code>repeated .hadoop.hdfs.datanode.DatanodeCommandProto cmds = 1;</code>
       */
      public Builder clearCmds() {
        if (cmdsBuilder_ == null) {
          cmds_ = java.util.Collections.emptyList();
          bitField0_ = (bitField0_ & ~0x00000001);
          onChanged();
        } else {
          cmdsBuilder_.clear();
        }
        return this;
      }
      /**
       * <pre>
       * Returned commands can be null
       * </pre>
       *
       * <code>repeated .hadoop.hdfs.datanode.DatanodeCommandProto cmds = 1;</code>
       */
      public Builder removeCmds(int index) {
        if (cmdsBuilder_ == null) {
          ensureCmdsIsMutable();
          cmds_.remove(index);
          onChanged();
        } else {
          cmdsBuilder_.remove(index);
        }
        return this;
      }
      /**
       * <pre>
       * Returned commands can be null
       * </pre>
       *
       * <code>repeated .hadoop.hdfs.datanode.DatanodeCommandProto cmds = 1;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Builder getCmdsBuilder(
          int index) {
        return getCmdsFieldBuilder().getBuilder(index);
      }
      /**
       * <pre>
       * Returned commands can be null
       * </pre>
       *
       * <code>repeated .hadoop.hdfs.datanode.DatanodeCommandProto cmds = 1;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProtoOrBuilder getCmdsOrBuilder(
          int index) {
        if (cmdsBuilder_ == null) {
          return cmds_.get(index);
        } else {
          return cmdsBuilder_.getMessageOrBuilder(index);
        }
      }
      /**
       * <pre>
       * Returned commands can be null
       * </pre>
       *
       * <code>repeated .hadoop.hdfs.datanode.DatanodeCommandProto cmds = 1;</code>
       */
      public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProtoOrBuilder> 
           getCmdsOrBuilderList() {
        if (cmdsBuilder_ != null) {
          return cmdsBuilder_.getMessageOrBuilderList();
        } else {
          return java.util.Collections.unmodifiableList(cmds_);
        }
      }
      /**
       * <pre>
       * Returned commands can be null
       * </pre>
       *
       * <code>repeated .hadoop.hdfs.datanode.DatanodeCommandProto cmds = 1;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Builder addCmdsBuilder() {
        return getCmdsFieldBuilder().addBuilder(
            org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.getDefaultInstance());
      }
      /**
       * <pre>
       * Returned commands can be null
       * </pre>
       *
       * <code>repeated .hadoop.hdfs.datanode.DatanodeCommandProto cmds = 1;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Builder addCmdsBuilder(
          int index) {
        return getCmdsFieldBuilder().addBuilder(
            index, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.getDefaultInstance());
      }
      /**
       * <pre>
       * Returned commands can be null
       * </pre>
       *
       * <code>repeated .hadoop.hdfs.datanode.DatanodeCommandProto cmds = 1;</code>
       */
      public java.util.List<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Builder> 
           getCmdsBuilderList() {
        return getCmdsFieldBuilder().getBuilderList();
      }
      private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProtoOrBuilder> 
          getCmdsFieldBuilder() {
        if (cmdsBuilder_ == null) {
          cmdsBuilder_ = new org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
              org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProtoOrBuilder>(
                  cmds_,
                  ((bitField0_ & 0x00000001) != 0),
                  getParentForChildren(),
                  isClean());
          cmds_ = null;
        }
        return cmdsBuilder_;
      }

      private org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NNHAStatusHeartbeatProto haStatus_;
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NNHAStatusHeartbeatProto, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NNHAStatusHeartbeatProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NNHAStatusHeartbeatProtoOrBuilder> haStatusBuilder_;
      /**
       * <code>required .hadoop.hdfs.NNHAStatusHeartbeatProto haStatus = 2;</code>
       * @return Whether the haStatus field is set.
       */
      public boolean hasHaStatus() {
        return ((bitField0_ & 0x00000002) != 0);
      }
      /**
       * <code>required .hadoop.hdfs.NNHAStatusHeartbeatProto haStatus = 2;</code>
       * @return The haStatus.
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NNHAStatusHeartbeatProto getHaStatus() {
        if (haStatusBuilder_ == null) {
          return haStatus_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NNHAStatusHeartbeatProto.getDefaultInstance() : haStatus_;
        } else {
          return haStatusBuilder_.getMessage();
        }
      }
      /**
       * <code>required .hadoop.hdfs.NNHAStatusHeartbeatProto haStatus = 2;</code>
       */
      public Builder setHaStatus(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NNHAStatusHeartbeatProto value) {
        if (haStatusBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          haStatus_ = value;
        } else {
          haStatusBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000002;
        onChanged();
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.NNHAStatusHeartbeatProto haStatus = 2;</code>
       */
      public Builder setHaStatus(
          org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NNHAStatusHeartbeatProto.Builder builderForValue) {
        if (haStatusBuilder_ == null) {
          haStatus_ = builderForValue.build();
        } else {
          haStatusBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000002;
        onChanged();
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.NNHAStatusHeartbeatProto haStatus = 2;</code>
       */
      public Builder mergeHaStatus(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NNHAStatusHeartbeatProto value) {
        if (haStatusBuilder_ == null) {
          if (((bitField0_ & 0x00000002) != 0) &&
            haStatus_ != null &&
            haStatus_ != org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NNHAStatusHeartbeatProto.getDefaultInstance()) {
            getHaStatusBuilder().mergeFrom(value);
          } else {
            haStatus_ = value;
          }
        } else {
          haStatusBuilder_.mergeFrom(value);
        }
        if (haStatus_ != null) {
          bitField0_ |= 0x00000002;
          onChanged();
        }
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.NNHAStatusHeartbeatProto haStatus = 2;</code>
       */
      public Builder clearHaStatus() {
        bitField0_ = (bitField0_ & ~0x00000002);
        haStatus_ = null;
        if (haStatusBuilder_ != null) {
          haStatusBuilder_.dispose();
          haStatusBuilder_ = null;
        }
        onChanged();
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.NNHAStatusHeartbeatProto haStatus = 2;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NNHAStatusHeartbeatProto.Builder getHaStatusBuilder() {
        bitField0_ |= 0x00000002;
        onChanged();
        return getHaStatusFieldBuilder().getBuilder();
      }
      /**
       * <code>required .hadoop.hdfs.NNHAStatusHeartbeatProto haStatus = 2;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NNHAStatusHeartbeatProtoOrBuilder getHaStatusOrBuilder() {
        if (haStatusBuilder_ != null) {
          return haStatusBuilder_.getMessageOrBuilder();
        } else {
          return haStatus_ == null ?
              org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NNHAStatusHeartbeatProto.getDefaultInstance() : haStatus_;
        }
      }
      /**
       * <code>required .hadoop.hdfs.NNHAStatusHeartbeatProto haStatus = 2;</code>
       */
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NNHAStatusHeartbeatProto, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NNHAStatusHeartbeatProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NNHAStatusHeartbeatProtoOrBuilder> 
          getHaStatusFieldBuilder() {
        if (haStatusBuilder_ == null) {
          haStatusBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
              org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NNHAStatusHeartbeatProto, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NNHAStatusHeartbeatProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NNHAStatusHeartbeatProtoOrBuilder>(
                  getHaStatus(),
                  getParentForChildren(),
                  isClean());
          haStatus_ = null;
        }
        return haStatusBuilder_;
      }

      private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto rollingUpgradeStatus_;
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProtoOrBuilder> rollingUpgradeStatusBuilder_;
      /**
       * <code>optional .hadoop.hdfs.RollingUpgradeStatusProto rollingUpgradeStatus = 3;</code>
       * @return Whether the rollingUpgradeStatus field is set.
       */
      public boolean hasRollingUpgradeStatus() {
        return ((bitField0_ & 0x00000004) != 0);
      }
      /**
       * <code>optional .hadoop.hdfs.RollingUpgradeStatusProto rollingUpgradeStatus = 3;</code>
       * @return The rollingUpgradeStatus.
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto getRollingUpgradeStatus() {
        if (rollingUpgradeStatusBuilder_ == null) {
          return rollingUpgradeStatus_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto.getDefaultInstance() : rollingUpgradeStatus_;
        } else {
          return rollingUpgradeStatusBuilder_.getMessage();
        }
      }
      /**
       * <code>optional .hadoop.hdfs.RollingUpgradeStatusProto rollingUpgradeStatus = 3;</code>
       */
      public Builder setRollingUpgradeStatus(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto value) {
        if (rollingUpgradeStatusBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          rollingUpgradeStatus_ = value;
        } else {
          rollingUpgradeStatusBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000004;
        onChanged();
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.RollingUpgradeStatusProto rollingUpgradeStatus = 3;</code>
       */
      public Builder setRollingUpgradeStatus(
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto.Builder builderForValue) {
        if (rollingUpgradeStatusBuilder_ == null) {
          rollingUpgradeStatus_ = builderForValue.build();
        } else {
          rollingUpgradeStatusBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000004;
        onChanged();
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.RollingUpgradeStatusProto rollingUpgradeStatus = 3;</code>
       */
      public Builder mergeRollingUpgradeStatus(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto value) {
        if (rollingUpgradeStatusBuilder_ == null) {
          if (((bitField0_ & 0x00000004) != 0) &&
            rollingUpgradeStatus_ != null &&
            rollingUpgradeStatus_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto.getDefaultInstance()) {
            getRollingUpgradeStatusBuilder().mergeFrom(value);
          } else {
            rollingUpgradeStatus_ = value;
          }
        } else {
          rollingUpgradeStatusBuilder_.mergeFrom(value);
        }
        if (rollingUpgradeStatus_ != null) {
          bitField0_ |= 0x00000004;
          onChanged();
        }
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.RollingUpgradeStatusProto rollingUpgradeStatus = 3;</code>
       */
      public Builder clearRollingUpgradeStatus() {
        bitField0_ = (bitField0_ & ~0x00000004);
        rollingUpgradeStatus_ = null;
        if (rollingUpgradeStatusBuilder_ != null) {
          rollingUpgradeStatusBuilder_.dispose();
          rollingUpgradeStatusBuilder_ = null;
        }
        onChanged();
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.RollingUpgradeStatusProto rollingUpgradeStatus = 3;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto.Builder getRollingUpgradeStatusBuilder() {
        bitField0_ |= 0x00000004;
        onChanged();
        return getRollingUpgradeStatusFieldBuilder().getBuilder();
      }
      /**
       * <code>optional .hadoop.hdfs.RollingUpgradeStatusProto rollingUpgradeStatus = 3;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProtoOrBuilder getRollingUpgradeStatusOrBuilder() {
        if (rollingUpgradeStatusBuilder_ != null) {
          return rollingUpgradeStatusBuilder_.getMessageOrBuilder();
        } else {
          return rollingUpgradeStatus_ == null ?
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto.getDefaultInstance() : rollingUpgradeStatus_;
        }
      }
      /**
       * <code>optional .hadoop.hdfs.RollingUpgradeStatusProto rollingUpgradeStatus = 3;</code>
       */
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProtoOrBuilder> 
          getRollingUpgradeStatusFieldBuilder() {
        if (rollingUpgradeStatusBuilder_ == null) {
          rollingUpgradeStatusBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProtoOrBuilder>(
                  getRollingUpgradeStatus(),
                  getParentForChildren(),
                  isClean());
          rollingUpgradeStatus_ = null;
        }
        return rollingUpgradeStatusBuilder_;
      }

      private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto rollingUpgradeStatusV2_;
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProtoOrBuilder> rollingUpgradeStatusV2Builder_;
      /**
       * <code>optional .hadoop.hdfs.RollingUpgradeStatusProto rollingUpgradeStatusV2 = 4;</code>
       * @return Whether the rollingUpgradeStatusV2 field is set.
       */
      public boolean hasRollingUpgradeStatusV2() {
        return ((bitField0_ & 0x00000008) != 0);
      }
      /**
       * <code>optional .hadoop.hdfs.RollingUpgradeStatusProto rollingUpgradeStatusV2 = 4;</code>
       * @return The rollingUpgradeStatusV2.
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto getRollingUpgradeStatusV2() {
        if (rollingUpgradeStatusV2Builder_ == null) {
          return rollingUpgradeStatusV2_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto.getDefaultInstance() : rollingUpgradeStatusV2_;
        } else {
          return rollingUpgradeStatusV2Builder_.getMessage();
        }
      }
      /**
       * <code>optional .hadoop.hdfs.RollingUpgradeStatusProto rollingUpgradeStatusV2 = 4;</code>
       */
      public Builder setRollingUpgradeStatusV2(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto value) {
        if (rollingUpgradeStatusV2Builder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          rollingUpgradeStatusV2_ = value;
        } else {
          rollingUpgradeStatusV2Builder_.setMessage(value);
        }
        bitField0_ |= 0x00000008;
        onChanged();
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.RollingUpgradeStatusProto rollingUpgradeStatusV2 = 4;</code>
       */
      public Builder setRollingUpgradeStatusV2(
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto.Builder builderForValue) {
        if (rollingUpgradeStatusV2Builder_ == null) {
          rollingUpgradeStatusV2_ = builderForValue.build();
        } else {
          rollingUpgradeStatusV2Builder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000008;
        onChanged();
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.RollingUpgradeStatusProto rollingUpgradeStatusV2 = 4;</code>
       */
      public Builder mergeRollingUpgradeStatusV2(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto value) {
        if (rollingUpgradeStatusV2Builder_ == null) {
          if (((bitField0_ & 0x00000008) != 0) &&
            rollingUpgradeStatusV2_ != null &&
            rollingUpgradeStatusV2_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto.getDefaultInstance()) {
            getRollingUpgradeStatusV2Builder().mergeFrom(value);
          } else {
            rollingUpgradeStatusV2_ = value;
          }
        } else {
          rollingUpgradeStatusV2Builder_.mergeFrom(value);
        }
        if (rollingUpgradeStatusV2_ != null) {
          bitField0_ |= 0x00000008;
          onChanged();
        }
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.RollingUpgradeStatusProto rollingUpgradeStatusV2 = 4;</code>
       */
      public Builder clearRollingUpgradeStatusV2() {
        bitField0_ = (bitField0_ & ~0x00000008);
        rollingUpgradeStatusV2_ = null;
        if (rollingUpgradeStatusV2Builder_ != null) {
          rollingUpgradeStatusV2Builder_.dispose();
          rollingUpgradeStatusV2Builder_ = null;
        }
        onChanged();
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.RollingUpgradeStatusProto rollingUpgradeStatusV2 = 4;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto.Builder getRollingUpgradeStatusV2Builder() {
        bitField0_ |= 0x00000008;
        onChanged();
        return getRollingUpgradeStatusV2FieldBuilder().getBuilder();
      }
      /**
       * <code>optional .hadoop.hdfs.RollingUpgradeStatusProto rollingUpgradeStatusV2 = 4;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProtoOrBuilder getRollingUpgradeStatusV2OrBuilder() {
        if (rollingUpgradeStatusV2Builder_ != null) {
          return rollingUpgradeStatusV2Builder_.getMessageOrBuilder();
        } else {
          return rollingUpgradeStatusV2_ == null ?
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto.getDefaultInstance() : rollingUpgradeStatusV2_;
        }
      }
      /**
       * <code>optional .hadoop.hdfs.RollingUpgradeStatusProto rollingUpgradeStatusV2 = 4;</code>
       */
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProtoOrBuilder> 
          getRollingUpgradeStatusV2FieldBuilder() {
        if (rollingUpgradeStatusV2Builder_ == null) {
          rollingUpgradeStatusV2Builder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProtoOrBuilder>(
                  getRollingUpgradeStatusV2(),
                  getParentForChildren(),
                  isClean());
          rollingUpgradeStatusV2_ = null;
        }
        return rollingUpgradeStatusV2Builder_;
      }

      private long fullBlockReportLeaseId_ ;
      /**
       * <code>optional uint64 fullBlockReportLeaseId = 5 [default = 0];</code>
       * @return Whether the fullBlockReportLeaseId field is set.
       */
      @java.lang.Override
      public boolean hasFullBlockReportLeaseId() {
        return ((bitField0_ & 0x00000010) != 0);
      }
      /**
       * <code>optional uint64 fullBlockReportLeaseId = 5 [default = 0];</code>
       * @return The fullBlockReportLeaseId.
       */
      @java.lang.Override
      public long getFullBlockReportLeaseId() {
        return fullBlockReportLeaseId_;
      }
      /**
       * <code>optional uint64 fullBlockReportLeaseId = 5 [default = 0];</code>
       * @param value The fullBlockReportLeaseId to set.
       * @return This builder for chaining.
       */
      public Builder setFullBlockReportLeaseId(long value) {

        fullBlockReportLeaseId_ = value;
        bitField0_ |= 0x00000010;
        onChanged();
        return this;
      }
      /**
       * <code>optional uint64 fullBlockReportLeaseId = 5 [default = 0];</code>
       * @return This builder for chaining.
       */
      public Builder clearFullBlockReportLeaseId() {
        bitField0_ = (bitField0_ & ~0x00000010);
        fullBlockReportLeaseId_ = 0L;
        onChanged();
        return this;
      }

      private boolean isSlownode_ ;
      /**
       * <code>optional bool isSlownode = 6 [default = false];</code>
       * @return Whether the isSlownode field is set.
       */
      @java.lang.Override
      public boolean hasIsSlownode() {
        return ((bitField0_ & 0x00000020) != 0);
      }
      /**
       * <code>optional bool isSlownode = 6 [default = false];</code>
       * @return The isSlownode.
       */
      @java.lang.Override
      public boolean getIsSlownode() {
        return isSlownode_;
      }
      /**
       * <code>optional bool isSlownode = 6 [default = false];</code>
       * @param value The isSlownode to set.
       * @return This builder for chaining.
       */
      public Builder setIsSlownode(boolean value) {

        isSlownode_ = value;
        bitField0_ |= 0x00000020;
        onChanged();
        return this;
      }
      /**
       * <code>optional bool isSlownode = 6 [default = false];</code>
       * @return This builder for chaining.
       */
      public Builder clearIsSlownode() {
        bitField0_ = (bitField0_ & ~0x00000020);
        isSlownode_ = false;
        onChanged();
        return this;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.datanode.HeartbeatResponseProto)
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.datanode.HeartbeatResponseProto)
    private static final org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto();
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<HeartbeatResponseProto>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<HeartbeatResponseProto>() {
      @java.lang.Override
      public HeartbeatResponseProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<HeartbeatResponseProto> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<HeartbeatResponseProto> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }
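  // Illustrative sketch (not generated code): the parseDelimitedFrom overloads defined on
  // HeartbeatResponseProto pair with writeDelimitedTo(OutputStream), inherited from the
  // message base class, to stream several length-prefixed responses over a single stream.
  // "out" and "in" are hypothetical streams supplied by the caller.
  //
  //   resp.writeDelimitedTo(out);                          // length-prefixed write
  //   HeartbeatResponseProto next =
  //       HeartbeatResponseProto.parseDelimitedFrom(in);   // returns null at end of stream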

  public interface BlockReportRequestProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.datanode.BlockReportRequestProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

    /**
     * <code>required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;</code>
     * @return Whether the registration field is set.
     */
    boolean hasRegistration();
    /**
     * <code>required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;</code>
     * @return The registration.
     */
    org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto getRegistration();
    /**
     * <code>required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder getRegistrationOrBuilder();

    /**
     * <code>required string blockPoolId = 2;</code>
     * @return Whether the blockPoolId field is set.
     */
    boolean hasBlockPoolId();
    /**
     * <code>required string blockPoolId = 2;</code>
     * @return The blockPoolId.
     */
    java.lang.String getBlockPoolId();
    /**
     * <code>required string blockPoolId = 2;</code>
     * @return The bytes for blockPoolId.
     */
    org.apache.hadoop.thirdparty.protobuf.ByteString
        getBlockPoolIdBytes();

    /**
     * <code>repeated .hadoop.hdfs.datanode.StorageBlockReportProto reports = 3;</code>
     */
    java.util.List<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto> 
        getReportsList();
    /**
     * <code>repeated .hadoop.hdfs.datanode.StorageBlockReportProto reports = 3;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto getReports(int index);
    /**
     * <code>repeated .hadoop.hdfs.datanode.StorageBlockReportProto reports = 3;</code>
     */
    int getReportsCount();
    /**
     * <code>repeated .hadoop.hdfs.datanode.StorageBlockReportProto reports = 3;</code>
     */
    java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProtoOrBuilder> 
        getReportsOrBuilderList();
    /**
     * <code>repeated .hadoop.hdfs.datanode.StorageBlockReportProto reports = 3;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProtoOrBuilder getReportsOrBuilder(
        int index);

    /**
     * <code>optional .hadoop.hdfs.datanode.BlockReportContextProto context = 4;</code>
     * @return Whether the context field is set.
     */
    boolean hasContext();
    /**
     * <code>optional .hadoop.hdfs.datanode.BlockReportContextProto context = 4;</code>
     * @return The context.
     */
    org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportContextProto getContext();
    /**
     * <code>optional .hadoop.hdfs.datanode.BlockReportContextProto context = 4;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportContextProtoOrBuilder getContextOrBuilder();
  }
  /**
   * <pre>
   **
   * registration - datanode registration information
   * blockPoolID  - block pool ID of the reported blocks
   * blocks       - each block is represented as multiple longs in the array.
   *                first long represents block ID
   *                second long represents length
   *                third long represents gen stamp
   *                fourth long (if under construction) represents replica state
   * context      - An optional field containing information about the context
   *                of this block report.
   * </pre>
   *
   * Protobuf type {@code hadoop.hdfs.datanode.BlockReportRequestProto}
   */
  public static final class BlockReportRequestProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.hdfs.datanode.BlockReportRequestProto)
      BlockReportRequestProtoOrBuilder {
  private static final long serialVersionUID = 0L;
    // Use BlockReportRequestProto.newBuilder() to construct.
    private BlockReportRequestProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private BlockReportRequestProto() {
      blockPoolId_ = "";
      reports_ = java.util.Collections.emptyList();
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new BlockReportRequestProto();
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_BlockReportRequestProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_BlockReportRequestProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto.Builder.class);
    }

    private int bitField0_;
    public static final int REGISTRATION_FIELD_NUMBER = 1;
    private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto registration_;
    /**
     * <code>required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;</code>
     * @return Whether the registration field is set.
     */
    @java.lang.Override
    public boolean hasRegistration() {
      return ((bitField0_ & 0x00000001) != 0);
    }
    /**
     * <code>required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;</code>
     * @return The registration.
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto getRegistration() {
      return registration_ == null ? org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance() : registration_;
    }
    /**
     * <code>required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder getRegistrationOrBuilder() {
      return registration_ == null ? org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance() : registration_;
    }

    public static final int BLOCKPOOLID_FIELD_NUMBER = 2;
    @SuppressWarnings("serial")
    private volatile java.lang.Object blockPoolId_ = "";
    /**
     * <code>required string blockPoolId = 2;</code>
     * @return Whether the blockPoolId field is set.
     */
    @java.lang.Override
    public boolean hasBlockPoolId() {
      return ((bitField0_ & 0x00000002) != 0);
    }
    /**
     * <code>required string blockPoolId = 2;</code>
     * @return The blockPoolId.
     */
    @java.lang.Override
    public java.lang.String getBlockPoolId() {
      java.lang.Object ref = blockPoolId_;
      if (ref instanceof java.lang.String) {
        return (java.lang.String) ref;
      } else {
        org.apache.hadoop.thirdparty.protobuf.ByteString bs = 
            (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        java.lang.String s = bs.toStringUtf8();
        if (bs.isValidUtf8()) {
          blockPoolId_ = s;
        }
        return s;
      }
    }
    /**
     * <code>required string blockPoolId = 2;</code>
     * @return The bytes for blockPoolId.
     */
    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.ByteString
        getBlockPoolIdBytes() {
      java.lang.Object ref = blockPoolId_;
      if (ref instanceof java.lang.String) {
        org.apache.hadoop.thirdparty.protobuf.ByteString b = 
            org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                (java.lang.String) ref);
        blockPoolId_ = b;
        return b;
      } else {
        return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
      }
    }

    public static final int REPORTS_FIELD_NUMBER = 3;
    @SuppressWarnings("serial")
    private java.util.List<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto> reports_;
    /**
     * <code>repeated .hadoop.hdfs.datanode.StorageBlockReportProto reports = 3;</code>
     */
    @java.lang.Override
    public java.util.List<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto> getReportsList() {
      return reports_;
    }
    /**
     * <code>repeated .hadoop.hdfs.datanode.StorageBlockReportProto reports = 3;</code>
     */
    @java.lang.Override
    public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProtoOrBuilder> 
        getReportsOrBuilderList() {
      return reports_;
    }
    /**
     * <code>repeated .hadoop.hdfs.datanode.StorageBlockReportProto reports = 3;</code>
     */
    @java.lang.Override
    public int getReportsCount() {
      return reports_.size();
    }
    /**
     * <code>repeated .hadoop.hdfs.datanode.StorageBlockReportProto reports = 3;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto getReports(int index) {
      return reports_.get(index);
    }
    /**
     * <code>repeated .hadoop.hdfs.datanode.StorageBlockReportProto reports = 3;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProtoOrBuilder getReportsOrBuilder(
        int index) {
      return reports_.get(index);
    }

    public static final int CONTEXT_FIELD_NUMBER = 4;
    private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportContextProto context_;
    /**
     * <code>optional .hadoop.hdfs.datanode.BlockReportContextProto context = 4;</code>
     * @return Whether the context field is set.
     */
    @java.lang.Override
    public boolean hasContext() {
      return ((bitField0_ & 0x00000004) != 0);
    }
    /**
     * <code>optional .hadoop.hdfs.datanode.BlockReportContextProto context = 4;</code>
     * @return The context.
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportContextProto getContext() {
      return context_ == null ? org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportContextProto.getDefaultInstance() : context_;
    }
    /**
     * <code>optional .hadoop.hdfs.datanode.BlockReportContextProto context = 4;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportContextProtoOrBuilder getContextOrBuilder() {
      return context_ == null ? org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportContextProto.getDefaultInstance() : context_;
    }

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      if (!hasRegistration()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasBlockPoolId()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!getRegistration().isInitialized()) {
        memoizedIsInitialized = 0;
        return false;
      }
      for (int i = 0; i < getReportsCount(); i++) {
        if (!getReports(i).isInitialized()) {
          memoizedIsInitialized = 0;
          return false;
        }
      }
      if (hasContext()) {
        if (!getContext().isInitialized()) {
          memoizedIsInitialized = 0;
          return false;
        }
      }
      memoizedIsInitialized = 1;
      return true;
    }

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      if (((bitField0_ & 0x00000001) != 0)) {
        output.writeMessage(1, getRegistration());
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 2, blockPoolId_);
      }
      for (int i = 0; i < reports_.size(); i++) {
        output.writeMessage(3, reports_.get(i));
      }
      if (((bitField0_ & 0x00000004) != 0)) {
        output.writeMessage(4, getContext());
      }
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeMessageSize(1, getRegistration());
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(2, blockPoolId_);
      }
      for (int i = 0; i < reports_.size(); i++) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeMessageSize(3, reports_.get(i));
      }
      if (((bitField0_ & 0x00000004) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeMessageSize(4, getContext());
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto) obj;

      if (hasRegistration() != other.hasRegistration()) return false;
      if (hasRegistration()) {
        if (!getRegistration()
            .equals(other.getRegistration())) return false;
      }
      if (hasBlockPoolId() != other.hasBlockPoolId()) return false;
      if (hasBlockPoolId()) {
        if (!getBlockPoolId()
            .equals(other.getBlockPoolId())) return false;
      }
      if (!getReportsList()
          .equals(other.getReportsList())) return false;
      if (hasContext() != other.hasContext()) return false;
      if (hasContext()) {
        if (!getContext()
            .equals(other.getContext())) return false;
      }
      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      if (hasRegistration()) {
        hash = (37 * hash) + REGISTRATION_FIELD_NUMBER;
        hash = (53 * hash) + getRegistration().hashCode();
      }
      if (hasBlockPoolId()) {
        hash = (37 * hash) + BLOCKPOOLID_FIELD_NUMBER;
        hash = (53 * hash) + getBlockPoolId().hashCode();
      }
      if (getReportsCount() > 0) {
        hash = (37 * hash) + REPORTS_FIELD_NUMBER;
        hash = (53 * hash) + getReportsList().hashCode();
      }
      if (hasContext()) {
        hash = (37 * hash) + CONTEXT_FIELD_NUMBER;
        hash = (53 * hash) + getContext().hashCode();
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * <pre>
     **
     * registration - datanode registration information
     * blockPoolID  - block pool ID of the reported blocks
     * blocks       - each block is represented as multiple longs in the array.
     *                first long represents block ID
     *                second long represents length
     *                third long represents gen stamp
     *                fourth long (if under construction) represents replica state
     * context      - An optional field containing information about the context
     *                of this block report.
     * </pre>
     *
     * Protobuf type {@code hadoop.hdfs.datanode.BlockReportRequestProto}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.hdfs.datanode.BlockReportRequestProto)
        org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProtoOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_BlockReportRequestProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_BlockReportRequestProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      private void maybeForceBuilderInitialization() {
        if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
                .alwaysUseFieldBuilders) {
          getRegistrationFieldBuilder();
          getReportsFieldBuilder();
          getContextFieldBuilder();
        }
      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        bitField0_ = 0;
        registration_ = null;
        if (registrationBuilder_ != null) {
          registrationBuilder_.dispose();
          registrationBuilder_ = null;
        }
        blockPoolId_ = "";
        if (reportsBuilder_ == null) {
          reports_ = java.util.Collections.emptyList();
        } else {
          reports_ = null;
          reportsBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000004);
        context_ = null;
        if (contextBuilder_ != null) {
          contextBuilder_.dispose();
          contextBuilder_ = null;
        }
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_BlockReportRequestProto_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto build() {
        org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto buildPartial() {
        org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto(this);
        buildPartialRepeatedFields(result);
        if (bitField0_ != 0) { buildPartial0(result); }
        onBuilt();
        return result;
      }

      private void buildPartialRepeatedFields(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto result) {
        if (reportsBuilder_ == null) {
          if (((bitField0_ & 0x00000004) != 0)) {
            reports_ = java.util.Collections.unmodifiableList(reports_);
            bitField0_ = (bitField0_ & ~0x00000004);
          }
          result.reports_ = reports_;
        } else {
          result.reports_ = reportsBuilder_.build();
        }
      }

      private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto result) {
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) != 0)) {
          result.registration_ = registrationBuilder_ == null
              ? registration_
              : registrationBuilder_.build();
          to_bitField0_ |= 0x00000001;
        }
        if (((from_bitField0_ & 0x00000002) != 0)) {
          result.blockPoolId_ = blockPoolId_;
          to_bitField0_ |= 0x00000002;
        }
        if (((from_bitField0_ & 0x00000008) != 0)) {
          result.context_ = contextBuilder_ == null
              ? context_
              : contextBuilder_.build();
          to_bitField0_ |= 0x00000004;
        }
        result.bitField0_ |= to_bitField0_;
      }

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto) {
          return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto other) {
        if (other == org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto.getDefaultInstance()) return this;
        if (other.hasRegistration()) {
          mergeRegistration(other.getRegistration());
        }
        if (other.hasBlockPoolId()) {
          blockPoolId_ = other.blockPoolId_;
          bitField0_ |= 0x00000002;
          onChanged();
        }
        if (reportsBuilder_ == null) {
          if (!other.reports_.isEmpty()) {
            if (reports_.isEmpty()) {
              reports_ = other.reports_;
              bitField0_ = (bitField0_ & ~0x00000004);
            } else {
              ensureReportsIsMutable();
              reports_.addAll(other.reports_);
            }
            onChanged();
          }
        } else {
          if (!other.reports_.isEmpty()) {
            if (reportsBuilder_.isEmpty()) {
              reportsBuilder_.dispose();
              reportsBuilder_ = null;
              reports_ = other.reports_;
              bitField0_ = (bitField0_ & ~0x00000004);
              reportsBuilder_ = 
                org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ?
                   getReportsFieldBuilder() : null;
            } else {
              reportsBuilder_.addAllMessages(other.reports_);
            }
          }
        }
        if (other.hasContext()) {
          mergeContext(other.getContext());
        }
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        if (!hasRegistration()) {
          return false;
        }
        if (!hasBlockPoolId()) {
          return false;
        }
        if (!getRegistration().isInitialized()) {
          return false;
        }
        for (int i = 0; i < getReportsCount(); i++) {
          if (!getReports(i).isInitialized()) {
            return false;
          }
        }
        if (hasContext()) {
          if (!getContext().isInitialized()) {
            return false;
          }
        }
        return true;
      }

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              case 10: {
                input.readMessage(
                    getRegistrationFieldBuilder().getBuilder(),
                    extensionRegistry);
                bitField0_ |= 0x00000001;
                break;
              } // case 10
              case 18: {
                blockPoolId_ = input.readBytes();
                bitField0_ |= 0x00000002;
                break;
              } // case 18
              case 26: {
                org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto m =
                    input.readMessage(
                        org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto.PARSER,
                        extensionRegistry);
                if (reportsBuilder_ == null) {
                  ensureReportsIsMutable();
                  reports_.add(m);
                } else {
                  reportsBuilder_.addMessage(m);
                }
                break;
              } // case 26
              case 34: {
                input.readMessage(
                    getContextFieldBuilder().getBuilder(),
                    extensionRegistry);
                bitField0_ |= 0x00000008;
                break;
              } // case 34
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
      private int bitField0_;

      private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto registration_;
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder> registrationBuilder_;
      /**
       * <code>required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;</code>
       * @return Whether the registration field is set.
       */
      public boolean hasRegistration() {
        return ((bitField0_ & 0x00000001) != 0);
      }
      /**
       * <code>required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;</code>
       * @return The registration.
       */
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto getRegistration() {
        if (registrationBuilder_ == null) {
          return registration_ == null ? org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance() : registration_;
        } else {
          return registrationBuilder_.getMessage();
        }
      }
      /**
       * <code>required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;</code>
       */
      public Builder setRegistration(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto value) {
        if (registrationBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          registration_ = value;
        } else {
          registrationBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;</code>
       */
      public Builder setRegistration(
          org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder builderForValue) {
        if (registrationBuilder_ == null) {
          registration_ = builderForValue.build();
        } else {
          registrationBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;</code>
       */
      public Builder mergeRegistration(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto value) {
        if (registrationBuilder_ == null) {
          if (((bitField0_ & 0x00000001) != 0) &&
            registration_ != null &&
            registration_ != org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance()) {
            getRegistrationBuilder().mergeFrom(value);
          } else {
            registration_ = value;
          }
        } else {
          registrationBuilder_.mergeFrom(value);
        }
        if (registration_ != null) {
          bitField0_ |= 0x00000001;
          onChanged();
        }
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;</code>
       */
      public Builder clearRegistration() {
        bitField0_ = (bitField0_ & ~0x00000001);
        registration_ = null;
        if (registrationBuilder_ != null) {
          registrationBuilder_.dispose();
          registrationBuilder_ = null;
        }
        onChanged();
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder getRegistrationBuilder() {
        bitField0_ |= 0x00000001;
        onChanged();
        return getRegistrationFieldBuilder().getBuilder();
      }
      /**
       * <code>required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder getRegistrationOrBuilder() {
        if (registrationBuilder_ != null) {
          return registrationBuilder_.getMessageOrBuilder();
        } else {
          return registration_ == null ?
              org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance() : registration_;
        }
      }
      /**
       * <code>required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;</code>
       */
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder> 
          getRegistrationFieldBuilder() {
        if (registrationBuilder_ == null) {
          registrationBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
              org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder>(
                  getRegistration(),
                  getParentForChildren(),
                  isClean());
          registration_ = null;
        }
        return registrationBuilder_;
      }

      private java.lang.Object blockPoolId_ = "";
      /**
       * <code>required string blockPoolId = 2;</code>
       * @return Whether the blockPoolId field is set.
       */
      public boolean hasBlockPoolId() {
        return ((bitField0_ & 0x00000002) != 0);
      }
      /**
       * <code>required string blockPoolId = 2;</code>
       * @return The blockPoolId.
       */
      public java.lang.String getBlockPoolId() {
        java.lang.Object ref = blockPoolId_;
        if (!(ref instanceof java.lang.String)) {
          org.apache.hadoop.thirdparty.protobuf.ByteString bs =
              (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
          java.lang.String s = bs.toStringUtf8();
          if (bs.isValidUtf8()) {
            blockPoolId_ = s;
          }
          return s;
        } else {
          return (java.lang.String) ref;
        }
      }
      /**
       * <code>required string blockPoolId = 2;</code>
       * @return The bytes for blockPoolId.
       */
      public org.apache.hadoop.thirdparty.protobuf.ByteString
          getBlockPoolIdBytes() {
        java.lang.Object ref = blockPoolId_;
        if (ref instanceof String) {
          org.apache.hadoop.thirdparty.protobuf.ByteString b = 
              org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                  (java.lang.String) ref);
          blockPoolId_ = b;
          return b;
        } else {
          return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        }
      }
      /**
       * <code>required string blockPoolId = 2;</code>
       * @param value The blockPoolId to set.
       * @return This builder for chaining.
       */
      public Builder setBlockPoolId(
          java.lang.String value) {
        if (value == null) { throw new NullPointerException(); }
        blockPoolId_ = value;
        bitField0_ |= 0x00000002;
        onChanged();
        return this;
      }
      /**
       * <code>required string blockPoolId = 2;</code>
       * @return This builder for chaining.
       */
      public Builder clearBlockPoolId() {
        blockPoolId_ = getDefaultInstance().getBlockPoolId();
        bitField0_ = (bitField0_ & ~0x00000002);
        onChanged();
        return this;
      }
      /**
       * <code>required string blockPoolId = 2;</code>
       * @param value The bytes for blockPoolId to set.
       * @return This builder for chaining.
       */
      public Builder setBlockPoolIdBytes(
          org.apache.hadoop.thirdparty.protobuf.ByteString value) {
        if (value == null) { throw new NullPointerException(); }
        blockPoolId_ = value;
        bitField0_ |= 0x00000002;
        onChanged();
        return this;
      }

      private java.util.List<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto> reports_ =
        java.util.Collections.emptyList();
      private void ensureReportsIsMutable() {
        if (!((bitField0_ & 0x00000004) != 0)) {
          reports_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto>(reports_);
          bitField0_ |= 0x00000004;
        }
      }

      private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProtoOrBuilder> reportsBuilder_;

      /**
       * <code>repeated .hadoop.hdfs.datanode.StorageBlockReportProto reports = 3;</code>
       */
      public java.util.List<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto> getReportsList() {
        if (reportsBuilder_ == null) {
          return java.util.Collections.unmodifiableList(reports_);
        } else {
          return reportsBuilder_.getMessageList();
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.datanode.StorageBlockReportProto reports = 3;</code>
       */
      public int getReportsCount() {
        if (reportsBuilder_ == null) {
          return reports_.size();
        } else {
          return reportsBuilder_.getCount();
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.datanode.StorageBlockReportProto reports = 3;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto getReports(int index) {
        if (reportsBuilder_ == null) {
          return reports_.get(index);
        } else {
          return reportsBuilder_.getMessage(index);
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.datanode.StorageBlockReportProto reports = 3;</code>
       */
      public Builder setReports(
          int index, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto value) {
        if (reportsBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          ensureReportsIsMutable();
          reports_.set(index, value);
          onChanged();
        } else {
          reportsBuilder_.setMessage(index, value);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.datanode.StorageBlockReportProto reports = 3;</code>
       */
      public Builder setReports(
          int index, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto.Builder builderForValue) {
        if (reportsBuilder_ == null) {
          ensureReportsIsMutable();
          reports_.set(index, builderForValue.build());
          onChanged();
        } else {
          reportsBuilder_.setMessage(index, builderForValue.build());
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.datanode.StorageBlockReportProto reports = 3;</code>
       */
      public Builder addReports(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto value) {
        if (reportsBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          ensureReportsIsMutable();
          reports_.add(value);
          onChanged();
        } else {
          reportsBuilder_.addMessage(value);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.datanode.StorageBlockReportProto reports = 3;</code>
       */
      public Builder addReports(
          int index, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto value) {
        if (reportsBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          ensureReportsIsMutable();
          reports_.add(index, value);
          onChanged();
        } else {
          reportsBuilder_.addMessage(index, value);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.datanode.StorageBlockReportProto reports = 3;</code>
       */
      public Builder addReports(
          org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto.Builder builderForValue) {
        if (reportsBuilder_ == null) {
          ensureReportsIsMutable();
          reports_.add(builderForValue.build());
          onChanged();
        } else {
          reportsBuilder_.addMessage(builderForValue.build());
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.datanode.StorageBlockReportProto reports = 3;</code>
       */
      public Builder addReports(
          int index, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto.Builder builderForValue) {
        if (reportsBuilder_ == null) {
          ensureReportsIsMutable();
          reports_.add(index, builderForValue.build());
          onChanged();
        } else {
          reportsBuilder_.addMessage(index, builderForValue.build());
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.datanode.StorageBlockReportProto reports = 3;</code>
       */
      public Builder addAllReports(
          java.lang.Iterable<? extends org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto> values) {
        if (reportsBuilder_ == null) {
          ensureReportsIsMutable();
          org.apache.hadoop.thirdparty.protobuf.AbstractMessageLite.Builder.addAll(
              values, reports_);
          onChanged();
        } else {
          reportsBuilder_.addAllMessages(values);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.datanode.StorageBlockReportProto reports = 3;</code>
       */
      public Builder clearReports() {
        if (reportsBuilder_ == null) {
          reports_ = java.util.Collections.emptyList();
          bitField0_ = (bitField0_ & ~0x00000004);
          onChanged();
        } else {
          reportsBuilder_.clear();
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.datanode.StorageBlockReportProto reports = 3;</code>
       */
      public Builder removeReports(int index) {
        if (reportsBuilder_ == null) {
          ensureReportsIsMutable();
          reports_.remove(index);
          onChanged();
        } else {
          reportsBuilder_.remove(index);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.datanode.StorageBlockReportProto reports = 3;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto.Builder getReportsBuilder(
          int index) {
        return getReportsFieldBuilder().getBuilder(index);
      }
      /**
       * <code>repeated .hadoop.hdfs.datanode.StorageBlockReportProto reports = 3;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProtoOrBuilder getReportsOrBuilder(
          int index) {
        if (reportsBuilder_ == null) {
          return reports_.get(index);
        } else {
          return reportsBuilder_.getMessageOrBuilder(index);
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.datanode.StorageBlockReportProto reports = 3;</code>
       */
      public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProtoOrBuilder> 
           getReportsOrBuilderList() {
        if (reportsBuilder_ != null) {
          return reportsBuilder_.getMessageOrBuilderList();
        } else {
          return java.util.Collections.unmodifiableList(reports_);
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.datanode.StorageBlockReportProto reports = 3;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto.Builder addReportsBuilder() {
        return getReportsFieldBuilder().addBuilder(
            org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto.getDefaultInstance());
      }
      /**
       * <code>repeated .hadoop.hdfs.datanode.StorageBlockReportProto reports = 3;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto.Builder addReportsBuilder(
          int index) {
        return getReportsFieldBuilder().addBuilder(
            index, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto.getDefaultInstance());
      }
      /**
       * <code>repeated .hadoop.hdfs.datanode.StorageBlockReportProto reports = 3;</code>
       */
      public java.util.List<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto.Builder> 
           getReportsBuilderList() {
        return getReportsFieldBuilder().getBuilderList();
      }
      private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProtoOrBuilder> 
          getReportsFieldBuilder() {
        if (reportsBuilder_ == null) {
          reportsBuilder_ = new org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
              org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProtoOrBuilder>(
                  reports_,
                  ((bitField0_ & 0x00000004) != 0),
                  getParentForChildren(),
                  isClean());
          reports_ = null;
        }
        return reportsBuilder_;
      }

      private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportContextProto context_;
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportContextProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportContextProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportContextProtoOrBuilder> contextBuilder_;
      /**
       * <code>optional .hadoop.hdfs.datanode.BlockReportContextProto context = 4;</code>
       * @return Whether the context field is set.
       */
      public boolean hasContext() {
        return ((bitField0_ & 0x00000008) != 0);
      }
      /**
       * <code>optional .hadoop.hdfs.datanode.BlockReportContextProto context = 4;</code>
       * @return The context.
       */
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportContextProto getContext() {
        if (contextBuilder_ == null) {
          return context_ == null ? org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportContextProto.getDefaultInstance() : context_;
        } else {
          return contextBuilder_.getMessage();
        }
      }
      /**
       * <code>optional .hadoop.hdfs.datanode.BlockReportContextProto context = 4;</code>
       */
      public Builder setContext(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportContextProto value) {
        if (contextBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          context_ = value;
        } else {
          contextBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000008;
        onChanged();
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.datanode.BlockReportContextProto context = 4;</code>
       */
      public Builder setContext(
          org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportContextProto.Builder builderForValue) {
        if (contextBuilder_ == null) {
          context_ = builderForValue.build();
        } else {
          contextBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000008;
        onChanged();
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.datanode.BlockReportContextProto context = 4;</code>
       */
      public Builder mergeContext(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportContextProto value) {
        if (contextBuilder_ == null) {
          if (((bitField0_ & 0x00000008) != 0) &&
            context_ != null &&
            context_ != org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportContextProto.getDefaultInstance()) {
            getContextBuilder().mergeFrom(value);
          } else {
            context_ = value;
          }
        } else {
          contextBuilder_.mergeFrom(value);
        }
        if (context_ != null) {
          bitField0_ |= 0x00000008;
          onChanged();
        }
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.datanode.BlockReportContextProto context = 4;</code>
       */
      public Builder clearContext() {
        bitField0_ = (bitField0_ & ~0x00000008);
        context_ = null;
        if (contextBuilder_ != null) {
          contextBuilder_.dispose();
          contextBuilder_ = null;
        }
        onChanged();
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.datanode.BlockReportContextProto context = 4;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportContextProto.Builder getContextBuilder() {
        bitField0_ |= 0x00000008;
        onChanged();
        return getContextFieldBuilder().getBuilder();
      }
      /**
       * <code>optional .hadoop.hdfs.datanode.BlockReportContextProto context = 4;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportContextProtoOrBuilder getContextOrBuilder() {
        if (contextBuilder_ != null) {
          return contextBuilder_.getMessageOrBuilder();
        } else {
          return context_ == null ?
              org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportContextProto.getDefaultInstance() : context_;
        }
      }
      /**
       * <code>optional .hadoop.hdfs.datanode.BlockReportContextProto context = 4;</code>
       */
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportContextProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportContextProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportContextProtoOrBuilder> 
          getContextFieldBuilder() {
        if (contextBuilder_ == null) {
          contextBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
              org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportContextProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportContextProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportContextProtoOrBuilder>(
                  getContext(),
                  getParentForChildren(),
                  isClean());
          context_ = null;
        }
        return contextBuilder_;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.datanode.BlockReportRequestProto)
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.datanode.BlockReportRequestProto)
    private static final org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto();
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<BlockReportRequestProto>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<BlockReportRequestProto>() {
      @java.lang.Override
      public BlockReportRequestProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<BlockReportRequestProto> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<BlockReportRequestProto> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }
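
  // Usage sketch: one plausible way a caller might assemble and round-trip a
  // BlockReportRequestProto through the generated Builder API above. The
  // variables `registration` and `storageReport` and the literal
  // "BP-1234-example" are hypothetical placeholders, not values defined in
  // this file.
  //
  //   DatanodeProtocolProtos.BlockReportRequestProto request =
  //       DatanodeProtocolProtos.BlockReportRequestProto.newBuilder()
  //           .setRegistration(registration)        // required DatanodeRegistrationProto
  //           .setBlockPoolId("BP-1234-example")    // required block pool ID
  //           .addReports(storageReport)            // repeated StorageBlockReportProto
  //           .setContext(DatanodeProtocolProtos.BlockReportContextProto.newBuilder()
  //               .setTotalRpcs(1)
  //               .setCurRpc(0)
  //               .setId(42L))                      // optional context
  //           .build();                             // throws if a required field is unset
  //
  //   byte[] wire = request.toByteArray();
  //   DatanodeProtocolProtos.BlockReportRequestProto parsed =
  //       DatanodeProtocolProtos.BlockReportRequestProto.parseFrom(wire);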

  public interface BlockReportContextProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.datanode.BlockReportContextProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

    /**
     * <pre>
     * The total number of RPCs this block report is broken into.
     * </pre>
     *
     * <code>required int32 totalRpcs = 1;</code>
     * @return Whether the totalRpcs field is set.
     */
    boolean hasTotalRpcs();
    /**
     * <pre>
     * The total number of RPCs this block report is broken into.
     * </pre>
     *
     * <code>required int32 totalRpcs = 1;</code>
     * @return The totalRpcs.
     */
    int getTotalRpcs();

    /**
     * <pre>
     * The index of the current RPC (zero-based)
     * </pre>
     *
     * <code>required int32 curRpc = 2;</code>
     * @return Whether the curRpc field is set.
     */
    boolean hasCurRpc();
    /**
     * <pre>
     * The index of the current RPC (zero-based)
     * </pre>
     *
     * <code>required int32 curRpc = 2;</code>
     * @return The curRpc.
     */
    int getCurRpc();

    /**
     * <pre>
     * The unique 64-bit ID of this block report
     * </pre>
     *
     * <code>required int64 id = 3;</code>
     * @return Whether the id field is set.
     */
    boolean hasId();
    /**
     * <pre>
     * The unique 64-bit ID of this block report
     * </pre>
     *
     * <code>required int64 id = 3;</code>
     * @return The id.
     */
    long getId();

    /**
     * <pre>
     * The block report lease ID, or 0 if we are sending without a lease to
     * bypass rate-limiting.
     * </pre>
     *
     * <code>optional uint64 leaseId = 4 [default = 0];</code>
     * @return Whether the leaseId field is set.
     */
    boolean hasLeaseId();
    /**
     * <pre>
     * The block report lease ID, or 0 if we are sending without a lease to
     * bypass rate-limiting.
     * </pre>
     *
     * <code>optional uint64 leaseId = 4 [default = 0];</code>
     * @return The leaseId.
     */
    long getLeaseId();
  }
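
  // Illustrative sketch (editorial note, not protoc output): how a reader might use
  // the accessors declared above to tell a leased block report from one sent without
  // a lease. Treating leaseId == 0 as "no lease" follows the field comment; the
  // helper name is invented for this example.
  private static boolean hasBlockReportLease(BlockReportContextProtoOrBuilder context) {
    // leaseId is optional with default 0, and 0 means the report bypasses rate-limiting.
    return context.hasLeaseId() && context.getLeaseId() != 0L;
  }
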
  /**
   * Protobuf type {@code hadoop.hdfs.datanode.BlockReportContextProto}
   */
  public static final class BlockReportContextProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.hdfs.datanode.BlockReportContextProto)
      BlockReportContextProtoOrBuilder {
    private static final long serialVersionUID = 0L;
    // Use BlockReportContextProto.newBuilder() to construct.
    private BlockReportContextProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private BlockReportContextProto() {
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new BlockReportContextProto();
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_BlockReportContextProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_BlockReportContextProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportContextProto.class, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportContextProto.Builder.class);
    }

    private int bitField0_;
    public static final int TOTALRPCS_FIELD_NUMBER = 1;
    private int totalRpcs_ = 0;
    /**
     * <pre>
     * The total number of RPCs this block report is broken into.
     * </pre>
     *
     * <code>required int32 totalRpcs = 1;</code>
     * @return Whether the totalRpcs field is set.
     */
    @java.lang.Override
    public boolean hasTotalRpcs() {
      return ((bitField0_ & 0x00000001) != 0);
    }
    /**
     * <pre>
     * The total number of RPCs this block report is broken into.
     * </pre>
     *
     * <code>required int32 totalRpcs = 1;</code>
     * @return The totalRpcs.
     */
    @java.lang.Override
    public int getTotalRpcs() {
      return totalRpcs_;
    }

    public static final int CURRPC_FIELD_NUMBER = 2;
    private int curRpc_ = 0;
    /**
     * <pre>
     * The index of the current RPC (zero-based)
     * </pre>
     *
     * <code>required int32 curRpc = 2;</code>
     * @return Whether the curRpc field is set.
     */
    @java.lang.Override
    public boolean hasCurRpc() {
      return ((bitField0_ & 0x00000002) != 0);
    }
    /**
     * <pre>
     * The index of the current RPC (zero-based)
     * </pre>
     *
     * <code>required int32 curRpc = 2;</code>
     * @return The curRpc.
     */
    @java.lang.Override
    public int getCurRpc() {
      return curRpc_;
    }

    public static final int ID_FIELD_NUMBER = 3;
    private long id_ = 0L;
    /**
     * <pre>
     * The unique 64-bit ID of this block report
     * </pre>
     *
     * <code>required int64 id = 3;</code>
     * @return Whether the id field is set.
     */
    @java.lang.Override
    public boolean hasId() {
      return ((bitField0_ & 0x00000004) != 0);
    }
    /**
     * <pre>
     * The unique 64-bit ID of this block report
     * </pre>
     *
     * <code>required int64 id = 3;</code>
     * @return The id.
     */
    @java.lang.Override
    public long getId() {
      return id_;
    }

    public static final int LEASEID_FIELD_NUMBER = 4;
    private long leaseId_ = 0L;
    /**
     * <pre>
     * The block report lease ID, or 0 if we are sending without a lease to
     * bypass rate-limiting.
     * </pre>
     *
     * <code>optional uint64 leaseId = 4 [default = 0];</code>
     * @return Whether the leaseId field is set.
     */
    @java.lang.Override
    public boolean hasLeaseId() {
      return ((bitField0_ & 0x00000008) != 0);
    }
    /**
     * <pre>
     * The block report lease ID, or 0 if we are sending without a lease to
     * bypass rate-limiting.
     * </pre>
     *
     * <code>optional uint64 leaseId = 4 [default = 0];</code>
     * @return The leaseId.
     */
    @java.lang.Override
    public long getLeaseId() {
      return leaseId_;
    }

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      if (!hasTotalRpcs()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasCurRpc()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasId()) {
        memoizedIsInitialized = 0;
        return false;
      }
      memoizedIsInitialized = 1;
      return true;
    }

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      if (((bitField0_ & 0x00000001) != 0)) {
        output.writeInt32(1, totalRpcs_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        output.writeInt32(2, curRpc_);
      }
      if (((bitField0_ & 0x00000004) != 0)) {
        output.writeInt64(3, id_);
      }
      if (((bitField0_ & 0x00000008) != 0)) {
        output.writeUInt64(4, leaseId_);
      }
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeInt32Size(1, totalRpcs_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeInt32Size(2, curRpc_);
      }
      if (((bitField0_ & 0x00000004) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeInt64Size(3, id_);
      }
      if (((bitField0_ & 0x00000008) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt64Size(4, leaseId_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportContextProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportContextProto other = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportContextProto) obj;

      if (hasTotalRpcs() != other.hasTotalRpcs()) return false;
      if (hasTotalRpcs()) {
        if (getTotalRpcs()
            != other.getTotalRpcs()) return false;
      }
      if (hasCurRpc() != other.hasCurRpc()) return false;
      if (hasCurRpc()) {
        if (getCurRpc()
            != other.getCurRpc()) return false;
      }
      if (hasId() != other.hasId()) return false;
      if (hasId()) {
        if (getId()
            != other.getId()) return false;
      }
      if (hasLeaseId() != other.hasLeaseId()) return false;
      if (hasLeaseId()) {
        if (getLeaseId()
            != other.getLeaseId()) return false;
      }
      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      if (hasTotalRpcs()) {
        hash = (37 * hash) + TOTALRPCS_FIELD_NUMBER;
        hash = (53 * hash) + getTotalRpcs();
      }
      if (hasCurRpc()) {
        hash = (37 * hash) + CURRPC_FIELD_NUMBER;
        hash = (53 * hash) + getCurRpc();
      }
      if (hasId()) {
        hash = (37 * hash) + ID_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
            getId());
      }
      if (hasLeaseId()) {
        hash = (37 * hash) + LEASEID_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
            getLeaseId());
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportContextProto parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportContextProto parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportContextProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportContextProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportContextProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportContextProto parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportContextProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportContextProto parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportContextProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportContextProto parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportContextProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportContextProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportContextProto prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * Protobuf type {@code hadoop.hdfs.datanode.BlockReportContextProto}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.hdfs.datanode.BlockReportContextProto)
        org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportContextProtoOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_BlockReportContextProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_BlockReportContextProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportContextProto.class, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportContextProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportContextProto.newBuilder()
      private Builder() {

      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);

      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        bitField0_ = 0;
        totalRpcs_ = 0;
        curRpc_ = 0;
        id_ = 0L;
        leaseId_ = 0L;
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_BlockReportContextProto_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportContextProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportContextProto.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportContextProto build() {
        org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportContextProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportContextProto buildPartial() {
        org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportContextProto result = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportContextProto(this);
        if (bitField0_ != 0) { buildPartial0(result); }
        onBuilt();
        return result;
      }

      private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportContextProto result) {
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) != 0)) {
          result.totalRpcs_ = totalRpcs_;
          to_bitField0_ |= 0x00000001;
        }
        if (((from_bitField0_ & 0x00000002) != 0)) {
          result.curRpc_ = curRpc_;
          to_bitField0_ |= 0x00000002;
        }
        if (((from_bitField0_ & 0x00000004) != 0)) {
          result.id_ = id_;
          to_bitField0_ |= 0x00000004;
        }
        if (((from_bitField0_ & 0x00000008) != 0)) {
          result.leaseId_ = leaseId_;
          to_bitField0_ |= 0x00000008;
        }
        result.bitField0_ |= to_bitField0_;
      }

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportContextProto) {
          return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportContextProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportContextProto other) {
        if (other == org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportContextProto.getDefaultInstance()) return this;
        if (other.hasTotalRpcs()) {
          setTotalRpcs(other.getTotalRpcs());
        }
        if (other.hasCurRpc()) {
          setCurRpc(other.getCurRpc());
        }
        if (other.hasId()) {
          setId(other.getId());
        }
        if (other.hasLeaseId()) {
          setLeaseId(other.getLeaseId());
        }
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        if (!hasTotalRpcs()) {
          return false;
        }
        if (!hasCurRpc()) {
          return false;
        }
        if (!hasId()) {
          return false;
        }
        return true;
      }

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              case 8: {
                totalRpcs_ = input.readInt32();
                bitField0_ |= 0x00000001;
                break;
              } // case 8
              case 16: {
                curRpc_ = input.readInt32();
                bitField0_ |= 0x00000002;
                break;
              } // case 16
              case 24: {
                id_ = input.readInt64();
                bitField0_ |= 0x00000004;
                break;
              } // case 24
              case 32: {
                leaseId_ = input.readUInt64();
                bitField0_ |= 0x00000008;
                break;
              } // case 32
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
      private int bitField0_;

      private int totalRpcs_ ;
      /**
       * <pre>
       * The total number of RPCs this block report is broken into.
       * </pre>
       *
       * <code>required int32 totalRpcs = 1;</code>
       * @return Whether the totalRpcs field is set.
       */
      @java.lang.Override
      public boolean hasTotalRpcs() {
        return ((bitField0_ & 0x00000001) != 0);
      }
      /**
       * <pre>
       * The total number of RPCs this block report is broken into.
       * </pre>
       *
       * <code>required int32 totalRpcs = 1;</code>
       * @return The totalRpcs.
       */
      @java.lang.Override
      public int getTotalRpcs() {
        return totalRpcs_;
      }
      /**
       * <pre>
       * The total number of RPCs this block report is broken into.
       * </pre>
       *
       * <code>required int32 totalRpcs = 1;</code>
       * @param value The totalRpcs to set.
       * @return This builder for chaining.
       */
      public Builder setTotalRpcs(int value) {

        totalRpcs_ = value;
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * <pre>
       * The total number of RPCs this block report is broken into.
       * </pre>
       *
       * <code>required int32 totalRpcs = 1;</code>
       * @return This builder for chaining.
       */
      public Builder clearTotalRpcs() {
        bitField0_ = (bitField0_ & ~0x00000001);
        totalRpcs_ = 0;
        onChanged();
        return this;
      }

      private int curRpc_ ;
      /**
       * <pre>
       * The index of the current RPC (zero-based)
       * </pre>
       *
       * <code>required int32 curRpc = 2;</code>
       * @return Whether the curRpc field is set.
       */
      @java.lang.Override
      public boolean hasCurRpc() {
        return ((bitField0_ & 0x00000002) != 0);
      }
      /**
       * <pre>
       * The index of the current RPC (zero-based)
       * </pre>
       *
       * <code>required int32 curRpc = 2;</code>
       * @return The curRpc.
       */
      @java.lang.Override
      public int getCurRpc() {
        return curRpc_;
      }
      /**
       * <pre>
       * The index of the current RPC (zero-based)
       * </pre>
       *
       * <code>required int32 curRpc = 2;</code>
       * @param value The curRpc to set.
       * @return This builder for chaining.
       */
      public Builder setCurRpc(int value) {

        curRpc_ = value;
        bitField0_ |= 0x00000002;
        onChanged();
        return this;
      }
      /**
       * <pre>
       * The index of the current RPC (zero-based)
       * </pre>
       *
       * <code>required int32 curRpc = 2;</code>
       * @return This builder for chaining.
       */
      public Builder clearCurRpc() {
        bitField0_ = (bitField0_ & ~0x00000002);
        curRpc_ = 0;
        onChanged();
        return this;
      }

      private long id_ ;
      /**
       * <pre>
       * The unique 64-bit ID of this block report
       * </pre>
       *
       * <code>required int64 id = 3;</code>
       * @return Whether the id field is set.
       */
      @java.lang.Override
      public boolean hasId() {
        return ((bitField0_ & 0x00000004) != 0);
      }
      /**
       * <pre>
       * The unique 64-bit ID of this block report
       * </pre>
       *
       * <code>required int64 id = 3;</code>
       * @return The id.
       */
      @java.lang.Override
      public long getId() {
        return id_;
      }
      /**
       * <pre>
       * The unique 64-bit ID of this block report
       * </pre>
       *
       * <code>required int64 id = 3;</code>
       * @param value The id to set.
       * @return This builder for chaining.
       */
      public Builder setId(long value) {

        id_ = value;
        bitField0_ |= 0x00000004;
        onChanged();
        return this;
      }
      /**
       * <pre>
       * The unique 64-bit ID of this block report
       * </pre>
       *
       * <code>required int64 id = 3;</code>
       * @return This builder for chaining.
       */
      public Builder clearId() {
        bitField0_ = (bitField0_ & ~0x00000004);
        id_ = 0L;
        onChanged();
        return this;
      }

      private long leaseId_ ;
      /**
       * <pre>
       * The block report lease ID, or 0 if we are sending without a lease to
       * bypass rate-limiting.
       * </pre>
       *
       * <code>optional uint64 leaseId = 4 [default = 0];</code>
       * @return Whether the leaseId field is set.
       */
      @java.lang.Override
      public boolean hasLeaseId() {
        return ((bitField0_ & 0x00000008) != 0);
      }
      /**
       * <pre>
       * The block report lease ID, or 0 if we are sending without a lease to
       * bypass rate-limiting.
       * </pre>
       *
       * <code>optional uint64 leaseId = 4 [default = 0];</code>
       * @return The leaseId.
       */
      @java.lang.Override
      public long getLeaseId() {
        return leaseId_;
      }
      /**
       * <pre>
       * The block report lease ID, or 0 if we are sending without a lease to
       * bypass rate-limiting.
       * </pre>
       *
       * <code>optional uint64 leaseId = 4 [default = 0];</code>
       * @param value The leaseId to set.
       * @return This builder for chaining.
       */
      public Builder setLeaseId(long value) {

        leaseId_ = value;
        bitField0_ |= 0x00000008;
        onChanged();
        return this;
      }
      /**
       * <pre>
       * The block report lease ID, or 0 if we are sending without a lease to
       * bypass rate-limiting.
       * </pre>
       *
       * <code>optional uint64 leaseId = 4 [default = 0];</code>
       * @return This builder for chaining.
       */
      public Builder clearLeaseId() {
        bitField0_ = (bitField0_ & ~0x00000008);
        leaseId_ = 0L;
        onChanged();
        return this;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.datanode.BlockReportContextProto)
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.datanode.BlockReportContextProto)
    private static final org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportContextProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportContextProto();
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportContextProto getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<BlockReportContextProto>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<BlockReportContextProto>() {
      @java.lang.Override
      public BlockReportContextProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<BlockReportContextProto> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<BlockReportContextProto> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportContextProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }
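
  // Illustrative sketch (editorial note, not protoc output): assembling a
  // BlockReportContextProto with the Builder defined above. All field values here
  // are hypothetical; build() throws if any of the required fields were left unset.
  private static BlockReportContextProto exampleBlockReportContext() {
    return BlockReportContextProto.newBuilder()
        .setTotalRpcs(4)            // report split across 4 RPCs (required)
        .setCurRpc(0)               // zero-based index of this RPC (required)
        .setId(0x1234567890ABCDEFL) // unique 64-bit report ID (required)
        .setLeaseId(0L)             // 0 = sent without a lease (optional, default 0)
        .build();
  }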

  public interface StorageBlockReportProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.datanode.StorageBlockReportProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

    /**
     * <pre>
     * Storage
     * </pre>
     *
     * <code>required .hadoop.hdfs.DatanodeStorageProto storage = 1;</code>
     * @return Whether the storage field is set.
     */
    boolean hasStorage();
    /**
     * <pre>
     * Storage
     * </pre>
     *
     * <code>required .hadoop.hdfs.DatanodeStorageProto storage = 1;</code>
     * @return The storage.
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto getStorage();
    /**
     * <pre>
     * Storage
     * </pre>
     *
     * <code>required .hadoop.hdfs.DatanodeStorageProto storage = 1;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProtoOrBuilder getStorageOrBuilder();

    /**
     * <code>repeated uint64 blocks = 2 [packed = true];</code>
     * @return A list containing the blocks.
     */
    java.util.List<java.lang.Long> getBlocksList();
    /**
     * <code>repeated uint64 blocks = 2 [packed = true];</code>
     * @return The count of blocks.
     */
    int getBlocksCount();
    /**
     * <code>repeated uint64 blocks = 2 [packed = true];</code>
     * @param index The index of the element to return.
     * @return The blocks at the given index.
     */
    long getBlocks(int index);

    /**
     * <code>optional uint64 numberOfBlocks = 3;</code>
     * @return Whether the numberOfBlocks field is set.
     */
    boolean hasNumberOfBlocks();
    /**
     * <code>optional uint64 numberOfBlocks = 3;</code>
     * @return The numberOfBlocks.
     */
    long getNumberOfBlocks();

    /**
     * <code>repeated bytes blocksBuffers = 4;</code>
     * @return A list containing the blocksBuffers.
     */
    java.util.List<org.apache.hadoop.thirdparty.protobuf.ByteString> getBlocksBuffersList();
    /**
     * <code>repeated bytes blocksBuffers = 4;</code>
     * @return The count of blocksBuffers.
     */
    int getBlocksBuffersCount();
    /**
     * <code>repeated bytes blocksBuffers = 4;</code>
     * @param index The index of the element to return.
     * @return The blocksBuffers at the given index.
     */
    org.apache.hadoop.thirdparty.protobuf.ByteString getBlocksBuffers(int index);
  }
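
  // Illustrative sketch (editorial note, not protoc output): reading the block count
  // from any StorageBlockReportProtoOrBuilder. Preferring numberOfBlocks over the
  // length of the repeated blocks list is an assumption made only to show the
  // hasX()/getX() pattern for an optional uint64 field.
  private static long reportedBlockCount(StorageBlockReportProtoOrBuilder report) {
    // numberOfBlocks is optional; fall back to the size of the packed blocks list.
    return report.hasNumberOfBlocks()
        ? report.getNumberOfBlocks()
        : report.getBlocksCount();
  }
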
  /**
   * <pre>
   **
   * Report of blocks in a storage
   * </pre>
   *
   * Protobuf type {@code hadoop.hdfs.datanode.StorageBlockReportProto}
   */
  public static final class StorageBlockReportProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.hdfs.datanode.StorageBlockReportProto)
      StorageBlockReportProtoOrBuilder {
    private static final long serialVersionUID = 0L;
    // Use StorageBlockReportProto.newBuilder() to construct.
    private StorageBlockReportProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private StorageBlockReportProto() {
      blocks_ = emptyLongList();
      blocksBuffers_ = emptyList(org.apache.hadoop.thirdparty.protobuf.ByteString.class);
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new StorageBlockReportProto();
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_StorageBlockReportProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_StorageBlockReportProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto.class, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto.Builder.class);
    }

    private int bitField0_;
    public static final int STORAGE_FIELD_NUMBER = 1;
    private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto storage_;
    /**
     * <pre>
     * Storage
     * </pre>
     *
     * <code>required .hadoop.hdfs.DatanodeStorageProto storage = 1;</code>
     * @return Whether the storage field is set.
     */
    @java.lang.Override
    public boolean hasStorage() {
      return ((bitField0_ & 0x00000001) != 0);
    }
    /**
     * <pre>
     * Storage
     * </pre>
     *
     * <code>required .hadoop.hdfs.DatanodeStorageProto storage = 1;</code>
     * @return The storage.
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto getStorage() {
      return storage_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.getDefaultInstance() : storage_;
    }
    /**
     * <pre>
     * Storage
     * </pre>
     *
     * <code>required .hadoop.hdfs.DatanodeStorageProto storage = 1;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProtoOrBuilder getStorageOrBuilder() {
      return storage_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.getDefaultInstance() : storage_;
    }

    public static final int BLOCKS_FIELD_NUMBER = 2;
    @SuppressWarnings("serial")
    private org.apache.hadoop.thirdparty.protobuf.Internal.LongList blocks_ =
        emptyLongList();
    /**
     * <code>repeated uint64 blocks = 2 [packed = true];</code>
     * @return A list containing the blocks.
     */
    @java.lang.Override
    public java.util.List<java.lang.Long>
        getBlocksList() {
      return blocks_;
    }
    /**
     * <code>repeated uint64 blocks = 2 [packed = true];</code>
     * @return The count of blocks.
     */
    public int getBlocksCount() {
      return blocks_.size();
    }
    /**
     * <code>repeated uint64 blocks = 2 [packed = true];</code>
     * @param index The index of the element to return.
     * @return The blocks at the given index.
     */
    public long getBlocks(int index) {
      return blocks_.getLong(index);
    }
    private int blocksMemoizedSerializedSize = -1;

    public static final int NUMBEROFBLOCKS_FIELD_NUMBER = 3;
    private long numberOfBlocks_ = 0L;
    /**
     * <code>optional uint64 numberOfBlocks = 3;</code>
     * @return Whether the numberOfBlocks field is set.
     */
    @java.lang.Override
    public boolean hasNumberOfBlocks() {
      return ((bitField0_ & 0x00000002) != 0);
    }
    /**
     * <code>optional uint64 numberOfBlocks = 3;</code>
     * @return The numberOfBlocks.
     */
    @java.lang.Override
    public long getNumberOfBlocks() {
      return numberOfBlocks_;
    }

    public static final int BLOCKSBUFFERS_FIELD_NUMBER = 4;
    @SuppressWarnings("serial")
    private org.apache.hadoop.thirdparty.protobuf.Internal.ProtobufList<org.apache.hadoop.thirdparty.protobuf.ByteString> blocksBuffers_ =
        emptyList(org.apache.hadoop.thirdparty.protobuf.ByteString.class);
    /**
     * <code>repeated bytes blocksBuffers = 4;</code>
     * @return A list containing the blocksBuffers.
     */
    @java.lang.Override
    public java.util.List<org.apache.hadoop.thirdparty.protobuf.ByteString>
        getBlocksBuffersList() {
      return blocksBuffers_;
    }
    /**
     * <code>repeated bytes blocksBuffers = 4;</code>
     * @return The count of blocksBuffers.
     */
    public int getBlocksBuffersCount() {
      return blocksBuffers_.size();
    }
    /**
     * <code>repeated bytes blocksBuffers = 4;</code>
     * @param index The index of the element to return.
     * @return The blocksBuffers at the given index.
     */
    public org.apache.hadoop.thirdparty.protobuf.ByteString getBlocksBuffers(int index) {
      return blocksBuffers_.get(index);
    }

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      if (!hasStorage()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!getStorage().isInitialized()) {
        memoizedIsInitialized = 0;
        return false;
      }
      memoizedIsInitialized = 1;
      return true;
    }

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      getSerializedSize();
      if (((bitField0_ & 0x00000001) != 0)) {
        output.writeMessage(1, getStorage());
      }
      if (getBlocksList().size() > 0) {
        output.writeUInt32NoTag(18);
        output.writeUInt32NoTag(blocksMemoizedSerializedSize);
      }
      for (int i = 0; i < blocks_.size(); i++) {
        output.writeUInt64NoTag(blocks_.getLong(i));
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        output.writeUInt64(3, numberOfBlocks_);
      }
      for (int i = 0; i < blocksBuffers_.size(); i++) {
        output.writeBytes(4, blocksBuffers_.get(i));
      }
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeMessageSize(1, getStorage());
      }
      {
        int dataSize = 0;
        for (int i = 0; i < blocks_.size(); i++) {
          dataSize += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
            .computeUInt64SizeNoTag(blocks_.getLong(i));
        }
        size += dataSize;
        if (!getBlocksList().isEmpty()) {
          size += 1;
          size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
              .computeInt32SizeNoTag(dataSize);
        }
        blocksMemoizedSerializedSize = dataSize;
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt64Size(3, numberOfBlocks_);
      }
      {
        int dataSize = 0;
        for (int i = 0; i < blocksBuffers_.size(); i++) {
          dataSize += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
            .computeBytesSizeNoTag(blocksBuffers_.get(i));
        }
        size += dataSize;
        size += 1 * getBlocksBuffersList().size();
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto other = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto) obj;

      if (hasStorage() != other.hasStorage()) return false;
      if (hasStorage()) {
        if (!getStorage()
            .equals(other.getStorage())) return false;
      }
      if (!getBlocksList()
          .equals(other.getBlocksList())) return false;
      if (hasNumberOfBlocks() != other.hasNumberOfBlocks()) return false;
      if (hasNumberOfBlocks()) {
        if (getNumberOfBlocks()
            != other.getNumberOfBlocks()) return false;
      }
      if (!getBlocksBuffersList()
          .equals(other.getBlocksBuffersList())) return false;
      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      if (hasStorage()) {
        hash = (37 * hash) + STORAGE_FIELD_NUMBER;
        hash = (53 * hash) + getStorage().hashCode();
      }
      if (getBlocksCount() > 0) {
        hash = (37 * hash) + BLOCKS_FIELD_NUMBER;
        hash = (53 * hash) + getBlocksList().hashCode();
      }
      if (hasNumberOfBlocks()) {
        hash = (37 * hash) + NUMBEROFBLOCKS_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
            getNumberOfBlocks());
      }
      if (getBlocksBuffersCount() > 0) {
        hash = (37 * hash) + BLOCKSBUFFERS_FIELD_NUMBER;
        hash = (53 * hash) + getBlocksBuffersList().hashCode();
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * <pre>
     **
     * Report of blocks in a storage
     * </pre>
     *
     * Protobuf type {@code hadoop.hdfs.datanode.StorageBlockReportProto}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.hdfs.datanode.StorageBlockReportProto)
        org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProtoOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_StorageBlockReportProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_StorageBlockReportProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto.class, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      private void maybeForceBuilderInitialization() {
        if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
                .alwaysUseFieldBuilders) {
          getStorageFieldBuilder();
        }
      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        bitField0_ = 0;
        storage_ = null;
        if (storageBuilder_ != null) {
          storageBuilder_.dispose();
          storageBuilder_ = null;
        }
        blocks_ = emptyLongList();
        numberOfBlocks_ = 0L;
        blocksBuffers_ = emptyList(org.apache.hadoop.thirdparty.protobuf.ByteString.class);
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_StorageBlockReportProto_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto build() {
        org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto buildPartial() {
        org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto result = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto(this);
        if (bitField0_ != 0) { buildPartial0(result); }
        onBuilt();
        return result;
      }

      private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto result) {
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) != 0)) {
          result.storage_ = storageBuilder_ == null
              ? storage_
              : storageBuilder_.build();
          to_bitField0_ |= 0x00000001;
        }
        if (((from_bitField0_ & 0x00000002) != 0)) {
          blocks_.makeImmutable();
          result.blocks_ = blocks_;
        }
        if (((from_bitField0_ & 0x00000004) != 0)) {
          result.numberOfBlocks_ = numberOfBlocks_;
          to_bitField0_ |= 0x00000002;
        }
        if (((from_bitField0_ & 0x00000008) != 0)) {
          blocksBuffers_.makeImmutable();
          result.blocksBuffers_ = blocksBuffers_;
        }
        result.bitField0_ |= to_bitField0_;
      }

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto) {
          return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto other) {
        if (other == org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto.getDefaultInstance()) return this;
        if (other.hasStorage()) {
          mergeStorage(other.getStorage());
        }
        if (!other.blocks_.isEmpty()) {
          if (blocks_.isEmpty()) {
            blocks_ = other.blocks_;
            blocks_.makeImmutable();
            bitField0_ |= 0x00000002;
          } else {
            ensureBlocksIsMutable();
            blocks_.addAll(other.blocks_);
          }
          onChanged();
        }
        if (other.hasNumberOfBlocks()) {
          setNumberOfBlocks(other.getNumberOfBlocks());
        }
        if (!other.blocksBuffers_.isEmpty()) {
          if (blocksBuffers_.isEmpty()) {
            blocksBuffers_ = other.blocksBuffers_;
            blocksBuffers_.makeImmutable();
            bitField0_ |= 0x00000008;
          } else {
            ensureBlocksBuffersIsMutable();
            blocksBuffers_.addAll(other.blocksBuffers_);
          }
          onChanged();
        }
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        if (!hasStorage()) {
          return false;
        }
        if (!getStorage().isInitialized()) {
          return false;
        }
        return true;
      }

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              case 10: {
                input.readMessage(
                    getStorageFieldBuilder().getBuilder(),
                    extensionRegistry);
                bitField0_ |= 0x00000001;
                break;
              } // case 10
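              // Field 2 (repeated uint64 blocks) may arrive unpacked (tag 16 == (2 << 3) | 0,
              // one varint per element, case 16) or packed (tag 18 == (2 << 3) | 2, a single
              // length-delimited run of varints, case 18); both wire encodings are accepted.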
              case 16: {
                long v = input.readUInt64();
                ensureBlocksIsMutable();
                blocks_.addLong(v);
                break;
              } // case 16
              case 18: {
                int length = input.readRawVarint32();
                int limit = input.pushLimit(length);
                ensureBlocksIsMutable();
                while (input.getBytesUntilLimit() > 0) {
                  blocks_.addLong(input.readUInt64());
                }
                input.popLimit(limit);
                break;
              } // case 18
              case 24: {
                numberOfBlocks_ = input.readUInt64();
                bitField0_ |= 0x00000004;
                break;
              } // case 24
              case 34: {
                org.apache.hadoop.thirdparty.protobuf.ByteString v = input.readBytes();
                ensureBlocksBuffersIsMutable();
                blocksBuffers_.add(v);
                break;
              } // case 34
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
      private int bitField0_;

      private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto storage_;
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProtoOrBuilder> storageBuilder_;
      /**
       * <pre>
       * Storage
       * </pre>
       *
       * <code>required .hadoop.hdfs.DatanodeStorageProto storage = 1;</code>
       * @return Whether the storage field is set.
       */
      public boolean hasStorage() {
        return ((bitField0_ & 0x00000001) != 0);
      }
      /**
       * <pre>
       * Storage
       * </pre>
       *
       * <code>required .hadoop.hdfs.DatanodeStorageProto storage = 1;</code>
       * @return The storage.
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto getStorage() {
        if (storageBuilder_ == null) {
          return storage_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.getDefaultInstance() : storage_;
        } else {
          return storageBuilder_.getMessage();
        }
      }
      /**
       * <pre>
       * Storage
       * </pre>
       *
       * <code>required .hadoop.hdfs.DatanodeStorageProto storage = 1;</code>
       */
      public Builder setStorage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto value) {
        if (storageBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          storage_ = value;
        } else {
          storageBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * <pre>
       * Storage
       * </pre>
       *
       * <code>required .hadoop.hdfs.DatanodeStorageProto storage = 1;</code>
       */
      public Builder setStorage(
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.Builder builderForValue) {
        if (storageBuilder_ == null) {
          storage_ = builderForValue.build();
        } else {
          storageBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * <pre>
       * Storage
       * </pre>
       *
       * <code>required .hadoop.hdfs.DatanodeStorageProto storage = 1;</code>
       */
      public Builder mergeStorage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto value) {
        if (storageBuilder_ == null) {
          if (((bitField0_ & 0x00000001) != 0) &&
            storage_ != null &&
            storage_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.getDefaultInstance()) {
            getStorageBuilder().mergeFrom(value);
          } else {
            storage_ = value;
          }
        } else {
          storageBuilder_.mergeFrom(value);
        }
        if (storage_ != null) {
          bitField0_ |= 0x00000001;
          onChanged();
        }
        return this;
      }
      /**
       * <pre>
       * Storage
       * </pre>
       *
       * <code>required .hadoop.hdfs.DatanodeStorageProto storage = 1;</code>
       */
      public Builder clearStorage() {
        bitField0_ = (bitField0_ & ~0x00000001);
        storage_ = null;
        if (storageBuilder_ != null) {
          storageBuilder_.dispose();
          storageBuilder_ = null;
        }
        onChanged();
        return this;
      }
      /**
       * <pre>
       * Storage
       * </pre>
       *
       * <code>required .hadoop.hdfs.DatanodeStorageProto storage = 1;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.Builder getStorageBuilder() {
        bitField0_ |= 0x00000001;
        onChanged();
        return getStorageFieldBuilder().getBuilder();
      }
      /**
       * <pre>
       * Storage
       * </pre>
       *
       * <code>required .hadoop.hdfs.DatanodeStorageProto storage = 1;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProtoOrBuilder getStorageOrBuilder() {
        if (storageBuilder_ != null) {
          return storageBuilder_.getMessageOrBuilder();
        } else {
          return storage_ == null ?
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.getDefaultInstance() : storage_;
        }
      }
      /**
       * <pre>
       * Storage
       * </pre>
       *
       * <code>required .hadoop.hdfs.DatanodeStorageProto storage = 1;</code>
       */
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProtoOrBuilder> 
          getStorageFieldBuilder() {
        if (storageBuilder_ == null) {
          storageBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProtoOrBuilder>(
                  getStorage(),
                  getParentForChildren(),
                  isClean());
          storage_ = null;
        }
        return storageBuilder_;
      }

      private org.apache.hadoop.thirdparty.protobuf.Internal.LongList blocks_ = emptyLongList();
      private void ensureBlocksIsMutable() {
        if (!blocks_.isModifiable()) {
          blocks_ = makeMutableCopy(blocks_);
        }
        bitField0_ |= 0x00000002;
      }
      /**
       * <code>repeated uint64 blocks = 2 [packed = true];</code>
       * @return A list containing the blocks.
       */
      public java.util.List<java.lang.Long>
          getBlocksList() {
        blocks_.makeImmutable();
        return blocks_;
      }
      /**
       * <code>repeated uint64 blocks = 2 [packed = true];</code>
       * @return The count of blocks.
       */
      public int getBlocksCount() {
        return blocks_.size();
      }
      /**
       * <code>repeated uint64 blocks = 2 [packed = true];</code>
       * @param index The index of the element to return.
       * @return The blocks at the given index.
       */
      public long getBlocks(int index) {
        return blocks_.getLong(index);
      }
      /**
       * <code>repeated uint64 blocks = 2 [packed = true];</code>
       * @param index The index to set the value at.
       * @param value The blocks to set.
       * @return This builder for chaining.
       */
      public Builder setBlocks(
          int index, long value) {

        ensureBlocksIsMutable();
        blocks_.setLong(index, value);
        bitField0_ |= 0x00000002;
        onChanged();
        return this;
      }
      /**
       * <code>repeated uint64 blocks = 2 [packed = true];</code>
       * @param value The blocks to add.
       * @return This builder for chaining.
       */
      public Builder addBlocks(long value) {

        ensureBlocksIsMutable();
        blocks_.addLong(value);
        bitField0_ |= 0x00000002;
        onChanged();
        return this;
      }
      /**
       * <code>repeated uint64 blocks = 2 [packed = true];</code>
       * @param values The blocks to add.
       * @return This builder for chaining.
       */
      public Builder addAllBlocks(
          java.lang.Iterable<? extends java.lang.Long> values) {
        ensureBlocksIsMutable();
        org.apache.hadoop.thirdparty.protobuf.AbstractMessageLite.Builder.addAll(
            values, blocks_);
        bitField0_ |= 0x00000002;
        onChanged();
        return this;
      }
      /**
       * <code>repeated uint64 blocks = 2 [packed = true];</code>
       * @return This builder for chaining.
       */
      public Builder clearBlocks() {
        blocks_ = emptyLongList();
        bitField0_ = (bitField0_ & ~0x00000002);
        onChanged();
        return this;
      }

      private long numberOfBlocks_ ;
      /**
       * <code>optional uint64 numberOfBlocks = 3;</code>
       * @return Whether the numberOfBlocks field is set.
       */
      @java.lang.Override
      public boolean hasNumberOfBlocks() {
        return ((bitField0_ & 0x00000004) != 0);
      }
      /**
       * <code>optional uint64 numberOfBlocks = 3;</code>
       * @return The numberOfBlocks.
       */
      @java.lang.Override
      public long getNumberOfBlocks() {
        return numberOfBlocks_;
      }
      /**
       * <code>optional uint64 numberOfBlocks = 3;</code>
       * @param value The numberOfBlocks to set.
       * @return This builder for chaining.
       */
      public Builder setNumberOfBlocks(long value) {

        numberOfBlocks_ = value;
        bitField0_ |= 0x00000004;
        onChanged();
        return this;
      }
      /**
       * <code>optional uint64 numberOfBlocks = 3;</code>
       * @return This builder for chaining.
       */
      public Builder clearNumberOfBlocks() {
        bitField0_ = (bitField0_ & ~0x00000004);
        numberOfBlocks_ = 0L;
        onChanged();
        return this;
      }

      private org.apache.hadoop.thirdparty.protobuf.Internal.ProtobufList<org.apache.hadoop.thirdparty.protobuf.ByteString> blocksBuffers_ = emptyList(org.apache.hadoop.thirdparty.protobuf.ByteString.class);
      private void ensureBlocksBuffersIsMutable() {
        if (!blocksBuffers_.isModifiable()) {
          blocksBuffers_ = makeMutableCopy(blocksBuffers_);
        }
        bitField0_ |= 0x00000008;
      }
      /**
       * <code>repeated bytes blocksBuffers = 4;</code>
       * @return A list containing the blocksBuffers.
       */
      public java.util.List<org.apache.hadoop.thirdparty.protobuf.ByteString>
          getBlocksBuffersList() {
        blocksBuffers_.makeImmutable();
        return blocksBuffers_;
      }
      /**
       * <code>repeated bytes blocksBuffers = 4;</code>
       * @return The count of blocksBuffers.
       */
      public int getBlocksBuffersCount() {
        return blocksBuffers_.size();
      }
      /**
       * <code>repeated bytes blocksBuffers = 4;</code>
       * @param index The index of the element to return.
       * @return The blocksBuffers at the given index.
       */
      public org.apache.hadoop.thirdparty.protobuf.ByteString getBlocksBuffers(int index) {
        return blocksBuffers_.get(index);
      }
      /**
       * <code>repeated bytes blocksBuffers = 4;</code>
       * @param index The index to set the value at.
       * @param value The blocksBuffers to set.
       * @return This builder for chaining.
       */
      public Builder setBlocksBuffers(
          int index, org.apache.hadoop.thirdparty.protobuf.ByteString value) {
        if (value == null) { throw new NullPointerException(); }
        ensureBlocksBuffersIsMutable();
        blocksBuffers_.set(index, value);
        bitField0_ |= 0x00000008;
        onChanged();
        return this;
      }
      /**
       * <code>repeated bytes blocksBuffers = 4;</code>
       * @param value The blocksBuffers to add.
       * @return This builder for chaining.
       */
      public Builder addBlocksBuffers(org.apache.hadoop.thirdparty.protobuf.ByteString value) {
        if (value == null) { throw new NullPointerException(); }
        ensureBlocksBuffersIsMutable();
        blocksBuffers_.add(value);
        bitField0_ |= 0x00000008;
        onChanged();
        return this;
      }
      /**
       * <code>repeated bytes blocksBuffers = 4;</code>
       * @param values The blocksBuffers to add.
       * @return This builder for chaining.
       */
      public Builder addAllBlocksBuffers(
          java.lang.Iterable<? extends org.apache.hadoop.thirdparty.protobuf.ByteString> values) {
        ensureBlocksBuffersIsMutable();
        org.apache.hadoop.thirdparty.protobuf.AbstractMessageLite.Builder.addAll(
            values, blocksBuffers_);
        bitField0_ |= 0x00000008;
        onChanged();
        return this;
      }
      /**
       * <code>repeated bytes blocksBuffers = 4;</code>
       * @return This builder for chaining.
       */
      public Builder clearBlocksBuffers() {
        blocksBuffers_ = emptyList(org.apache.hadoop.thirdparty.protobuf.ByteString.class);
        bitField0_ = (bitField0_ & ~0x00000008);
        onChanged();
        return this;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.datanode.StorageBlockReportProto)
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.datanode.StorageBlockReportProto)
    private static final org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto();
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<StorageBlockReportProto>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<StorageBlockReportProto>() {
      @java.lang.Override
      public StorageBlockReportProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<StorageBlockReportProto> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<StorageBlockReportProto> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }
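  // Illustrative only: a minimal sketch of assembling a StorageBlockReportProto with the
  // Builder API above. 'storageProto' stands in for a fully populated DatanodeStorageProto
  // obtained elsewhere; build() rejects the message if the required 'storage' field (or any
  // of its own required sub-fields) is left unset.
  //
  //   StorageBlockReportProto report = StorageBlockReportProto.newBuilder()
  //       .setStorage(storageProto)        // required field 1
  //       .addBlocks(1024L)                // repeated uint64, packed field 2
  //       .addBlocks(2048L)
  //       .setNumberOfBlocks(2L)           // optional uint64, field 3
  //       .build();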

  public interface BlockReportResponseProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.datanode.BlockReportResponseProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

    /**
     * <code>optional .hadoop.hdfs.datanode.DatanodeCommandProto cmd = 1;</code>
     * @return Whether the cmd field is set.
     */
    boolean hasCmd();
    /**
     * <code>optional .hadoop.hdfs.datanode.DatanodeCommandProto cmd = 1;</code>
     * @return The cmd.
     */
    org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto getCmd();
    /**
     * <code>optional .hadoop.hdfs.datanode.DatanodeCommandProto cmd = 1;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProtoOrBuilder getCmdOrBuilder();
  }
  /**
   * <pre>
   **
   * cmd - Command from namenode to the datanode
   * </pre>
   *
   * Protobuf type {@code hadoop.hdfs.datanode.BlockReportResponseProto}
   */
  public static final class BlockReportResponseProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.hdfs.datanode.BlockReportResponseProto)
      BlockReportResponseProtoOrBuilder {
  private static final long serialVersionUID = 0L;
    // Use BlockReportResponseProto.newBuilder() to construct.
    private BlockReportResponseProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private BlockReportResponseProto() {
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new BlockReportResponseProto();
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_BlockReportResponseProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_BlockReportResponseProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto.Builder.class);
    }

    private int bitField0_;
    public static final int CMD_FIELD_NUMBER = 1;
    private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto cmd_;
    /**
     * <code>optional .hadoop.hdfs.datanode.DatanodeCommandProto cmd = 1;</code>
     * @return Whether the cmd field is set.
     */
    @java.lang.Override
    public boolean hasCmd() {
      return ((bitField0_ & 0x00000001) != 0);
    }
    /**
     * <code>optional .hadoop.hdfs.datanode.DatanodeCommandProto cmd = 1;</code>
     * @return The cmd.
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto getCmd() {
      return cmd_ == null ? org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.getDefaultInstance() : cmd_;
    }
    /**
     * <code>optional .hadoop.hdfs.datanode.DatanodeCommandProto cmd = 1;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProtoOrBuilder getCmdOrBuilder() {
      return cmd_ == null ? org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.getDefaultInstance() : cmd_;
    }

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      if (hasCmd()) {
        if (!getCmd().isInitialized()) {
          memoizedIsInitialized = 0;
          return false;
        }
      }
      memoizedIsInitialized = 1;
      return true;
    }

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      if (((bitField0_ & 0x00000001) != 0)) {
        output.writeMessage(1, getCmd());
      }
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeMessageSize(1, getCmd());
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto) obj;

      if (hasCmd() != other.hasCmd()) return false;
      if (hasCmd()) {
        if (!getCmd()
            .equals(other.getCmd())) return false;
      }
      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      if (hasCmd()) {
        hash = (37 * hash) + CMD_FIELD_NUMBER;
        hash = (53 * hash) + getCmd().hashCode();
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * <pre>
     **
     * cmd - Command from namenode to the datanode
     * </pre>
     *
     * Protobuf type {@code hadoop.hdfs.datanode.BlockReportResponseProto}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.hdfs.datanode.BlockReportResponseProto)
        org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProtoOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_BlockReportResponseProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_BlockReportResponseProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      private void maybeForceBuilderInitialization() {
        if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
                .alwaysUseFieldBuilders) {
          getCmdFieldBuilder();
        }
      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        bitField0_ = 0;
        cmd_ = null;
        if (cmdBuilder_ != null) {
          cmdBuilder_.dispose();
          cmdBuilder_ = null;
        }
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_BlockReportResponseProto_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto build() {
        org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto buildPartial() {
        org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto(this);
        if (bitField0_ != 0) { buildPartial0(result); }
        onBuilt();
        return result;
      }

      private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto result) {
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) != 0)) {
          result.cmd_ = cmdBuilder_ == null
              ? cmd_
              : cmdBuilder_.build();
          to_bitField0_ |= 0x00000001;
        }
        result.bitField0_ |= to_bitField0_;
      }

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto) {
          return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto other) {
        if (other == org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto.getDefaultInstance()) return this;
        if (other.hasCmd()) {
          mergeCmd(other.getCmd());
        }
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        if (hasCmd()) {
          if (!getCmd().isInitialized()) {
            return false;
          }
        }
        return true;
      }

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              case 10: {
                input.readMessage(
                    getCmdFieldBuilder().getBuilder(),
                    extensionRegistry);
                bitField0_ |= 0x00000001;
                break;
              } // case 10
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
      private int bitField0_;

      private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto cmd_;
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProtoOrBuilder> cmdBuilder_;
      /**
       * <code>optional .hadoop.hdfs.datanode.DatanodeCommandProto cmd = 1;</code>
       * @return Whether the cmd field is set.
       */
      public boolean hasCmd() {
        return ((bitField0_ & 0x00000001) != 0);
      }
      /**
       * <code>optional .hadoop.hdfs.datanode.DatanodeCommandProto cmd = 1;</code>
       * @return The cmd.
       */
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto getCmd() {
        if (cmdBuilder_ == null) {
          return cmd_ == null ? org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.getDefaultInstance() : cmd_;
        } else {
          return cmdBuilder_.getMessage();
        }
      }
      /**
       * <code>optional .hadoop.hdfs.datanode.DatanodeCommandProto cmd = 1;</code>
       */
      public Builder setCmd(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto value) {
        if (cmdBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          cmd_ = value;
        } else {
          cmdBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.datanode.DatanodeCommandProto cmd = 1;</code>
       */
      public Builder setCmd(
          org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Builder builderForValue) {
        if (cmdBuilder_ == null) {
          cmd_ = builderForValue.build();
        } else {
          cmdBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.datanode.DatanodeCommandProto cmd = 1;</code>
       */
      public Builder mergeCmd(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto value) {
        if (cmdBuilder_ == null) {
          if (((bitField0_ & 0x00000001) != 0) &&
            cmd_ != null &&
            cmd_ != org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.getDefaultInstance()) {
            getCmdBuilder().mergeFrom(value);
          } else {
            cmd_ = value;
          }
        } else {
          cmdBuilder_.mergeFrom(value);
        }
        if (cmd_ != null) {
          bitField0_ |= 0x00000001;
          onChanged();
        }
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.datanode.DatanodeCommandProto cmd = 1;</code>
       */
      public Builder clearCmd() {
        bitField0_ = (bitField0_ & ~0x00000001);
        cmd_ = null;
        if (cmdBuilder_ != null) {
          cmdBuilder_.dispose();
          cmdBuilder_ = null;
        }
        onChanged();
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.datanode.DatanodeCommandProto cmd = 1;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Builder getCmdBuilder() {
        bitField0_ |= 0x00000001;
        onChanged();
        return getCmdFieldBuilder().getBuilder();
      }
      /**
       * <code>optional .hadoop.hdfs.datanode.DatanodeCommandProto cmd = 1;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProtoOrBuilder getCmdOrBuilder() {
        if (cmdBuilder_ != null) {
          return cmdBuilder_.getMessageOrBuilder();
        } else {
          return cmd_ == null ?
              org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.getDefaultInstance() : cmd_;
        }
      }
      /**
       * <code>optional .hadoop.hdfs.datanode.DatanodeCommandProto cmd = 1;</code>
       */
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProtoOrBuilder> 
          getCmdFieldBuilder() {
        if (cmdBuilder_ == null) {
          cmdBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
              org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProtoOrBuilder>(
                  getCmd(),
                  getParentForChildren(),
                  isClean());
          cmd_ = null;
        }
        return cmdBuilder_;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.datanode.BlockReportResponseProto)
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.datanode.BlockReportResponseProto)
    private static final org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto();
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<BlockReportResponseProto>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<BlockReportResponseProto>() {
      @java.lang.Override
      public BlockReportResponseProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<BlockReportResponseProto> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<BlockReportResponseProto> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }
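  // Illustrative only: a minimal sketch of consuming a BlockReportResponseProto, assuming
  // 'responseBytes' holds a serialized message received over RPC. The 'cmd' field is
  // optional, so callers check hasCmd() before reading it.
  //
  //   BlockReportResponseProto response = BlockReportResponseProto.parseFrom(responseBytes);
  //   if (response.hasCmd()) {
  //     DatanodeCommandProto cmd = response.getCmd();
  //     // dispatch the namenode command ...
  //   }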

  public interface CacheReportRequestProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.datanode.CacheReportRequestProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

    /**
     * <code>required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;</code>
     * @return Whether the registration field is set.
     */
    boolean hasRegistration();
    /**
     * <code>required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;</code>
     * @return The registration.
     */
    org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto getRegistration();
    /**
     * <code>required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder getRegistrationOrBuilder();

    /**
     * <code>required string blockPoolId = 2;</code>
     * @return Whether the blockPoolId field is set.
     */
    boolean hasBlockPoolId();
    /**
     * <code>required string blockPoolId = 2;</code>
     * @return The blockPoolId.
     */
    java.lang.String getBlockPoolId();
    /**
     * <code>required string blockPoolId = 2;</code>
     * @return The bytes for blockPoolId.
     */
    org.apache.hadoop.thirdparty.protobuf.ByteString
        getBlockPoolIdBytes();

    /**
     * <code>repeated uint64 blocks = 3 [packed = true];</code>
     * @return A list containing the blocks.
     */
    java.util.List<java.lang.Long> getBlocksList();
    /**
     * <code>repeated uint64 blocks = 3 [packed = true];</code>
     * @return The count of blocks.
     */
    int getBlocksCount();
    /**
     * <code>repeated uint64 blocks = 3 [packed = true];</code>
     * @param index The index of the element to return.
     * @return The blocks at the given index.
     */
    long getBlocks(int index);
  }
  /**
   * <pre>
   **
   * registration - datanode registration information
   * blockPoolId  - block pool ID of the reported blocks
   * blocks       - representation of blocks as longs for efficiency reasons
   * </pre>
   *
   * Protobuf type {@code hadoop.hdfs.datanode.CacheReportRequestProto}
   */
  public static final class CacheReportRequestProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.hdfs.datanode.CacheReportRequestProto)
      CacheReportRequestProtoOrBuilder {
  private static final long serialVersionUID = 0L;
    // Use CacheReportRequestProto.newBuilder() to construct.
    private CacheReportRequestProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private CacheReportRequestProto() {
      blockPoolId_ = "";
      blocks_ = emptyLongList();
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new CacheReportRequestProto();
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_CacheReportRequestProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_CacheReportRequestProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportRequestProto.Builder.class);
    }

    private int bitField0_;
    public static final int REGISTRATION_FIELD_NUMBER = 1;
    private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto registration_;
    /**
     * <code>required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;</code>
     * @return Whether the registration field is set.
     */
    @java.lang.Override
    public boolean hasRegistration() {
      return ((bitField0_ & 0x00000001) != 0);
    }
    /**
     * <code>required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;</code>
     * @return The registration.
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto getRegistration() {
      return registration_ == null ? org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance() : registration_;
    }
    /**
     * <code>required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder getRegistrationOrBuilder() {
      return registration_ == null ? org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance() : registration_;
    }

    public static final int BLOCKPOOLID_FIELD_NUMBER = 2;
    @SuppressWarnings("serial")
    private volatile java.lang.Object blockPoolId_ = "";
    /**
     * <code>required string blockPoolId = 2;</code>
     * @return Whether the blockPoolId field is set.
     */
    @java.lang.Override
    public boolean hasBlockPoolId() {
      return ((bitField0_ & 0x00000002) != 0);
    }
    /**
     * <code>required string blockPoolId = 2;</code>
     * @return The blockPoolId.
     */
    @java.lang.Override
    public java.lang.String getBlockPoolId() {
      java.lang.Object ref = blockPoolId_;
      if (ref instanceof java.lang.String) {
        return (java.lang.String) ref;
      } else {
        org.apache.hadoop.thirdparty.protobuf.ByteString bs = 
            (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        java.lang.String s = bs.toStringUtf8();
        if (bs.isValidUtf8()) {
          blockPoolId_ = s;
        }
        return s;
      }
    }
    /**
     * <code>required string blockPoolId = 2;</code>
     * @return The bytes for blockPoolId.
     */
    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.ByteString
        getBlockPoolIdBytes() {
      java.lang.Object ref = blockPoolId_;
      if (ref instanceof java.lang.String) {
        org.apache.hadoop.thirdparty.protobuf.ByteString b = 
            org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                (java.lang.String) ref);
        blockPoolId_ = b;
        return b;
      } else {
        return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
      }
    }

    public static final int BLOCKS_FIELD_NUMBER = 3;
    @SuppressWarnings("serial")
    private org.apache.hadoop.thirdparty.protobuf.Internal.LongList blocks_ =
        emptyLongList();
    /**
     * <code>repeated uint64 blocks = 3 [packed = true];</code>
     * @return A list containing the blocks.
     */
    @java.lang.Override
    public java.util.List<java.lang.Long>
        getBlocksList() {
      return blocks_;
    }
    /**
     * <code>repeated uint64 blocks = 3 [packed = true];</code>
     * @return The count of blocks.
     */
    public int getBlocksCount() {
      return blocks_.size();
    }
    /**
     * <code>repeated uint64 blocks = 3 [packed = true];</code>
     * @param index The index of the element to return.
     * @return The blocks at the given index.
     */
    public long getBlocks(int index) {
      return blocks_.getLong(index);
    }
    private int blocksMemoizedSerializedSize = -1;

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      if (!hasRegistration()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasBlockPoolId()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!getRegistration().isInitialized()) {
        memoizedIsInitialized = 0;
        return false;
      }
      memoizedIsInitialized = 1;
      return true;
    }

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      getSerializedSize();
      if (((bitField0_ & 0x00000001) != 0)) {
        output.writeMessage(1, getRegistration());
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 2, blockPoolId_);
      }
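      // The packed 'blocks' field (field 3) is written as one length-delimited record:
      // tag 26 == (3 << 3) | 2, then the payload length memoized by getSerializedSize()
      // (invoked at the top of this method), then the raw uint64 varints.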
      if (getBlocksList().size() > 0) {
        output.writeUInt32NoTag(26);
        output.writeUInt32NoTag(blocksMemoizedSerializedSize);
      }
      for (int i = 0; i < blocks_.size(); i++) {
        output.writeUInt64NoTag(blocks_.getLong(i));
      }
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeMessageSize(1, getRegistration());
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(2, blockPoolId_);
      }
      {
        int dataSize = 0;
        for (int i = 0; i < blocks_.size(); i++) {
          dataSize += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
            .computeUInt64SizeNoTag(blocks_.getLong(i));
        }
        size += dataSize;
        if (!getBlocksList().isEmpty()) {
          size += 1;
          size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
              .computeInt32SizeNoTag(dataSize);
        }
        blocksMemoizedSerializedSize = dataSize;
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportRequestProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportRequestProto) obj;

      if (hasRegistration() != other.hasRegistration()) return false;
      if (hasRegistration()) {
        if (!getRegistration()
            .equals(other.getRegistration())) return false;
      }
      if (hasBlockPoolId() != other.hasBlockPoolId()) return false;
      if (hasBlockPoolId()) {
        if (!getBlockPoolId()
            .equals(other.getBlockPoolId())) return false;
      }
      if (!getBlocksList()
          .equals(other.getBlocksList())) return false;
      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      if (hasRegistration()) {
        hash = (37 * hash) + REGISTRATION_FIELD_NUMBER;
        hash = (53 * hash) + getRegistration().hashCode();
      }
      if (hasBlockPoolId()) {
        hash = (37 * hash) + BLOCKPOOLID_FIELD_NUMBER;
        hash = (53 * hash) + getBlockPoolId().hashCode();
      }
      if (getBlocksCount() > 0) {
        hash = (37 * hash) + BLOCKS_FIELD_NUMBER;
        hash = (53 * hash) + getBlocksList().hashCode();
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportRequestProto parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportRequestProto parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportRequestProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportRequestProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportRequestProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportRequestProto parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportRequestProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportRequestProto parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportRequestProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportRequestProto parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportRequestProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportRequestProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportRequestProto prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * <pre>
     **
     * registration - datanode registration information
     * blockPoolId  - block pool ID of the reported blocks
     * blocks       - representation of blocks as longs for efficiency reasons
     * </pre>
     *
     * Protobuf type {@code hadoop.hdfs.datanode.CacheReportRequestProto}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.hdfs.datanode.CacheReportRequestProto)
        org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportRequestProtoOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_CacheReportRequestProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_CacheReportRequestProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportRequestProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportRequestProto.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      private void maybeForceBuilderInitialization() {
        if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
                .alwaysUseFieldBuilders) {
          getRegistrationFieldBuilder();
        }
      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        bitField0_ = 0;
        registration_ = null;
        if (registrationBuilder_ != null) {
          registrationBuilder_.dispose();
          registrationBuilder_ = null;
        }
        blockPoolId_ = "";
        blocks_ = emptyLongList();
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_CacheReportRequestProto_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportRequestProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportRequestProto.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportRequestProto build() {
        org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportRequestProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportRequestProto buildPartial() {
        org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportRequestProto(this);
        if (bitField0_ != 0) { buildPartial0(result); }
        onBuilt();
        return result;
      }

      private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportRequestProto result) {
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) != 0)) {
          result.registration_ = registrationBuilder_ == null
              ? registration_
              : registrationBuilder_.build();
          to_bitField0_ |= 0x00000001;
        }
        if (((from_bitField0_ & 0x00000002) != 0)) {
          result.blockPoolId_ = blockPoolId_;
          to_bitField0_ |= 0x00000002;
        }
        if (((from_bitField0_ & 0x00000004) != 0)) {
          blocks_.makeImmutable();
          result.blocks_ = blocks_;
        }
        result.bitField0_ |= to_bitField0_;
      }

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportRequestProto) {
          return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportRequestProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportRequestProto other) {
        if (other == org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportRequestProto.getDefaultInstance()) return this;
        if (other.hasRegistration()) {
          mergeRegistration(other.getRegistration());
        }
        if (other.hasBlockPoolId()) {
          blockPoolId_ = other.blockPoolId_;
          bitField0_ |= 0x00000002;
          onChanged();
        }
        if (!other.blocks_.isEmpty()) {
          if (blocks_.isEmpty()) {
            blocks_ = other.blocks_;
            blocks_.makeImmutable();
            bitField0_ |= 0x00000004;
          } else {
            ensureBlocksIsMutable();
            blocks_.addAll(other.blocks_);
          }
          onChanged();
        }
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        if (!hasRegistration()) {
          return false;
        }
        if (!hasBlockPoolId()) {
          return false;
        }
        if (!getRegistration().isInitialized()) {
          return false;
        }
        return true;
      }

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              case 10: {
                input.readMessage(
                    getRegistrationFieldBuilder().getBuilder(),
                    extensionRegistry);
                bitField0_ |= 0x00000001;
                break;
              } // case 10
              case 18: {
                blockPoolId_ = input.readBytes();
                bitField0_ |= 0x00000002;
                break;
              } // case 18
              case 24: {
                long v = input.readUInt64();
                ensureBlocksIsMutable();
                blocks_.addLong(v);
                break;
              } // case 24
              case 26: {
                int length = input.readRawVarint32();
                int limit = input.pushLimit(length);
                ensureBlocksIsMutable();
                while (input.getBytesUntilLimit() > 0) {
                  blocks_.addLong(input.readUInt64());
                }
                input.popLimit(limit);
                break;
              } // case 26
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
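      // Each case above matches a wire-format tag of (fieldNumber << 3) | wireType:
      // 10 = registration (field 1, length-delimited message), 18 = blockPoolId
      // (field 2, length-delimited string), 24 = a single unpacked uint64 block
      // (field 3, varint), and 26 = the packed form of field 3, read as a
      // length-delimited run of varints. Tag 0 marks end of input, and any
      // unrecognized tag is preserved through parseUnknownField.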
      private int bitField0_;

      private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto registration_;
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder> registrationBuilder_;
      /**
       * <code>required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;</code>
       * @return Whether the registration field is set.
       */
      public boolean hasRegistration() {
        return ((bitField0_ & 0x00000001) != 0);
      }
      /**
       * <code>required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;</code>
       * @return The registration.
       */
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto getRegistration() {
        if (registrationBuilder_ == null) {
          return registration_ == null ? org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance() : registration_;
        } else {
          return registrationBuilder_.getMessage();
        }
      }
      /**
       * <code>required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;</code>
       */
      public Builder setRegistration(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto value) {
        if (registrationBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          registration_ = value;
        } else {
          registrationBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;</code>
       */
      public Builder setRegistration(
          org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder builderForValue) {
        if (registrationBuilder_ == null) {
          registration_ = builderForValue.build();
        } else {
          registrationBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;</code>
       */
      public Builder mergeRegistration(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto value) {
        if (registrationBuilder_ == null) {
          if (((bitField0_ & 0x00000001) != 0) &&
            registration_ != null &&
            registration_ != org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance()) {
            getRegistrationBuilder().mergeFrom(value);
          } else {
            registration_ = value;
          }
        } else {
          registrationBuilder_.mergeFrom(value);
        }
        if (registration_ != null) {
          bitField0_ |= 0x00000001;
          onChanged();
        }
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;</code>
       */
      public Builder clearRegistration() {
        bitField0_ = (bitField0_ & ~0x00000001);
        registration_ = null;
        if (registrationBuilder_ != null) {
          registrationBuilder_.dispose();
          registrationBuilder_ = null;
        }
        onChanged();
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder getRegistrationBuilder() {
        bitField0_ |= 0x00000001;
        onChanged();
        return getRegistrationFieldBuilder().getBuilder();
      }
      /**
       * <code>required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder getRegistrationOrBuilder() {
        if (registrationBuilder_ != null) {
          return registrationBuilder_.getMessageOrBuilder();
        } else {
          return registration_ == null ?
              org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance() : registration_;
        }
      }
      /**
       * <code>required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;</code>
       */
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder> 
          getRegistrationFieldBuilder() {
        if (registrationBuilder_ == null) {
          registrationBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
              org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder>(
                  getRegistration(),
                  getParentForChildren(),
                  isClean());
          registration_ = null;
        }
        return registrationBuilder_;
      }

      private java.lang.Object blockPoolId_ = "";
      /**
       * <code>required string blockPoolId = 2;</code>
       * @return Whether the blockPoolId field is set.
       */
      public boolean hasBlockPoolId() {
        return ((bitField0_ & 0x00000002) != 0);
      }
      /**
       * <code>required string blockPoolId = 2;</code>
       * @return The blockPoolId.
       */
      public java.lang.String getBlockPoolId() {
        java.lang.Object ref = blockPoolId_;
        if (!(ref instanceof java.lang.String)) {
          org.apache.hadoop.thirdparty.protobuf.ByteString bs =
              (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
          java.lang.String s = bs.toStringUtf8();
          if (bs.isValidUtf8()) {
            blockPoolId_ = s;
          }
          return s;
        } else {
          return (java.lang.String) ref;
        }
      }
      /**
       * <code>required string blockPoolId = 2;</code>
       * @return The bytes for blockPoolId.
       */
      public org.apache.hadoop.thirdparty.protobuf.ByteString
          getBlockPoolIdBytes() {
        java.lang.Object ref = blockPoolId_;
        if (ref instanceof String) {
          org.apache.hadoop.thirdparty.protobuf.ByteString b = 
              org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                  (java.lang.String) ref);
          blockPoolId_ = b;
          return b;
        } else {
          return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        }
      }
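      // blockPoolId_ is held as either a String or a ByteString; the two
      // accessors above convert lazily and cache the converted form, so
      // repeated calls avoid further copying. A ByteString that is not valid
      // UTF-8 is decoded on each getBlockPoolId() call but never cached back.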
      /**
       * <code>required string blockPoolId = 2;</code>
       * @param value The blockPoolId to set.
       * @return This builder for chaining.
       */
      public Builder setBlockPoolId(
          java.lang.String value) {
        if (value == null) { throw new NullPointerException(); }
        blockPoolId_ = value;
        bitField0_ |= 0x00000002;
        onChanged();
        return this;
      }
      /**
       * <code>required string blockPoolId = 2;</code>
       * @return This builder for chaining.
       */
      public Builder clearBlockPoolId() {
        blockPoolId_ = getDefaultInstance().getBlockPoolId();
        bitField0_ = (bitField0_ & ~0x00000002);
        onChanged();
        return this;
      }
      /**
       * <code>required string blockPoolId = 2;</code>
       * @param value The bytes for blockPoolId to set.
       * @return This builder for chaining.
       */
      public Builder setBlockPoolIdBytes(
          org.apache.hadoop.thirdparty.protobuf.ByteString value) {
        if (value == null) { throw new NullPointerException(); }
        blockPoolId_ = value;
        bitField0_ |= 0x00000002;
        onChanged();
        return this;
      }

      private org.apache.hadoop.thirdparty.protobuf.Internal.LongList blocks_ = emptyLongList();
      private void ensureBlocksIsMutable() {
        if (!blocks_.isModifiable()) {
          blocks_ = makeMutableCopy(blocks_);
        }
        bitField0_ |= 0x00000004;
      }
      /**
       * <code>repeated uint64 blocks = 3 [packed = true];</code>
       * @return A list containing the blocks.
       */
      public java.util.List<java.lang.Long>
          getBlocksList() {
        blocks_.makeImmutable();
        return blocks_;
      }
      /**
       * <code>repeated uint64 blocks = 3 [packed = true];</code>
       * @return The count of blocks.
       */
      public int getBlocksCount() {
        return blocks_.size();
      }
      /**
       * <code>repeated uint64 blocks = 3 [packed = true];</code>
       * @param index The index of the element to return.
       * @return The blocks at the given index.
       */
      public long getBlocks(int index) {
        return blocks_.getLong(index);
      }
      /**
       * <code>repeated uint64 blocks = 3 [packed = true];</code>
       * @param index The index to set the value at.
       * @param value The blocks to set.
       * @return This builder for chaining.
       */
      public Builder setBlocks(
          int index, long value) {

        ensureBlocksIsMutable();
        blocks_.setLong(index, value);
        bitField0_ |= 0x00000004;
        onChanged();
        return this;
      }
      /**
       * <code>repeated uint64 blocks = 3 [packed = true];</code>
       * @param value The blocks to add.
       * @return This builder for chaining.
       */
      public Builder addBlocks(long value) {

        ensureBlocksIsMutable();
        blocks_.addLong(value);
        bitField0_ |= 0x00000004;
        onChanged();
        return this;
      }
      /**
       * <code>repeated uint64 blocks = 3 [packed = true];</code>
       * @param values The blocks to add.
       * @return This builder for chaining.
       */
      public Builder addAllBlocks(
          java.lang.Iterable<? extends java.lang.Long> values) {
        ensureBlocksIsMutable();
        org.apache.hadoop.thirdparty.protobuf.AbstractMessageLite.Builder.addAll(
            values, blocks_);
        bitField0_ |= 0x00000004;
        onChanged();
        return this;
      }
      /**
       * <code>repeated uint64 blocks = 3 [packed = true];</code>
       * @return This builder for chaining.
       */
      public Builder clearBlocks() {
        blocks_ = emptyLongList();
        bitField0_ = (bitField0_ & ~0x00000004);
        onChanged();
        return this;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.datanode.CacheReportRequestProto)
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.datanode.CacheReportRequestProto)
    private static final org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportRequestProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportRequestProto();
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportRequestProto getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<CacheReportRequestProto>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<CacheReportRequestProto>() {
      @java.lang.Override
      public CacheReportRequestProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };
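    // parsePartialFrom above delegates to Builder#mergeFrom and rethrows every
    // failure as InvalidProtocolBufferException, attaching the partially built
    // message via setUnfinishedMessage so callers can inspect whatever fields
    // were read before the error.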

    public static org.apache.hadoop.thirdparty.protobuf.Parser<CacheReportRequestProto> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<CacheReportRequestProto> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportRequestProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }
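
  // A minimal builder-usage sketch for CacheReportRequestProto. It assumes a
  // populated DatanodeRegistrationProto named "reg" and an illustrative block
  // pool ID; both are placeholders, not values taken from this file:
  //
  //   CacheReportRequestProto request = CacheReportRequestProto.newBuilder()
  //       .setRegistration(reg)
  //       .setBlockPoolId("BP-example")
  //       .addBlocks(1L)
  //       .addBlocks(300L)
  //       .build();
  //
  // build() throws if either required field (registration or blockPoolId) is
  // unset, mirroring isInitialized(); buildPartial() performs no such check.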

  public interface CacheReportResponseProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.datanode.CacheReportResponseProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

    /**
     * <code>optional .hadoop.hdfs.datanode.DatanodeCommandProto cmd = 1;</code>
     * @return Whether the cmd field is set.
     */
    boolean hasCmd();
    /**
     * <code>optional .hadoop.hdfs.datanode.DatanodeCommandProto cmd = 1;</code>
     * @return The cmd.
     */
    org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto getCmd();
    /**
     * <code>optional .hadoop.hdfs.datanode.DatanodeCommandProto cmd = 1;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProtoOrBuilder getCmdOrBuilder();
  }
  /**
   * Protobuf type {@code hadoop.hdfs.datanode.CacheReportResponseProto}
   */
  public static final class CacheReportResponseProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.hdfs.datanode.CacheReportResponseProto)
      CacheReportResponseProtoOrBuilder {
    private static final long serialVersionUID = 0L;
    // Use CacheReportResponseProto.newBuilder() to construct.
    private CacheReportResponseProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private CacheReportResponseProto() {
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new CacheReportResponseProto();
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_CacheReportResponseProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_CacheReportResponseProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportResponseProto.Builder.class);
    }

    private int bitField0_;
    public static final int CMD_FIELD_NUMBER = 1;
    private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto cmd_;
    /**
     * <code>optional .hadoop.hdfs.datanode.DatanodeCommandProto cmd = 1;</code>
     * @return Whether the cmd field is set.
     */
    @java.lang.Override
    public boolean hasCmd() {
      return ((bitField0_ & 0x00000001) != 0);
    }
    /**
     * <code>optional .hadoop.hdfs.datanode.DatanodeCommandProto cmd = 1;</code>
     * @return The cmd.
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto getCmd() {
      return cmd_ == null ? org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.getDefaultInstance() : cmd_;
    }
    /**
     * <code>optional .hadoop.hdfs.datanode.DatanodeCommandProto cmd = 1;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProtoOrBuilder getCmdOrBuilder() {
      return cmd_ == null ? org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.getDefaultInstance() : cmd_;
    }

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      if (hasCmd()) {
        if (!getCmd().isInitialized()) {
          memoizedIsInitialized = 0;
          return false;
        }
      }
      memoizedIsInitialized = 1;
      return true;
    }

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      if (((bitField0_ & 0x00000001) != 0)) {
        output.writeMessage(1, getCmd());
      }
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeMessageSize(1, getCmd());
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportResponseProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportResponseProto) obj;

      if (hasCmd() != other.hasCmd()) return false;
      if (hasCmd()) {
        if (!getCmd()
            .equals(other.getCmd())) return false;
      }
      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      if (hasCmd()) {
        hash = (37 * hash) + CMD_FIELD_NUMBER;
        hash = (53 * hash) + getCmd().hashCode();
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportResponseProto parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportResponseProto parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportResponseProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportResponseProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportResponseProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportResponseProto parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportResponseProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportResponseProto parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportResponseProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportResponseProto parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportResponseProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportResponseProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportResponseProto prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * Protobuf type {@code hadoop.hdfs.datanode.CacheReportResponseProto}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.hdfs.datanode.CacheReportResponseProto)
        org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportResponseProtoOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_CacheReportResponseProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_CacheReportResponseProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportResponseProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportResponseProto.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      private void maybeForceBuilderInitialization() {
        if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
                .alwaysUseFieldBuilders) {
          getCmdFieldBuilder();
        }
      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        bitField0_ = 0;
        cmd_ = null;
        if (cmdBuilder_ != null) {
          cmdBuilder_.dispose();
          cmdBuilder_ = null;
        }
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_CacheReportResponseProto_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportResponseProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportResponseProto.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportResponseProto build() {
        org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportResponseProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportResponseProto buildPartial() {
        org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportResponseProto(this);
        if (bitField0_ != 0) { buildPartial0(result); }
        onBuilt();
        return result;
      }

      private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportResponseProto result) {
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) != 0)) {
          result.cmd_ = cmdBuilder_ == null
              ? cmd_
              : cmdBuilder_.build();
          to_bitField0_ |= 0x00000001;
        }
        result.bitField0_ |= to_bitField0_;
      }

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportResponseProto) {
          return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportResponseProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportResponseProto other) {
        if (other == org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportResponseProto.getDefaultInstance()) return this;
        if (other.hasCmd()) {
          mergeCmd(other.getCmd());
        }
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        if (hasCmd()) {
          if (!getCmd().isInitialized()) {
            return false;
          }
        }
        return true;
      }

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              case 10: {
                input.readMessage(
                    getCmdFieldBuilder().getBuilder(),
                    extensionRegistry);
                bitField0_ |= 0x00000001;
                break;
              } // case 10
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
      private int bitField0_;

      private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto cmd_;
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProtoOrBuilder> cmdBuilder_;
      /**
       * <code>optional .hadoop.hdfs.datanode.DatanodeCommandProto cmd = 1;</code>
       * @return Whether the cmd field is set.
       */
      public boolean hasCmd() {
        return ((bitField0_ & 0x00000001) != 0);
      }
      /**
       * <code>optional .hadoop.hdfs.datanode.DatanodeCommandProto cmd = 1;</code>
       * @return The cmd.
       */
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto getCmd() {
        if (cmdBuilder_ == null) {
          return cmd_ == null ? org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.getDefaultInstance() : cmd_;
        } else {
          return cmdBuilder_.getMessage();
        }
      }
      /**
       * <code>optional .hadoop.hdfs.datanode.DatanodeCommandProto cmd = 1;</code>
       */
      public Builder setCmd(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto value) {
        if (cmdBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          cmd_ = value;
        } else {
          cmdBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.datanode.DatanodeCommandProto cmd = 1;</code>
       */
      public Builder setCmd(
          org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Builder builderForValue) {
        if (cmdBuilder_ == null) {
          cmd_ = builderForValue.build();
        } else {
          cmdBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.datanode.DatanodeCommandProto cmd = 1;</code>
       */
      public Builder mergeCmd(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto value) {
        if (cmdBuilder_ == null) {
          if (((bitField0_ & 0x00000001) != 0) &&
            cmd_ != null &&
            cmd_ != org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.getDefaultInstance()) {
            getCmdBuilder().mergeFrom(value);
          } else {
            cmd_ = value;
          }
        } else {
          cmdBuilder_.mergeFrom(value);
        }
        if (cmd_ != null) {
          bitField0_ |= 0x00000001;
          onChanged();
        }
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.datanode.DatanodeCommandProto cmd = 1;</code>
       */
      public Builder clearCmd() {
        bitField0_ = (bitField0_ & ~0x00000001);
        cmd_ = null;
        if (cmdBuilder_ != null) {
          cmdBuilder_.dispose();
          cmdBuilder_ = null;
        }
        onChanged();
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.datanode.DatanodeCommandProto cmd = 1;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Builder getCmdBuilder() {
        bitField0_ |= 0x00000001;
        onChanged();
        return getCmdFieldBuilder().getBuilder();
      }
      /**
       * <code>optional .hadoop.hdfs.datanode.DatanodeCommandProto cmd = 1;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProtoOrBuilder getCmdOrBuilder() {
        if (cmdBuilder_ != null) {
          return cmdBuilder_.getMessageOrBuilder();
        } else {
          return cmd_ == null ?
              org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.getDefaultInstance() : cmd_;
        }
      }
      /**
       * <code>optional .hadoop.hdfs.datanode.DatanodeCommandProto cmd = 1;</code>
       */
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProtoOrBuilder> 
          getCmdFieldBuilder() {
        if (cmdBuilder_ == null) {
          cmdBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
              org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProtoOrBuilder>(
                  getCmd(),
                  getParentForChildren(),
                  isClean());
          cmd_ = null;
        }
        return cmdBuilder_;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.datanode.CacheReportResponseProto)
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.datanode.CacheReportResponseProto)
    private static final org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportResponseProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportResponseProto();
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportResponseProto getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<CacheReportResponseProto>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<CacheReportResponseProto>() {
      @java.lang.Override
      public CacheReportResponseProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<CacheReportResponseProto> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<CacheReportResponseProto> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportResponseProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }
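
  // A minimal serialization round-trip sketch for CacheReportResponseProto;
  // since its only field "cmd" is optional, an empty message is valid:
  //
  //   CacheReportResponseProto resp = CacheReportResponseProto.newBuilder().build();
  //   byte[] bytes = resp.toByteArray();
  //   CacheReportResponseProto parsed = CacheReportResponseProto.parseFrom(bytes);
  //   boolean hasCommand = parsed.hasCmd();  // false; getCmd() would return
  //                                          // the default instance.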

  public interface ReceivedDeletedBlockInfoProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.datanode.ReceivedDeletedBlockInfoProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

    /**
     * <code>required .hadoop.hdfs.BlockProto block = 1;</code>
     * @return Whether the block field is set.
     */
    boolean hasBlock();
    /**
     * <code>required .hadoop.hdfs.BlockProto block = 1;</code>
     * @return The block.
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto getBlock();
    /**
     * <code>required .hadoop.hdfs.BlockProto block = 1;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder getBlockOrBuilder();

    /**
     * <code>required .hadoop.hdfs.datanode.ReceivedDeletedBlockInfoProto.BlockStatus status = 3;</code>
     * @return Whether the status field is set.
     */
    boolean hasStatus();
    /**
     * <code>required .hadoop.hdfs.datanode.ReceivedDeletedBlockInfoProto.BlockStatus status = 3;</code>
     * @return The status.
     */
    org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto.BlockStatus getStatus();

    /**
     * <code>optional string deleteHint = 2;</code>
     * @return Whether the deleteHint field is set.
     */
    boolean hasDeleteHint();
    /**
     * <code>optional string deleteHint = 2;</code>
     * @return The deleteHint.
     */
    java.lang.String getDeleteHint();
    /**
     * <code>optional string deleteHint = 2;</code>
     * @return The bytes for deleteHint.
     */
    org.apache.hadoop.thirdparty.protobuf.ByteString
        getDeleteHintBytes();
  }
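  /*
   * Illustrative sketch (usage, not part of the generated message): building a
   * ReceivedDeletedBlockInfoProto via the Builder defined below. 'block' is a
   * previously constructed HdfsProtos.BlockProto (its builder lives in
   * HdfsProtos and is not shown here), and 'deleteHint' is an arbitrary
   * example string:
   *
   *   ReceivedDeletedBlockInfoProto info =
   *       ReceivedDeletedBlockInfoProto.newBuilder()
   *           .setBlock(block)                                              // required, field 1
   *           .setStatus(ReceivedDeletedBlockInfoProto.BlockStatus.RECEIVED) // required, field 3
   *           .setDeleteHint(deleteHint)                                    // optional, field 2
   *           .build();
   *
   * build() delegates to buildPartial() and throws an uninitialized-message
   * exception if either required field (block, status) is unset, mirroring
   * isInitialized() below.
   */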
  /**
   * <pre>
   **
   * Data structure to send received or deleted block information
   * from datanode to namenode.
   * </pre>
   *
   * Protobuf type {@code hadoop.hdfs.datanode.ReceivedDeletedBlockInfoProto}
   */
  public static final class ReceivedDeletedBlockInfoProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.hdfs.datanode.ReceivedDeletedBlockInfoProto)
      ReceivedDeletedBlockInfoProtoOrBuilder {
  private static final long serialVersionUID = 0L;
    // Use ReceivedDeletedBlockInfoProto.newBuilder() to construct.
    private ReceivedDeletedBlockInfoProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private ReceivedDeletedBlockInfoProto() {
      status_ = 1;
      deleteHint_ = "";
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new ReceivedDeletedBlockInfoProto();
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_ReceivedDeletedBlockInfoProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_ReceivedDeletedBlockInfoProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto.class, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto.Builder.class);
    }

    /**
     * Protobuf enum {@code hadoop.hdfs.datanode.ReceivedDeletedBlockInfoProto.BlockStatus}
     */
    public enum BlockStatus
        implements org.apache.hadoop.thirdparty.protobuf.ProtocolMessageEnum {
      /**
       * <pre>
       * block being created
       * </pre>
       *
       * <code>RECEIVING = 1;</code>
       */
      RECEIVING(1),
      /**
       * <pre>
       * block creation complete
       * </pre>
       *
       * <code>RECEIVED = 2;</code>
       */
      RECEIVED(2),
      /**
       * <code>DELETED = 3;</code>
       */
      DELETED(3),
      ;

      /**
       * <pre>
       * block being created
       * </pre>
       *
       * <code>RECEIVING = 1;</code>
       */
      public static final int RECEIVING_VALUE = 1;
      /**
       * <pre>
       * block creation complete
       * </pre>
       *
       * <code>RECEIVED = 2;</code>
       */
      public static final int RECEIVED_VALUE = 2;
      /**
       * <code>DELETED = 3;</code>
       */
      public static final int DELETED_VALUE = 3;


      public final int getNumber() {
        return value;
      }

      /**
       * @param value The numeric wire value of the corresponding enum entry.
       * @return The enum associated with the given numeric wire value.
       * @deprecated Use {@link #forNumber(int)} instead.
       */
      @java.lang.Deprecated
      public static BlockStatus valueOf(int value) {
        return forNumber(value);
      }

      /**
       * @param value The numeric wire value of the corresponding enum entry.
       * @return The enum associated with the given numeric wire value.
       */
      public static BlockStatus forNumber(int value) {
        switch (value) {
          case 1: return RECEIVING;
          case 2: return RECEIVED;
          case 3: return DELETED;
          default: return null;
        }
      }

      public static org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap<BlockStatus>
          internalGetValueMap() {
        return internalValueMap;
      }
      private static final org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap<
          BlockStatus> internalValueMap =
            new org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap<BlockStatus>() {
              public BlockStatus findValueByNumber(int number) {
                return BlockStatus.forNumber(number);
              }
            };

      public final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumValueDescriptor
          getValueDescriptor() {
        return getDescriptor().getValues().get(ordinal());
      }
      public final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumDescriptor
          getDescriptorForType() {
        return getDescriptor();
      }
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumDescriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto.getDescriptor().getEnumTypes().get(0);
      }

      private static final BlockStatus[] VALUES = values();

      public static BlockStatus valueOf(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumValueDescriptor desc) {
        if (desc.getType() != getDescriptor()) {
          throw new java.lang.IllegalArgumentException(
            "EnumValueDescriptor is not for this type.");
        }
        return VALUES[desc.getIndex()];
      }

      private final int value;

      private BlockStatus(int value) {
        this.value = value;
      }

      // @@protoc_insertion_point(enum_scope:hadoop.hdfs.datanode.ReceivedDeletedBlockInfoProto.BlockStatus)
    }
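    /*
     * Illustrative note: BlockStatus.forNumber() returns null for any wire
     * value other than RECEIVING(1), RECEIVED(2) and DELETED(3); the
     * deprecated valueOf(int) simply delegates to it. For example:
     *
     *   BlockStatus s = BlockStatus.forNumber(2);   // RECEIVED
     *   BlockStatus u = BlockStatus.forNumber(99);  // null, unknown wire value
     *
     * When the Builder's mergeFrom(CodedInputStream, ...) below reads such an
     * unknown value for the status field, it is preserved as an unknown varint
     * field rather than being mapped to an enum constant.
     */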

    private int bitField0_;
    public static final int BLOCK_FIELD_NUMBER = 1;
    private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto block_;
    /**
     * <code>required .hadoop.hdfs.BlockProto block = 1;</code>
     * @return Whether the block field is set.
     */
    @java.lang.Override
    public boolean hasBlock() {
      return ((bitField0_ & 0x00000001) != 0);
    }
    /**
     * <code>required .hadoop.hdfs.BlockProto block = 1;</code>
     * @return The block.
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto getBlock() {
      return block_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.getDefaultInstance() : block_;
    }
    /**
     * <code>required .hadoop.hdfs.BlockProto block = 1;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder getBlockOrBuilder() {
      return block_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.getDefaultInstance() : block_;
    }

    public static final int STATUS_FIELD_NUMBER = 3;
    private int status_ = 1;
    /**
     * <code>required .hadoop.hdfs.datanode.ReceivedDeletedBlockInfoProto.BlockStatus status = 3;</code>
     * @return Whether the status field is set.
     */
    @java.lang.Override public boolean hasStatus() {
      return ((bitField0_ & 0x00000002) != 0);
    }
    /**
     * <code>required .hadoop.hdfs.datanode.ReceivedDeletedBlockInfoProto.BlockStatus status = 3;</code>
     * @return The status.
     */
    @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto.BlockStatus getStatus() {
      org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto.BlockStatus result = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto.BlockStatus.forNumber(status_);
      return result == null ? org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto.BlockStatus.RECEIVING : result;
    }

    public static final int DELETEHINT_FIELD_NUMBER = 2;
    @SuppressWarnings("serial")
    private volatile java.lang.Object deleteHint_ = "";
    /**
     * <code>optional string deleteHint = 2;</code>
     * @return Whether the deleteHint field is set.
     */
    @java.lang.Override
    public boolean hasDeleteHint() {
      return ((bitField0_ & 0x00000004) != 0);
    }
    /**
     * <code>optional string deleteHint = 2;</code>
     * @return The deleteHint.
     */
    @java.lang.Override
    public java.lang.String getDeleteHint() {
      java.lang.Object ref = deleteHint_;
      if (ref instanceof java.lang.String) {
        return (java.lang.String) ref;
      } else {
        org.apache.hadoop.thirdparty.protobuf.ByteString bs = 
            (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        java.lang.String s = bs.toStringUtf8();
        if (bs.isValidUtf8()) {
          deleteHint_ = s;
        }
        return s;
      }
    }
    /**
     * <code>optional string deleteHint = 2;</code>
     * @return The bytes for deleteHint.
     */
    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.ByteString
        getDeleteHintBytes() {
      java.lang.Object ref = deleteHint_;
      if (ref instanceof java.lang.String) {
        org.apache.hadoop.thirdparty.protobuf.ByteString b = 
            org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                (java.lang.String) ref);
        deleteHint_ = b;
        return b;
      } else {
        return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
      }
    }

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      if (!hasBlock()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasStatus()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!getBlock().isInitialized()) {
        memoizedIsInitialized = 0;
        return false;
      }
      memoizedIsInitialized = 1;
      return true;
    }

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      if (((bitField0_ & 0x00000001) != 0)) {
        output.writeMessage(1, getBlock());
      }
      if (((bitField0_ & 0x00000004) != 0)) {
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 2, deleteHint_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        output.writeEnum(3, status_);
      }
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeMessageSize(1, getBlock());
      }
      if (((bitField0_ & 0x00000004) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(2, deleteHint_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeEnumSize(3, status_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
       return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto other = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto) obj;

      if (hasBlock() != other.hasBlock()) return false;
      if (hasBlock()) {
        if (!getBlock()
            .equals(other.getBlock())) return false;
      }
      if (hasStatus() != other.hasStatus()) return false;
      if (hasStatus()) {
        if (status_ != other.status_) return false;
      }
      if (hasDeleteHint() != other.hasDeleteHint()) return false;
      if (hasDeleteHint()) {
        if (!getDeleteHint()
            .equals(other.getDeleteHint())) return false;
      }
      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      if (hasBlock()) {
        hash = (37 * hash) + BLOCK_FIELD_NUMBER;
        hash = (53 * hash) + getBlock().hashCode();
      }
      if (hasStatus()) {
        hash = (37 * hash) + STATUS_FIELD_NUMBER;
        hash = (53 * hash) + status_;
      }
      if (hasDeleteHint()) {
        hash = (37 * hash) + DELETEHINT_FIELD_NUMBER;
        hash = (53 * hash) + getDeleteHint().hashCode();
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * <pre>
     **
     * Data structure to send received or deleted block information
     * from datanode to namenode.
     * </pre>
     *
     * Protobuf type {@code hadoop.hdfs.datanode.ReceivedDeletedBlockInfoProto}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.hdfs.datanode.ReceivedDeletedBlockInfoProto)
        org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProtoOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_ReceivedDeletedBlockInfoProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_ReceivedDeletedBlockInfoProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto.class, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      private void maybeForceBuilderInitialization() {
        if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
                .alwaysUseFieldBuilders) {
          getBlockFieldBuilder();
        }
      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        bitField0_ = 0;
        block_ = null;
        if (blockBuilder_ != null) {
          blockBuilder_.dispose();
          blockBuilder_ = null;
        }
        status_ = 1;
        deleteHint_ = "";
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_ReceivedDeletedBlockInfoProto_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto build() {
        org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto buildPartial() {
        org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto result = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto(this);
        if (bitField0_ != 0) { buildPartial0(result); }
        onBuilt();
        return result;
      }

      private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto result) {
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) != 0)) {
          result.block_ = blockBuilder_ == null
              ? block_
              : blockBuilder_.build();
          to_bitField0_ |= 0x00000001;
        }
        if (((from_bitField0_ & 0x00000002) != 0)) {
          result.status_ = status_;
          to_bitField0_ |= 0x00000002;
        }
        if (((from_bitField0_ & 0x00000004) != 0)) {
          result.deleteHint_ = deleteHint_;
          to_bitField0_ |= 0x00000004;
        }
        result.bitField0_ |= to_bitField0_;
      }

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto) {
          return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto other) {
        if (other == org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto.getDefaultInstance()) return this;
        if (other.hasBlock()) {
          mergeBlock(other.getBlock());
        }
        if (other.hasStatus()) {
          setStatus(other.getStatus());
        }
        if (other.hasDeleteHint()) {
          deleteHint_ = other.deleteHint_;
          bitField0_ |= 0x00000004;
          onChanged();
        }
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        if (!hasBlock()) {
          return false;
        }
        if (!hasStatus()) {
          return false;
        }
        if (!getBlock().isInitialized()) {
          return false;
        }
        return true;
      }

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              case 10: {
                input.readMessage(
                    getBlockFieldBuilder().getBuilder(),
                    extensionRegistry);
                bitField0_ |= 0x00000001;
                break;
              } // case 10
              case 18: {
                deleteHint_ = input.readBytes();
                bitField0_ |= 0x00000004;
                break;
              } // case 18
              case 24: {
                int tmpRaw = input.readEnum();
                org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto.BlockStatus tmpValue =
                    org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto.BlockStatus.forNumber(tmpRaw);
                if (tmpValue == null) {
                  mergeUnknownVarintField(3, tmpRaw);
                } else {
                  status_ = tmpRaw;
                  bitField0_ |= 0x00000002;
                }
                break;
              } // case 24
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
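      /*
       * Illustrative note on the tags handled in mergeFrom() above: a protobuf
       * tag is (field_number << 3) | wire_type, so
       *   case 10 -> field 1 (block),      wire type 2 (length-delimited message)
       *   case 18 -> field 2 (deleteHint), wire type 2 (length-delimited string)
       *   case 24 -> field 3 (status),     wire type 0 (varint enum)
       * Any other tag falls through to parseUnknownField(), and parsing stops
       * when it reports an end-group tag.
       */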
      private int bitField0_;

      private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto block_;
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder> blockBuilder_;
      /**
       * <code>required .hadoop.hdfs.BlockProto block = 1;</code>
       * @return Whether the block field is set.
       */
      public boolean hasBlock() {
        return ((bitField0_ & 0x00000001) != 0);
      }
      /**
       * <code>required .hadoop.hdfs.BlockProto block = 1;</code>
       * @return The block.
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto getBlock() {
        if (blockBuilder_ == null) {
          return block_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.getDefaultInstance() : block_;
        } else {
          return blockBuilder_.getMessage();
        }
      }
      /**
       * <code>required .hadoop.hdfs.BlockProto block = 1;</code>
       */
      public Builder setBlock(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto value) {
        if (blockBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          block_ = value;
        } else {
          blockBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.BlockProto block = 1;</code>
       */
      public Builder setBlock(
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder builderForValue) {
        if (blockBuilder_ == null) {
          block_ = builderForValue.build();
        } else {
          blockBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.BlockProto block = 1;</code>
       */
      public Builder mergeBlock(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto value) {
        if (blockBuilder_ == null) {
          if (((bitField0_ & 0x00000001) != 0) &&
            block_ != null &&
            block_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.getDefaultInstance()) {
            getBlockBuilder().mergeFrom(value);
          } else {
            block_ = value;
          }
        } else {
          blockBuilder_.mergeFrom(value);
        }
        if (block_ != null) {
          bitField0_ |= 0x00000001;
          onChanged();
        }
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.BlockProto block = 1;</code>
       */
      public Builder clearBlock() {
        bitField0_ = (bitField0_ & ~0x00000001);
        block_ = null;
        if (blockBuilder_ != null) {
          blockBuilder_.dispose();
          blockBuilder_ = null;
        }
        onChanged();
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.BlockProto block = 1;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder getBlockBuilder() {
        bitField0_ |= 0x00000001;
        onChanged();
        return getBlockFieldBuilder().getBuilder();
      }
      /**
       * <code>required .hadoop.hdfs.BlockProto block = 1;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder getBlockOrBuilder() {
        if (blockBuilder_ != null) {
          return blockBuilder_.getMessageOrBuilder();
        } else {
          return block_ == null ?
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.getDefaultInstance() : block_;
        }
      }
      /**
       * <code>required .hadoop.hdfs.BlockProto block = 1;</code>
       */
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder> 
          getBlockFieldBuilder() {
        if (blockBuilder_ == null) {
          blockBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder>(
                  getBlock(),
                  getParentForChildren(),
                  isClean());
          block_ = null;
        }
        return blockBuilder_;
      }

      private int status_ = 1;
      /**
       * <code>required .hadoop.hdfs.datanode.ReceivedDeletedBlockInfoProto.BlockStatus status = 3;</code>
       * @return Whether the status field is set.
       */
      @java.lang.Override public boolean hasStatus() {
        return ((bitField0_ & 0x00000002) != 0);
      }
      /**
       * <code>required .hadoop.hdfs.datanode.ReceivedDeletedBlockInfoProto.BlockStatus status = 3;</code>
       * @return The status.
       */
      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto.BlockStatus getStatus() {
        org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto.BlockStatus result = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto.BlockStatus.forNumber(status_);
        return result == null ? org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto.BlockStatus.RECEIVING : result;
      }
      /**
       * <code>required .hadoop.hdfs.datanode.ReceivedDeletedBlockInfoProto.BlockStatus status = 3;</code>
       * @param value The status to set.
       * @return This builder for chaining.
       */
      public Builder setStatus(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto.BlockStatus value) {
        if (value == null) {
          throw new NullPointerException();
        }
        bitField0_ |= 0x00000002;
        status_ = value.getNumber();
        onChanged();
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.datanode.ReceivedDeletedBlockInfoProto.BlockStatus status = 3;</code>
       * @return This builder for chaining.
       */
      public Builder clearStatus() {
        bitField0_ = (bitField0_ & ~0x00000002);
        status_ = 1;
        onChanged();
        return this;
      }

      private java.lang.Object deleteHint_ = "";
      /**
       * <code>optional string deleteHint = 2;</code>
       * @return Whether the deleteHint field is set.
       */
      public boolean hasDeleteHint() {
        return ((bitField0_ & 0x00000004) != 0);
      }
      /**
       * <code>optional string deleteHint = 2;</code>
       * @return The deleteHint.
       */
      public java.lang.String getDeleteHint() {
        java.lang.Object ref = deleteHint_;
        if (!(ref instanceof java.lang.String)) {
          org.apache.hadoop.thirdparty.protobuf.ByteString bs =
              (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
          java.lang.String s = bs.toStringUtf8();
          if (bs.isValidUtf8()) {
            deleteHint_ = s;
          }
          return s;
        } else {
          return (java.lang.String) ref;
        }
      }
      /**
       * <code>optional string deleteHint = 2;</code>
       * @return The bytes for deleteHint.
       */
      public org.apache.hadoop.thirdparty.protobuf.ByteString
          getDeleteHintBytes() {
        java.lang.Object ref = deleteHint_;
        if (ref instanceof String) {
          org.apache.hadoop.thirdparty.protobuf.ByteString b = 
              org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                  (java.lang.String) ref);
          deleteHint_ = b;
          return b;
        } else {
          return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        }
      }
      /**
       * <code>optional string deleteHint = 2;</code>
       * @param value The deleteHint to set.
       * @return This builder for chaining.
       */
      public Builder setDeleteHint(
          java.lang.String value) {
        if (value == null) { throw new NullPointerException(); }
        deleteHint_ = value;
        bitField0_ |= 0x00000004;
        onChanged();
        return this;
      }
      /**
       * <code>optional string deleteHint = 2;</code>
       * @return This builder for chaining.
       */
      public Builder clearDeleteHint() {
        deleteHint_ = getDefaultInstance().getDeleteHint();
        bitField0_ = (bitField0_ & ~0x00000004);
        onChanged();
        return this;
      }
      /**
       * <code>optional string deleteHint = 2;</code>
       * @param value The bytes for deleteHint to set.
       * @return This builder for chaining.
       */
      public Builder setDeleteHintBytes(
          org.apache.hadoop.thirdparty.protobuf.ByteString value) {
        if (value == null) { throw new NullPointerException(); }
        deleteHint_ = value;
        bitField0_ |= 0x00000004;
        onChanged();
        return this;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.datanode.ReceivedDeletedBlockInfoProto)
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.datanode.ReceivedDeletedBlockInfoProto)
    private static final org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto();
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<ReceivedDeletedBlockInfoProto>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<ReceivedDeletedBlockInfoProto>() {
      @java.lang.Override
      public ReceivedDeletedBlockInfoProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<ReceivedDeletedBlockInfoProto> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<ReceivedDeletedBlockInfoProto> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }
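  /*
   * Illustrative sketch (usage, not part of the generated message): a
   * ReceivedDeletedBlockInfoProto round-trips through its wire form with the
   * standard protobuf calls (toByteArray() is inherited from the message base
   * class; parseFrom() is defined above):
   *
   *   byte[] bytes = info.toByteArray();
   *   ReceivedDeletedBlockInfoProto copy =
   *       ReceivedDeletedBlockInfoProto.parseFrom(bytes);
   *
   * Parsing a serialized message that is missing a required field (block or
   * status) fails with an InvalidProtocolBufferException, since the parser
   * verifies isInitialized() before returning.
   */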

  public interface StorageReceivedDeletedBlocksProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.datanode.StorageReceivedDeletedBlocksProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

    /**
     * <code>required string storageUuid = 1 [deprecated = true];</code>
     * @deprecated hadoop.hdfs.datanode.StorageReceivedDeletedBlocksProto.storageUuid is deprecated.
     *     See DatanodeProtocol.proto;l=317
     * @return Whether the storageUuid field is set.
     */
    @java.lang.Deprecated boolean hasStorageUuid();
    /**
     * <code>required string storageUuid = 1 [deprecated = true];</code>
     * @deprecated hadoop.hdfs.datanode.StorageReceivedDeletedBlocksProto.storageUuid is deprecated.
     *     See DatanodeProtocol.proto;l=317
     * @return The storageUuid.
     */
    @java.lang.Deprecated java.lang.String getStorageUuid();
    /**
     * <code>required string storageUuid = 1 [deprecated = true];</code>
     * @deprecated hadoop.hdfs.datanode.StorageReceivedDeletedBlocksProto.storageUuid is deprecated.
     *     See DatanodeProtocol.proto;l=317
     * @return The bytes for storageUuid.
     */
    @java.lang.Deprecated org.apache.hadoop.thirdparty.protobuf.ByteString
        getStorageUuidBytes();

    /**
     * <code>repeated .hadoop.hdfs.datanode.ReceivedDeletedBlockInfoProto blocks = 2;</code>
     */
    java.util.List<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto> 
        getBlocksList();
    /**
     * <code>repeated .hadoop.hdfs.datanode.ReceivedDeletedBlockInfoProto blocks = 2;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto getBlocks(int index);
    /**
     * <code>repeated .hadoop.hdfs.datanode.ReceivedDeletedBlockInfoProto blocks = 2;</code>
     */
    int getBlocksCount();
    /**
     * <code>repeated .hadoop.hdfs.datanode.ReceivedDeletedBlockInfoProto blocks = 2;</code>
     */
    java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProtoOrBuilder> 
        getBlocksOrBuilderList();
    /**
     * <code>repeated .hadoop.hdfs.datanode.ReceivedDeletedBlockInfoProto blocks = 2;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProtoOrBuilder getBlocksOrBuilder(
        int index);

    /**
     * <pre>
     * supersedes storageUuid.
     * </pre>
     *
     * <code>optional .hadoop.hdfs.DatanodeStorageProto storage = 3;</code>
     * @return Whether the storage field is set.
     */
    boolean hasStorage();
    /**
     * <pre>
     * supersedes storageUuid.
     * </pre>
     *
     * <code>optional .hadoop.hdfs.DatanodeStorageProto storage = 3;</code>
     * @return The storage.
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto getStorage();
    /**
     * <pre>
     * supersedes storageUuid.
     * </pre>
     *
     * <code>optional .hadoop.hdfs.DatanodeStorageProto storage = 3;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProtoOrBuilder getStorageOrBuilder();
  }
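  /*
   * Illustrative sketch (usage, not part of the generated message): assembling
   * a StorageReceivedDeletedBlocksProto for the fields declared in the
   * interface above. The builder setters named here (setStorageUuid,
   * addBlocks, setStorage) follow the standard protobuf-java codegen pattern
   * and are defined in the Builder later in this file; 'storageUuid', 'info'
   * and 'storage' stand for values built elsewhere:
   *
   *   StorageReceivedDeletedBlocksProto report =
   *       StorageReceivedDeletedBlocksProto.newBuilder()
   *           .setStorageUuid(storageUuid) // required on the wire, though marked deprecated
   *           .addBlocks(info)             // repeated ReceivedDeletedBlockInfoProto
   *           .setStorage(storage)         // optional DatanodeStorageProto, supersedes storageUuid
   *           .build();
   */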
  /**
   * <pre>
   **
   * List of blocks received and deleted for a storage.
   * </pre>
   *
   * Protobuf type {@code hadoop.hdfs.datanode.StorageReceivedDeletedBlocksProto}
   */
  public static final class StorageReceivedDeletedBlocksProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.hdfs.datanode.StorageReceivedDeletedBlocksProto)
      StorageReceivedDeletedBlocksProtoOrBuilder {
  private static final long serialVersionUID = 0L;
    // Use StorageReceivedDeletedBlocksProto.newBuilder() to construct.
    private StorageReceivedDeletedBlocksProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private StorageReceivedDeletedBlocksProto() {
      storageUuid_ = "";
      blocks_ = java.util.Collections.emptyList();
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new StorageReceivedDeletedBlocksProto();
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_StorageReceivedDeletedBlocksProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_StorageReceivedDeletedBlocksProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto.class, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto.Builder.class);
    }

    private int bitField0_;
    public static final int STORAGEUUID_FIELD_NUMBER = 1;
    @SuppressWarnings("serial")
    private volatile java.lang.Object storageUuid_ = "";
    /**
     * <code>required string storageUuid = 1 [deprecated = true];</code>
     * @deprecated hadoop.hdfs.datanode.StorageReceivedDeletedBlocksProto.storageUuid is deprecated.
     *     See DatanodeProtocol.proto;l=317
     * @return Whether the storageUuid field is set.
     */
    @java.lang.Override
    @java.lang.Deprecated public boolean hasStorageUuid() {
      return ((bitField0_ & 0x00000001) != 0);
    }
    /**
     * <code>required string storageUuid = 1 [deprecated = true];</code>
     * @deprecated hadoop.hdfs.datanode.StorageReceivedDeletedBlocksProto.storageUuid is deprecated.
     *     See DatanodeProtocol.proto;l=317
     * @return The storageUuid.
     */
    @java.lang.Override
    @java.lang.Deprecated public java.lang.String getStorageUuid() {
      java.lang.Object ref = storageUuid_;
      if (ref instanceof java.lang.String) {
        return (java.lang.String) ref;
      } else {
        org.apache.hadoop.thirdparty.protobuf.ByteString bs = 
            (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        java.lang.String s = bs.toStringUtf8();
        if (bs.isValidUtf8()) {
          storageUuid_ = s;
        }
        return s;
      }
    }
    /**
     * <code>required string storageUuid = 1 [deprecated = true];</code>
     * @deprecated hadoop.hdfs.datanode.StorageReceivedDeletedBlocksProto.storageUuid is deprecated.
     *     See DatanodeProtocol.proto;l=317
     * @return The bytes for storageUuid.
     */
    @java.lang.Override
    @java.lang.Deprecated public org.apache.hadoop.thirdparty.protobuf.ByteString
        getStorageUuidBytes() {
      java.lang.Object ref = storageUuid_;
      if (ref instanceof java.lang.String) {
        org.apache.hadoop.thirdparty.protobuf.ByteString b = 
            org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                (java.lang.String) ref);
        storageUuid_ = b;
        return b;
      } else {
        return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
      }
    }

    public static final int BLOCKS_FIELD_NUMBER = 2;
    @SuppressWarnings("serial")
    private java.util.List<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto> blocks_;
    /**
     * <code>repeated .hadoop.hdfs.datanode.ReceivedDeletedBlockInfoProto blocks = 2;</code>
     */
    @java.lang.Override
    public java.util.List<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto> getBlocksList() {
      return blocks_;
    }
    /**
     * <code>repeated .hadoop.hdfs.datanode.ReceivedDeletedBlockInfoProto blocks = 2;</code>
     */
    @java.lang.Override
    public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProtoOrBuilder> 
        getBlocksOrBuilderList() {
      return blocks_;
    }
    /**
     * <code>repeated .hadoop.hdfs.datanode.ReceivedDeletedBlockInfoProto blocks = 2;</code>
     */
    @java.lang.Override
    public int getBlocksCount() {
      return blocks_.size();
    }
    /**
     * <code>repeated .hadoop.hdfs.datanode.ReceivedDeletedBlockInfoProto blocks = 2;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto getBlocks(int index) {
      return blocks_.get(index);
    }
    /**
     * <code>repeated .hadoop.hdfs.datanode.ReceivedDeletedBlockInfoProto blocks = 2;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProtoOrBuilder getBlocksOrBuilder(
        int index) {
      return blocks_.get(index);
    }

    public static final int STORAGE_FIELD_NUMBER = 3;
    private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto storage_;
    /**
     * <pre>
     * supersedes storageUuid.
     * </pre>
     *
     * <code>optional .hadoop.hdfs.DatanodeStorageProto storage = 3;</code>
     * @return Whether the storage field is set.
     */
    @java.lang.Override
    public boolean hasStorage() {
      return ((bitField0_ & 0x00000002) != 0);
    }
    /**
     * <pre>
     * supersedes storageUuid.
     * </pre>
     *
     * <code>optional .hadoop.hdfs.DatanodeStorageProto storage = 3;</code>
     * @return The storage.
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto getStorage() {
      return storage_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.getDefaultInstance() : storage_;
    }
    /**
     * <pre>
     * supersedes storageUuid.
     * </pre>
     *
     * <code>optional .hadoop.hdfs.DatanodeStorageProto storage = 3;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProtoOrBuilder getStorageOrBuilder() {
      return storage_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.getDefaultInstance() : storage_;
    }

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      if (!hasStorageUuid()) {
        memoizedIsInitialized = 0;
        return false;
      }
      for (int i = 0; i < getBlocksCount(); i++) {
        if (!getBlocks(i).isInitialized()) {
          memoizedIsInitialized = 0;
          return false;
        }
      }
      if (hasStorage()) {
        if (!getStorage().isInitialized()) {
          memoizedIsInitialized = 0;
          return false;
        }
      }
      memoizedIsInitialized = 1;
      return true;
    }

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      if (((bitField0_ & 0x00000001) != 0)) {
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 1, storageUuid_);
      }
      for (int i = 0; i < blocks_.size(); i++) {
        output.writeMessage(2, blocks_.get(i));
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        output.writeMessage(3, getStorage());
      }
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(1, storageUuid_);
      }
      for (int i = 0; i < blocks_.size(); i++) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeMessageSize(2, blocks_.get(i));
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeMessageSize(3, getStorage());
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
       return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto other = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto) obj;

      if (hasStorageUuid() != other.hasStorageUuid()) return false;
      if (hasStorageUuid()) {
        if (!getStorageUuid()
            .equals(other.getStorageUuid())) return false;
      }
      if (!getBlocksList()
          .equals(other.getBlocksList())) return false;
      if (hasStorage() != other.hasStorage()) return false;
      if (hasStorage()) {
        if (!getStorage()
            .equals(other.getStorage())) return false;
      }
      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      if (hasStorageUuid()) {
        hash = (37 * hash) + STORAGEUUID_FIELD_NUMBER;
        hash = (53 * hash) + getStorageUuid().hashCode();
      }
      if (getBlocksCount() > 0) {
        hash = (37 * hash) + BLOCKS_FIELD_NUMBER;
        hash = (53 * hash) + getBlocksList().hashCode();
      }
      if (hasStorage()) {
        hash = (37 * hash) + STORAGE_FIELD_NUMBER;
        hash = (53 * hash) + getStorage().hashCode();
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * <pre>
     **
     * List of blocks received and deleted for a storage.
     * </pre>
     *
     * Protobuf type {@code hadoop.hdfs.datanode.StorageReceivedDeletedBlocksProto}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.hdfs.datanode.StorageReceivedDeletedBlocksProto)
        org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProtoOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_StorageReceivedDeletedBlocksProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_StorageReceivedDeletedBlocksProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto.class, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      private void maybeForceBuilderInitialization() {
        if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
                .alwaysUseFieldBuilders) {
          getBlocksFieldBuilder();
          getStorageFieldBuilder();
        }
      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        bitField0_ = 0;
        storageUuid_ = "";
        if (blocksBuilder_ == null) {
          blocks_ = java.util.Collections.emptyList();
        } else {
          blocks_ = null;
          blocksBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000002);
        storage_ = null;
        if (storageBuilder_ != null) {
          storageBuilder_.dispose();
          storageBuilder_ = null;
        }
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_StorageReceivedDeletedBlocksProto_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto build() {
        org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto buildPartial() {
        org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto result = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto(this);
        buildPartialRepeatedFields(result);
        if (bitField0_ != 0) { buildPartial0(result); }
        onBuilt();
        return result;
      }

      private void buildPartialRepeatedFields(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto result) {
        if (blocksBuilder_ == null) {
          if (((bitField0_ & 0x00000002) != 0)) {
            blocks_ = java.util.Collections.unmodifiableList(blocks_);
            bitField0_ = (bitField0_ & ~0x00000002);
          }
          result.blocks_ = blocks_;
        } else {
          result.blocks_ = blocksBuilder_.build();
        }
      }

      private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto result) {
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) != 0)) {
          result.storageUuid_ = storageUuid_;
          to_bitField0_ |= 0x00000001;
        }
        if (((from_bitField0_ & 0x00000004) != 0)) {
          result.storage_ = storageBuilder_ == null
              ? storage_
              : storageBuilder_.build();
          to_bitField0_ |= 0x00000002;
        }
        result.bitField0_ |= to_bitField0_;
      }

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto) {
          return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto other) {
        if (other == org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto.getDefaultInstance()) return this;
        if (other.hasStorageUuid()) {
          storageUuid_ = other.storageUuid_;
          bitField0_ |= 0x00000001;
          onChanged();
        }
        if (blocksBuilder_ == null) {
          if (!other.blocks_.isEmpty()) {
            if (blocks_.isEmpty()) {
              blocks_ = other.blocks_;
              bitField0_ = (bitField0_ & ~0x00000002);
            } else {
              ensureBlocksIsMutable();
              blocks_.addAll(other.blocks_);
            }
            onChanged();
          }
        } else {
          if (!other.blocks_.isEmpty()) {
            if (blocksBuilder_.isEmpty()) {
              blocksBuilder_.dispose();
              blocksBuilder_ = null;
              blocks_ = other.blocks_;
              bitField0_ = (bitField0_ & ~0x00000002);
              blocksBuilder_ = 
                org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ?
                   getBlocksFieldBuilder() : null;
            } else {
              blocksBuilder_.addAllMessages(other.blocks_);
            }
          }
        }
        if (other.hasStorage()) {
          mergeStorage(other.getStorage());
        }
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        if (!hasStorageUuid()) {
          return false;
        }
        for (int i = 0; i < getBlocksCount(); i++) {
          if (!getBlocks(i).isInitialized()) {
            return false;
          }
        }
        if (hasStorage()) {
          if (!getStorage().isInitialized()) {
            return false;
          }
        }
        return true;
      }

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              case 10: {
                storageUuid_ = input.readBytes();
                bitField0_ |= 0x00000001;
                break;
              } // case 10
              case 18: {
                org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto m =
                    input.readMessage(
                        org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto.PARSER,
                        extensionRegistry);
                if (blocksBuilder_ == null) {
                  ensureBlocksIsMutable();
                  blocks_.add(m);
                } else {
                  blocksBuilder_.addMessage(m);
                }
                break;
              } // case 18
              case 26: {
                input.readMessage(
                    getStorageFieldBuilder().getBuilder(),
                    extensionRegistry);
                bitField0_ |= 0x00000004;
                break;
              } // case 26
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
      private int bitField0_;

      private java.lang.Object storageUuid_ = "";
      /**
       * <code>required string storageUuid = 1 [deprecated = true];</code>
       * @deprecated hadoop.hdfs.datanode.StorageReceivedDeletedBlocksProto.storageUuid is deprecated.
       *     See DatanodeProtocol.proto;l=317
       * @return Whether the storageUuid field is set.
       */
      @java.lang.Deprecated public boolean hasStorageUuid() {
        return ((bitField0_ & 0x00000001) != 0);
      }
      /**
       * <code>required string storageUuid = 1 [deprecated = true];</code>
       * @deprecated hadoop.hdfs.datanode.StorageReceivedDeletedBlocksProto.storageUuid is deprecated.
       *     See DatanodeProtocol.proto;l=317
       * @return The storageUuid.
       */
      @java.lang.Deprecated public java.lang.String getStorageUuid() {
        java.lang.Object ref = storageUuid_;
        if (!(ref instanceof java.lang.String)) {
          org.apache.hadoop.thirdparty.protobuf.ByteString bs =
              (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
          java.lang.String s = bs.toStringUtf8();
          if (bs.isValidUtf8()) {
            storageUuid_ = s;
          }
          return s;
        } else {
          return (java.lang.String) ref;
        }
      }
      /**
       * <code>required string storageUuid = 1 [deprecated = true];</code>
       * @deprecated hadoop.hdfs.datanode.StorageReceivedDeletedBlocksProto.storageUuid is deprecated.
       *     See DatanodeProtocol.proto;l=317
       * @return The bytes for storageUuid.
       */
      @java.lang.Deprecated public org.apache.hadoop.thirdparty.protobuf.ByteString
          getStorageUuidBytes() {
        java.lang.Object ref = storageUuid_;
        if (ref instanceof String) {
          org.apache.hadoop.thirdparty.protobuf.ByteString b = 
              org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                  (java.lang.String) ref);
          storageUuid_ = b;
          return b;
        } else {
          return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        }
      }
      /**
       * <code>required string storageUuid = 1 [deprecated = true];</code>
       * @deprecated hadoop.hdfs.datanode.StorageReceivedDeletedBlocksProto.storageUuid is deprecated.
       *     See DatanodeProtocol.proto;l=317
       * @param value The storageUuid to set.
       * @return This builder for chaining.
       */
      @java.lang.Deprecated public Builder setStorageUuid(
          java.lang.String value) {
        if (value == null) { throw new NullPointerException(); }
        storageUuid_ = value;
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * <code>required string storageUuid = 1 [deprecated = true];</code>
       * @deprecated hadoop.hdfs.datanode.StorageReceivedDeletedBlocksProto.storageUuid is deprecated.
       *     See DatanodeProtocol.proto;l=317
       * @return This builder for chaining.
       */
      @java.lang.Deprecated public Builder clearStorageUuid() {
        storageUuid_ = getDefaultInstance().getStorageUuid();
        bitField0_ = (bitField0_ & ~0x00000001);
        onChanged();
        return this;
      }
      /**
       * <code>required string storageUuid = 1 [deprecated = true];</code>
       * @deprecated hadoop.hdfs.datanode.StorageReceivedDeletedBlocksProto.storageUuid is deprecated.
       *     See DatanodeProtocol.proto;l=317
       * @param value The bytes for storageUuid to set.
       * @return This builder for chaining.
       */
      @java.lang.Deprecated public Builder setStorageUuidBytes(
          org.apache.hadoop.thirdparty.protobuf.ByteString value) {
        if (value == null) { throw new NullPointerException(); }
        storageUuid_ = value;
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }

      private java.util.List<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto> blocks_ =
        java.util.Collections.emptyList();
      private void ensureBlocksIsMutable() {
        if (!((bitField0_ & 0x00000002) != 0)) {
          blocks_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto>(blocks_);
          bitField0_ |= 0x00000002;
        }
      }

      private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProtoOrBuilder> blocksBuilder_;

      /**
       * <code>repeated .hadoop.hdfs.datanode.ReceivedDeletedBlockInfoProto blocks = 2;</code>
       */
      public java.util.List<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto> getBlocksList() {
        if (blocksBuilder_ == null) {
          return java.util.Collections.unmodifiableList(blocks_);
        } else {
          return blocksBuilder_.getMessageList();
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.datanode.ReceivedDeletedBlockInfoProto blocks = 2;</code>
       */
      public int getBlocksCount() {
        if (blocksBuilder_ == null) {
          return blocks_.size();
        } else {
          return blocksBuilder_.getCount();
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.datanode.ReceivedDeletedBlockInfoProto blocks = 2;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto getBlocks(int index) {
        if (blocksBuilder_ == null) {
          return blocks_.get(index);
        } else {
          return blocksBuilder_.getMessage(index);
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.datanode.ReceivedDeletedBlockInfoProto blocks = 2;</code>
       */
      public Builder setBlocks(
          int index, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto value) {
        if (blocksBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          ensureBlocksIsMutable();
          blocks_.set(index, value);
          onChanged();
        } else {
          blocksBuilder_.setMessage(index, value);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.datanode.ReceivedDeletedBlockInfoProto blocks = 2;</code>
       */
      public Builder setBlocks(
          int index, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto.Builder builderForValue) {
        if (blocksBuilder_ == null) {
          ensureBlocksIsMutable();
          blocks_.set(index, builderForValue.build());
          onChanged();
        } else {
          blocksBuilder_.setMessage(index, builderForValue.build());
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.datanode.ReceivedDeletedBlockInfoProto blocks = 2;</code>
       */
      public Builder addBlocks(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto value) {
        if (blocksBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          ensureBlocksIsMutable();
          blocks_.add(value);
          onChanged();
        } else {
          blocksBuilder_.addMessage(value);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.datanode.ReceivedDeletedBlockInfoProto blocks = 2;</code>
       */
      public Builder addBlocks(
          int index, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto value) {
        if (blocksBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          ensureBlocksIsMutable();
          blocks_.add(index, value);
          onChanged();
        } else {
          blocksBuilder_.addMessage(index, value);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.datanode.ReceivedDeletedBlockInfoProto blocks = 2;</code>
       */
      public Builder addBlocks(
          org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto.Builder builderForValue) {
        if (blocksBuilder_ == null) {
          ensureBlocksIsMutable();
          blocks_.add(builderForValue.build());
          onChanged();
        } else {
          blocksBuilder_.addMessage(builderForValue.build());
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.datanode.ReceivedDeletedBlockInfoProto blocks = 2;</code>
       */
      public Builder addBlocks(
          int index, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto.Builder builderForValue) {
        if (blocksBuilder_ == null) {
          ensureBlocksIsMutable();
          blocks_.add(index, builderForValue.build());
          onChanged();
        } else {
          blocksBuilder_.addMessage(index, builderForValue.build());
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.datanode.ReceivedDeletedBlockInfoProto blocks = 2;</code>
       */
      public Builder addAllBlocks(
          java.lang.Iterable<? extends org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto> values) {
        if (blocksBuilder_ == null) {
          ensureBlocksIsMutable();
          org.apache.hadoop.thirdparty.protobuf.AbstractMessageLite.Builder.addAll(
              values, blocks_);
          onChanged();
        } else {
          blocksBuilder_.addAllMessages(values);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.datanode.ReceivedDeletedBlockInfoProto blocks = 2;</code>
       */
      public Builder clearBlocks() {
        if (blocksBuilder_ == null) {
          blocks_ = java.util.Collections.emptyList();
          bitField0_ = (bitField0_ & ~0x00000002);
          onChanged();
        } else {
          blocksBuilder_.clear();
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.datanode.ReceivedDeletedBlockInfoProto blocks = 2;</code>
       */
      public Builder removeBlocks(int index) {
        if (blocksBuilder_ == null) {
          ensureBlocksIsMutable();
          blocks_.remove(index);
          onChanged();
        } else {
          blocksBuilder_.remove(index);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.datanode.ReceivedDeletedBlockInfoProto blocks = 2;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto.Builder getBlocksBuilder(
          int index) {
        return getBlocksFieldBuilder().getBuilder(index);
      }
      /**
       * <code>repeated .hadoop.hdfs.datanode.ReceivedDeletedBlockInfoProto blocks = 2;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProtoOrBuilder getBlocksOrBuilder(
          int index) {
        if (blocksBuilder_ == null) {
          return blocks_.get(index);
        } else {
          return blocksBuilder_.getMessageOrBuilder(index);
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.datanode.ReceivedDeletedBlockInfoProto blocks = 2;</code>
       */
      public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProtoOrBuilder> 
           getBlocksOrBuilderList() {
        if (blocksBuilder_ != null) {
          return blocksBuilder_.getMessageOrBuilderList();
        } else {
          return java.util.Collections.unmodifiableList(blocks_);
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.datanode.ReceivedDeletedBlockInfoProto blocks = 2;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto.Builder addBlocksBuilder() {
        return getBlocksFieldBuilder().addBuilder(
            org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto.getDefaultInstance());
      }
      /**
       * <code>repeated .hadoop.hdfs.datanode.ReceivedDeletedBlockInfoProto blocks = 2;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto.Builder addBlocksBuilder(
          int index) {
        return getBlocksFieldBuilder().addBuilder(
            index, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto.getDefaultInstance());
      }
      /**
       * <code>repeated .hadoop.hdfs.datanode.ReceivedDeletedBlockInfoProto blocks = 2;</code>
       */
      public java.util.List<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto.Builder> 
           getBlocksBuilderList() {
        return getBlocksFieldBuilder().getBuilderList();
      }
      private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProtoOrBuilder> 
          getBlocksFieldBuilder() {
        if (blocksBuilder_ == null) {
          blocksBuilder_ = new org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
              org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProtoOrBuilder>(
                  blocks_,
                  ((bitField0_ & 0x00000002) != 0),
                  getParentForChildren(),
                  isClean());
          blocks_ = null;
        }
        return blocksBuilder_;
      }

      private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto storage_;
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProtoOrBuilder> storageBuilder_;
      /**
       * <pre>
       * supersedes storageUuid.
       * </pre>
       *
       * <code>optional .hadoop.hdfs.DatanodeStorageProto storage = 3;</code>
       * @return Whether the storage field is set.
       */
      public boolean hasStorage() {
        return ((bitField0_ & 0x00000004) != 0);
      }
      /**
       * <pre>
       * supersedes storageUuid.
       * </pre>
       *
       * <code>optional .hadoop.hdfs.DatanodeStorageProto storage = 3;</code>
       * @return The storage.
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto getStorage() {
        if (storageBuilder_ == null) {
          return storage_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.getDefaultInstance() : storage_;
        } else {
          return storageBuilder_.getMessage();
        }
      }
      /**
       * <pre>
       * supersedes storageUuid.
       * </pre>
       *
       * <code>optional .hadoop.hdfs.DatanodeStorageProto storage = 3;</code>
       */
      public Builder setStorage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto value) {
        if (storageBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          storage_ = value;
        } else {
          storageBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000004;
        onChanged();
        return this;
      }
      /**
       * <pre>
       * supersedes storageUuid.
       * </pre>
       *
       * <code>optional .hadoop.hdfs.DatanodeStorageProto storage = 3;</code>
       */
      public Builder setStorage(
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.Builder builderForValue) {
        if (storageBuilder_ == null) {
          storage_ = builderForValue.build();
        } else {
          storageBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000004;
        onChanged();
        return this;
      }
      /**
       * <pre>
       * supersedes storageUuid.
       * </pre>
       *
       * <code>optional .hadoop.hdfs.DatanodeStorageProto storage = 3;</code>
       */
      public Builder mergeStorage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto value) {
        if (storageBuilder_ == null) {
          if (((bitField0_ & 0x00000004) != 0) &&
            storage_ != null &&
            storage_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.getDefaultInstance()) {
            getStorageBuilder().mergeFrom(value);
          } else {
            storage_ = value;
          }
        } else {
          storageBuilder_.mergeFrom(value);
        }
        if (storage_ != null) {
          bitField0_ |= 0x00000004;
          onChanged();
        }
        return this;
      }
      /**
       * <pre>
       * supersedes storageUuid.
       * </pre>
       *
       * <code>optional .hadoop.hdfs.DatanodeStorageProto storage = 3;</code>
       */
      public Builder clearStorage() {
        bitField0_ = (bitField0_ & ~0x00000004);
        storage_ = null;
        if (storageBuilder_ != null) {
          storageBuilder_.dispose();
          storageBuilder_ = null;
        }
        onChanged();
        return this;
      }
      /**
       * <pre>
       * supersedes storageUuid.
       * </pre>
       *
       * <code>optional .hadoop.hdfs.DatanodeStorageProto storage = 3;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.Builder getStorageBuilder() {
        bitField0_ |= 0x00000004;
        onChanged();
        return getStorageFieldBuilder().getBuilder();
      }
      /**
       * <pre>
       * supersedes storageUuid.
       * </pre>
       *
       * <code>optional .hadoop.hdfs.DatanodeStorageProto storage = 3;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProtoOrBuilder getStorageOrBuilder() {
        if (storageBuilder_ != null) {
          return storageBuilder_.getMessageOrBuilder();
        } else {
          return storage_ == null ?
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.getDefaultInstance() : storage_;
        }
      }
      /**
       * <pre>
       * supersedes storageUuid.
       * </pre>
       *
       * <code>optional .hadoop.hdfs.DatanodeStorageProto storage = 3;</code>
       */
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProtoOrBuilder> 
          getStorageFieldBuilder() {
        if (storageBuilder_ == null) {
          storageBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProtoOrBuilder>(
                  getStorage(),
                  getParentForChildren(),
                  isClean());
          storage_ = null;
        }
        return storageBuilder_;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.datanode.StorageReceivedDeletedBlocksProto)
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.datanode.StorageReceivedDeletedBlocksProto)
    private static final org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto();
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<StorageReceivedDeletedBlocksProto>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<StorageReceivedDeletedBlocksProto>() {
      @java.lang.Override
      public StorageReceivedDeletedBlocksProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<StorageReceivedDeletedBlocksProto> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<StorageReceivedDeletedBlocksProto> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }
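
  // Illustrative, hand-written sketch (not produced by protoc): a minimal round trip
  // through the StorageReceivedDeletedBlocksProto builder and parser defined above.
  // Only accessors generated in this file are used (newBuilder, setStorageUuid, build,
  // toByteArray, parseFrom); the "DS-example" storage id is a hypothetical placeholder.
  // storageUuid is deprecated in favour of the optional storage field, but it is still
  // declared required, so the sketch sets it to keep build() from rejecting the message.
  @java.lang.SuppressWarnings("deprecation")
  private static StorageReceivedDeletedBlocksProto roundTripStorageReportExample()
      throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
    StorageReceivedDeletedBlocksProto report = StorageReceivedDeletedBlocksProto.newBuilder()
        .setStorageUuid("DS-example")    // required (deprecated) field; real callers also set storage
        .build();                        // throws if a required field were left unset
    byte[] wire = report.toByteArray();  // serialize to the protobuf wire format
    return StorageReceivedDeletedBlocksProto.parseFrom(wire);  // parse the bytes back
  }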

  public interface BlockReceivedAndDeletedRequestProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.datanode.BlockReceivedAndDeletedRequestProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

    /**
     * <code>required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;</code>
     * @return Whether the registration field is set.
     */
    boolean hasRegistration();
    /**
     * <code>required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;</code>
     * @return The registration.
     */
    org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto getRegistration();
    /**
     * <code>required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder getRegistrationOrBuilder();

    /**
     * <code>required string blockPoolId = 2;</code>
     * @return Whether the blockPoolId field is set.
     */
    boolean hasBlockPoolId();
    /**
     * <code>required string blockPoolId = 2;</code>
     * @return The blockPoolId.
     */
    java.lang.String getBlockPoolId();
    /**
     * <code>required string blockPoolId = 2;</code>
     * @return The bytes for blockPoolId.
     */
    org.apache.hadoop.thirdparty.protobuf.ByteString
        getBlockPoolIdBytes();

    /**
     * <code>repeated .hadoop.hdfs.datanode.StorageReceivedDeletedBlocksProto blocks = 3;</code>
     */
    java.util.List<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto> 
        getBlocksList();
    /**
     * <code>repeated .hadoop.hdfs.datanode.StorageReceivedDeletedBlocksProto blocks = 3;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto getBlocks(int index);
    /**
     * <code>repeated .hadoop.hdfs.datanode.StorageReceivedDeletedBlocksProto blocks = 3;</code>
     */
    int getBlocksCount();
    /**
     * <code>repeated .hadoop.hdfs.datanode.StorageReceivedDeletedBlocksProto blocks = 3;</code>
     */
    java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProtoOrBuilder> 
        getBlocksOrBuilderList();
    /**
     * <code>repeated .hadoop.hdfs.datanode.StorageReceivedDeletedBlocksProto blocks = 3;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProtoOrBuilder getBlocksOrBuilder(
        int index);
  }
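
  // Illustrative, hand-written sketch (not produced by protoc): how a receiver might walk
  // a BlockReceivedAndDeletedRequestProto using only the accessors declared in the
  // OrBuilder interface above. The request groups received/deleted block infos per
  // storage; this helper simply totals them. The method name and summary format are
  // assumptions made for the example, not part of the DatanodeProtocol API.
  private static java.lang.String summarizeBlockReceivedAndDeletedRequestExample(
      BlockReceivedAndDeletedRequestProto request) {
    int totalBlockInfos = 0;
    // One StorageReceivedDeletedBlocksProto entry per datanode storage in the report.
    for (int i = 0; i < request.getBlocksCount(); i++) {
      totalBlockInfos += request.getBlocks(i).getBlocksCount();
    }
    return "blockPool=" + request.getBlockPoolId()
        + " storages=" + request.getBlocksCount()
        + " blockInfos=" + totalBlockInfos;
  }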
  /**
   * <pre>
   **
   * registration - datanode registration information
   * blockPoolID  - block pool ID of the reported blocks
   * blocks       - Received/deleted block list
   * </pre>
   *
   * Protobuf type {@code hadoop.hdfs.datanode.BlockReceivedAndDeletedRequestProto}
   */
  public static final class BlockReceivedAndDeletedRequestProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.hdfs.datanode.BlockReceivedAndDeletedRequestProto)
      BlockReceivedAndDeletedRequestProtoOrBuilder {
    private static final long serialVersionUID = 0L;
    // Use BlockReceivedAndDeletedRequestProto.newBuilder() to construct.
    private BlockReceivedAndDeletedRequestProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private BlockReceivedAndDeletedRequestProto() {
      blockPoolId_ = "";
      blocks_ = java.util.Collections.emptyList();
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new BlockReceivedAndDeletedRequestProto();
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_BlockReceivedAndDeletedRequestProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_BlockReceivedAndDeletedRequestProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto.Builder.class);
    }

    private int bitField0_;
    public static final int REGISTRATION_FIELD_NUMBER = 1;
    private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto registration_;
    /**
     * <code>required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;</code>
     * @return Whether the registration field is set.
     */
    @java.lang.Override
    public boolean hasRegistration() {
      return ((bitField0_ & 0x00000001) != 0);
    }
    /**
     * <code>required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;</code>
     * @return The registration.
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto getRegistration() {
      return registration_ == null ? org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance() : registration_;
    }
    /**
     * <code>required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder getRegistrationOrBuilder() {
      return registration_ == null ? org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance() : registration_;
    }

    public static final int BLOCKPOOLID_FIELD_NUMBER = 2;
    @SuppressWarnings("serial")
    private volatile java.lang.Object blockPoolId_ = "";
    /**
     * <code>required string blockPoolId = 2;</code>
     * @return Whether the blockPoolId field is set.
     */
    @java.lang.Override
    public boolean hasBlockPoolId() {
      return ((bitField0_ & 0x00000002) != 0);
    }
    /**
     * <code>required string blockPoolId = 2;</code>
     * @return The blockPoolId.
     */
    @java.lang.Override
    public java.lang.String getBlockPoolId() {
      java.lang.Object ref = blockPoolId_;
      if (ref instanceof java.lang.String) {
        return (java.lang.String) ref;
      } else {
        org.apache.hadoop.thirdparty.protobuf.ByteString bs = 
            (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        java.lang.String s = bs.toStringUtf8();
        if (bs.isValidUtf8()) {
          blockPoolId_ = s;
        }
        return s;
      }
    }
    /**
     * <code>required string blockPoolId = 2;</code>
     * @return The bytes for blockPoolId.
     */
    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.ByteString
        getBlockPoolIdBytes() {
      java.lang.Object ref = blockPoolId_;
      if (ref instanceof java.lang.String) {
        org.apache.hadoop.thirdparty.protobuf.ByteString b = 
            org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                (java.lang.String) ref);
        blockPoolId_ = b;
        return b;
      } else {
        return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
      }
    }

    public static final int BLOCKS_FIELD_NUMBER = 3;
    @SuppressWarnings("serial")
    private java.util.List<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto> blocks_;
    /**
     * <code>repeated .hadoop.hdfs.datanode.StorageReceivedDeletedBlocksProto blocks = 3;</code>
     */
    @java.lang.Override
    public java.util.List<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto> getBlocksList() {
      return blocks_;
    }
    /**
     * <code>repeated .hadoop.hdfs.datanode.StorageReceivedDeletedBlocksProto blocks = 3;</code>
     */
    @java.lang.Override
    public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProtoOrBuilder> 
        getBlocksOrBuilderList() {
      return blocks_;
    }
    /**
     * <code>repeated .hadoop.hdfs.datanode.StorageReceivedDeletedBlocksProto blocks = 3;</code>
     */
    @java.lang.Override
    public int getBlocksCount() {
      return blocks_.size();
    }
    /**
     * <code>repeated .hadoop.hdfs.datanode.StorageReceivedDeletedBlocksProto blocks = 3;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto getBlocks(int index) {
      return blocks_.get(index);
    }
    /**
     * <code>repeated .hadoop.hdfs.datanode.StorageReceivedDeletedBlocksProto blocks = 3;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProtoOrBuilder getBlocksOrBuilder(
        int index) {
      return blocks_.get(index);
    }

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      if (!hasRegistration()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasBlockPoolId()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!getRegistration().isInitialized()) {
        memoizedIsInitialized = 0;
        return false;
      }
      for (int i = 0; i < getBlocksCount(); i++) {
        if (!getBlocks(i).isInitialized()) {
          memoizedIsInitialized = 0;
          return false;
        }
      }
      memoizedIsInitialized = 1;
      return true;
    }

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      if (((bitField0_ & 0x00000001) != 0)) {
        output.writeMessage(1, getRegistration());
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 2, blockPoolId_);
      }
      for (int i = 0; i < blocks_.size(); i++) {
        output.writeMessage(3, blocks_.get(i));
      }
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeMessageSize(1, getRegistration());
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(2, blockPoolId_);
      }
      for (int i = 0; i < blocks_.size(); i++) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeMessageSize(3, blocks_.get(i));
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto) obj;

      if (hasRegistration() != other.hasRegistration()) return false;
      if (hasRegistration()) {
        if (!getRegistration()
            .equals(other.getRegistration())) return false;
      }
      if (hasBlockPoolId() != other.hasBlockPoolId()) return false;
      if (hasBlockPoolId()) {
        if (!getBlockPoolId()
            .equals(other.getBlockPoolId())) return false;
      }
      if (!getBlocksList()
          .equals(other.getBlocksList())) return false;
      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      if (hasRegistration()) {
        hash = (37 * hash) + REGISTRATION_FIELD_NUMBER;
        hash = (53 * hash) + getRegistration().hashCode();
      }
      if (hasBlockPoolId()) {
        hash = (37 * hash) + BLOCKPOOLID_FIELD_NUMBER;
        hash = (53 * hash) + getBlockPoolId().hashCode();
      }
      if (getBlocksCount() > 0) {
        hash = (37 * hash) + BLOCKS_FIELD_NUMBER;
        hash = (53 * hash) + getBlocksList().hashCode();
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * <pre>
     **
     * registration - datanode registration information
     * blockPoolID  - block pool ID of the reported blocks
     * blocks       - Received/deleted block list
     * </pre>
     *
     * Protobuf type {@code hadoop.hdfs.datanode.BlockReceivedAndDeletedRequestProto}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.hdfs.datanode.BlockReceivedAndDeletedRequestProto)
        org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProtoOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_BlockReceivedAndDeletedRequestProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_BlockReceivedAndDeletedRequestProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      private void maybeForceBuilderInitialization() {
        if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
                .alwaysUseFieldBuilders) {
          getRegistrationFieldBuilder();
          getBlocksFieldBuilder();
        }
      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        bitField0_ = 0;
        registration_ = null;
        if (registrationBuilder_ != null) {
          registrationBuilder_.dispose();
          registrationBuilder_ = null;
        }
        blockPoolId_ = "";
        if (blocksBuilder_ == null) {
          blocks_ = java.util.Collections.emptyList();
        } else {
          blocks_ = null;
          blocksBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000004);
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_BlockReceivedAndDeletedRequestProto_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto build() {
        org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto buildPartial() {
        org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto(this);
        buildPartialRepeatedFields(result);
        if (bitField0_ != 0) { buildPartial0(result); }
        onBuilt();
        return result;
      }

      private void buildPartialRepeatedFields(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto result) {
        if (blocksBuilder_ == null) {
          if (((bitField0_ & 0x00000004) != 0)) {
            blocks_ = java.util.Collections.unmodifiableList(blocks_);
            bitField0_ = (bitField0_ & ~0x00000004);
          }
          result.blocks_ = blocks_;
        } else {
          result.blocks_ = blocksBuilder_.build();
        }
      }

      private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto result) {
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) != 0)) {
          result.registration_ = registrationBuilder_ == null
              ? registration_
              : registrationBuilder_.build();
          to_bitField0_ |= 0x00000001;
        }
        if (((from_bitField0_ & 0x00000002) != 0)) {
          result.blockPoolId_ = blockPoolId_;
          to_bitField0_ |= 0x00000002;
        }
        result.bitField0_ |= to_bitField0_;
      }
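
      // In buildPartial0 above, bitField0_ bits record which fields were explicitly
      // set: 0x00000001 = registration, 0x00000002 = blockPoolId. Bit 0x00000004 only
      // marks the repeated blocks list as mutable and is never copied into the
      // result's bitField0_, since repeated fields carry no has-bit.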

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto) {
          return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto other) {
        if (other == org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto.getDefaultInstance()) return this;
        if (other.hasRegistration()) {
          mergeRegistration(other.getRegistration());
        }
        if (other.hasBlockPoolId()) {
          blockPoolId_ = other.blockPoolId_;
          bitField0_ |= 0x00000002;
          onChanged();
        }
        if (blocksBuilder_ == null) {
          if (!other.blocks_.isEmpty()) {
            if (blocks_.isEmpty()) {
              blocks_ = other.blocks_;
              bitField0_ = (bitField0_ & ~0x00000004);
            } else {
              ensureBlocksIsMutable();
              blocks_.addAll(other.blocks_);
            }
            onChanged();
          }
        } else {
          if (!other.blocks_.isEmpty()) {
            if (blocksBuilder_.isEmpty()) {
              blocksBuilder_.dispose();
              blocksBuilder_ = null;
              blocks_ = other.blocks_;
              bitField0_ = (bitField0_ & ~0x00000004);
              blocksBuilder_ = 
                org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ?
                   getBlocksFieldBuilder() : null;
            } else {
              blocksBuilder_.addAllMessages(other.blocks_);
            }
          }
        }
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        if (!hasRegistration()) {
          return false;
        }
        if (!hasBlockPoolId()) {
          return false;
        }
        if (!getRegistration().isInitialized()) {
          return false;
        }
        for (int i = 0; i < getBlocksCount(); i++) {
          if (!getBlocks(i).isInitialized()) {
            return false;
          }
        }
        return true;
      }

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              case 10: {
                input.readMessage(
                    getRegistrationFieldBuilder().getBuilder(),
                    extensionRegistry);
                bitField0_ |= 0x00000001;
                break;
              } // case 10
              case 18: {
                blockPoolId_ = input.readBytes();
                bitField0_ |= 0x00000002;
                break;
              } // case 18
              case 26: {
                org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto m =
                    input.readMessage(
                        org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto.PARSER,
                        extensionRegistry);
                if (blocksBuilder_ == null) {
                  ensureBlocksIsMutable();
                  blocks_.add(m);
                } else {
                  blocksBuilder_.addMessage(m);
                }
                break;
              } // case 26
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
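
      // The tag values handled in mergeFrom above follow the protobuf wire format
      // tag = (fieldNumber << 3) | wireType, with wireType 2 (length-delimited) for
      // messages and strings: case 10 = field 1 (registration), case 18 = field 2
      // (blockPoolId), case 26 = field 3 (blocks).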
      private int bitField0_;

      private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto registration_;
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder> registrationBuilder_;
      /**
       * <code>required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;</code>
       * @return Whether the registration field is set.
       */
      public boolean hasRegistration() {
        return ((bitField0_ & 0x00000001) != 0);
      }
      /**
       * <code>required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;</code>
       * @return The registration.
       */
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto getRegistration() {
        if (registrationBuilder_ == null) {
          return registration_ == null ? org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance() : registration_;
        } else {
          return registrationBuilder_.getMessage();
        }
      }
      /**
       * <code>required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;</code>
       */
      public Builder setRegistration(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto value) {
        if (registrationBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          registration_ = value;
        } else {
          registrationBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;</code>
       */
      public Builder setRegistration(
          org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder builderForValue) {
        if (registrationBuilder_ == null) {
          registration_ = builderForValue.build();
        } else {
          registrationBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;</code>
       */
      public Builder mergeRegistration(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto value) {
        if (registrationBuilder_ == null) {
          if (((bitField0_ & 0x00000001) != 0) &&
            registration_ != null &&
            registration_ != org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance()) {
            getRegistrationBuilder().mergeFrom(value);
          } else {
            registration_ = value;
          }
        } else {
          registrationBuilder_.mergeFrom(value);
        }
        if (registration_ != null) {
          bitField0_ |= 0x00000001;
          onChanged();
        }
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;</code>
       */
      public Builder clearRegistration() {
        bitField0_ = (bitField0_ & ~0x00000001);
        registration_ = null;
        if (registrationBuilder_ != null) {
          registrationBuilder_.dispose();
          registrationBuilder_ = null;
        }
        onChanged();
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder getRegistrationBuilder() {
        bitField0_ |= 0x00000001;
        onChanged();
        return getRegistrationFieldBuilder().getBuilder();
      }
      /**
       * <code>required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder getRegistrationOrBuilder() {
        if (registrationBuilder_ != null) {
          return registrationBuilder_.getMessageOrBuilder();
        } else {
          return registration_ == null ?
              org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance() : registration_;
        }
      }
      /**
       * <code>required .hadoop.hdfs.datanode.DatanodeRegistrationProto registration = 1;</code>
       */
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder> 
          getRegistrationFieldBuilder() {
        if (registrationBuilder_ == null) {
          registrationBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
              org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder>(
                  getRegistration(),
                  getParentForChildren(),
                  isClean());
          registration_ = null;
        }
        return registrationBuilder_;
      }

      private java.lang.Object blockPoolId_ = "";
      /**
       * <code>required string blockPoolId = 2;</code>
       * @return Whether the blockPoolId field is set.
       */
      public boolean hasBlockPoolId() {
        return ((bitField0_ & 0x00000002) != 0);
      }
      /**
       * <code>required string blockPoolId = 2;</code>
       * @return The blockPoolId.
       */
      public java.lang.String getBlockPoolId() {
        java.lang.Object ref = blockPoolId_;
        if (!(ref instanceof java.lang.String)) {
          org.apache.hadoop.thirdparty.protobuf.ByteString bs =
              (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
          java.lang.String s = bs.toStringUtf8();
          if (bs.isValidUtf8()) {
            blockPoolId_ = s;
          }
          return s;
        } else {
          return (java.lang.String) ref;
        }
      }
      /**
       * <code>required string blockPoolId = 2;</code>
       * @return The bytes for blockPoolId.
       */
      public org.apache.hadoop.thirdparty.protobuf.ByteString
          getBlockPoolIdBytes() {
        java.lang.Object ref = blockPoolId_;
        if (ref instanceof String) {
          org.apache.hadoop.thirdparty.protobuf.ByteString b = 
              org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                  (java.lang.String) ref);
          blockPoolId_ = b;
          return b;
        } else {
          return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        }
      }
      /**
       * <code>required string blockPoolId = 2;</code>
       * @param value The blockPoolId to set.
       * @return This builder for chaining.
       */
      public Builder setBlockPoolId(
          java.lang.String value) {
        if (value == null) { throw new NullPointerException(); }
        blockPoolId_ = value;
        bitField0_ |= 0x00000002;
        onChanged();
        return this;
      }
      /**
       * <code>required string blockPoolId = 2;</code>
       * @return This builder for chaining.
       */
      public Builder clearBlockPoolId() {
        blockPoolId_ = getDefaultInstance().getBlockPoolId();
        bitField0_ = (bitField0_ & ~0x00000002);
        onChanged();
        return this;
      }
      /**
       * <code>required string blockPoolId = 2;</code>
       * @param value The bytes for blockPoolId to set.
       * @return This builder for chaining.
       */
      public Builder setBlockPoolIdBytes(
          org.apache.hadoop.thirdparty.protobuf.ByteString value) {
        if (value == null) { throw new NullPointerException(); }
        blockPoolId_ = value;
        bitField0_ |= 0x00000002;
        onChanged();
        return this;
      }

      private java.util.List<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto> blocks_ =
        java.util.Collections.emptyList();
      private void ensureBlocksIsMutable() {
        if (!((bitField0_ & 0x00000004) != 0)) {
          blocks_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto>(blocks_);
          bitField0_ |= 0x00000004;
        }
      }

      private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProtoOrBuilder> blocksBuilder_;

      /**
       * <code>repeated .hadoop.hdfs.datanode.StorageReceivedDeletedBlocksProto blocks = 3;</code>
       */
      public java.util.List<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto> getBlocksList() {
        if (blocksBuilder_ == null) {
          return java.util.Collections.unmodifiableList(blocks_);
        } else {
          return blocksBuilder_.getMessageList();
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.datanode.StorageReceivedDeletedBlocksProto blocks = 3;</code>
       */
      public int getBlocksCount() {
        if (blocksBuilder_ == null) {
          return blocks_.size();
        } else {
          return blocksBuilder_.getCount();
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.datanode.StorageReceivedDeletedBlocksProto blocks = 3;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto getBlocks(int index) {
        if (blocksBuilder_ == null) {
          return blocks_.get(index);
        } else {
          return blocksBuilder_.getMessage(index);
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.datanode.StorageReceivedDeletedBlocksProto blocks = 3;</code>
       */
      public Builder setBlocks(
          int index, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto value) {
        if (blocksBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          ensureBlocksIsMutable();
          blocks_.set(index, value);
          onChanged();
        } else {
          blocksBuilder_.setMessage(index, value);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.datanode.StorageReceivedDeletedBlocksProto blocks = 3;</code>
       */
      public Builder setBlocks(
          int index, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto.Builder builderForValue) {
        if (blocksBuilder_ == null) {
          ensureBlocksIsMutable();
          blocks_.set(index, builderForValue.build());
          onChanged();
        } else {
          blocksBuilder_.setMessage(index, builderForValue.build());
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.datanode.StorageReceivedDeletedBlocksProto blocks = 3;</code>
       */
      public Builder addBlocks(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto value) {
        if (blocksBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          ensureBlocksIsMutable();
          blocks_.add(value);
          onChanged();
        } else {
          blocksBuilder_.addMessage(value);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.datanode.StorageReceivedDeletedBlocksProto blocks = 3;</code>
       */
      public Builder addBlocks(
          int index, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto value) {
        if (blocksBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          ensureBlocksIsMutable();
          blocks_.add(index, value);
          onChanged();
        } else {
          blocksBuilder_.addMessage(index, value);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.datanode.StorageReceivedDeletedBlocksProto blocks = 3;</code>
       */
      public Builder addBlocks(
          org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto.Builder builderForValue) {
        if (blocksBuilder_ == null) {
          ensureBlocksIsMutable();
          blocks_.add(builderForValue.build());
          onChanged();
        } else {
          blocksBuilder_.addMessage(builderForValue.build());
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.datanode.StorageReceivedDeletedBlocksProto blocks = 3;</code>
       */
      public Builder addBlocks(
          int index, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto.Builder builderForValue) {
        if (blocksBuilder_ == null) {
          ensureBlocksIsMutable();
          blocks_.add(index, builderForValue.build());
          onChanged();
        } else {
          blocksBuilder_.addMessage(index, builderForValue.build());
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.datanode.StorageReceivedDeletedBlocksProto blocks = 3;</code>
       */
      public Builder addAllBlocks(
          java.lang.Iterable<? extends org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto> values) {
        if (blocksBuilder_ == null) {
          ensureBlocksIsMutable();
          org.apache.hadoop.thirdparty.protobuf.AbstractMessageLite.Builder.addAll(
              values, blocks_);
          onChanged();
        } else {
          blocksBuilder_.addAllMessages(values);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.datanode.StorageReceivedDeletedBlocksProto blocks = 3;</code>
       */
      public Builder clearBlocks() {
        if (blocksBuilder_ == null) {
          blocks_ = java.util.Collections.emptyList();
          bitField0_ = (bitField0_ & ~0x00000004);
          onChanged();
        } else {
          blocksBuilder_.clear();
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.datanode.StorageReceivedDeletedBlocksProto blocks = 3;</code>
       */
      public Builder removeBlocks(int index) {
        if (blocksBuilder_ == null) {
          ensureBlocksIsMutable();
          blocks_.remove(index);
          onChanged();
        } else {
          blocksBuilder_.remove(index);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.datanode.StorageReceivedDeletedBlocksProto blocks = 3;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto.Builder getBlocksBuilder(
          int index) {
        return getBlocksFieldBuilder().getBuilder(index);
      }
      /**
       * <code>repeated .hadoop.hdfs.datanode.StorageReceivedDeletedBlocksProto blocks = 3;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProtoOrBuilder getBlocksOrBuilder(
          int index) {
        if (blocksBuilder_ == null) {
          return blocks_.get(index);
        } else {
          return blocksBuilder_.getMessageOrBuilder(index);
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.datanode.StorageReceivedDeletedBlocksProto blocks = 3;</code>
       */
      public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProtoOrBuilder> 
           getBlocksOrBuilderList() {
        if (blocksBuilder_ != null) {
          return blocksBuilder_.getMessageOrBuilderList();
        } else {
          return java.util.Collections.unmodifiableList(blocks_);
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.datanode.StorageReceivedDeletedBlocksProto blocks = 3;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto.Builder addBlocksBuilder() {
        return getBlocksFieldBuilder().addBuilder(
            org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto.getDefaultInstance());
      }
      /**
       * <code>repeated .hadoop.hdfs.datanode.StorageReceivedDeletedBlocksProto blocks = 3;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto.Builder addBlocksBuilder(
          int index) {
        return getBlocksFieldBuilder().addBuilder(
            index, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto.getDefaultInstance());
      }
      /**
       * <code>repeated .hadoop.hdfs.datanode.StorageReceivedDeletedBlocksProto blocks = 3;</code>
       */
      public java.util.List<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto.Builder> 
           getBlocksBuilderList() {
        return getBlocksFieldBuilder().getBuilderList();
      }
      private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProtoOrBuilder> 
          getBlocksFieldBuilder() {
        if (blocksBuilder_ == null) {
          blocksBuilder_ = new org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
              org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProtoOrBuilder>(
                  blocks_,
                  ((bitField0_ & 0x00000004) != 0),
                  getParentForChildren(),
                  isClean());
          blocks_ = null;
        }
        return blocksBuilder_;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.datanode.BlockReceivedAndDeletedRequestProto)
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.datanode.BlockReceivedAndDeletedRequestProto)
    private static final org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto();
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<BlockReceivedAndDeletedRequestProto>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<BlockReceivedAndDeletedRequestProto>() {
      @java.lang.Override
      public BlockReceivedAndDeletedRequestProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<BlockReceivedAndDeletedRequestProto> parser() {
      return PARSER;
    }
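
    // The static PARSER field above is deprecated in this generated API; parser() is
    // the supported accessor, e.g. (assuming `bytes` holds a serialized message):
    //
    //   BlockReceivedAndDeletedRequestProto msg =
    //       BlockReceivedAndDeletedRequestProto.parser().parseFrom(bytes);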

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<BlockReceivedAndDeletedRequestProto> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }
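
  // A serialization round-trip sketch for the request message above (`out`, `in` and
  // `req` are placeholders): writeDelimitedTo/parseDelimitedFrom prepend a varint
  // length so several messages can share one stream, while toByteArray()/parseFrom
  // handle exactly one message.
  //
  //   req.writeDelimitedTo(out);
  //   BlockReceivedAndDeletedRequestProto copy =
  //       BlockReceivedAndDeletedRequestProto.parseDelimitedFrom(in);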

  public interface BlockReceivedAndDeletedResponseProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.datanode.BlockReceivedAndDeletedResponseProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {
  }
  /**
   * <pre>
   **
   * void response
   * </pre>
   *
   * Protobuf type {@code hadoop.hdfs.datanode.BlockReceivedAndDeletedResponseProto}
   */
  public static final class BlockReceivedAndDeletedResponseProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.hdfs.datanode.BlockReceivedAndDeletedResponseProto)
      BlockReceivedAndDeletedResponseProtoOrBuilder {
    private static final long serialVersionUID = 0L;
    // Use BlockReceivedAndDeletedResponseProto.newBuilder() to construct.
    private BlockReceivedAndDeletedResponseProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private BlockReceivedAndDeletedResponseProto() {
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new BlockReceivedAndDeletedResponseProto();
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_BlockReceivedAndDeletedResponseProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_BlockReceivedAndDeletedResponseProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto.Builder.class);
    }

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      memoizedIsInitialized = 1;
      return true;
    }

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto) obj;

      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * <pre>
     **
     * void response
     * </pre>
     *
     * Protobuf type {@code hadoop.hdfs.datanode.BlockReceivedAndDeletedResponseProto}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.hdfs.datanode.BlockReceivedAndDeletedResponseProto)
        org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProtoOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_BlockReceivedAndDeletedResponseProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_BlockReceivedAndDeletedResponseProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto.newBuilder()
      private Builder() {

      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);

      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_BlockReceivedAndDeletedResponseProto_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto build() {
        org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto buildPartial() {
        org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto(this);
        onBuilt();
        return result;
      }

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto) {
          return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto other) {
        if (other == org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto.getDefaultInstance()) return this;
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        return true;
      }

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.datanode.BlockReceivedAndDeletedResponseProto)
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.datanode.BlockReceivedAndDeletedResponseProto)
    private static final org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto();
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<BlockReceivedAndDeletedResponseProto>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<BlockReceivedAndDeletedResponseProto>() {
      @java.lang.Override
      public BlockReceivedAndDeletedResponseProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<BlockReceivedAndDeletedResponseProto> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<BlockReceivedAndDeletedResponseProto> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }

  public interface ErrorReportRequestProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.datanode.ErrorReportRequestProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

    /**
     * <pre>
     * Registration info
     * </pre>
     *
     * <code>required .hadoop.hdfs.datanode.DatanodeRegistrationProto registartion = 1;</code>
     * @return Whether the registartion field is set.
     */
    boolean hasRegistartion();
    /**
     * <pre>
     * Registration info
     * </pre>
     *
     * <code>required .hadoop.hdfs.datanode.DatanodeRegistrationProto registartion = 1;</code>
     * @return The registartion.
     */
    org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto getRegistartion();
    /**
     * <pre>
     * Registration info
     * </pre>
     *
     * <code>required .hadoop.hdfs.datanode.DatanodeRegistrationProto registartion = 1;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder getRegistartionOrBuilder();

    /**
     * <pre>
     * Error code
     * </pre>
     *
     * <code>required uint32 errorCode = 2;</code>
     * @return Whether the errorCode field is set.
     */
    boolean hasErrorCode();
    /**
     * <pre>
     * Error code
     * </pre>
     *
     * <code>required uint32 errorCode = 2;</code>
     * @return The errorCode.
     */
    int getErrorCode();

    /**
     * <pre>
     * Error message
     * </pre>
     *
     * <code>required string msg = 3;</code>
     * @return Whether the msg field is set.
     */
    boolean hasMsg();
    /**
     * <pre>
     * Error message
     * </pre>
     *
     * <code>required string msg = 3;</code>
     * @return The msg.
     */
    java.lang.String getMsg();
    /**
     * <pre>
     * Error message
     * </pre>
     *
     * <code>required string msg = 3;</code>
     * @return The bytes for msg.
     */
    org.apache.hadoop.thirdparty.protobuf.ByteString
        getMsgBytes();
  }
  /**
   * <pre>
   **
   * registartion - Datanode reporting the error
   * errorCode - error code indicating the error
   * msg - Free text description of the error
   * </pre>
   *
   * Protobuf type {@code hadoop.hdfs.datanode.ErrorReportRequestProto}
   */
  public static final class ErrorReportRequestProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.hdfs.datanode.ErrorReportRequestProto)
      ErrorReportRequestProtoOrBuilder {
    private static final long serialVersionUID = 0L;
    // Use ErrorReportRequestProto.newBuilder() to construct.
    private ErrorReportRequestProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private ErrorReportRequestProto() {
      msg_ = "";
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new ErrorReportRequestProto();
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_ErrorReportRequestProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_ErrorReportRequestProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto.Builder.class);
    }

    /**
     * Protobuf enum {@code hadoop.hdfs.datanode.ErrorReportRequestProto.ErrorCode}
     */
    public enum ErrorCode
        implements org.apache.hadoop.thirdparty.protobuf.ProtocolMessageEnum {
      /**
       * <pre>
       * Error report to be logged at the namenode
       * </pre>
       *
       * <code>NOTIFY = 0;</code>
       */
      NOTIFY(0),
      /**
       * <pre>
       * DN has disk errors but still has valid volumes
       * </pre>
       *
       * <code>DISK_ERROR = 1;</code>
       */
      DISK_ERROR(1),
      /**
       * <pre>
       * Command from namenode has invalid block ID
       * </pre>
       *
       * <code>INVALID_BLOCK = 2;</code>
       */
      INVALID_BLOCK(2),
      /**
       * <pre>
       * No valid volumes left on datanode
       * </pre>
       *
       * <code>FATAL_DISK_ERROR = 3;</code>
       */
      FATAL_DISK_ERROR(3),
      ;

      /**
       * <pre>
       * Error report to be logged at the namenode
       * </pre>
       *
       * <code>NOTIFY = 0;</code>
       */
      public static final int NOTIFY_VALUE = 0;
      /**
       * <pre>
       * DN has disk errors but still has valid volumes
       * </pre>
       *
       * <code>DISK_ERROR = 1;</code>
       */
      public static final int DISK_ERROR_VALUE = 1;
      /**
       * <pre>
       * Command from namenode has invalid block ID
       * </pre>
       *
       * <code>INVALID_BLOCK = 2;</code>
       */
      public static final int INVALID_BLOCK_VALUE = 2;
      /**
       * <pre>
       * No valid volumes left on datanode
       * </pre>
       *
       * <code>FATAL_DISK_ERROR = 3;</code>
       */
      public static final int FATAL_DISK_ERROR_VALUE = 3;


      public final int getNumber() {
        return value;
      }

      /**
       * @param value The numeric wire value of the corresponding enum entry.
       * @return The enum associated with the given numeric wire value.
       * @deprecated Use {@link #forNumber(int)} instead.
       */
      @java.lang.Deprecated
      public static ErrorCode valueOf(int value) {
        return forNumber(value);
      }

      /**
       * @param value The numeric wire value of the corresponding enum entry.
       * @return The enum associated with the given numeric wire value.
       */
      public static ErrorCode forNumber(int value) {
        switch (value) {
          case 0: return NOTIFY;
          case 1: return DISK_ERROR;
          case 2: return INVALID_BLOCK;
          case 3: return FATAL_DISK_ERROR;
          default: return null;
        }
      }

      public static org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap<ErrorCode>
          internalGetValueMap() {
        return internalValueMap;
      }
      private static final org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap<
          ErrorCode> internalValueMap =
            new org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap<ErrorCode>() {
              public ErrorCode findValueByNumber(int number) {
                return ErrorCode.forNumber(number);
              }
            };

      public final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumValueDescriptor
          getValueDescriptor() {
        return getDescriptor().getValues().get(ordinal());
      }
      public final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumDescriptor
          getDescriptorForType() {
        return getDescriptor();
      }
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumDescriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto.getDescriptor().getEnumTypes().get(0);
      }

      private static final ErrorCode[] VALUES = values();

      public static ErrorCode valueOf(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumValueDescriptor desc) {
        if (desc.getType() != getDescriptor()) {
          throw new java.lang.IllegalArgumentException(
            "EnumValueDescriptor is not for this type.");
        }
        return VALUES[desc.getIndex()];
      }

      private final int value;

      private ErrorCode(int value) {
        this.value = value;
      }

      // @@protoc_insertion_point(enum_scope:hadoop.hdfs.datanode.ErrorReportRequestProto.ErrorCode)
    }
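
    // Illustrative sketch (not part of the generated code): converting between
    // ErrorCode constants and their numeric wire values. forNumber returns
    // null for values that are not defined in the enum.
    //
    //   int wire = ErrorCode.DISK_ERROR.getNumber();    // 1
    //   ErrorCode code = ErrorCode.forNumber(wire);      // DISK_ERROR
    //   ErrorCode missing = ErrorCode.forNumber(99);     // null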

    private int bitField0_;
    public static final int REGISTARTION_FIELD_NUMBER = 1;
    private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto registartion_;
    /**
     * <pre>
     * Registration info
     * </pre>
     *
     * <code>required .hadoop.hdfs.datanode.DatanodeRegistrationProto registartion = 1;</code>
     * @return Whether the registartion field is set.
     */
    @java.lang.Override
    public boolean hasRegistartion() {
      return ((bitField0_ & 0x00000001) != 0);
    }
    /**
     * <pre>
     * Registration info
     * </pre>
     *
     * <code>required .hadoop.hdfs.datanode.DatanodeRegistrationProto registartion = 1;</code>
     * @return The registartion.
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto getRegistartion() {
      return registartion_ == null ? org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance() : registartion_;
    }
    /**
     * <pre>
     * Registration info
     * </pre>
     *
     * <code>required .hadoop.hdfs.datanode.DatanodeRegistrationProto registartion = 1;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder getRegistartionOrBuilder() {
      return registartion_ == null ? org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance() : registartion_;
    }

    public static final int ERRORCODE_FIELD_NUMBER = 2;
    private int errorCode_ = 0;
    /**
     * <pre>
     * Error code
     * </pre>
     *
     * <code>required uint32 errorCode = 2;</code>
     * @return Whether the errorCode field is set.
     */
    @java.lang.Override
    public boolean hasErrorCode() {
      return ((bitField0_ & 0x00000002) != 0);
    }
    /**
     * <pre>
     * Error code
     * </pre>
     *
     * <code>required uint32 errorCode = 2;</code>
     * @return The errorCode.
     */
    @java.lang.Override
    public int getErrorCode() {
      return errorCode_;
    }

    public static final int MSG_FIELD_NUMBER = 3;
    @SuppressWarnings("serial")
    private volatile java.lang.Object msg_ = "";
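    // msg_ holds either a java.lang.String or a ByteString: getMsg() lazily
    // decodes the UTF-8 bytes and caches the String when it is valid UTF-8,
    // while getMsgBytes() caches the encoded ByteString, so repeated calls
    // avoid re-converting the same value.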
    /**
     * <pre>
     * Error message
     * </pre>
     *
     * <code>required string msg = 3;</code>
     * @return Whether the msg field is set.
     */
    @java.lang.Override
    public boolean hasMsg() {
      return ((bitField0_ & 0x00000004) != 0);
    }
    /**
     * <pre>
     * Error message
     * </pre>
     *
     * <code>required string msg = 3;</code>
     * @return The msg.
     */
    @java.lang.Override
    public java.lang.String getMsg() {
      java.lang.Object ref = msg_;
      if (ref instanceof java.lang.String) {
        return (java.lang.String) ref;
      } else {
        org.apache.hadoop.thirdparty.protobuf.ByteString bs = 
            (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        java.lang.String s = bs.toStringUtf8();
        if (bs.isValidUtf8()) {
          msg_ = s;
        }
        return s;
      }
    }
    /**
     * <pre>
     * Error message
     * </pre>
     *
     * <code>required string msg = 3;</code>
     * @return The bytes for msg.
     */
    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.ByteString
        getMsgBytes() {
      java.lang.Object ref = msg_;
      if (ref instanceof java.lang.String) {
        org.apache.hadoop.thirdparty.protobuf.ByteString b = 
            org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                (java.lang.String) ref);
        msg_ = b;
        return b;
      } else {
        return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
      }
    }

    private byte memoizedIsInitialized = -1;
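    // memoizedIsInitialized caches the required-field check: -1 means not yet
    // computed, 0 means a required field is missing, 1 means fully initialized.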
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      if (!hasRegistartion()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasErrorCode()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasMsg()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!getRegistartion().isInitialized()) {
        memoizedIsInitialized = 0;
        return false;
      }
      memoizedIsInitialized = 1;
      return true;
    }

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      if (((bitField0_ & 0x00000001) != 0)) {
        output.writeMessage(1, getRegistartion());
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        output.writeUInt32(2, errorCode_);
      }
      if (((bitField0_ & 0x00000004) != 0)) {
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 3, msg_);
      }
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeMessageSize(1, getRegistartion());
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt32Size(2, errorCode_);
      }
      if (((bitField0_ & 0x00000004) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(3, msg_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
       return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto) obj;

      if (hasRegistartion() != other.hasRegistartion()) return false;
      if (hasRegistartion()) {
        if (!getRegistartion()
            .equals(other.getRegistartion())) return false;
      }
      if (hasErrorCode() != other.hasErrorCode()) return false;
      if (hasErrorCode()) {
        if (getErrorCode()
            != other.getErrorCode()) return false;
      }
      if (hasMsg() != other.hasMsg()) return false;
      if (hasMsg()) {
        if (!getMsg()
            .equals(other.getMsg())) return false;
      }
      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      if (hasRegistartion()) {
        hash = (37 * hash) + REGISTARTION_FIELD_NUMBER;
        hash = (53 * hash) + getRegistartion().hashCode();
      }
      if (hasErrorCode()) {
        hash = (37 * hash) + ERRORCODE_FIELD_NUMBER;
        hash = (53 * hash) + getErrorCode();
      }
      if (hasMsg()) {
        hash = (37 * hash) + MSG_FIELD_NUMBER;
        hash = (53 * hash) + getMsg().hashCode();
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * <pre>
     **
     * registartion - Datanode reporting the error
     * errorCode - error code indicating the error
     * msg - Free text description of the error
     * </pre>
     *
     * Protobuf type {@code hadoop.hdfs.datanode.ErrorReportRequestProto}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.hdfs.datanode.ErrorReportRequestProto)
        org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProtoOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_ErrorReportRequestProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_ErrorReportRequestProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      private void maybeForceBuilderInitialization() {
        if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
                .alwaysUseFieldBuilders) {
          getRegistartionFieldBuilder();
        }
      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        bitField0_ = 0;
        registartion_ = null;
        if (registartionBuilder_ != null) {
          registartionBuilder_.dispose();
          registartionBuilder_ = null;
        }
        errorCode_ = 0;
        msg_ = "";
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_ErrorReportRequestProto_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto build() {
        org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto buildPartial() {
        org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto(this);
        if (bitField0_ != 0) { buildPartial0(result); }
        onBuilt();
        return result;
      }

      private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto result) {
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) != 0)) {
          result.registartion_ = registartionBuilder_ == null
              ? registartion_
              : registartionBuilder_.build();
          to_bitField0_ |= 0x00000001;
        }
        if (((from_bitField0_ & 0x00000002) != 0)) {
          result.errorCode_ = errorCode_;
          to_bitField0_ |= 0x00000002;
        }
        if (((from_bitField0_ & 0x00000004) != 0)) {
          result.msg_ = msg_;
          to_bitField0_ |= 0x00000004;
        }
        result.bitField0_ |= to_bitField0_;
      }

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto) {
          return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto other) {
        if (other == org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto.getDefaultInstance()) return this;
        if (other.hasRegistartion()) {
          mergeRegistartion(other.getRegistartion());
        }
        if (other.hasErrorCode()) {
          setErrorCode(other.getErrorCode());
        }
        if (other.hasMsg()) {
          msg_ = other.msg_;
          bitField0_ |= 0x00000004;
          onChanged();
        }
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        if (!hasRegistartion()) {
          return false;
        }
        if (!hasErrorCode()) {
          return false;
        }
        if (!hasMsg()) {
          return false;
        }
        if (!getRegistartion().isInitialized()) {
          return false;
        }
        return true;
      }

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
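            // Wire tags encode (field number << 3) | wire type: 10 is field 1
            // (length-delimited message), 16 is field 2 (varint), and 26 is
            // field 3 (length-delimited string).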
            switch (tag) {
              case 0:
                done = true;
                break;
              case 10: {
                input.readMessage(
                    getRegistartionFieldBuilder().getBuilder(),
                    extensionRegistry);
                bitField0_ |= 0x00000001;
                break;
              } // case 10
              case 16: {
                errorCode_ = input.readUInt32();
                bitField0_ |= 0x00000002;
                break;
              } // case 16
              case 26: {
                msg_ = input.readBytes();
                bitField0_ |= 0x00000004;
                break;
              } // case 26
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
      private int bitField0_;

      private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto registartion_;
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder> registartionBuilder_;
      /**
       * <pre>
       * Registration info
       * </pre>
       *
       * <code>required .hadoop.hdfs.datanode.DatanodeRegistrationProto registartion = 1;</code>
       * @return Whether the registartion field is set.
       */
      public boolean hasRegistartion() {
        return ((bitField0_ & 0x00000001) != 0);
      }
      /**
       * <pre>
       * Registration info
       * </pre>
       *
       * <code>required .hadoop.hdfs.datanode.DatanodeRegistrationProto registartion = 1;</code>
       * @return The registartion.
       */
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto getRegistartion() {
        if (registartionBuilder_ == null) {
          return registartion_ == null ? org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance() : registartion_;
        } else {
          return registartionBuilder_.getMessage();
        }
      }
      /**
       * <pre>
       * Registration info
       * </pre>
       *
       * <code>required .hadoop.hdfs.datanode.DatanodeRegistrationProto registartion = 1;</code>
       */
      public Builder setRegistartion(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto value) {
        if (registartionBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          registartion_ = value;
        } else {
          registartionBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * <pre>
       * Registration info
       * </pre>
       *
       * <code>required .hadoop.hdfs.datanode.DatanodeRegistrationProto registartion = 1;</code>
       */
      public Builder setRegistartion(
          org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder builderForValue) {
        if (registartionBuilder_ == null) {
          registartion_ = builderForValue.build();
        } else {
          registartionBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * <pre>
       * Registration info
       * </pre>
       *
       * <code>required .hadoop.hdfs.datanode.DatanodeRegistrationProto registartion = 1;</code>
       */
      public Builder mergeRegistartion(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto value) {
        if (registartionBuilder_ == null) {
          if (((bitField0_ & 0x00000001) != 0) &&
            registartion_ != null &&
            registartion_ != org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance()) {
            getRegistartionBuilder().mergeFrom(value);
          } else {
            registartion_ = value;
          }
        } else {
          registartionBuilder_.mergeFrom(value);
        }
        if (registartion_ != null) {
          bitField0_ |= 0x00000001;
          onChanged();
        }
        return this;
      }
      /**
       * <pre>
       * Registration info
       * </pre>
       *
       * <code>required .hadoop.hdfs.datanode.DatanodeRegistrationProto registartion = 1;</code>
       */
      public Builder clearRegistartion() {
        bitField0_ = (bitField0_ & ~0x00000001);
        registartion_ = null;
        if (registartionBuilder_ != null) {
          registartionBuilder_.dispose();
          registartionBuilder_ = null;
        }
        onChanged();
        return this;
      }
      /**
       * <pre>
       * Registration info
       * </pre>
       *
       * <code>required .hadoop.hdfs.datanode.DatanodeRegistrationProto registartion = 1;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder getRegistartionBuilder() {
        bitField0_ |= 0x00000001;
        onChanged();
        return getRegistartionFieldBuilder().getBuilder();
      }
      /**
       * <pre>
       * Registration info
       * </pre>
       *
       * <code>required .hadoop.hdfs.datanode.DatanodeRegistrationProto registartion = 1;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder getRegistartionOrBuilder() {
        if (registartionBuilder_ != null) {
          return registartionBuilder_.getMessageOrBuilder();
        } else {
          return registartion_ == null ?
              org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance() : registartion_;
        }
      }
      /**
       * <pre>
       * Registration info
       * </pre>
       *
       * <code>required .hadoop.hdfs.datanode.DatanodeRegistrationProto registartion = 1;</code>
       */
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder> 
          getRegistartionFieldBuilder() {
        if (registartionBuilder_ == null) {
          registartionBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
              org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder>(
                  getRegistartion(),
                  getParentForChildren(),
                  isClean());
          registartion_ = null;
        }
        return registartionBuilder_;
      }

      private int errorCode_ ;
      /**
       * <pre>
       * Error code
       * </pre>
       *
       * <code>required uint32 errorCode = 2;</code>
       * @return Whether the errorCode field is set.
       */
      @java.lang.Override
      public boolean hasErrorCode() {
        return ((bitField0_ & 0x00000002) != 0);
      }
      /**
       * <pre>
       * Error code
       * </pre>
       *
       * <code>required uint32 errorCode = 2;</code>
       * @return The errorCode.
       */
      @java.lang.Override
      public int getErrorCode() {
        return errorCode_;
      }
      /**
       * <pre>
       * Error code
       * </pre>
       *
       * <code>required uint32 errorCode = 2;</code>
       * @param value The errorCode to set.
       * @return This builder for chaining.
       */
      public Builder setErrorCode(int value) {

        errorCode_ = value;
        bitField0_ |= 0x00000002;
        onChanged();
        return this;
      }
      /**
       * <pre>
       * Error code
       * </pre>
       *
       * <code>required uint32 errorCode = 2;</code>
       * @return This builder for chaining.
       */
      public Builder clearErrorCode() {
        bitField0_ = (bitField0_ & ~0x00000002);
        errorCode_ = 0;
        onChanged();
        return this;
      }

      private java.lang.Object msg_ = "";
      /**
       * <pre>
       * Error message
       * </pre>
       *
       * <code>required string msg = 3;</code>
       * @return Whether the msg field is set.
       */
      public boolean hasMsg() {
        return ((bitField0_ & 0x00000004) != 0);
      }
      /**
       * <pre>
       * Error message
       * </pre>
       *
       * <code>required string msg = 3;</code>
       * @return The msg.
       */
      public java.lang.String getMsg() {
        java.lang.Object ref = msg_;
        if (!(ref instanceof java.lang.String)) {
          org.apache.hadoop.thirdparty.protobuf.ByteString bs =
              (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
          java.lang.String s = bs.toStringUtf8();
          if (bs.isValidUtf8()) {
            msg_ = s;
          }
          return s;
        } else {
          return (java.lang.String) ref;
        }
      }
      /**
       * <pre>
       * Error message
       * </pre>
       *
       * <code>required string msg = 3;</code>
       * @return The bytes for msg.
       */
      public org.apache.hadoop.thirdparty.protobuf.ByteString
          getMsgBytes() {
        java.lang.Object ref = msg_;
        if (ref instanceof String) {
          org.apache.hadoop.thirdparty.protobuf.ByteString b = 
              org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                  (java.lang.String) ref);
          msg_ = b;
          return b;
        } else {
          return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        }
      }
      /**
       * <pre>
       * Error message
       * </pre>
       *
       * <code>required string msg = 3;</code>
       * @param value The msg to set.
       * @return This builder for chaining.
       */
      public Builder setMsg(
          java.lang.String value) {
        if (value == null) { throw new NullPointerException(); }
        msg_ = value;
        bitField0_ |= 0x00000004;
        onChanged();
        return this;
      }
      /**
       * <pre>
       * Error message
       * </pre>
       *
       * <code>required string msg = 3;</code>
       * @return This builder for chaining.
       */
      public Builder clearMsg() {
        msg_ = getDefaultInstance().getMsg();
        bitField0_ = (bitField0_ & ~0x00000004);
        onChanged();
        return this;
      }
      /**
       * <pre>
       * Error message
       * </pre>
       *
       * <code>required string msg = 3;</code>
       * @param value The bytes for msg to set.
       * @return This builder for chaining.
       */
      public Builder setMsgBytes(
          org.apache.hadoop.thirdparty.protobuf.ByteString value) {
        if (value == null) { throw new NullPointerException(); }
        msg_ = value;
        bitField0_ |= 0x00000004;
        onChanged();
        return this;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.datanode.ErrorReportRequestProto)
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.datanode.ErrorReportRequestProto)
    private static final org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto();
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<ErrorReportRequestProto>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<ErrorReportRequestProto>() {
      @java.lang.Override
      public ErrorReportRequestProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<ErrorReportRequestProto> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<ErrorReportRequestProto> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }
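
  // Illustrative usage sketch (not part of the generated code): building and
  // round-tripping an error report. Assumes `reg` is an already-populated
  // DatanodeRegistrationProto; all three fields are required, so build()
  // fails if any of them is missing.
  //
  //   ErrorReportRequestProto req = ErrorReportRequestProto.newBuilder()
  //       .setRegistartion(reg)
  //       .setErrorCode(ErrorReportRequestProto.ErrorCode.DISK_ERROR_VALUE)
  //       .setMsg("Volume /data/1 failed")
  //       .build();
  //   byte[] bytes = req.toByteArray();
  //   ErrorReportRequestProto parsed = ErrorReportRequestProto.parseFrom(bytes);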

  public interface ErrorReportResponseProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.datanode.ErrorReportResponseProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {
  }
  /**
   * <pre>
   **
   * void response
   * </pre>
   *
   * Protobuf type {@code hadoop.hdfs.datanode.ErrorReportResponseProto}
   */
  public static final class ErrorReportResponseProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.hdfs.datanode.ErrorReportResponseProto)
      ErrorReportResponseProtoOrBuilder {
  private static final long serialVersionUID = 0L;
    // Use ErrorReportResponseProto.newBuilder() to construct.
    private ErrorReportResponseProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private ErrorReportResponseProto() {
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new ErrorReportResponseProto();
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_ErrorReportResponseProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_ErrorReportResponseProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto.Builder.class);
    }

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      memoizedIsInitialized = 1;
      return true;
    }

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
       return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto) obj;

      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * <pre>
     **
     * void response
     * </pre>
     *
     * Protobuf type {@code hadoop.hdfs.datanode.ErrorReportResponseProto}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.hdfs.datanode.ErrorReportResponseProto)
        org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProtoOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_ErrorReportResponseProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_ErrorReportResponseProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto.newBuilder()
      private Builder() {

      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);

      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_ErrorReportResponseProto_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto build() {
        org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto buildPartial() {
        org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto(this);
        onBuilt();
        return result;
      }

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto) {
          return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto other) {
        if (other == org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto.getDefaultInstance()) return this;
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        return true;
      }

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.datanode.ErrorReportResponseProto)
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.datanode.ErrorReportResponseProto)
    private static final org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto();
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<ErrorReportResponseProto>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<ErrorReportResponseProto>() {
      @java.lang.Override
      public ErrorReportResponseProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<ErrorReportResponseProto> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<ErrorReportResponseProto> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }
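
  // Illustrative sketch (not part of the generated code): the response message
  // has no fields, so a freshly built instance serializes to zero bytes and
  // parses back to an empty message.
  //
  //   ErrorReportResponseProto resp = ErrorReportResponseProto.newBuilder().build();
  //   byte[] bytes = resp.toByteArray();                  // length 0
  //   ErrorReportResponseProto parsed = ErrorReportResponseProto.parseFrom(bytes);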

  public interface ReportBadBlocksRequestProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.datanode.ReportBadBlocksRequestProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

    /**
     * <code>repeated .hadoop.hdfs.LocatedBlockProto blocks = 1;</code>
     */
    java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto> 
        getBlocksList();
    /**
     * <code>repeated .hadoop.hdfs.LocatedBlockProto blocks = 1;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto getBlocks(int index);
    /**
     * <code>repeated .hadoop.hdfs.LocatedBlockProto blocks = 1;</code>
     */
    int getBlocksCount();
    /**
     * <code>repeated .hadoop.hdfs.LocatedBlockProto blocks = 1;</code>
     */
    java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder> 
        getBlocksOrBuilderList();
    /**
     * <code>repeated .hadoop.hdfs.LocatedBlockProto blocks = 1;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder getBlocksOrBuilder(
        int index);
  }
  /**
   * <pre>
   **
   * blocks - list of blocks that are reported as corrupt
   * </pre>
   *
   * Protobuf type {@code hadoop.hdfs.datanode.ReportBadBlocksRequestProto}
   */
  public static final class ReportBadBlocksRequestProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.hdfs.datanode.ReportBadBlocksRequestProto)
      ReportBadBlocksRequestProtoOrBuilder {
    private static final long serialVersionUID = 0L;
    // Use ReportBadBlocksRequestProto.newBuilder() to construct.
    private ReportBadBlocksRequestProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private ReportBadBlocksRequestProto() {
      blocks_ = java.util.Collections.emptyList();
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new ReportBadBlocksRequestProto();
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_ReportBadBlocksRequestProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_ReportBadBlocksRequestProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto.Builder.class);
    }

    public static final int BLOCKS_FIELD_NUMBER = 1;
    @SuppressWarnings("serial")
    private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto> blocks_;
    /**
     * <code>repeated .hadoop.hdfs.LocatedBlockProto blocks = 1;</code>
     */
    @java.lang.Override
    public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto> getBlocksList() {
      return blocks_;
    }
    /**
     * <code>repeated .hadoop.hdfs.LocatedBlockProto blocks = 1;</code>
     */
    @java.lang.Override
    public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder> 
        getBlocksOrBuilderList() {
      return blocks_;
    }
    /**
     * <code>repeated .hadoop.hdfs.LocatedBlockProto blocks = 1;</code>
     */
    @java.lang.Override
    public int getBlocksCount() {
      return blocks_.size();
    }
    /**
     * <code>repeated .hadoop.hdfs.LocatedBlockProto blocks = 1;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto getBlocks(int index) {
      return blocks_.get(index);
    }
    /**
     * <code>repeated .hadoop.hdfs.LocatedBlockProto blocks = 1;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder getBlocksOrBuilder(
        int index) {
      return blocks_.get(index);
    }

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      for (int i = 0; i < getBlocksCount(); i++) {
        if (!getBlocks(i).isInitialized()) {
          memoizedIsInitialized = 0;
          return false;
        }
      }
      memoizedIsInitialized = 1;
      return true;
    }

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      for (int i = 0; i < blocks_.size(); i++) {
        output.writeMessage(1, blocks_.get(i));
      }
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      for (int i = 0; i < blocks_.size(); i++) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeMessageSize(1, blocks_.get(i));
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto) obj;

      if (!getBlocksList()
          .equals(other.getBlocksList())) return false;
      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      if (getBlocksCount() > 0) {
        hash = (37 * hash) + BLOCKS_FIELD_NUMBER;
        hash = (53 * hash) + getBlocksList().hashCode();
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * <pre>
     **
     * blocks - list of blocks that are reported as corrupt
     * </pre>
     *
     * Protobuf type {@code hadoop.hdfs.datanode.ReportBadBlocksRequestProto}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.hdfs.datanode.ReportBadBlocksRequestProto)
        org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProtoOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_ReportBadBlocksRequestProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_ReportBadBlocksRequestProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto.newBuilder()
      private Builder() {

      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);

      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        bitField0_ = 0;
        if (blocksBuilder_ == null) {
          blocks_ = java.util.Collections.emptyList();
        } else {
          blocks_ = null;
          blocksBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000001);
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_ReportBadBlocksRequestProto_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto build() {
        org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto buildPartial() {
        org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto(this);
        buildPartialRepeatedFields(result);
        if (bitField0_ != 0) { buildPartial0(result); }
        onBuilt();
        return result;
      }

      private void buildPartialRepeatedFields(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto result) {
        if (blocksBuilder_ == null) {
          if (((bitField0_ & 0x00000001) != 0)) {
            blocks_ = java.util.Collections.unmodifiableList(blocks_);
            bitField0_ = (bitField0_ & ~0x00000001);
          }
          result.blocks_ = blocks_;
        } else {
          result.blocks_ = blocksBuilder_.build();
        }
      }

      private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto result) {
        int from_bitField0_ = bitField0_;
      }

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto) {
          return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto other) {
        if (other == org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto.getDefaultInstance()) return this;
        if (blocksBuilder_ == null) {
          if (!other.blocks_.isEmpty()) {
            if (blocks_.isEmpty()) {
              blocks_ = other.blocks_;
              bitField0_ = (bitField0_ & ~0x00000001);
            } else {
              ensureBlocksIsMutable();
              blocks_.addAll(other.blocks_);
            }
            onChanged();
          }
        } else {
          if (!other.blocks_.isEmpty()) {
            if (blocksBuilder_.isEmpty()) {
              blocksBuilder_.dispose();
              blocksBuilder_ = null;
              blocks_ = other.blocks_;
              bitField0_ = (bitField0_ & ~0x00000001);
              blocksBuilder_ = 
                org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ?
                   getBlocksFieldBuilder() : null;
            } else {
              blocksBuilder_.addAllMessages(other.blocks_);
            }
          }
        }
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        for (int i = 0; i < getBlocksCount(); i++) {
          if (!getBlocks(i).isInitialized()) {
            return false;
          }
        }
        return true;
      }

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              case 10: {
                org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto m =
                    input.readMessage(
                        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.PARSER,
                        extensionRegistry);
                if (blocksBuilder_ == null) {
                  ensureBlocksIsMutable();
                  blocks_.add(m);
                } else {
                  blocksBuilder_.addMessage(m);
                }
                break;
              } // case 10
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
      private int bitField0_;

      private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto> blocks_ =
        java.util.Collections.emptyList();
      private void ensureBlocksIsMutable() {
        if (!((bitField0_ & 0x00000001) != 0)) {
          blocks_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto>(blocks_);
          bitField0_ |= 0x00000001;
         }
      }

      private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder> blocksBuilder_;

      /**
       * <code>repeated .hadoop.hdfs.LocatedBlockProto blocks = 1;</code>
       */
      public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto> getBlocksList() {
        if (blocksBuilder_ == null) {
          return java.util.Collections.unmodifiableList(blocks_);
        } else {
          return blocksBuilder_.getMessageList();
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.LocatedBlockProto blocks = 1;</code>
       */
      public int getBlocksCount() {
        if (blocksBuilder_ == null) {
          return blocks_.size();
        } else {
          return blocksBuilder_.getCount();
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.LocatedBlockProto blocks = 1;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto getBlocks(int index) {
        if (blocksBuilder_ == null) {
          return blocks_.get(index);
        } else {
          return blocksBuilder_.getMessage(index);
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.LocatedBlockProto blocks = 1;</code>
       */
      public Builder setBlocks(
          int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto value) {
        if (blocksBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          ensureBlocksIsMutable();
          blocks_.set(index, value);
          onChanged();
        } else {
          blocksBuilder_.setMessage(index, value);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.LocatedBlockProto blocks = 1;</code>
       */
      public Builder setBlocks(
          int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder builderForValue) {
        if (blocksBuilder_ == null) {
          ensureBlocksIsMutable();
          blocks_.set(index, builderForValue.build());
          onChanged();
        } else {
          blocksBuilder_.setMessage(index, builderForValue.build());
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.LocatedBlockProto blocks = 1;</code>
       */
      public Builder addBlocks(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto value) {
        if (blocksBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          ensureBlocksIsMutable();
          blocks_.add(value);
          onChanged();
        } else {
          blocksBuilder_.addMessage(value);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.LocatedBlockProto blocks = 1;</code>
       */
      public Builder addBlocks(
          int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto value) {
        if (blocksBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          ensureBlocksIsMutable();
          blocks_.add(index, value);
          onChanged();
        } else {
          blocksBuilder_.addMessage(index, value);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.LocatedBlockProto blocks = 1;</code>
       */
      public Builder addBlocks(
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder builderForValue) {
        if (blocksBuilder_ == null) {
          ensureBlocksIsMutable();
          blocks_.add(builderForValue.build());
          onChanged();
        } else {
          blocksBuilder_.addMessage(builderForValue.build());
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.LocatedBlockProto blocks = 1;</code>
       */
      public Builder addBlocks(
          int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder builderForValue) {
        if (blocksBuilder_ == null) {
          ensureBlocksIsMutable();
          blocks_.add(index, builderForValue.build());
          onChanged();
        } else {
          blocksBuilder_.addMessage(index, builderForValue.build());
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.LocatedBlockProto blocks = 1;</code>
       */
      public Builder addAllBlocks(
          java.lang.Iterable<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto> values) {
        if (blocksBuilder_ == null) {
          ensureBlocksIsMutable();
          org.apache.hadoop.thirdparty.protobuf.AbstractMessageLite.Builder.addAll(
              values, blocks_);
          onChanged();
        } else {
          blocksBuilder_.addAllMessages(values);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.LocatedBlockProto blocks = 1;</code>
       */
      public Builder clearBlocks() {
        if (blocksBuilder_ == null) {
          blocks_ = java.util.Collections.emptyList();
          bitField0_ = (bitField0_ & ~0x00000001);
          onChanged();
        } else {
          blocksBuilder_.clear();
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.LocatedBlockProto blocks = 1;</code>
       */
      public Builder removeBlocks(int index) {
        if (blocksBuilder_ == null) {
          ensureBlocksIsMutable();
          blocks_.remove(index);
          onChanged();
        } else {
          blocksBuilder_.remove(index);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.LocatedBlockProto blocks = 1;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder getBlocksBuilder(
          int index) {
        return getBlocksFieldBuilder().getBuilder(index);
      }
      /**
       * <code>repeated .hadoop.hdfs.LocatedBlockProto blocks = 1;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder getBlocksOrBuilder(
          int index) {
        if (blocksBuilder_ == null) {
          return blocks_.get(index);
        } else {
          return blocksBuilder_.getMessageOrBuilder(index);
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.LocatedBlockProto blocks = 1;</code>
       */
      public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder> 
           getBlocksOrBuilderList() {
        if (blocksBuilder_ != null) {
          return blocksBuilder_.getMessageOrBuilderList();
        } else {
          return java.util.Collections.unmodifiableList(blocks_);
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.LocatedBlockProto blocks = 1;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder addBlocksBuilder() {
        return getBlocksFieldBuilder().addBuilder(
            org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.getDefaultInstance());
      }
      /**
       * <code>repeated .hadoop.hdfs.LocatedBlockProto blocks = 1;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder addBlocksBuilder(
          int index) {
        return getBlocksFieldBuilder().addBuilder(
            index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.getDefaultInstance());
      }
      /**
       * <code>repeated .hadoop.hdfs.LocatedBlockProto blocks = 1;</code>
       */
      public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder> 
           getBlocksBuilderList() {
        return getBlocksFieldBuilder().getBuilderList();
      }
      private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder> 
          getBlocksFieldBuilder() {
        if (blocksBuilder_ == null) {
          blocksBuilder_ = new org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder>(
                  blocks_,
                  ((bitField0_ & 0x00000001) != 0),
                  getParentForChildren(),
                  isClean());
          blocks_ = null;
        }
        return blocksBuilder_;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.datanode.ReportBadBlocksRequestProto)
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.datanode.ReportBadBlocksRequestProto)
    private static final org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto();
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<ReportBadBlocksRequestProto>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<ReportBadBlocksRequestProto>() {
      @java.lang.Override
      public ReportBadBlocksRequestProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<ReportBadBlocksRequestProto> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<ReportBadBlocksRequestProto> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }
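
  // Illustrative sketch, not emitted by protoc: one typical way client code assembles a
  // ReportBadBlocksRequestProto from an existing list of LocatedBlockProto messages using the
  // generated builder API. The parameter name "badBlocks" is hypothetical.
  private static ReportBadBlocksRequestProto buildReportBadBlocksRequestExample(
      java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto> badBlocks) {
    return ReportBadBlocksRequestProto.newBuilder()
        .addAllBlocks(badBlocks)
        .build();
  }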

  public interface ReportBadBlocksResponseProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.datanode.ReportBadBlocksResponseProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {
  }
  /**
   * <pre>
   **
   * void response
   * </pre>
   *
   * Protobuf type {@code hadoop.hdfs.datanode.ReportBadBlocksResponseProto}
   */
  public static final class ReportBadBlocksResponseProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.hdfs.datanode.ReportBadBlocksResponseProto)
      ReportBadBlocksResponseProtoOrBuilder {
    private static final long serialVersionUID = 0L;
    // Use ReportBadBlocksResponseProto.newBuilder() to construct.
    private ReportBadBlocksResponseProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private ReportBadBlocksResponseProto() {
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new ReportBadBlocksResponseProto();
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_ReportBadBlocksResponseProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_ReportBadBlocksResponseProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto.Builder.class);
    }

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      memoizedIsInitialized = 1;
      return true;
    }

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto) obj;

      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * <pre>
     **
     * void response
     * </pre>
     *
     * Protobuf type {@code hadoop.hdfs.datanode.ReportBadBlocksResponseProto}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.hdfs.datanode.ReportBadBlocksResponseProto)
        org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProtoOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_ReportBadBlocksResponseProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_ReportBadBlocksResponseProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto.newBuilder()
      private Builder() {

      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);

      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_ReportBadBlocksResponseProto_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto build() {
        org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto buildPartial() {
        org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto(this);
        onBuilt();
        return result;
      }

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto) {
          return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto other) {
        if (other == org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto.getDefaultInstance()) return this;
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        return true;
      }

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.datanode.ReportBadBlocksResponseProto)
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.datanode.ReportBadBlocksResponseProto)
    private static final org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto();
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<ReportBadBlocksResponseProto>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<ReportBadBlocksResponseProto>() {
      @java.lang.Override
      public ReportBadBlocksResponseProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<ReportBadBlocksResponseProto> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<ReportBadBlocksResponseProto> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }
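
  // Illustrative sketch, not emitted by protoc: ReportBadBlocksResponseProto is declared as a
  // "void response" with no fields, so an RPC implementation can simply reply with the shared
  // default instance rather than building a new message each time.
  private static ReportBadBlocksResponseProto voidReportBadBlocksResponseExample() {
    return ReportBadBlocksResponseProto.getDefaultInstance();
  }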

  public interface CommitBlockSynchronizationRequestProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.datanode.CommitBlockSynchronizationRequestProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

    /**
     * <code>required .hadoop.hdfs.ExtendedBlockProto block = 1;</code>
     * @return Whether the block field is set.
     */
    boolean hasBlock();
    /**
     * <code>required .hadoop.hdfs.ExtendedBlockProto block = 1;</code>
     * @return The block.
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getBlock();
    /**
     * <code>required .hadoop.hdfs.ExtendedBlockProto block = 1;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder getBlockOrBuilder();

    /**
     * <code>required uint64 newGenStamp = 2;</code>
     * @return Whether the newGenStamp field is set.
     */
    boolean hasNewGenStamp();
    /**
     * <code>required uint64 newGenStamp = 2;</code>
     * @return The newGenStamp.
     */
    long getNewGenStamp();

    /**
     * <code>required uint64 newLength = 3;</code>
     * @return Whether the newLength field is set.
     */
    boolean hasNewLength();
    /**
     * <code>required uint64 newLength = 3;</code>
     * @return The newLength.
     */
    long getNewLength();

    /**
     * <code>required bool closeFile = 4;</code>
     * @return Whether the closeFile field is set.
     */
    boolean hasCloseFile();
    /**
     * <code>required bool closeFile = 4;</code>
     * @return The closeFile.
     */
    boolean getCloseFile();

    /**
     * <code>required bool deleteBlock = 5;</code>
     * @return Whether the deleteBlock field is set.
     */
    boolean hasDeleteBlock();
    /**
     * <code>required bool deleteBlock = 5;</code>
     * @return The deleteBlock.
     */
    boolean getDeleteBlock();

    /**
     * <code>repeated .hadoop.hdfs.DatanodeIDProto newTaragets = 6;</code>
     */
    java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto> 
        getNewTaragetsList();
    /**
     * <code>repeated .hadoop.hdfs.DatanodeIDProto newTaragets = 6;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto getNewTaragets(int index);
    /**
     * <code>repeated .hadoop.hdfs.DatanodeIDProto newTaragets = 6;</code>
     */
    int getNewTaragetsCount();
    /**
     * <code>repeated .hadoop.hdfs.DatanodeIDProto newTaragets = 6;</code>
     */
    java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder> 
        getNewTaragetsOrBuilderList();
    /**
     * <code>repeated .hadoop.hdfs.DatanodeIDProto newTaragets = 6;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder getNewTaragetsOrBuilder(
        int index);

    /**
     * <code>repeated string newTargetStorages = 7;</code>
     * @return A list containing the newTargetStorages.
     */
    java.util.List<java.lang.String>
        getNewTargetStoragesList();
    /**
     * <code>repeated string newTargetStorages = 7;</code>
     * @return The count of newTargetStorages.
     */
    int getNewTargetStoragesCount();
    /**
     * <code>repeated string newTargetStorages = 7;</code>
     * @param index The index of the element to return.
     * @return The newTargetStorages at the given index.
     */
    java.lang.String getNewTargetStorages(int index);
    /**
     * <code>repeated string newTargetStorages = 7;</code>
     * @param index The index of the value to return.
     * @return The bytes of the newTargetStorages at the given index.
     */
    org.apache.hadoop.thirdparty.protobuf.ByteString
        getNewTargetStoragesBytes(int index);
  }
  /**
   * <pre>
   **
   * Commit block synchronization request during lease recovery
   * </pre>
   *
   * Protobuf type {@code hadoop.hdfs.datanode.CommitBlockSynchronizationRequestProto}
   */
  public static final class CommitBlockSynchronizationRequestProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.hdfs.datanode.CommitBlockSynchronizationRequestProto)
      CommitBlockSynchronizationRequestProtoOrBuilder {
    private static final long serialVersionUID = 0L;
    // Use CommitBlockSynchronizationRequestProto.newBuilder() to construct.
    private CommitBlockSynchronizationRequestProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private CommitBlockSynchronizationRequestProto() {
      newTaragets_ = java.util.Collections.emptyList();
      newTargetStorages_ =
          org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList.emptyList();
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new CommitBlockSynchronizationRequestProto();
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_CommitBlockSynchronizationRequestProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_CommitBlockSynchronizationRequestProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto.Builder.class);
    }

    private int bitField0_;
    public static final int BLOCK_FIELD_NUMBER = 1;
    private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto block_;
    /**
     * <code>required .hadoop.hdfs.ExtendedBlockProto block = 1;</code>
     * @return Whether the block field is set.
     */
    @java.lang.Override
    public boolean hasBlock() {
      return ((bitField0_ & 0x00000001) != 0);
    }
    /**
     * <code>required .hadoop.hdfs.ExtendedBlockProto block = 1;</code>
     * @return The block.
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getBlock() {
      return block_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance() : block_;
    }
    /**
     * <code>required .hadoop.hdfs.ExtendedBlockProto block = 1;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder getBlockOrBuilder() {
      return block_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance() : block_;
    }

    public static final int NEWGENSTAMP_FIELD_NUMBER = 2;
    private long newGenStamp_ = 0L;
    /**
     * <code>required uint64 newGenStamp = 2;</code>
     * @return Whether the newGenStamp field is set.
     */
    @java.lang.Override
    public boolean hasNewGenStamp() {
      return ((bitField0_ & 0x00000002) != 0);
    }
    /**
     * <code>required uint64 newGenStamp = 2;</code>
     * @return The newGenStamp.
     */
    @java.lang.Override
    public long getNewGenStamp() {
      return newGenStamp_;
    }

    public static final int NEWLENGTH_FIELD_NUMBER = 3;
    private long newLength_ = 0L;
    /**
     * <code>required uint64 newLength = 3;</code>
     * @return Whether the newLength field is set.
     */
    @java.lang.Override
    public boolean hasNewLength() {
      return ((bitField0_ & 0x00000004) != 0);
    }
    /**
     * <code>required uint64 newLength = 3;</code>
     * @return The newLength.
     */
    @java.lang.Override
    public long getNewLength() {
      return newLength_;
    }

    public static final int CLOSEFILE_FIELD_NUMBER = 4;
    private boolean closeFile_ = false;
    /**
     * <code>required bool closeFile = 4;</code>
     * @return Whether the closeFile field is set.
     */
    @java.lang.Override
    public boolean hasCloseFile() {
      return ((bitField0_ & 0x00000008) != 0);
    }
    /**
     * <code>required bool closeFile = 4;</code>
     * @return The closeFile.
     */
    @java.lang.Override
    public boolean getCloseFile() {
      return closeFile_;
    }

    public static final int DELETEBLOCK_FIELD_NUMBER = 5;
    private boolean deleteBlock_ = false;
    /**
     * <code>required bool deleteBlock = 5;</code>
     * @return Whether the deleteBlock field is set.
     */
    @java.lang.Override
    public boolean hasDeleteBlock() {
      return ((bitField0_ & 0x00000010) != 0);
    }
    /**
     * <code>required bool deleteBlock = 5;</code>
     * @return The deleteBlock.
     */
    @java.lang.Override
    public boolean getDeleteBlock() {
      return deleteBlock_;
    }

    public static final int NEWTARAGETS_FIELD_NUMBER = 6;
    @SuppressWarnings("serial")
    private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto> newTaragets_;
    /**
     * <code>repeated .hadoop.hdfs.DatanodeIDProto newTaragets = 6;</code>
     */
    @java.lang.Override
    public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto> getNewTaragetsList() {
      return newTaragets_;
    }
    /**
     * <code>repeated .hadoop.hdfs.DatanodeIDProto newTaragets = 6;</code>
     */
    @java.lang.Override
    public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder> 
        getNewTaragetsOrBuilderList() {
      return newTaragets_;
    }
    /**
     * <code>repeated .hadoop.hdfs.DatanodeIDProto newTaragets = 6;</code>
     */
    @java.lang.Override
    public int getNewTaragetsCount() {
      return newTaragets_.size();
    }
    /**
     * <code>repeated .hadoop.hdfs.DatanodeIDProto newTaragets = 6;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto getNewTaragets(int index) {
      return newTaragets_.get(index);
    }
    /**
     * <code>repeated .hadoop.hdfs.DatanodeIDProto newTaragets = 6;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder getNewTaragetsOrBuilder(
        int index) {
      return newTaragets_.get(index);
    }

    public static final int NEWTARGETSTORAGES_FIELD_NUMBER = 7;
    @SuppressWarnings("serial")
    private org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList newTargetStorages_ =
        org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList.emptyList();
    /**
     * <code>repeated string newTargetStorages = 7;</code>
     * @return A list containing the newTargetStorages.
     */
    public org.apache.hadoop.thirdparty.protobuf.ProtocolStringList
        getNewTargetStoragesList() {
      return newTargetStorages_;
    }
    /**
     * <code>repeated string newTargetStorages = 7;</code>
     * @return The count of newTargetStorages.
     */
    public int getNewTargetStoragesCount() {
      return newTargetStorages_.size();
    }
    /**
     * <code>repeated string newTargetStorages = 7;</code>
     * @param index The index of the element to return.
     * @return The newTargetStorages at the given index.
     */
    public java.lang.String getNewTargetStorages(int index) {
      return newTargetStorages_.get(index);
    }
    /**
     * <code>repeated string newTargetStorages = 7;</code>
     * @param index The index of the value to return.
     * @return The bytes of the newTargetStorages at the given index.
     */
    public org.apache.hadoop.thirdparty.protobuf.ByteString
        getNewTargetStoragesBytes(int index) {
      return newTargetStorages_.getByteString(index);
    }

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      if (!hasBlock()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasNewGenStamp()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasNewLength()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasCloseFile()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasDeleteBlock()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!getBlock().isInitialized()) {
        memoizedIsInitialized = 0;
        return false;
      }
      for (int i = 0; i < getNewTaragetsCount(); i++) {
        if (!getNewTaragets(i).isInitialized()) {
          memoizedIsInitialized = 0;
          return false;
        }
      }
      memoizedIsInitialized = 1;
      return true;
    }

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      if (((bitField0_ & 0x00000001) != 0)) {
        output.writeMessage(1, getBlock());
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        output.writeUInt64(2, newGenStamp_);
      }
      if (((bitField0_ & 0x00000004) != 0)) {
        output.writeUInt64(3, newLength_);
      }
      if (((bitField0_ & 0x00000008) != 0)) {
        output.writeBool(4, closeFile_);
      }
      if (((bitField0_ & 0x00000010) != 0)) {
        output.writeBool(5, deleteBlock_);
      }
      for (int i = 0; i < newTaragets_.size(); i++) {
        output.writeMessage(6, newTaragets_.get(i));
      }
      for (int i = 0; i < newTargetStorages_.size(); i++) {
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 7, newTargetStorages_.getRaw(i));
      }
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeMessageSize(1, getBlock());
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt64Size(2, newGenStamp_);
      }
      if (((bitField0_ & 0x00000004) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt64Size(3, newLength_);
      }
      if (((bitField0_ & 0x00000008) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeBoolSize(4, closeFile_);
      }
      if (((bitField0_ & 0x00000010) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeBoolSize(5, deleteBlock_);
      }
      for (int i = 0; i < newTaragets_.size(); i++) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeMessageSize(6, newTaragets_.get(i));
      }
      {
        int dataSize = 0;
        for (int i = 0; i < newTargetStorages_.size(); i++) {
          dataSize += computeStringSizeNoTag(newTargetStorages_.getRaw(i));
        }
        size += dataSize;
        size += 1 * getNewTargetStoragesList().size();
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }
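    // Illustrative sketch (not generated code): writeTo() and getSerializedSize()
    // above back the usual protobuf serialization entry points. A minimal example,
    // assuming a fully built "request" instance is in scope (writeTo(OutputStream)
    // declares java.io.IOException):
    //
    //   byte[] wire = request.toByteArray();   // sized via getSerializedSize()
    //   java.io.ByteArrayOutputStream out = new java.io.ByteArrayOutputStream();
    //   request.writeTo(out);                  // streams the same bytes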

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto) obj;

      if (hasBlock() != other.hasBlock()) return false;
      if (hasBlock()) {
        if (!getBlock()
            .equals(other.getBlock())) return false;
      }
      if (hasNewGenStamp() != other.hasNewGenStamp()) return false;
      if (hasNewGenStamp()) {
        if (getNewGenStamp()
            != other.getNewGenStamp()) return false;
      }
      if (hasNewLength() != other.hasNewLength()) return false;
      if (hasNewLength()) {
        if (getNewLength()
            != other.getNewLength()) return false;
      }
      if (hasCloseFile() != other.hasCloseFile()) return false;
      if (hasCloseFile()) {
        if (getCloseFile()
            != other.getCloseFile()) return false;
      }
      if (hasDeleteBlock() != other.hasDeleteBlock()) return false;
      if (hasDeleteBlock()) {
        if (getDeleteBlock()
            != other.getDeleteBlock()) return false;
      }
      if (!getNewTaragetsList()
          .equals(other.getNewTaragetsList())) return false;
      if (!getNewTargetStoragesList()
          .equals(other.getNewTargetStoragesList())) return false;
      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      if (hasBlock()) {
        hash = (37 * hash) + BLOCK_FIELD_NUMBER;
        hash = (53 * hash) + getBlock().hashCode();
      }
      if (hasNewGenStamp()) {
        hash = (37 * hash) + NEWGENSTAMP_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
            getNewGenStamp());
      }
      if (hasNewLength()) {
        hash = (37 * hash) + NEWLENGTH_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
            getNewLength());
      }
      if (hasCloseFile()) {
        hash = (37 * hash) + CLOSEFILE_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashBoolean(
            getCloseFile());
      }
      if (hasDeleteBlock()) {
        hash = (37 * hash) + DELETEBLOCK_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashBoolean(
            getDeleteBlock());
      }
      if (getNewTaragetsCount() > 0) {
        hash = (37 * hash) + NEWTARAGETS_FIELD_NUMBER;
        hash = (53 * hash) + getNewTaragetsList().hashCode();
      }
      if (getNewTargetStoragesCount() > 0) {
        hash = (37 * hash) + NEWTARGETSTORAGES_FIELD_NUMBER;
        hash = (53 * hash) + getNewTargetStoragesList().hashCode();
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }
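    // Illustrative sketch (not generated code): round-tripping through the
    // parseFrom overloads above. "wire" is a hypothetical byte[] produced by
    // toByteArray(); parsing rejects input that is missing any of the required
    // fields (block, newGenStamp, newLength, closeFile, deleteBlock) by throwing
    // InvalidProtocolBufferException.
    //
    //   CommitBlockSynchronizationRequestProto parsed =
    //       CommitBlockSynchronizationRequestProto.parseFrom(wire);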

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }
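    // Illustrative sketch (not generated code): toBuilder() copies an existing
    // message so individual fields can be replaced while the original stays
    // immutable. "request" is a hypothetical existing instance.
    //
    //   CommitBlockSynchronizationRequestProto closed = request.toBuilder()
    //       .setCloseFile(true)
    //       .build();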

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * <pre>
     **
     * Commit block synchronization request during lease recovery
     * </pre>
     *
     * Protobuf type {@code hadoop.hdfs.datanode.CommitBlockSynchronizationRequestProto}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.hdfs.datanode.CommitBlockSynchronizationRequestProto)
        org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProtoOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_CommitBlockSynchronizationRequestProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_CommitBlockSynchronizationRequestProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      private void maybeForceBuilderInitialization() {
        if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
                .alwaysUseFieldBuilders) {
          getBlockFieldBuilder();
          getNewTaragetsFieldBuilder();
        }
      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        bitField0_ = 0;
        block_ = null;
        if (blockBuilder_ != null) {
          blockBuilder_.dispose();
          blockBuilder_ = null;
        }
        newGenStamp_ = 0L;
        newLength_ = 0L;
        closeFile_ = false;
        deleteBlock_ = false;
        if (newTaragetsBuilder_ == null) {
          newTaragets_ = java.util.Collections.emptyList();
        } else {
          newTaragets_ = null;
          newTaragetsBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000020);
        newTargetStorages_ =
            org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList.emptyList();
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_CommitBlockSynchronizationRequestProto_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto build() {
        org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto buildPartial() {
        org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto(this);
        buildPartialRepeatedFields(result);
        if (bitField0_ != 0) { buildPartial0(result); }
        onBuilt();
        return result;
      }

      private void buildPartialRepeatedFields(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto result) {
        if (newTaragetsBuilder_ == null) {
          if (((bitField0_ & 0x00000020) != 0)) {
            newTaragets_ = java.util.Collections.unmodifiableList(newTaragets_);
            bitField0_ = (bitField0_ & ~0x00000020);
          }
          result.newTaragets_ = newTaragets_;
        } else {
          result.newTaragets_ = newTaragetsBuilder_.build();
        }
      }

      private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto result) {
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) != 0)) {
          result.block_ = blockBuilder_ == null
              ? block_
              : blockBuilder_.build();
          to_bitField0_ |= 0x00000001;
        }
        if (((from_bitField0_ & 0x00000002) != 0)) {
          result.newGenStamp_ = newGenStamp_;
          to_bitField0_ |= 0x00000002;
        }
        if (((from_bitField0_ & 0x00000004) != 0)) {
          result.newLength_ = newLength_;
          to_bitField0_ |= 0x00000004;
        }
        if (((from_bitField0_ & 0x00000008) != 0)) {
          result.closeFile_ = closeFile_;
          to_bitField0_ |= 0x00000008;
        }
        if (((from_bitField0_ & 0x00000010) != 0)) {
          result.deleteBlock_ = deleteBlock_;
          to_bitField0_ |= 0x00000010;
        }
        if (((from_bitField0_ & 0x00000040) != 0)) {
          newTargetStorages_.makeImmutable();
          result.newTargetStorages_ = newTargetStorages_;
        }
        result.bitField0_ |= to_bitField0_;
      }

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto) {
          return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto other) {
        if (other == org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto.getDefaultInstance()) return this;
        if (other.hasBlock()) {
          mergeBlock(other.getBlock());
        }
        if (other.hasNewGenStamp()) {
          setNewGenStamp(other.getNewGenStamp());
        }
        if (other.hasNewLength()) {
          setNewLength(other.getNewLength());
        }
        if (other.hasCloseFile()) {
          setCloseFile(other.getCloseFile());
        }
        if (other.hasDeleteBlock()) {
          setDeleteBlock(other.getDeleteBlock());
        }
        if (newTaragetsBuilder_ == null) {
          if (!other.newTaragets_.isEmpty()) {
            if (newTaragets_.isEmpty()) {
              newTaragets_ = other.newTaragets_;
              bitField0_ = (bitField0_ & ~0x00000020);
            } else {
              ensureNewTaragetsIsMutable();
              newTaragets_.addAll(other.newTaragets_);
            }
            onChanged();
          }
        } else {
          if (!other.newTaragets_.isEmpty()) {
            if (newTaragetsBuilder_.isEmpty()) {
              newTaragetsBuilder_.dispose();
              newTaragetsBuilder_ = null;
              newTaragets_ = other.newTaragets_;
              bitField0_ = (bitField0_ & ~0x00000020);
              newTaragetsBuilder_ = 
                org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ?
                   getNewTaragetsFieldBuilder() : null;
            } else {
              newTaragetsBuilder_.addAllMessages(other.newTaragets_);
            }
          }
        }
        if (!other.newTargetStorages_.isEmpty()) {
          if (newTargetStorages_.isEmpty()) {
            newTargetStorages_ = other.newTargetStorages_;
            bitField0_ |= 0x00000040;
          } else {
            ensureNewTargetStoragesIsMutable();
            newTargetStorages_.addAll(other.newTargetStorages_);
          }
          onChanged();
        }
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        if (!hasBlock()) {
          return false;
        }
        if (!hasNewGenStamp()) {
          return false;
        }
        if (!hasNewLength()) {
          return false;
        }
        if (!hasCloseFile()) {
          return false;
        }
        if (!hasDeleteBlock()) {
          return false;
        }
        if (!getBlock().isInitialized()) {
          return false;
        }
        for (int i = 0; i < getNewTaragetsCount(); i++) {
          if (!getNewTaragets(i).isInitialized()) {
            return false;
          }
        }
        return true;
      }

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              case 10: {
                input.readMessage(
                    getBlockFieldBuilder().getBuilder(),
                    extensionRegistry);
                bitField0_ |= 0x00000001;
                break;
              } // case 10
              case 16: {
                newGenStamp_ = input.readUInt64();
                bitField0_ |= 0x00000002;
                break;
              } // case 16
              case 24: {
                newLength_ = input.readUInt64();
                bitField0_ |= 0x00000004;
                break;
              } // case 24
              case 32: {
                closeFile_ = input.readBool();
                bitField0_ |= 0x00000008;
                break;
              } // case 32
              case 40: {
                deleteBlock_ = input.readBool();
                bitField0_ |= 0x00000010;
                break;
              } // case 40
              case 50: {
                org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto m =
                    input.readMessage(
                        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.PARSER,
                        extensionRegistry);
                if (newTaragetsBuilder_ == null) {
                  ensureNewTaragetsIsMutable();
                  newTaragets_.add(m);
                } else {
                  newTaragetsBuilder_.addMessage(m);
                }
                break;
              } // case 50
              case 58: {
                org.apache.hadoop.thirdparty.protobuf.ByteString bs = input.readBytes();
                ensureNewTargetStoragesIsMutable();
                newTargetStorages_.add(bs);
                break;
              } // case 58
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
      private int bitField0_;

      private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto block_;
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder> blockBuilder_;
      /**
       * <code>required .hadoop.hdfs.ExtendedBlockProto block = 1;</code>
       * @return Whether the block field is set.
       */
      public boolean hasBlock() {
        return ((bitField0_ & 0x00000001) != 0);
      }
      /**
       * <code>required .hadoop.hdfs.ExtendedBlockProto block = 1;</code>
       * @return The block.
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getBlock() {
        if (blockBuilder_ == null) {
          return block_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance() : block_;
        } else {
          return blockBuilder_.getMessage();
        }
      }
      /**
       * <code>required .hadoop.hdfs.ExtendedBlockProto block = 1;</code>
       */
      public Builder setBlock(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto value) {
        if (blockBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          block_ = value;
        } else {
          blockBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.ExtendedBlockProto block = 1;</code>
       */
      public Builder setBlock(
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder builderForValue) {
        if (blockBuilder_ == null) {
          block_ = builderForValue.build();
        } else {
          blockBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.ExtendedBlockProto block = 1;</code>
       */
      public Builder mergeBlock(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto value) {
        if (blockBuilder_ == null) {
          if (((bitField0_ & 0x00000001) != 0) &&
            block_ != null &&
            block_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance()) {
            getBlockBuilder().mergeFrom(value);
          } else {
            block_ = value;
          }
        } else {
          blockBuilder_.mergeFrom(value);
        }
        if (block_ != null) {
          bitField0_ |= 0x00000001;
          onChanged();
        }
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.ExtendedBlockProto block = 1;</code>
       */
      public Builder clearBlock() {
        bitField0_ = (bitField0_ & ~0x00000001);
        block_ = null;
        if (blockBuilder_ != null) {
          blockBuilder_.dispose();
          blockBuilder_ = null;
        }
        onChanged();
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.ExtendedBlockProto block = 1;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder getBlockBuilder() {
        bitField0_ |= 0x00000001;
        onChanged();
        return getBlockFieldBuilder().getBuilder();
      }
      /**
       * <code>required .hadoop.hdfs.ExtendedBlockProto block = 1;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder getBlockOrBuilder() {
        if (blockBuilder_ != null) {
          return blockBuilder_.getMessageOrBuilder();
        } else {
          return block_ == null ?
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance() : block_;
        }
      }
      /**
       * <code>required .hadoop.hdfs.ExtendedBlockProto block = 1;</code>
       */
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder> 
          getBlockFieldBuilder() {
        if (blockBuilder_ == null) {
          blockBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder>(
                  getBlock(),
                  getParentForChildren(),
                  isClean());
          block_ = null;
        }
        return blockBuilder_;
      }
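
      // Illustrative sketch (not generated code): the required block field can be
      // set from a prebuilt ExtendedBlockProto or edited in place through
      // getBlockBuilder(). Field values below are placeholders, and the
      // ExtendedBlockProto accessors are assumed from HdfsProtos.
      //
      //   builder.setBlock(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos
      //       .ExtendedBlockProto.newBuilder()
      //       .setPoolId("BP-example")
      //       .setBlockId(1073741825L)
      //       .setGenerationStamp(1001L)
      //       .build());
      //   builder.getBlockBuilder().setNumBytes(4096L);  // edit the nested builder in place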

      private long newGenStamp_ ;
      /**
       * <code>required uint64 newGenStamp = 2;</code>
       * @return Whether the newGenStamp field is set.
       */
      @java.lang.Override
      public boolean hasNewGenStamp() {
        return ((bitField0_ & 0x00000002) != 0);
      }
      /**
       * <code>required uint64 newGenStamp = 2;</code>
       * @return The newGenStamp.
       */
      @java.lang.Override
      public long getNewGenStamp() {
        return newGenStamp_;
      }
      /**
       * <code>required uint64 newGenStamp = 2;</code>
       * @param value The newGenStamp to set.
       * @return This builder for chaining.
       */
      public Builder setNewGenStamp(long value) {

        newGenStamp_ = value;
        bitField0_ |= 0x00000002;
        onChanged();
        return this;
      }
      /**
       * <code>required uint64 newGenStamp = 2;</code>
       * @return This builder for chaining.
       */
      public Builder clearNewGenStamp() {
        bitField0_ = (bitField0_ & ~0x00000002);
        newGenStamp_ = 0L;
        onChanged();
        return this;
      }

      private long newLength_ ;
      /**
       * <code>required uint64 newLength = 3;</code>
       * @return Whether the newLength field is set.
       */
      @java.lang.Override
      public boolean hasNewLength() {
        return ((bitField0_ & 0x00000004) != 0);
      }
      /**
       * <code>required uint64 newLength = 3;</code>
       * @return The newLength.
       */
      @java.lang.Override
      public long getNewLength() {
        return newLength_;
      }
      /**
       * <code>required uint64 newLength = 3;</code>
       * @param value The newLength to set.
       * @return This builder for chaining.
       */
      public Builder setNewLength(long value) {

        newLength_ = value;
        bitField0_ |= 0x00000004;
        onChanged();
        return this;
      }
      /**
       * <code>required uint64 newLength = 3;</code>
       * @return This builder for chaining.
       */
      public Builder clearNewLength() {
        bitField0_ = (bitField0_ & ~0x00000004);
        newLength_ = 0L;
        onChanged();
        return this;
      }

      private boolean closeFile_ ;
      /**
       * <code>required bool closeFile = 4;</code>
       * @return Whether the closeFile field is set.
       */
      @java.lang.Override
      public boolean hasCloseFile() {
        return ((bitField0_ & 0x00000008) != 0);
      }
      /**
       * <code>required bool closeFile = 4;</code>
       * @return The closeFile.
       */
      @java.lang.Override
      public boolean getCloseFile() {
        return closeFile_;
      }
      /**
       * <code>required bool closeFile = 4;</code>
       * @param value The closeFile to set.
       * @return This builder for chaining.
       */
      public Builder setCloseFile(boolean value) {

        closeFile_ = value;
        bitField0_ |= 0x00000008;
        onChanged();
        return this;
      }
      /**
       * <code>required bool closeFile = 4;</code>
       * @return This builder for chaining.
       */
      public Builder clearCloseFile() {
        bitField0_ = (bitField0_ & ~0x00000008);
        closeFile_ = false;
        onChanged();
        return this;
      }

      private boolean deleteBlock_ ;
      /**
       * <code>required bool deleteBlock = 5;</code>
       * @return Whether the deleteBlock field is set.
       */
      @java.lang.Override
      public boolean hasDeleteBlock() {
        return ((bitField0_ & 0x00000010) != 0);
      }
      /**
       * <code>required bool deleteBlock = 5;</code>
       * @return The deleteBlock.
       */
      @java.lang.Override
      public boolean getDeleteBlock() {
        return deleteBlock_;
      }
      /**
       * <code>required bool deleteBlock = 5;</code>
       * @param value The deleteBlock to set.
       * @return This builder for chaining.
       */
      public Builder setDeleteBlock(boolean value) {

        deleteBlock_ = value;
        bitField0_ |= 0x00000010;
        onChanged();
        return this;
      }
      /**
       * <code>required bool deleteBlock = 5;</code>
       * @return This builder for chaining.
       */
      public Builder clearDeleteBlock() {
        bitField0_ = (bitField0_ & ~0x00000010);
        deleteBlock_ = false;
        onChanged();
        return this;
      }

      private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto> newTaragets_ =
        java.util.Collections.emptyList();
      private void ensureNewTaragetsIsMutable() {
        if (!((bitField0_ & 0x00000020) != 0)) {
          newTaragets_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto>(newTaragets_);
          bitField0_ |= 0x00000020;
         }
      }

      private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder> newTaragetsBuilder_;

      /**
       * <code>repeated .hadoop.hdfs.DatanodeIDProto newTaragets = 6;</code>
       */
      public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto> getNewTaragetsList() {
        if (newTaragetsBuilder_ == null) {
          return java.util.Collections.unmodifiableList(newTaragets_);
        } else {
          return newTaragetsBuilder_.getMessageList();
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.DatanodeIDProto newTaragets = 6;</code>
       */
      public int getNewTaragetsCount() {
        if (newTaragetsBuilder_ == null) {
          return newTaragets_.size();
        } else {
          return newTaragetsBuilder_.getCount();
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.DatanodeIDProto newTaragets = 6;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto getNewTaragets(int index) {
        if (newTaragetsBuilder_ == null) {
          return newTaragets_.get(index);
        } else {
          return newTaragetsBuilder_.getMessage(index);
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.DatanodeIDProto newTaragets = 6;</code>
       */
      public Builder setNewTaragets(
          int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto value) {
        if (newTaragetsBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          ensureNewTaragetsIsMutable();
          newTaragets_.set(index, value);
          onChanged();
        } else {
          newTaragetsBuilder_.setMessage(index, value);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.DatanodeIDProto newTaragets = 6;</code>
       */
      public Builder setNewTaragets(
          int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder builderForValue) {
        if (newTaragetsBuilder_ == null) {
          ensureNewTaragetsIsMutable();
          newTaragets_.set(index, builderForValue.build());
          onChanged();
        } else {
          newTaragetsBuilder_.setMessage(index, builderForValue.build());
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.DatanodeIDProto newTaragets = 6;</code>
       */
      public Builder addNewTaragets(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto value) {
        if (newTaragetsBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          ensureNewTaragetsIsMutable();
          newTaragets_.add(value);
          onChanged();
        } else {
          newTaragetsBuilder_.addMessage(value);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.DatanodeIDProto newTaragets = 6;</code>
       */
      public Builder addNewTaragets(
          int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto value) {
        if (newTaragetsBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          ensureNewTaragetsIsMutable();
          newTaragets_.add(index, value);
          onChanged();
        } else {
          newTaragetsBuilder_.addMessage(index, value);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.DatanodeIDProto newTaragets = 6;</code>
       */
      public Builder addNewTaragets(
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder builderForValue) {
        if (newTaragetsBuilder_ == null) {
          ensureNewTaragetsIsMutable();
          newTaragets_.add(builderForValue.build());
          onChanged();
        } else {
          newTaragetsBuilder_.addMessage(builderForValue.build());
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.DatanodeIDProto newTaragets = 6;</code>
       */
      public Builder addNewTaragets(
          int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder builderForValue) {
        if (newTaragetsBuilder_ == null) {
          ensureNewTaragetsIsMutable();
          newTaragets_.add(index, builderForValue.build());
          onChanged();
        } else {
          newTaragetsBuilder_.addMessage(index, builderForValue.build());
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.DatanodeIDProto newTaragets = 6;</code>
       */
      public Builder addAllNewTaragets(
          java.lang.Iterable<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto> values) {
        if (newTaragetsBuilder_ == null) {
          ensureNewTaragetsIsMutable();
          org.apache.hadoop.thirdparty.protobuf.AbstractMessageLite.Builder.addAll(
              values, newTaragets_);
          onChanged();
        } else {
          newTaragetsBuilder_.addAllMessages(values);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.DatanodeIDProto newTaragets = 6;</code>
       */
      public Builder clearNewTaragets() {
        if (newTaragetsBuilder_ == null) {
          newTaragets_ = java.util.Collections.emptyList();
          bitField0_ = (bitField0_ & ~0x00000020);
          onChanged();
        } else {
          newTaragetsBuilder_.clear();
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.DatanodeIDProto newTaragets = 6;</code>
       */
      public Builder removeNewTaragets(int index) {
        if (newTaragetsBuilder_ == null) {
          ensureNewTaragetsIsMutable();
          newTaragets_.remove(index);
          onChanged();
        } else {
          newTaragetsBuilder_.remove(index);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.DatanodeIDProto newTaragets = 6;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder getNewTaragetsBuilder(
          int index) {
        return getNewTaragetsFieldBuilder().getBuilder(index);
      }
      /**
       * <code>repeated .hadoop.hdfs.DatanodeIDProto newTaragets = 6;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder getNewTaragetsOrBuilder(
          int index) {
        if (newTaragetsBuilder_ == null) {
          return newTaragets_.get(index);
        } else {
          return newTaragetsBuilder_.getMessageOrBuilder(index);
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.DatanodeIDProto newTaragets = 6;</code>
       */
      public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder> 
           getNewTaragetsOrBuilderList() {
        if (newTaragetsBuilder_ != null) {
          return newTaragetsBuilder_.getMessageOrBuilderList();
        } else {
          return java.util.Collections.unmodifiableList(newTaragets_);
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.DatanodeIDProto newTaragets = 6;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder addNewTaragetsBuilder() {
        return getNewTaragetsFieldBuilder().addBuilder(
            org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.getDefaultInstance());
      }
      /**
       * <code>repeated .hadoop.hdfs.DatanodeIDProto newTaragets = 6;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder addNewTaragetsBuilder(
          int index) {
        return getNewTaragetsFieldBuilder().addBuilder(
            index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.getDefaultInstance());
      }
      /**
       * <code>repeated .hadoop.hdfs.DatanodeIDProto newTaragets = 6;</code>
       */
      public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder> 
           getNewTaragetsBuilderList() {
        return getNewTaragetsFieldBuilder().getBuilderList();
      }
      private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder> 
          getNewTaragetsFieldBuilder() {
        if (newTaragetsBuilder_ == null) {
          newTaragetsBuilder_ = new org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder>(
                  newTaragets_,
                  ((bitField0_ & 0x00000020) != 0),
                  getParentForChildren(),
                  isClean());
          newTaragets_ = null;
        }
        return newTaragetsBuilder_;
      }
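
      // Illustrative sketch (not generated code): populating the repeated
      // newTaragets field (the spelling mirrors DatanodeProtocol.proto).
      // "targetId" is a hypothetical, fully built HdfsProtos.DatanodeIDProto.
      //
      //   builder.addNewTaragets(targetId);
      //   builder.addNewTaragetsBuilder()      // or start a new element and edit it in place
      //       .mergeFrom(targetId);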

      private org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList newTargetStorages_ =
          org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList.emptyList();
      private void ensureNewTargetStoragesIsMutable() {
        if (!newTargetStorages_.isModifiable()) {
          newTargetStorages_ = new org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList(newTargetStorages_);
        }
        bitField0_ |= 0x00000040;
      }
      /**
       * <code>repeated string newTargetStorages = 7;</code>
       * @return A list containing the newTargetStorages.
       */
      public org.apache.hadoop.thirdparty.protobuf.ProtocolStringList
          getNewTargetStoragesList() {
        newTargetStorages_.makeImmutable();
        return newTargetStorages_;
      }
      /**
       * <code>repeated string newTargetStorages = 7;</code>
       * @return The count of newTargetStorages.
       */
      public int getNewTargetStoragesCount() {
        return newTargetStorages_.size();
      }
      /**
       * <code>repeated string newTargetStorages = 7;</code>
       * @param index The index of the element to return.
       * @return The newTargetStorages at the given index.
       */
      public java.lang.String getNewTargetStorages(int index) {
        return newTargetStorages_.get(index);
      }
      /**
       * <code>repeated string newTargetStorages = 7;</code>
       * @param index The index of the value to return.
       * @return The bytes of the newTargetStorages at the given index.
       */
      public org.apache.hadoop.thirdparty.protobuf.ByteString
          getNewTargetStoragesBytes(int index) {
        return newTargetStorages_.getByteString(index);
      }
      /**
       * <code>repeated string newTargetStorages = 7;</code>
       * @param index The index to set the value at.
       * @param value The newTargetStorages to set.
       * @return This builder for chaining.
       */
      public Builder setNewTargetStorages(
          int index, java.lang.String value) {
        if (value == null) { throw new NullPointerException(); }
        ensureNewTargetStoragesIsMutable();
        newTargetStorages_.set(index, value);
        bitField0_ |= 0x00000040;
        onChanged();
        return this;
      }
      /**
       * <code>repeated string newTargetStorages = 7;</code>
       * @param value The newTargetStorages to add.
       * @return This builder for chaining.
       */
      public Builder addNewTargetStorages(
          java.lang.String value) {
        if (value == null) { throw new NullPointerException(); }
        ensureNewTargetStoragesIsMutable();
        newTargetStorages_.add(value);
        bitField0_ |= 0x00000040;
        onChanged();
        return this;
      }
      /**
       * <code>repeated string newTargetStorages = 7;</code>
       * @param values The newTargetStorages to add.
       * @return This builder for chaining.
       */
      public Builder addAllNewTargetStorages(
          java.lang.Iterable<java.lang.String> values) {
        ensureNewTargetStoragesIsMutable();
        org.apache.hadoop.thirdparty.protobuf.AbstractMessageLite.Builder.addAll(
            values, newTargetStorages_);
        bitField0_ |= 0x00000040;
        onChanged();
        return this;
      }
      /**
       * <code>repeated string newTargetStorages = 7;</code>
       * @return This builder for chaining.
       */
      public Builder clearNewTargetStorages() {
        newTargetStorages_ =
          org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList.emptyList();
        bitField0_ = (bitField0_ & ~0x00000040);
        onChanged();
        return this;
      }
      /**
       * <code>repeated string newTargetStorages = 7;</code>
       * @param value The bytes of the newTargetStorages to add.
       * @return This builder for chaining.
       */
      public Builder addNewTargetStoragesBytes(
          org.apache.hadoop.thirdparty.protobuf.ByteString value) {
        if (value == null) { throw new NullPointerException(); }
        ensureNewTargetStoragesIsMutable();
        newTargetStorages_.add(value);
        bitField0_ |= 0x00000040;
        onChanged();
        return this;
      }
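      // Illustrative sketch (not generated code): newTargetStorages is intended to
      // parallel newTaragets, one storage ID string per target datanode. The
      // storage ID values below are placeholders.
      //
      //   builder.addNewTargetStorages("DS-example-storage-id");
      //   builder.addAllNewTargetStorages(java.util.Arrays.asList("DS-a", "DS-b"));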
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.datanode.CommitBlockSynchronizationRequestProto)
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.datanode.CommitBlockSynchronizationRequestProto)
    private static final org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto();
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<CommitBlockSynchronizationRequestProto>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<CommitBlockSynchronizationRequestProto>() {
      @java.lang.Override
      public CommitBlockSynchronizationRequestProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<CommitBlockSynchronizationRequestProto> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<CommitBlockSynchronizationRequestProto> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }
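
  // Illustrative sketch (not generated; it assumes nothing beyond the builder methods
  // shown above): exercising the repeated "newTargetStorages" helpers. Only this one
  // repeated field is populated, so buildPartial() is used instead of build(); the
  // storage IDs are hypothetical values.
  private static CommitBlockSynchronizationRequestProto exampleNewTargetStoragesUsage() {
    return CommitBlockSynchronizationRequestProto.newBuilder()
        .addNewTargetStorages("DS-1")                  // add one storage ID
        .addAllNewTargetStorages(
            java.util.Arrays.asList("DS-2", "DS-3"))   // add several at once
        .buildPartial();                               // required fields left unset here
  }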

  public interface CommitBlockSynchronizationResponseProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.datanode.CommitBlockSynchronizationResponseProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {
  }
  /**
   * <pre>
   **
   * void response
   * </pre>
   *
   * Protobuf type {@code hadoop.hdfs.datanode.CommitBlockSynchronizationResponseProto}
   */
  public static final class CommitBlockSynchronizationResponseProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.hdfs.datanode.CommitBlockSynchronizationResponseProto)
      CommitBlockSynchronizationResponseProtoOrBuilder {
  private static final long serialVersionUID = 0L;
    // Use CommitBlockSynchronizationResponseProto.newBuilder() to construct.
    private CommitBlockSynchronizationResponseProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private CommitBlockSynchronizationResponseProto() {
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new CommitBlockSynchronizationResponseProto();
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_CommitBlockSynchronizationResponseProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_CommitBlockSynchronizationResponseProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto.Builder.class);
    }

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      memoizedIsInitialized = 1;
      return true;
    }

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
       return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto) obj;

      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * <pre>
     **
     * void response
     * </pre>
     *
     * Protobuf type {@code hadoop.hdfs.datanode.CommitBlockSynchronizationResponseProto}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.hdfs.datanode.CommitBlockSynchronizationResponseProto)
        org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProtoOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_CommitBlockSynchronizationResponseProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_CommitBlockSynchronizationResponseProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto.newBuilder()
      private Builder() {

      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);

      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_CommitBlockSynchronizationResponseProto_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto build() {
        org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto buildPartial() {
        org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto(this);
        onBuilt();
        return result;
      }

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto) {
          return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto other) {
        if (other == org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto.getDefaultInstance()) return this;
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        return true;
      }

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.datanode.CommitBlockSynchronizationResponseProto)
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.datanode.CommitBlockSynchronizationResponseProto)
    private static final org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto();
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<CommitBlockSynchronizationResponseProto>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<CommitBlockSynchronizationResponseProto>() {
      @java.lang.Override
      public CommitBlockSynchronizationResponseProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<CommitBlockSynchronizationResponseProto> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<CommitBlockSynchronizationResponseProto> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }
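
  // Illustrative note (not generated): CommitBlockSynchronizationResponseProto is a void
  // response with no fields, so the shared default instance is the natural value for a
  // commitBlockSynchronization handler to return. A minimal sketch:
  private static CommitBlockSynchronizationResponseProto exampleVoidResponse() {
    return CommitBlockSynchronizationResponseProto.getDefaultInstance();
  }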

  public interface SlowPeerReportProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.datanode.SlowPeerReportProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

    /**
     * <code>optional string dataNodeId = 1;</code>
     * @return Whether the dataNodeId field is set.
     */
    boolean hasDataNodeId();
    /**
     * <code>optional string dataNodeId = 1;</code>
     * @return The dataNodeId.
     */
    java.lang.String getDataNodeId();
    /**
     * <code>optional string dataNodeId = 1;</code>
     * @return The bytes for dataNodeId.
     */
    org.apache.hadoop.thirdparty.protobuf.ByteString
        getDataNodeIdBytes();

    /**
     * <code>optional double aggregateLatency = 2;</code>
     * @return Whether the aggregateLatency field is set.
     */
    boolean hasAggregateLatency();
    /**
     * <code>optional double aggregateLatency = 2;</code>
     * @return The aggregateLatency.
     */
    double getAggregateLatency();

    /**
     * <code>optional double median = 3;</code>
     * @return Whether the median field is set.
     */
    boolean hasMedian();
    /**
     * <code>optional double median = 3;</code>
     * @return The median.
     */
    double getMedian();

    /**
     * <code>optional double mad = 4;</code>
     * @return Whether the mad field is set.
     */
    boolean hasMad();
    /**
     * <code>optional double mad = 4;</code>
     * @return The mad.
     */
    double getMad();

    /**
     * <code>optional double upperLimitLatency = 5;</code>
     * @return Whether the upperLimitLatency field is set.
     */
    boolean hasUpperLimitLatency();
    /**
     * <code>optional double upperLimitLatency = 5;</code>
     * @return The upperLimitLatency.
     */
    double getUpperLimitLatency();
  }
  /**
   * <pre>
   **
   * Information about a single slow peer that may be reported by
   * the DataNode to the NameNode as part of the heartbeat request.
   * The message includes the peer's DataNodeId and its
   * aggregate packet latency as observed by the reporting DataNode.
   * (DataNodeId must be transmitted as a string for protocol compatibility
   *  with earlier versions of Hadoop).
   *
   * The exact choice of the aggregate is opaque to the NameNode but it
   * _should_ be chosen consistently by all DataNodes in the cluster.
   * Examples of aggregates are 90th percentile (good) and mean (not so
   * good).
   * </pre>
   *
   * Protobuf type {@code hadoop.hdfs.datanode.SlowPeerReportProto}
   */
  public static final class SlowPeerReportProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.hdfs.datanode.SlowPeerReportProto)
      SlowPeerReportProtoOrBuilder {
  private static final long serialVersionUID = 0L;
    // Use SlowPeerReportProto.newBuilder() to construct.
    private SlowPeerReportProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private SlowPeerReportProto() {
      dataNodeId_ = "";
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new SlowPeerReportProto();
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_SlowPeerReportProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_SlowPeerReportProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowPeerReportProto.class, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowPeerReportProto.Builder.class);
    }

    private int bitField0_;
    public static final int DATANODEID_FIELD_NUMBER = 1;
    @SuppressWarnings("serial")
    private volatile java.lang.Object dataNodeId_ = "";
    /**
     * <code>optional string dataNodeId = 1;</code>
     * @return Whether the dataNodeId field is set.
     */
    @java.lang.Override
    public boolean hasDataNodeId() {
      return ((bitField0_ & 0x00000001) != 0);
    }
    /**
     * <code>optional string dataNodeId = 1;</code>
     * @return The dataNodeId.
     */
    @java.lang.Override
    public java.lang.String getDataNodeId() {
      java.lang.Object ref = dataNodeId_;
      if (ref instanceof java.lang.String) {
        return (java.lang.String) ref;
      } else {
        org.apache.hadoop.thirdparty.protobuf.ByteString bs = 
            (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        java.lang.String s = bs.toStringUtf8();
        if (bs.isValidUtf8()) {
          dataNodeId_ = s;
        }
        return s;
      }
    }
    /**
     * <code>optional string dataNodeId = 1;</code>
     * @return The bytes for dataNodeId.
     */
    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.ByteString
        getDataNodeIdBytes() {
      java.lang.Object ref = dataNodeId_;
      if (ref instanceof java.lang.String) {
        org.apache.hadoop.thirdparty.protobuf.ByteString b = 
            org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                (java.lang.String) ref);
        dataNodeId_ = b;
        return b;
      } else {
        return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
      }
    }

    public static final int AGGREGATELATENCY_FIELD_NUMBER = 2;
    private double aggregateLatency_ = 0D;
    /**
     * <code>optional double aggregateLatency = 2;</code>
     * @return Whether the aggregateLatency field is set.
     */
    @java.lang.Override
    public boolean hasAggregateLatency() {
      return ((bitField0_ & 0x00000002) != 0);
    }
    /**
     * <code>optional double aggregateLatency = 2;</code>
     * @return The aggregateLatency.
     */
    @java.lang.Override
    public double getAggregateLatency() {
      return aggregateLatency_;
    }

    public static final int MEDIAN_FIELD_NUMBER = 3;
    private double median_ = 0D;
    /**
     * <code>optional double median = 3;</code>
     * @return Whether the median field is set.
     */
    @java.lang.Override
    public boolean hasMedian() {
      return ((bitField0_ & 0x00000004) != 0);
    }
    /**
     * <code>optional double median = 3;</code>
     * @return The median.
     */
    @java.lang.Override
    public double getMedian() {
      return median_;
    }

    public static final int MAD_FIELD_NUMBER = 4;
    private double mad_ = 0D;
    /**
     * <code>optional double mad = 4;</code>
     * @return Whether the mad field is set.
     */
    @java.lang.Override
    public boolean hasMad() {
      return ((bitField0_ & 0x00000008) != 0);
    }
    /**
     * <code>optional double mad = 4;</code>
     * @return The mad.
     */
    @java.lang.Override
    public double getMad() {
      return mad_;
    }

    public static final int UPPERLIMITLATENCY_FIELD_NUMBER = 5;
    private double upperLimitLatency_ = 0D;
    /**
     * <code>optional double upperLimitLatency = 5;</code>
     * @return Whether the upperLimitLatency field is set.
     */
    @java.lang.Override
    public boolean hasUpperLimitLatency() {
      return ((bitField0_ & 0x00000010) != 0);
    }
    /**
     * <code>optional double upperLimitLatency = 5;</code>
     * @return The upperLimitLatency.
     */
    @java.lang.Override
    public double getUpperLimitLatency() {
      return upperLimitLatency_;
    }

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      memoizedIsInitialized = 1;
      return true;
    }

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      if (((bitField0_ & 0x00000001) != 0)) {
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 1, dataNodeId_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        output.writeDouble(2, aggregateLatency_);
      }
      if (((bitField0_ & 0x00000004) != 0)) {
        output.writeDouble(3, median_);
      }
      if (((bitField0_ & 0x00000008) != 0)) {
        output.writeDouble(4, mad_);
      }
      if (((bitField0_ & 0x00000010) != 0)) {
        output.writeDouble(5, upperLimitLatency_);
      }
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(1, dataNodeId_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeDoubleSize(2, aggregateLatency_);
      }
      if (((bitField0_ & 0x00000004) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeDoubleSize(3, median_);
      }
      if (((bitField0_ & 0x00000008) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeDoubleSize(4, mad_);
      }
      if (((bitField0_ & 0x00000010) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeDoubleSize(5, upperLimitLatency_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
       return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowPeerReportProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowPeerReportProto other = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowPeerReportProto) obj;

      if (hasDataNodeId() != other.hasDataNodeId()) return false;
      if (hasDataNodeId()) {
        if (!getDataNodeId()
            .equals(other.getDataNodeId())) return false;
      }
      if (hasAggregateLatency() != other.hasAggregateLatency()) return false;
      if (hasAggregateLatency()) {
        if (java.lang.Double.doubleToLongBits(getAggregateLatency())
            != java.lang.Double.doubleToLongBits(
                other.getAggregateLatency())) return false;
      }
      if (hasMedian() != other.hasMedian()) return false;
      if (hasMedian()) {
        if (java.lang.Double.doubleToLongBits(getMedian())
            != java.lang.Double.doubleToLongBits(
                other.getMedian())) return false;
      }
      if (hasMad() != other.hasMad()) return false;
      if (hasMad()) {
        if (java.lang.Double.doubleToLongBits(getMad())
            != java.lang.Double.doubleToLongBits(
                other.getMad())) return false;
      }
      if (hasUpperLimitLatency() != other.hasUpperLimitLatency()) return false;
      if (hasUpperLimitLatency()) {
        if (java.lang.Double.doubleToLongBits(getUpperLimitLatency())
            != java.lang.Double.doubleToLongBits(
                other.getUpperLimitLatency())) return false;
      }
      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      if (hasDataNodeId()) {
        hash = (37 * hash) + DATANODEID_FIELD_NUMBER;
        hash = (53 * hash) + getDataNodeId().hashCode();
      }
      if (hasAggregateLatency()) {
        hash = (37 * hash) + AGGREGATELATENCY_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
            java.lang.Double.doubleToLongBits(getAggregateLatency()));
      }
      if (hasMedian()) {
        hash = (37 * hash) + MEDIAN_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
            java.lang.Double.doubleToLongBits(getMedian()));
      }
      if (hasMad()) {
        hash = (37 * hash) + MAD_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
            java.lang.Double.doubleToLongBits(getMad()));
      }
      if (hasUpperLimitLatency()) {
        hash = (37 * hash) + UPPERLIMITLATENCY_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
            java.lang.Double.doubleToLongBits(getUpperLimitLatency()));
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowPeerReportProto parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowPeerReportProto parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowPeerReportProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowPeerReportProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowPeerReportProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowPeerReportProto parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowPeerReportProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowPeerReportProto parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowPeerReportProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowPeerReportProto parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowPeerReportProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowPeerReportProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowPeerReportProto prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * <pre>
     **
     * Information about a single slow peer that may be reported by
     * the DataNode to the NameNode as part of the heartbeat request.
     * The message includes the peer's DataNodeId and its
     * aggregate packet latency as observed by the reporting DataNode.
     * (DataNodeId must be transmitted as a string for protocol compatibility
     *  with earlier versions of Hadoop).
     *
     * The exact choice of the aggregate is opaque to the NameNode but it
     * _should_ be chosen consistently by all DataNodes in the cluster.
     * Examples of aggregates are 90th percentile (good) and mean (not so
     * good).
     * </pre>
     *
     * Protobuf type {@code hadoop.hdfs.datanode.SlowPeerReportProto}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.hdfs.datanode.SlowPeerReportProto)
        org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowPeerReportProtoOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_SlowPeerReportProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_SlowPeerReportProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowPeerReportProto.class, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowPeerReportProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowPeerReportProto.newBuilder()
      private Builder() {

      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);

      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        bitField0_ = 0;
        dataNodeId_ = "";
        aggregateLatency_ = 0D;
        median_ = 0D;
        mad_ = 0D;
        upperLimitLatency_ = 0D;
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_SlowPeerReportProto_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowPeerReportProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowPeerReportProto.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowPeerReportProto build() {
        org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowPeerReportProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowPeerReportProto buildPartial() {
        org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowPeerReportProto result = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowPeerReportProto(this);
        if (bitField0_ != 0) { buildPartial0(result); }
        onBuilt();
        return result;
      }

      private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowPeerReportProto result) {
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) != 0)) {
          result.dataNodeId_ = dataNodeId_;
          to_bitField0_ |= 0x00000001;
        }
        if (((from_bitField0_ & 0x00000002) != 0)) {
          result.aggregateLatency_ = aggregateLatency_;
          to_bitField0_ |= 0x00000002;
        }
        if (((from_bitField0_ & 0x00000004) != 0)) {
          result.median_ = median_;
          to_bitField0_ |= 0x00000004;
        }
        if (((from_bitField0_ & 0x00000008) != 0)) {
          result.mad_ = mad_;
          to_bitField0_ |= 0x00000008;
        }
        if (((from_bitField0_ & 0x00000010) != 0)) {
          result.upperLimitLatency_ = upperLimitLatency_;
          to_bitField0_ |= 0x00000010;
        }
        result.bitField0_ |= to_bitField0_;
      }

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowPeerReportProto) {
          return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowPeerReportProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowPeerReportProto other) {
        if (other == org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowPeerReportProto.getDefaultInstance()) return this;
        if (other.hasDataNodeId()) {
          dataNodeId_ = other.dataNodeId_;
          bitField0_ |= 0x00000001;
          onChanged();
        }
        if (other.hasAggregateLatency()) {
          setAggregateLatency(other.getAggregateLatency());
        }
        if (other.hasMedian()) {
          setMedian(other.getMedian());
        }
        if (other.hasMad()) {
          setMad(other.getMad());
        }
        if (other.hasUpperLimitLatency()) {
          setUpperLimitLatency(other.getUpperLimitLatency());
        }
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        return true;
      }

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              case 10: {
                dataNodeId_ = input.readBytes();
                bitField0_ |= 0x00000001;
                break;
              } // case 10
              case 17: {
                aggregateLatency_ = input.readDouble();
                bitField0_ |= 0x00000002;
                break;
              } // case 17
              case 25: {
                median_ = input.readDouble();
                bitField0_ |= 0x00000004;
                break;
              } // case 25
              case 33: {
                mad_ = input.readDouble();
                bitField0_ |= 0x00000008;
                break;
              } // case 33
              case 41: {
                upperLimitLatency_ = input.readDouble();
                bitField0_ |= 0x00000010;
                break;
              } // case 41
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
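      // Note on the case labels above (not generated): a protobuf field tag is
      // (field_number << 3) | wire_type. dataNodeId is field 1 with wire type 2
      // (length-delimited), so its tag is (1 << 3) | 2 = 10; the double fields use
      // wire type 1 (64-bit), giving (2 << 3) | 1 = 17, (3 << 3) | 1 = 25,
      // (4 << 3) | 1 = 33 and (5 << 3) | 1 = 41, matching cases 10/17/25/33/41.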
      private int bitField0_;

      private java.lang.Object dataNodeId_ = "";
      /**
       * <code>optional string dataNodeId = 1;</code>
       * @return Whether the dataNodeId field is set.
       */
      public boolean hasDataNodeId() {
        return ((bitField0_ & 0x00000001) != 0);
      }
      /**
       * <code>optional string dataNodeId = 1;</code>
       * @return The dataNodeId.
       */
      public java.lang.String getDataNodeId() {
        java.lang.Object ref = dataNodeId_;
        if (!(ref instanceof java.lang.String)) {
          org.apache.hadoop.thirdparty.protobuf.ByteString bs =
              (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
          java.lang.String s = bs.toStringUtf8();
          if (bs.isValidUtf8()) {
            dataNodeId_ = s;
          }
          return s;
        } else {
          return (java.lang.String) ref;
        }
      }
      /**
       * <code>optional string dataNodeId = 1;</code>
       * @return The bytes for dataNodeId.
       */
      public org.apache.hadoop.thirdparty.protobuf.ByteString
          getDataNodeIdBytes() {
        java.lang.Object ref = dataNodeId_;
        if (ref instanceof String) {
          org.apache.hadoop.thirdparty.protobuf.ByteString b = 
              org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                  (java.lang.String) ref);
          dataNodeId_ = b;
          return b;
        } else {
          return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        }
      }
      /**
       * <code>optional string dataNodeId = 1;</code>
       * @param value The dataNodeId to set.
       * @return This builder for chaining.
       */
      public Builder setDataNodeId(
          java.lang.String value) {
        if (value == null) { throw new NullPointerException(); }
        dataNodeId_ = value;
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * <code>optional string dataNodeId = 1;</code>
       * @return This builder for chaining.
       */
      public Builder clearDataNodeId() {
        dataNodeId_ = getDefaultInstance().getDataNodeId();
        bitField0_ = (bitField0_ & ~0x00000001);
        onChanged();
        return this;
      }
      /**
       * <code>optional string dataNodeId = 1;</code>
       * @param value The bytes for dataNodeId to set.
       * @return This builder for chaining.
       */
      public Builder setDataNodeIdBytes(
          org.apache.hadoop.thirdparty.protobuf.ByteString value) {
        if (value == null) { throw new NullPointerException(); }
        dataNodeId_ = value;
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }

      private double aggregateLatency_ ;
      /**
       * <code>optional double aggregateLatency = 2;</code>
       * @return Whether the aggregateLatency field is set.
       */
      @java.lang.Override
      public boolean hasAggregateLatency() {
        return ((bitField0_ & 0x00000002) != 0);
      }
      /**
       * <code>optional double aggregateLatency = 2;</code>
       * @return The aggregateLatency.
       */
      @java.lang.Override
      public double getAggregateLatency() {
        return aggregateLatency_;
      }
      /**
       * <code>optional double aggregateLatency = 2;</code>
       * @param value The aggregateLatency to set.
       * @return This builder for chaining.
       */
      public Builder setAggregateLatency(double value) {

        aggregateLatency_ = value;
        bitField0_ |= 0x00000002;
        onChanged();
        return this;
      }
      /**
       * <code>optional double aggregateLatency = 2;</code>
       * @return This builder for chaining.
       */
      public Builder clearAggregateLatency() {
        bitField0_ = (bitField0_ & ~0x00000002);
        aggregateLatency_ = 0D;
        onChanged();
        return this;
      }

      private double median_ ;
      /**
       * <code>optional double median = 3;</code>
       * @return Whether the median field is set.
       */
      @java.lang.Override
      public boolean hasMedian() {
        return ((bitField0_ & 0x00000004) != 0);
      }
      /**
       * <code>optional double median = 3;</code>
       * @return The median.
       */
      @java.lang.Override
      public double getMedian() {
        return median_;
      }
      /**
       * <code>optional double median = 3;</code>
       * @param value The median to set.
       * @return This builder for chaining.
       */
      public Builder setMedian(double value) {

        median_ = value;
        bitField0_ |= 0x00000004;
        onChanged();
        return this;
      }
      /**
       * <code>optional double median = 3;</code>
       * @return This builder for chaining.
       */
      public Builder clearMedian() {
        bitField0_ = (bitField0_ & ~0x00000004);
        median_ = 0D;
        onChanged();
        return this;
      }

      private double mad_ ;
      /**
       * <code>optional double mad = 4;</code>
       * @return Whether the mad field is set.
       */
      @java.lang.Override
      public boolean hasMad() {
        return ((bitField0_ & 0x00000008) != 0);
      }
      /**
       * <code>optional double mad = 4;</code>
       * @return The mad.
       */
      @java.lang.Override
      public double getMad() {
        return mad_;
      }
      /**
       * <code>optional double mad = 4;</code>
       * @param value The mad to set.
       * @return This builder for chaining.
       */
      public Builder setMad(double value) {

        mad_ = value;
        bitField0_ |= 0x00000008;
        onChanged();
        return this;
      }
      /**
       * <code>optional double mad = 4;</code>
       * @return This builder for chaining.
       */
      public Builder clearMad() {
        bitField0_ = (bitField0_ & ~0x00000008);
        mad_ = 0D;
        onChanged();
        return this;
      }

      private double upperLimitLatency_ ;
      /**
       * <code>optional double upperLimitLatency = 5;</code>
       * @return Whether the upperLimitLatency field is set.
       */
      @java.lang.Override
      public boolean hasUpperLimitLatency() {
        return ((bitField0_ & 0x00000010) != 0);
      }
      /**
       * <code>optional double upperLimitLatency = 5;</code>
       * @return The upperLimitLatency.
       */
      @java.lang.Override
      public double getUpperLimitLatency() {
        return upperLimitLatency_;
      }
      /**
       * <code>optional double upperLimitLatency = 5;</code>
       * @param value The upperLimitLatency to set.
       * @return This builder for chaining.
       */
      public Builder setUpperLimitLatency(double value) {

        upperLimitLatency_ = value;
        bitField0_ |= 0x00000010;
        onChanged();
        return this;
      }
      /**
       * <code>optional double upperLimitLatency = 5;</code>
       * @return This builder for chaining.
       */
      public Builder clearUpperLimitLatency() {
        bitField0_ = (bitField0_ & ~0x00000010);
        upperLimitLatency_ = 0D;
        onChanged();
        return this;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.datanode.SlowPeerReportProto)
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.datanode.SlowPeerReportProto)
    private static final org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowPeerReportProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowPeerReportProto();
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowPeerReportProto getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<SlowPeerReportProto>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<SlowPeerReportProto>() {
      @java.lang.Override
      public SlowPeerReportProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<SlowPeerReportProto> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<SlowPeerReportProto> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowPeerReportProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }
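  // Illustrative usage sketch (not part of the generated code): populating the
  // SlowPeerReportProto statistics fields shown above and round-tripping the
  // message through the standard protobuf serialization API. The latency
  // values are hypothetical.
  //
  //   SlowPeerReportProto report = SlowPeerReportProto.newBuilder()
  //       .setMad(3.2)                 // illustrative median absolute deviation
  //       .setUpperLimitLatency(45.0)  // illustrative upper latency threshold
  //       .build();
  //   SlowPeerReportProto parsed =
  //       SlowPeerReportProto.parseFrom(report.toByteArray());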

  public interface SlowDiskReportProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.datanode.SlowDiskReportProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

    /**
     * <code>optional string basePath = 1;</code>
     * @return Whether the basePath field is set.
     */
    boolean hasBasePath();
    /**
     * <code>optional string basePath = 1;</code>
     * @return The basePath.
     */
    java.lang.String getBasePath();
    /**
     * <code>optional string basePath = 1;</code>
     * @return The bytes for basePath.
     */
    org.apache.hadoop.thirdparty.protobuf.ByteString
        getBasePathBytes();

    /**
     * <code>optional double meanMetadataOpLatency = 2;</code>
     * @return Whether the meanMetadataOpLatency field is set.
     */
    boolean hasMeanMetadataOpLatency();
    /**
     * <code>optional double meanMetadataOpLatency = 2;</code>
     * @return The meanMetadataOpLatency.
     */
    double getMeanMetadataOpLatency();

    /**
     * <code>optional double meanReadIoLatency = 3;</code>
     * @return Whether the meanReadIoLatency field is set.
     */
    boolean hasMeanReadIoLatency();
    /**
     * <code>optional double meanReadIoLatency = 3;</code>
     * @return The meanReadIoLatency.
     */
    double getMeanReadIoLatency();

    /**
     * <code>optional double meanWriteIoLatency = 4;</code>
     * @return Whether the meanWriteIoLatency field is set.
     */
    boolean hasMeanWriteIoLatency();
    /**
     * <code>optional double meanWriteIoLatency = 4;</code>
     * @return The meanWriteIoLatency.
     */
    double getMeanWriteIoLatency();
  }
  /**
   * <pre>
   **
   * Information about a single slow disk that may be reported by
   * the DataNode to the NameNode as part of the heartbeat request.
   * The message includes the disk's basePath, mean metadata op latency,
   * mean read io latency and mean write io latency as observed by the DataNode.
   * </pre>
   *
   * Protobuf type {@code hadoop.hdfs.datanode.SlowDiskReportProto}
   */
  public static final class SlowDiskReportProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.hdfs.datanode.SlowDiskReportProto)
      SlowDiskReportProtoOrBuilder {
    private static final long serialVersionUID = 0L;
    // Use SlowDiskReportProto.newBuilder() to construct.
    private SlowDiskReportProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private SlowDiskReportProto() {
      basePath_ = "";
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new SlowDiskReportProto();
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_SlowDiskReportProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_SlowDiskReportProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowDiskReportProto.class, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowDiskReportProto.Builder.class);
    }

    private int bitField0_;
    public static final int BASEPATH_FIELD_NUMBER = 1;
    @SuppressWarnings("serial")
    private volatile java.lang.Object basePath_ = "";
    /**
     * <code>optional string basePath = 1;</code>
     * @return Whether the basePath field is set.
     */
    @java.lang.Override
    public boolean hasBasePath() {
      return ((bitField0_ & 0x00000001) != 0);
    }
    /**
     * <code>optional string basePath = 1;</code>
     * @return The basePath.
     */
    @java.lang.Override
    public java.lang.String getBasePath() {
      java.lang.Object ref = basePath_;
      if (ref instanceof java.lang.String) {
        return (java.lang.String) ref;
      } else {
        org.apache.hadoop.thirdparty.protobuf.ByteString bs = 
            (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        java.lang.String s = bs.toStringUtf8();
        if (bs.isValidUtf8()) {
          basePath_ = s;
        }
        return s;
      }
    }
    /**
     * <code>optional string basePath = 1;</code>
     * @return The bytes for basePath.
     */
    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.ByteString
        getBasePathBytes() {
      java.lang.Object ref = basePath_;
      if (ref instanceof java.lang.String) {
        org.apache.hadoop.thirdparty.protobuf.ByteString b = 
            org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                (java.lang.String) ref);
        basePath_ = b;
        return b;
      } else {
        return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
      }
    }

    public static final int MEANMETADATAOPLATENCY_FIELD_NUMBER = 2;
    private double meanMetadataOpLatency_ = 0D;
    /**
     * <code>optional double meanMetadataOpLatency = 2;</code>
     * @return Whether the meanMetadataOpLatency field is set.
     */
    @java.lang.Override
    public boolean hasMeanMetadataOpLatency() {
      return ((bitField0_ & 0x00000002) != 0);
    }
    /**
     * <code>optional double meanMetadataOpLatency = 2;</code>
     * @return The meanMetadataOpLatency.
     */
    @java.lang.Override
    public double getMeanMetadataOpLatency() {
      return meanMetadataOpLatency_;
    }

    public static final int MEANREADIOLATENCY_FIELD_NUMBER = 3;
    private double meanReadIoLatency_ = 0D;
    /**
     * <code>optional double meanReadIoLatency = 3;</code>
     * @return Whether the meanReadIoLatency field is set.
     */
    @java.lang.Override
    public boolean hasMeanReadIoLatency() {
      return ((bitField0_ & 0x00000004) != 0);
    }
    /**
     * <code>optional double meanReadIoLatency = 3;</code>
     * @return The meanReadIoLatency.
     */
    @java.lang.Override
    public double getMeanReadIoLatency() {
      return meanReadIoLatency_;
    }

    public static final int MEANWRITEIOLATENCY_FIELD_NUMBER = 4;
    private double meanWriteIoLatency_ = 0D;
    /**
     * <code>optional double meanWriteIoLatency = 4;</code>
     * @return Whether the meanWriteIoLatency field is set.
     */
    @java.lang.Override
    public boolean hasMeanWriteIoLatency() {
      return ((bitField0_ & 0x00000008) != 0);
    }
    /**
     * <code>optional double meanWriteIoLatency = 4;</code>
     * @return The meanWriteIoLatency.
     */
    @java.lang.Override
    public double getMeanWriteIoLatency() {
      return meanWriteIoLatency_;
    }

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      memoizedIsInitialized = 1;
      return true;
    }

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      if (((bitField0_ & 0x00000001) != 0)) {
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 1, basePath_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        output.writeDouble(2, meanMetadataOpLatency_);
      }
      if (((bitField0_ & 0x00000004) != 0)) {
        output.writeDouble(3, meanReadIoLatency_);
      }
      if (((bitField0_ & 0x00000008) != 0)) {
        output.writeDouble(4, meanWriteIoLatency_);
      }
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(1, basePath_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeDoubleSize(2, meanMetadataOpLatency_);
      }
      if (((bitField0_ & 0x00000004) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeDoubleSize(3, meanReadIoLatency_);
      }
      if (((bitField0_ & 0x00000008) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeDoubleSize(4, meanWriteIoLatency_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
       return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowDiskReportProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowDiskReportProto other = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowDiskReportProto) obj;

      if (hasBasePath() != other.hasBasePath()) return false;
      if (hasBasePath()) {
        if (!getBasePath()
            .equals(other.getBasePath())) return false;
      }
      if (hasMeanMetadataOpLatency() != other.hasMeanMetadataOpLatency()) return false;
      if (hasMeanMetadataOpLatency()) {
        if (java.lang.Double.doubleToLongBits(getMeanMetadataOpLatency())
            != java.lang.Double.doubleToLongBits(
                other.getMeanMetadataOpLatency())) return false;
      }
      if (hasMeanReadIoLatency() != other.hasMeanReadIoLatency()) return false;
      if (hasMeanReadIoLatency()) {
        if (java.lang.Double.doubleToLongBits(getMeanReadIoLatency())
            != java.lang.Double.doubleToLongBits(
                other.getMeanReadIoLatency())) return false;
      }
      if (hasMeanWriteIoLatency() != other.hasMeanWriteIoLatency()) return false;
      if (hasMeanWriteIoLatency()) {
        if (java.lang.Double.doubleToLongBits(getMeanWriteIoLatency())
            != java.lang.Double.doubleToLongBits(
                other.getMeanWriteIoLatency())) return false;
      }
      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      if (hasBasePath()) {
        hash = (37 * hash) + BASEPATH_FIELD_NUMBER;
        hash = (53 * hash) + getBasePath().hashCode();
      }
      if (hasMeanMetadataOpLatency()) {
        hash = (37 * hash) + MEANMETADATAOPLATENCY_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
            java.lang.Double.doubleToLongBits(getMeanMetadataOpLatency()));
      }
      if (hasMeanReadIoLatency()) {
        hash = (37 * hash) + MEANREADIOLATENCY_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
            java.lang.Double.doubleToLongBits(getMeanReadIoLatency()));
      }
      if (hasMeanWriteIoLatency()) {
        hash = (37 * hash) + MEANWRITEIOLATENCY_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
            java.lang.Double.doubleToLongBits(getMeanWriteIoLatency()));
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowDiskReportProto parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowDiskReportProto parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowDiskReportProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowDiskReportProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowDiskReportProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowDiskReportProto parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowDiskReportProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowDiskReportProto parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowDiskReportProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowDiskReportProto parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowDiskReportProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowDiskReportProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowDiskReportProto prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * <pre>
     **
     * Information about a single slow disk that may be reported by
     * the DataNode to the NameNode as part of the heartbeat request.
     * The message includes the disk's basePath, mean metadata op latency,
     * mean read io latency and mean write io latency as observed by the DataNode.
     * </pre>
     *
     * Protobuf type {@code hadoop.hdfs.datanode.SlowDiskReportProto}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.hdfs.datanode.SlowDiskReportProto)
        org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowDiskReportProtoOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_SlowDiskReportProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_SlowDiskReportProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowDiskReportProto.class, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowDiskReportProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowDiskReportProto.newBuilder()
      private Builder() {

      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);

      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        bitField0_ = 0;
        basePath_ = "";
        meanMetadataOpLatency_ = 0D;
        meanReadIoLatency_ = 0D;
        meanWriteIoLatency_ = 0D;
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_hadoop_hdfs_datanode_SlowDiskReportProto_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowDiskReportProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowDiskReportProto.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowDiskReportProto build() {
        org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowDiskReportProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowDiskReportProto buildPartial() {
        org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowDiskReportProto result = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowDiskReportProto(this);
        if (bitField0_ != 0) { buildPartial0(result); }
        onBuilt();
        return result;
      }

      private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowDiskReportProto result) {
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) != 0)) {
          result.basePath_ = basePath_;
          to_bitField0_ |= 0x00000001;
        }
        if (((from_bitField0_ & 0x00000002) != 0)) {
          result.meanMetadataOpLatency_ = meanMetadataOpLatency_;
          to_bitField0_ |= 0x00000002;
        }
        if (((from_bitField0_ & 0x00000004) != 0)) {
          result.meanReadIoLatency_ = meanReadIoLatency_;
          to_bitField0_ |= 0x00000004;
        }
        if (((from_bitField0_ & 0x00000008) != 0)) {
          result.meanWriteIoLatency_ = meanWriteIoLatency_;
          to_bitField0_ |= 0x00000008;
        }
        result.bitField0_ |= to_bitField0_;
      }

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowDiskReportProto) {
          return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowDiskReportProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowDiskReportProto other) {
        if (other == org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowDiskReportProto.getDefaultInstance()) return this;
        if (other.hasBasePath()) {
          basePath_ = other.basePath_;
          bitField0_ |= 0x00000001;
          onChanged();
        }
        if (other.hasMeanMetadataOpLatency()) {
          setMeanMetadataOpLatency(other.getMeanMetadataOpLatency());
        }
        if (other.hasMeanReadIoLatency()) {
          setMeanReadIoLatency(other.getMeanReadIoLatency());
        }
        if (other.hasMeanWriteIoLatency()) {
          setMeanWriteIoLatency(other.getMeanWriteIoLatency());
        }
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        return true;
      }

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              case 10: {
                basePath_ = input.readBytes();
                bitField0_ |= 0x00000001;
                break;
              } // case 10
              case 17: {
                meanMetadataOpLatency_ = input.readDouble();
                bitField0_ |= 0x00000002;
                break;
              } // case 17
              case 25: {
                meanReadIoLatency_ = input.readDouble();
                bitField0_ |= 0x00000004;
                break;
              } // case 25
              case 33: {
                meanWriteIoLatency_ = input.readDouble();
                bitField0_ |= 0x00000008;
                break;
              } // case 33
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
      private int bitField0_;

      private java.lang.Object basePath_ = "";
      /**
       * <code>optional string basePath = 1;</code>
       * @return Whether the basePath field is set.
       */
      public boolean hasBasePath() {
        return ((bitField0_ & 0x00000001) != 0);
      }
      /**
       * <code>optional string basePath = 1;</code>
       * @return The basePath.
       */
      public java.lang.String getBasePath() {
        java.lang.Object ref = basePath_;
        if (!(ref instanceof java.lang.String)) {
          org.apache.hadoop.thirdparty.protobuf.ByteString bs =
              (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
          java.lang.String s = bs.toStringUtf8();
          if (bs.isValidUtf8()) {
            basePath_ = s;
          }
          return s;
        } else {
          return (java.lang.String) ref;
        }
      }
      /**
       * <code>optional string basePath = 1;</code>
       * @return The bytes for basePath.
       */
      public org.apache.hadoop.thirdparty.protobuf.ByteString
          getBasePathBytes() {
        java.lang.Object ref = basePath_;
        if (ref instanceof String) {
          org.apache.hadoop.thirdparty.protobuf.ByteString b = 
              org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                  (java.lang.String) ref);
          basePath_ = b;
          return b;
        } else {
          return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        }
      }
      /**
       * <code>optional string basePath = 1;</code>
       * @param value The basePath to set.
       * @return This builder for chaining.
       */
      public Builder setBasePath(
          java.lang.String value) {
        if (value == null) { throw new NullPointerException(); }
        basePath_ = value;
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * <code>optional string basePath = 1;</code>
       * @return This builder for chaining.
       */
      public Builder clearBasePath() {
        basePath_ = getDefaultInstance().getBasePath();
        bitField0_ = (bitField0_ & ~0x00000001);
        onChanged();
        return this;
      }
      /**
       * <code>optional string basePath = 1;</code>
       * @param value The bytes for basePath to set.
       * @return This builder for chaining.
       */
      public Builder setBasePathBytes(
          org.apache.hadoop.thirdparty.protobuf.ByteString value) {
        if (value == null) { throw new NullPointerException(); }
        basePath_ = value;
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }

      private double meanMetadataOpLatency_ ;
      /**
       * <code>optional double meanMetadataOpLatency = 2;</code>
       * @return Whether the meanMetadataOpLatency field is set.
       */
      @java.lang.Override
      public boolean hasMeanMetadataOpLatency() {
        return ((bitField0_ & 0x00000002) != 0);
      }
      /**
       * <code>optional double meanMetadataOpLatency = 2;</code>
       * @return The meanMetadataOpLatency.
       */
      @java.lang.Override
      public double getMeanMetadataOpLatency() {
        return meanMetadataOpLatency_;
      }
      /**
       * <code>optional double meanMetadataOpLatency = 2;</code>
       * @param value The meanMetadataOpLatency to set.
       * @return This builder for chaining.
       */
      public Builder setMeanMetadataOpLatency(double value) {

        meanMetadataOpLatency_ = value;
        bitField0_ |= 0x00000002;
        onChanged();
        return this;
      }
      /**
       * <code>optional double meanMetadataOpLatency = 2;</code>
       * @return This builder for chaining.
       */
      public Builder clearMeanMetadataOpLatency() {
        bitField0_ = (bitField0_ & ~0x00000002);
        meanMetadataOpLatency_ = 0D;
        onChanged();
        return this;
      }

      private double meanReadIoLatency_ ;
      /**
       * <code>optional double meanReadIoLatency = 3;</code>
       * @return Whether the meanReadIoLatency field is set.
       */
      @java.lang.Override
      public boolean hasMeanReadIoLatency() {
        return ((bitField0_ & 0x00000004) != 0);
      }
      /**
       * <code>optional double meanReadIoLatency = 3;</code>
       * @return The meanReadIoLatency.
       */
      @java.lang.Override
      public double getMeanReadIoLatency() {
        return meanReadIoLatency_;
      }
      /**
       * <code>optional double meanReadIoLatency = 3;</code>
       * @param value The meanReadIoLatency to set.
       * @return This builder for chaining.
       */
      public Builder setMeanReadIoLatency(double value) {

        meanReadIoLatency_ = value;
        bitField0_ |= 0x00000004;
        onChanged();
        return this;
      }
      /**
       * <code>optional double meanReadIoLatency = 3;</code>
       * @return This builder for chaining.
       */
      public Builder clearMeanReadIoLatency() {
        bitField0_ = (bitField0_ & ~0x00000004);
        meanReadIoLatency_ = 0D;
        onChanged();
        return this;
      }

      private double meanWriteIoLatency_ ;
      /**
       * <code>optional double meanWriteIoLatency = 4;</code>
       * @return Whether the meanWriteIoLatency field is set.
       */
      @java.lang.Override
      public boolean hasMeanWriteIoLatency() {
        return ((bitField0_ & 0x00000008) != 0);
      }
      /**
       * <code>optional double meanWriteIoLatency = 4;</code>
       * @return The meanWriteIoLatency.
       */
      @java.lang.Override
      public double getMeanWriteIoLatency() {
        return meanWriteIoLatency_;
      }
      /**
       * <code>optional double meanWriteIoLatency = 4;</code>
       * @param value The meanWriteIoLatency to set.
       * @return This builder for chaining.
       */
      public Builder setMeanWriteIoLatency(double value) {

        meanWriteIoLatency_ = value;
        bitField0_ |= 0x00000008;
        onChanged();
        return this;
      }
      /**
       * <code>optional double meanWriteIoLatency = 4;</code>
       * @return This builder for chaining.
       */
      public Builder clearMeanWriteIoLatency() {
        bitField0_ = (bitField0_ & ~0x00000008);
        meanWriteIoLatency_ = 0D;
        onChanged();
        return this;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.datanode.SlowDiskReportProto)
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.datanode.SlowDiskReportProto)
    private static final org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowDiskReportProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowDiskReportProto();
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowDiskReportProto getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<SlowDiskReportProto>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<SlowDiskReportProto>() {
      @java.lang.Override
      public SlowDiskReportProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<SlowDiskReportProto> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<SlowDiskReportProto> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowDiskReportProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }
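  // Illustrative usage sketch (not part of the generated code): building a
  // SlowDiskReportProto with the optional fields defined above, serializing it,
  // and parsing it back. The base path and latency values are hypothetical.
  //
  //   SlowDiskReportProto diskReport = SlowDiskReportProto.newBuilder()
  //       .setBasePath("/data/1")       // hypothetical volume base path
  //       .setMeanMetadataOpLatency(12.5)
  //       .setMeanReadIoLatency(8.0)
  //       .setMeanWriteIoLatency(15.0)
  //       .build();
  //   byte[] bytes = diskReport.toByteArray();
  //   SlowDiskReportProto roundTripped = SlowDiskReportProto.parseFrom(bytes);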

  /**
   * <pre>
   **
   * Protocol used from datanode to the namenode
   * See the request and response for details of rpc call.
   * </pre>
   *
   * Protobuf service {@code hadoop.hdfs.datanode.DatanodeProtocolService}
   */
  public static abstract class DatanodeProtocolService
      implements org.apache.hadoop.thirdparty.protobuf.Service {
    protected DatanodeProtocolService() {}

    public interface Interface {
      /**
       * <pre>
       **
       * Register a datanode at a namenode
       * </pre>
       *
       * <code>rpc registerDatanode(.hadoop.hdfs.datanode.RegisterDatanodeRequestProto) returns (.hadoop.hdfs.datanode.RegisterDatanodeResponseProto);</code>
       */
      public abstract void registerDatanode(
          org.apache.hadoop.thirdparty.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto request,
          org.apache.hadoop.thirdparty.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto> done);

      /**
       * <pre>
       **
       * Send heartbeat from datanode to namenode
       * </pre>
       *
       * <code>rpc sendHeartbeat(.hadoop.hdfs.datanode.HeartbeatRequestProto) returns (.hadoop.hdfs.datanode.HeartbeatResponseProto);</code>
       */
      public abstract void sendHeartbeat(
          org.apache.hadoop.thirdparty.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto request,
          org.apache.hadoop.thirdparty.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto> done);

      /**
       * <pre>
       **
       * Report blocks at a given datanode to the namenode
       * </pre>
       *
       * <code>rpc blockReport(.hadoop.hdfs.datanode.BlockReportRequestProto) returns (.hadoop.hdfs.datanode.BlockReportResponseProto);</code>
       */
      public abstract void blockReport(
          org.apache.hadoop.thirdparty.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto request,
          org.apache.hadoop.thirdparty.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto> done);

      /**
       * <pre>
       **
       * Report cached blocks at a datanode to the namenode
       * </pre>
       *
       * <code>rpc cacheReport(.hadoop.hdfs.datanode.CacheReportRequestProto) returns (.hadoop.hdfs.datanode.CacheReportResponseProto);</code>
       */
      public abstract void cacheReport(
          org.apache.hadoop.thirdparty.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportRequestProto request,
          org.apache.hadoop.thirdparty.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportResponseProto> done);

      /**
       * <pre>
       **
       * Incremental block report from the DN. This contains info about recently
       * received and deleted blocks, as well as when blocks start being
       * received.
       * </pre>
       *
       * <code>rpc blockReceivedAndDeleted(.hadoop.hdfs.datanode.BlockReceivedAndDeletedRequestProto) returns (.hadoop.hdfs.datanode.BlockReceivedAndDeletedResponseProto);</code>
       */
      public abstract void blockReceivedAndDeleted(
          org.apache.hadoop.thirdparty.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto request,
          org.apache.hadoop.thirdparty.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto> done);

      /**
       * <pre>
       **
       * Report from a datanode of an error to the active namenode.
       * Used for debugging.
       * </pre>
       *
       * <code>rpc errorReport(.hadoop.hdfs.datanode.ErrorReportRequestProto) returns (.hadoop.hdfs.datanode.ErrorReportResponseProto);</code>
       */
      public abstract void errorReport(
          org.apache.hadoop.thirdparty.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto request,
          org.apache.hadoop.thirdparty.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto> done);

      /**
       * <pre>
       **
       * Request the version
       * </pre>
       *
       * <code>rpc versionRequest(.hadoop.hdfs.VersionRequestProto) returns (.hadoop.hdfs.VersionResponseProto);</code>
       */
      public abstract void versionRequest(
          org.apache.hadoop.thirdparty.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionRequestProto request,
          org.apache.hadoop.thirdparty.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionResponseProto> done);

      /**
       * <pre>
       **
       * Report corrupt blocks at the specified location
       * </pre>
       *
       * <code>rpc reportBadBlocks(.hadoop.hdfs.datanode.ReportBadBlocksRequestProto) returns (.hadoop.hdfs.datanode.ReportBadBlocksResponseProto);</code>
       */
      public abstract void reportBadBlocks(
          org.apache.hadoop.thirdparty.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto request,
          org.apache.hadoop.thirdparty.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto> done);

      /**
       * <pre>
       **
       * Commit block synchronization during lease recovery.
       * </pre>
       *
       * <code>rpc commitBlockSynchronization(.hadoop.hdfs.datanode.CommitBlockSynchronizationRequestProto) returns (.hadoop.hdfs.datanode.CommitBlockSynchronizationResponseProto);</code>
       */
      public abstract void commitBlockSynchronization(
          org.apache.hadoop.thirdparty.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto request,
          org.apache.hadoop.thirdparty.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto> done);

    }

    public static org.apache.hadoop.thirdparty.protobuf.Service newReflectiveService(
        final Interface impl) {
      return new DatanodeProtocolService() {
        @java.lang.Override
        public  void registerDatanode(
            org.apache.hadoop.thirdparty.protobuf.RpcController controller,
            org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto request,
            org.apache.hadoop.thirdparty.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto> done) {
          impl.registerDatanode(controller, request, done);
        }

        @java.lang.Override
        public  void sendHeartbeat(
            org.apache.hadoop.thirdparty.protobuf.RpcController controller,
            org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto request,
            org.apache.hadoop.thirdparty.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto> done) {
          impl.sendHeartbeat(controller, request, done);
        }

        @java.lang.Override
        public  void blockReport(
            org.apache.hadoop.thirdparty.protobuf.RpcController controller,
            org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto request,
            org.apache.hadoop.thirdparty.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto> done) {
          impl.blockReport(controller, request, done);
        }

        @java.lang.Override
        public  void cacheReport(
            org.apache.hadoop.thirdparty.protobuf.RpcController controller,
            org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportRequestProto request,
            org.apache.hadoop.thirdparty.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportResponseProto> done) {
          impl.cacheReport(controller, request, done);
        }

        @java.lang.Override
        public  void blockReceivedAndDeleted(
            org.apache.hadoop.thirdparty.protobuf.RpcController controller,
            org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto request,
            org.apache.hadoop.thirdparty.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto> done) {
          impl.blockReceivedAndDeleted(controller, request, done);
        }

        @java.lang.Override
        public  void errorReport(
            org.apache.hadoop.thirdparty.protobuf.RpcController controller,
            org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto request,
            org.apache.hadoop.thirdparty.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto> done) {
          impl.errorReport(controller, request, done);
        }

        @java.lang.Override
        public  void versionRequest(
            org.apache.hadoop.thirdparty.protobuf.RpcController controller,
            org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionRequestProto request,
            org.apache.hadoop.thirdparty.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionResponseProto> done) {
          impl.versionRequest(controller, request, done);
        }

        @java.lang.Override
        public  void reportBadBlocks(
            org.apache.hadoop.thirdparty.protobuf.RpcController controller,
            org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto request,
            org.apache.hadoop.thirdparty.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto> done) {
          impl.reportBadBlocks(controller, request, done);
        }

        @java.lang.Override
        public  void commitBlockSynchronization(
            org.apache.hadoop.thirdparty.protobuf.RpcController controller,
            org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto request,
            org.apache.hadoop.thirdparty.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto> done) {
          impl.commitBlockSynchronization(controller, request, done);
        }

      };
    }
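    // Illustrative usage sketch (not part of the generated code): wrapping an
    // application-supplied Interface implementation as a protobuf Service via
    // newReflectiveService(). "myDatanodeProtocolImpl" is a hypothetical object
    // that implements DatanodeProtocolService.Interface.
    //
    //   org.apache.hadoop.thirdparty.protobuf.Service service =
    //       DatanodeProtocolService.newReflectiveService(myDatanodeProtocolImpl);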

    public static org.apache.hadoop.thirdparty.protobuf.BlockingService
        newReflectiveBlockingService(final BlockingInterface impl) {
      return new org.apache.hadoop.thirdparty.protobuf.BlockingService() {
        public final org.apache.hadoop.thirdparty.protobuf.Descriptors.ServiceDescriptor
            getDescriptorForType() {
          return getDescriptor();
        }

        public final org.apache.hadoop.thirdparty.protobuf.Message callBlockingMethod(
            org.apache.hadoop.thirdparty.protobuf.Descriptors.MethodDescriptor method,
            org.apache.hadoop.thirdparty.protobuf.RpcController controller,
            org.apache.hadoop.thirdparty.protobuf.Message request)
            throws org.apache.hadoop.thirdparty.protobuf.ServiceException {
          if (method.getService() != getDescriptor()) {
            throw new java.lang.IllegalArgumentException(
              "Service.callBlockingMethod() given method descriptor for " +
              "wrong service type.");
          }
          switch(method.getIndex()) {
            case 0:
              return impl.registerDatanode(controller, (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto)request);
            case 1:
              return impl.sendHeartbeat(controller, (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto)request);
            case 2:
              return impl.blockReport(controller, (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto)request);
            case 3:
              return impl.cacheReport(controller, (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportRequestProto)request);
            case 4:
              return impl.blockReceivedAndDeleted(controller, (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto)request);
            case 5:
              return impl.errorReport(controller, (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto)request);
            case 6:
              return impl.versionRequest(controller, (org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionRequestProto)request);
            case 7:
              return impl.reportBadBlocks(controller, (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto)request);
            case 8:
              return impl.commitBlockSynchronization(controller, (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto)request);
            default:
              throw new java.lang.AssertionError("Can't get here.");
          }
        }

        public final org.apache.hadoop.thirdparty.protobuf.Message
            getRequestPrototype(
            org.apache.hadoop.thirdparty.protobuf.Descriptors.MethodDescriptor method) {
          if (method.getService() != getDescriptor()) {
            throw new java.lang.IllegalArgumentException(
              "Service.getRequestPrototype() given method " +
              "descriptor for wrong service type.");
          }
          switch(method.getIndex()) {
            case 0:
              return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto.getDefaultInstance();
            case 1:
              return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto.getDefaultInstance();
            case 2:
              return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto.getDefaultInstance();
            case 3:
              return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportRequestProto.getDefaultInstance();
            case 4:
              return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto.getDefaultInstance();
            case 5:
              return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto.getDefaultInstance();
            case 6:
              return org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionRequestProto.getDefaultInstance();
            case 7:
              return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto.getDefaultInstance();
            case 8:
              return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto.getDefaultInstance();
            default:
              throw new java.lang.AssertionError("Can't get here.");
          }
        }

        public final org.apache.hadoop.thirdparty.protobuf.Message
            getResponsePrototype(
            org.apache.hadoop.thirdparty.protobuf.Descriptors.MethodDescriptor method) {
          if (method.getService() != getDescriptor()) {
            throw new java.lang.IllegalArgumentException(
              "Service.getResponsePrototype() given method " +
              "descriptor for wrong service type.");
          }
          switch(method.getIndex()) {
            case 0:
              return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto.getDefaultInstance();
            case 1:
              return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto.getDefaultInstance();
            case 2:
              return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto.getDefaultInstance();
            case 3:
              return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportResponseProto.getDefaultInstance();
            case 4:
              return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto.getDefaultInstance();
            case 5:
              return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto.getDefaultInstance();
            case 6:
              return org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionResponseProto.getDefaultInstance();
            case 7:
              return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto.getDefaultInstance();
            case 8:
              return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto.getDefaultInstance();
            default:
              throw new java.lang.AssertionError("Can't get here.");
          }
        }

      };
    }
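    // Illustrative usage sketch (not part of the generated code): invoking one
    // method on the reflective BlockingService returned above. Method index 1
    // corresponds to sendHeartbeat in the dispatch switch statements;
    // "blockingImpl", "controller" and "heartbeatRequest" are hypothetical.
    //
    //   org.apache.hadoop.thirdparty.protobuf.BlockingService blocking =
    //       DatanodeProtocolService.newReflectiveBlockingService(blockingImpl);
    //   org.apache.hadoop.thirdparty.protobuf.Message response =
    //       blocking.callBlockingMethod(
    //           DatanodeProtocolService.getDescriptor().getMethods().get(1),
    //           controller, heartbeatRequest);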

    /**
     * <pre>
     **
     * Register a datanode at a namenode
     * </pre>
     *
     * <code>rpc registerDatanode(.hadoop.hdfs.datanode.RegisterDatanodeRequestProto) returns (.hadoop.hdfs.datanode.RegisterDatanodeResponseProto);</code>
     */
    public abstract void registerDatanode(
        org.apache.hadoop.thirdparty.protobuf.RpcController controller,
        org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto request,
        org.apache.hadoop.thirdparty.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto> done);

    /**
     * <pre>
     **
     * Send heartbeat from datanode to namenode
     * </pre>
     *
     * <code>rpc sendHeartbeat(.hadoop.hdfs.datanode.HeartbeatRequestProto) returns (.hadoop.hdfs.datanode.HeartbeatResponseProto);</code>
     */
    public abstract void sendHeartbeat(
        org.apache.hadoop.thirdparty.protobuf.RpcController controller,
        org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto request,
        org.apache.hadoop.thirdparty.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto> done);

    /**
     * <pre>
     **
     * Report blocks at a given datanode to the namenode
     * </pre>
     *
     * <code>rpc blockReport(.hadoop.hdfs.datanode.BlockReportRequestProto) returns (.hadoop.hdfs.datanode.BlockReportResponseProto);</code>
     */
    public abstract void blockReport(
        org.apache.hadoop.thirdparty.protobuf.RpcController controller,
        org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto request,
        org.apache.hadoop.thirdparty.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto> done);

    /**
     * <pre>
     **
     * Report cached blocks at a datanode to the namenode
     * </pre>
     *
     * <code>rpc cacheReport(.hadoop.hdfs.datanode.CacheReportRequestProto) returns (.hadoop.hdfs.datanode.CacheReportResponseProto);</code>
     */
    public abstract void cacheReport(
        org.apache.hadoop.thirdparty.protobuf.RpcController controller,
        org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportRequestProto request,
        org.apache.hadoop.thirdparty.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportResponseProto> done);

    /**
     * <pre>
     **
     * Incremental block report from the DN. This contains info about recently
     * received and deleted blocks, as well as when blocks start being
     * received.
     * </pre>
     *
     * <code>rpc blockReceivedAndDeleted(.hadoop.hdfs.datanode.BlockReceivedAndDeletedRequestProto) returns (.hadoop.hdfs.datanode.BlockReceivedAndDeletedResponseProto);</code>
     */
    public abstract void blockReceivedAndDeleted(
        org.apache.hadoop.thirdparty.protobuf.RpcController controller,
        org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto request,
        org.apache.hadoop.thirdparty.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto> done);

    /**
     * <pre>
     * Report an error from a datanode to the active namenode.
     * Used for debugging.
     * </pre>
     *
     * <code>rpc errorReport(.hadoop.hdfs.datanode.ErrorReportRequestProto) returns (.hadoop.hdfs.datanode.ErrorReportResponseProto);</code>
     */
    public abstract void errorReport(
        org.apache.hadoop.thirdparty.protobuf.RpcController controller,
        org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto request,
        org.apache.hadoop.thirdparty.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto> done);

    /**
     * <pre>
     * Request the namenode version
     * </pre>
     *
     * <code>rpc versionRequest(.hadoop.hdfs.VersionRequestProto) returns (.hadoop.hdfs.VersionResponseProto);</code>
     */
    public abstract void versionRequest(
        org.apache.hadoop.thirdparty.protobuf.RpcController controller,
        org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionRequestProto request,
        org.apache.hadoop.thirdparty.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionResponseProto> done);

    /**
     * <pre>
     * Report corrupt blocks at the specified location
     * </pre>
     *
     * <code>rpc reportBadBlocks(.hadoop.hdfs.datanode.ReportBadBlocksRequestProto) returns (.hadoop.hdfs.datanode.ReportBadBlocksResponseProto);</code>
     */
    public abstract void reportBadBlocks(
        org.apache.hadoop.thirdparty.protobuf.RpcController controller,
        org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto request,
        org.apache.hadoop.thirdparty.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto> done);

    /**
     * <pre>
     * Commit block synchronization during lease recovery.
     * </pre>
     *
     * <code>rpc commitBlockSynchronization(.hadoop.hdfs.datanode.CommitBlockSynchronizationRequestProto) returns (.hadoop.hdfs.datanode.CommitBlockSynchronizationResponseProto);</code>
     */
    public abstract void commitBlockSynchronization(
        org.apache.hadoop.thirdparty.protobuf.RpcController controller,
        org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto request,
        org.apache.hadoop.thirdparty.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto> done);
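
    // Illustrative sketch (not part of the generated output): a server-side
    // implementation extends this abstract service and completes every RPC
    // through the supplied callback. Type names are shortened for readability,
    // and the placeholder response below merely echoes the registration from
    // the request; real namenode logic is out of scope here.
    //
    //   DatanodeProtocolService impl = new DatanodeProtocolService() {
    //     @Override
    //     public void registerDatanode(RpcController controller,
    //         RegisterDatanodeRequestProto request,
    //         RpcCallback<RegisterDatanodeResponseProto> done) {
    //       done.run(RegisterDatanodeResponseProto.newBuilder()
    //           .setRegistration(request.getRegistration())
    //           .build());
    //     }
    //     // ... the remaining eight RPCs are implemented analogously ...
    //   };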

    public static final
        org.apache.hadoop.thirdparty.protobuf.Descriptors.ServiceDescriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.getDescriptor().getServices().get(0);
    }
    public final org.apache.hadoop.thirdparty.protobuf.Descriptors.ServiceDescriptor
        getDescriptorForType() {
      return getDescriptor();
    }

    public final void callMethod(
        org.apache.hadoop.thirdparty.protobuf.Descriptors.MethodDescriptor method,
        org.apache.hadoop.thirdparty.protobuf.RpcController controller,
        org.apache.hadoop.thirdparty.protobuf.Message request,
        org.apache.hadoop.thirdparty.protobuf.RpcCallback<
          org.apache.hadoop.thirdparty.protobuf.Message> done) {
      if (method.getService() != getDescriptor()) {
        throw new java.lang.IllegalArgumentException(
          "Service.callMethod() given method descriptor for wrong " +
          "service type.");
      }
      switch(method.getIndex()) {
        case 0:
          this.registerDatanode(controller, (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto)request,
            org.apache.hadoop.thirdparty.protobuf.RpcUtil.<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto>specializeCallback(
              done));
          return;
        case 1:
          this.sendHeartbeat(controller, (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto)request,
            org.apache.hadoop.thirdparty.protobuf.RpcUtil.<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto>specializeCallback(
              done));
          return;
        case 2:
          this.blockReport(controller, (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto)request,
            org.apache.hadoop.thirdparty.protobuf.RpcUtil.<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto>specializeCallback(
              done));
          return;
        case 3:
          this.cacheReport(controller, (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportRequestProto)request,
            org.apache.hadoop.thirdparty.protobuf.RpcUtil.<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportResponseProto>specializeCallback(
              done));
          return;
        case 4:
          this.blockReceivedAndDeleted(controller, (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto)request,
            org.apache.hadoop.thirdparty.protobuf.RpcUtil.<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto>specializeCallback(
              done));
          return;
        case 5:
          this.errorReport(controller, (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto)request,
            org.apache.hadoop.thirdparty.protobuf.RpcUtil.<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto>specializeCallback(
              done));
          return;
        case 6:
          this.versionRequest(controller, (org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionRequestProto)request,
            org.apache.hadoop.thirdparty.protobuf.RpcUtil.<org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionResponseProto>specializeCallback(
              done));
          return;
        case 7:
          this.reportBadBlocks(controller, (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto)request,
            org.apache.hadoop.thirdparty.protobuf.RpcUtil.<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto>specializeCallback(
              done));
          return;
        case 8:
          this.commitBlockSynchronization(controller, (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto)request,
            org.apache.hadoop.thirdparty.protobuf.RpcUtil.<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto>specializeCallback(
              done));
          return;
        default:
          throw new java.lang.AssertionError("Can't get here.");
      }
    }
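
    // The switch above dispatches on the method's index in the service
    // descriptor, so cases 0-8 follow the declaration order of the RPCs in
    // DatanodeProtocol.proto, from registerDatanode through
    // commitBlockSynchronization.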

    public final org.apache.hadoop.thirdparty.protobuf.Message
        getRequestPrototype(
        org.apache.hadoop.thirdparty.protobuf.Descriptors.MethodDescriptor method) {
      if (method.getService() != getDescriptor()) {
        throw new java.lang.IllegalArgumentException(
          "Service.getRequestPrototype() given method " +
          "descriptor for wrong service type.");
      }
      switch(method.getIndex()) {
        case 0:
          return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto.getDefaultInstance();
        case 1:
          return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto.getDefaultInstance();
        case 2:
          return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto.getDefaultInstance();
        case 3:
          return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportRequestProto.getDefaultInstance();
        case 4:
          return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto.getDefaultInstance();
        case 5:
          return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto.getDefaultInstance();
        case 6:
          return org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionRequestProto.getDefaultInstance();
        case 7:
          return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto.getDefaultInstance();
        case 8:
          return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto.getDefaultInstance();
        default:
          throw new java.lang.AssertionError("Can't get here.");
      }
    }

    public final org.apache.hadoop.thirdparty.protobuf.Message
        getResponsePrototype(
        org.apache.hadoop.thirdparty.protobuf.Descriptors.MethodDescriptor method) {
      if (method.getService() != getDescriptor()) {
        throw new java.lang.IllegalArgumentException(
          "Service.getResponsePrototype() given method " +
          "descriptor for wrong service type.");
      }
      switch(method.getIndex()) {
        case 0:
          return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto.getDefaultInstance();
        case 1:
          return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto.getDefaultInstance();
        case 2:
          return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto.getDefaultInstance();
        case 3:
          return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportResponseProto.getDefaultInstance();
        case 4:
          return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto.getDefaultInstance();
        case 5:
          return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto.getDefaultInstance();
        case 6:
          return org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionResponseProto.getDefaultInstance();
        case 7:
          return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto.getDefaultInstance();
        case 8:
          return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto.getDefaultInstance();
        default:
          throw new java.lang.AssertionError("Can't get here.");
      }
    }

    public static Stub newStub(
        org.apache.hadoop.thirdparty.protobuf.RpcChannel channel) {
      return new Stub(channel);
    }
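
    // Usage sketch (illustrative only): the non-blocking Stub returned by
    // newStub(channel) issues each RPC over the supplied RpcChannel and hands
    // the decoded response to an RpcCallback. How "channel", "controller" and
    // "heartbeatRequest" are obtained is environment-specific and assumed here.
    //
    //   DatanodeProtocolService.Stub stub = DatanodeProtocolService.newStub(channel);
    //   stub.sendHeartbeat(controller, heartbeatRequest,
    //       response -> System.out.println("commands returned: " + response.getCmdsCount()));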

    public static final class Stub extends org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeProtocolService implements Interface {
      private Stub(org.apache.hadoop.thirdparty.protobuf.RpcChannel channel) {
        this.channel = channel;
      }

      private final org.apache.hadoop.thirdparty.protobuf.RpcChannel channel;

      public org.apache.hadoop.thirdparty.protobuf.RpcChannel getChannel() {
        return channel;
      }

      public  void registerDatanode(
          org.apache.hadoop.thirdparty.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto request,
          org.apache.hadoop.thirdparty.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto> done) {
        channel.callMethod(
          getDescriptor().getMethods().get(0),
          controller,
          request,
          org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto.getDefaultInstance(),
          org.apache.hadoop.thirdparty.protobuf.RpcUtil.generalizeCallback(
            done,
            org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto.class,
            org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto.getDefaultInstance()));
      }

      public  void sendHeartbeat(
          org.apache.hadoop.thirdparty.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto request,
          org.apache.hadoop.thirdparty.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto> done) {
        channel.callMethod(
          getDescriptor().getMethods().get(1),
          controller,
          request,
          org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto.getDefaultInstance(),
          org.apache.hadoop.thirdparty.protobuf.RpcUtil.generalizeCallback(
            done,
            org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto.class,
            org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto.getDefaultInstance()));
      }

      public  void blockReport(
          org.apache.hadoop.thirdparty.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto request,
          org.apache.hadoop.thirdparty.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto> done) {
        channel.callMethod(
          getDescriptor().getMethods().get(2),
          controller,
          request,
          org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto.getDefaultInstance(),
          org.apache.hadoop.thirdparty.protobuf.RpcUtil.generalizeCallback(
            done,
            org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto.class,
            org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto.getDefaultInstance()));
      }

      public  void cacheReport(
          org.apache.hadoop.thirdparty.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportRequestProto request,
          org.apache.hadoop.thirdparty.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportResponseProto> done) {
        channel.callMethod(
          getDescriptor().getMethods().get(3),
          controller,
          request,
          org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportResponseProto.getDefaultInstance(),
          org.apache.hadoop.thirdparty.protobuf.RpcUtil.generalizeCallback(
            done,
            org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportResponseProto.class,
            org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportResponseProto.getDefaultInstance()));
      }

      public  void blockReceivedAndDeleted(
          org.apache.hadoop.thirdparty.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto request,
          org.apache.hadoop.thirdparty.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto> done) {
        channel.callMethod(
          getDescriptor().getMethods().get(4),
          controller,
          request,
          org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto.getDefaultInstance(),
          org.apache.hadoop.thirdparty.protobuf.RpcUtil.generalizeCallback(
            done,
            org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto.class,
            org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto.getDefaultInstance()));
      }

      public  void errorReport(
          org.apache.hadoop.thirdparty.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto request,
          org.apache.hadoop.thirdparty.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto> done) {
        channel.callMethod(
          getDescriptor().getMethods().get(5),
          controller,
          request,
          org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto.getDefaultInstance(),
          org.apache.hadoop.thirdparty.protobuf.RpcUtil.generalizeCallback(
            done,
            org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto.class,
            org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto.getDefaultInstance()));
      }

      public  void versionRequest(
          org.apache.hadoop.thirdparty.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionRequestProto request,
          org.apache.hadoop.thirdparty.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionResponseProto> done) {
        channel.callMethod(
          getDescriptor().getMethods().get(6),
          controller,
          request,
          org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionResponseProto.getDefaultInstance(),
          org.apache.hadoop.thirdparty.protobuf.RpcUtil.generalizeCallback(
            done,
            org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionResponseProto.class,
            org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionResponseProto.getDefaultInstance()));
      }

      public  void reportBadBlocks(
          org.apache.hadoop.thirdparty.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto request,
          org.apache.hadoop.thirdparty.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto> done) {
        channel.callMethod(
          getDescriptor().getMethods().get(7),
          controller,
          request,
          org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto.getDefaultInstance(),
          org.apache.hadoop.thirdparty.protobuf.RpcUtil.generalizeCallback(
            done,
            org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto.class,
            org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto.getDefaultInstance()));
      }

      public  void commitBlockSynchronization(
          org.apache.hadoop.thirdparty.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto request,
          org.apache.hadoop.thirdparty.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto> done) {
        channel.callMethod(
          getDescriptor().getMethods().get(8),
          controller,
          request,
          org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto.getDefaultInstance(),
          org.apache.hadoop.thirdparty.protobuf.RpcUtil.generalizeCallback(
            done,
            org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto.class,
            org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto.getDefaultInstance()));
      }
    }

    public static BlockingInterface newBlockingStub(
        org.apache.hadoop.thirdparty.protobuf.BlockingRpcChannel channel) {
      return new BlockingStub(channel);
    }
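
    // Usage sketch (illustrative only): the blocking stub returned by
    // newBlockingStub(channel) waits for each response and surfaces transport
    // or remote failures as ServiceException. "blockingChannel" and
    // "heartbeatRequest" are assumed to exist; the null controller is shown
    // only for brevity.
    //
    //   DatanodeProtocolService.BlockingInterface rpc =
    //       DatanodeProtocolService.newBlockingStub(blockingChannel);
    //   HeartbeatResponseProto reply = rpc.sendHeartbeat(null, heartbeatRequest);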

    public interface BlockingInterface {
      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto registerDatanode(
          org.apache.hadoop.thirdparty.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto request)
          throws org.apache.hadoop.thirdparty.protobuf.ServiceException;

      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto sendHeartbeat(
          org.apache.hadoop.thirdparty.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto request)
          throws org.apache.hadoop.thirdparty.protobuf.ServiceException;

      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto blockReport(
          org.apache.hadoop.thirdparty.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto request)
          throws org.apache.hadoop.thirdparty.protobuf.ServiceException;

      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportResponseProto cacheReport(
          org.apache.hadoop.thirdparty.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportRequestProto request)
          throws org.apache.hadoop.thirdparty.protobuf.ServiceException;

      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto blockReceivedAndDeleted(
          org.apache.hadoop.thirdparty.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto request)
          throws org.apache.hadoop.thirdparty.protobuf.ServiceException;

      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto errorReport(
          org.apache.hadoop.thirdparty.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto request)
          throws org.apache.hadoop.thirdparty.protobuf.ServiceException;

      public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionResponseProto versionRequest(
          org.apache.hadoop.thirdparty.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionRequestProto request)
          throws org.apache.hadoop.thirdparty.protobuf.ServiceException;

      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto reportBadBlocks(
          org.apache.hadoop.thirdparty.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto request)
          throws org.apache.hadoop.thirdparty.protobuf.ServiceException;

      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto commitBlockSynchronization(
          org.apache.hadoop.thirdparty.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto request)
          throws org.apache.hadoop.thirdparty.protobuf.ServiceException;
    }

    private static final class BlockingStub implements BlockingInterface {
      private BlockingStub(org.apache.hadoop.thirdparty.protobuf.BlockingRpcChannel channel) {
        this.channel = channel;
      }

      private final org.apache.hadoop.thirdparty.protobuf.BlockingRpcChannel channel;

      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto registerDatanode(
          org.apache.hadoop.thirdparty.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto request)
          throws org.apache.hadoop.thirdparty.protobuf.ServiceException {
        return (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto) channel.callBlockingMethod(
          getDescriptor().getMethods().get(0),
          controller,
          request,
          org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto.getDefaultInstance());
      }


      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto sendHeartbeat(
          org.apache.hadoop.thirdparty.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto request)
          throws org.apache.hadoop.thirdparty.protobuf.ServiceException {
        return (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto) channel.callBlockingMethod(
          getDescriptor().getMethods().get(1),
          controller,
          request,
          org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto.getDefaultInstance());
      }


      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto blockReport(
          org.apache.hadoop.thirdparty.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto request)
          throws org.apache.hadoop.thirdparty.protobuf.ServiceException {
        return (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto) channel.callBlockingMethod(
          getDescriptor().getMethods().get(2),
          controller,
          request,
          org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto.getDefaultInstance());
      }


      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportResponseProto cacheReport(
          org.apache.hadoop.thirdparty.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportRequestProto request)
          throws org.apache.hadoop.thirdparty.protobuf.ServiceException {
        return (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportResponseProto) channel.callBlockingMethod(
          getDescriptor().getMethods().get(3),
          controller,
          request,
          org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportResponseProto.getDefaultInstance());
      }


      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto blockReceivedAndDeleted(
          org.apache.hadoop.thirdparty.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto request)
          throws org.apache.hadoop.thirdparty.protobuf.ServiceException {
        return (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto) channel.callBlockingMethod(
          getDescriptor().getMethods().get(4),
          controller,
          request,
          org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto.getDefaultInstance());
      }


      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto errorReport(
          org.apache.hadoop.thirdparty.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto request)
          throws org.apache.hadoop.thirdparty.protobuf.ServiceException {
        return (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto) channel.callBlockingMethod(
          getDescriptor().getMethods().get(5),
          controller,
          request,
          org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto.getDefaultInstance());
      }


      public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionResponseProto versionRequest(
          org.apache.hadoop.thirdparty.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionRequestProto request)
          throws org.apache.hadoop.thirdparty.protobuf.ServiceException {
        return (org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionResponseProto) channel.callBlockingMethod(
          getDescriptor().getMethods().get(6),
          controller,
          request,
          org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionResponseProto.getDefaultInstance());
      }


      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto reportBadBlocks(
          org.apache.hadoop.thirdparty.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto request)
          throws org.apache.hadoop.thirdparty.protobuf.ServiceException {
        return (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto) channel.callBlockingMethod(
          getDescriptor().getMethods().get(7),
          controller,
          request,
          org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto.getDefaultInstance());
      }


      public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto commitBlockSynchronization(
          org.apache.hadoop.thirdparty.protobuf.RpcController controller,
          org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto request)
          throws org.apache.hadoop.thirdparty.protobuf.ServiceException {
        return (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto) channel.callBlockingMethod(
          getDescriptor().getMethods().get(8),
          controller,
          request,
          org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto.getDefaultInstance());
      }

    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.datanode.DatanodeProtocolService)
  }

  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_datanode_DatanodeRegistrationProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_datanode_DatanodeRegistrationProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_datanode_DatanodeCommandProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_datanode_DatanodeCommandProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_datanode_BalancerBandwidthCommandProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_datanode_BalancerBandwidthCommandProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_datanode_BlockCommandProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_datanode_BlockCommandProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_datanode_BlockIdCommandProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_datanode_BlockIdCommandProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_datanode_BlockRecoveryCommandProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_datanode_BlockRecoveryCommandProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_datanode_FinalizeCommandProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_datanode_FinalizeCommandProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_datanode_KeyUpdateCommandProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_datanode_KeyUpdateCommandProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_datanode_RegisterCommandProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_datanode_RegisterCommandProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_datanode_BlockECReconstructionCommandProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_datanode_BlockECReconstructionCommandProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_datanode_RegisterDatanodeRequestProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_datanode_RegisterDatanodeRequestProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_datanode_RegisterDatanodeResponseProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_datanode_RegisterDatanodeResponseProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_datanode_VolumeFailureSummaryProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_datanode_VolumeFailureSummaryProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_datanode_HeartbeatRequestProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_datanode_HeartbeatRequestProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_datanode_HeartbeatResponseProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_datanode_HeartbeatResponseProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_datanode_BlockReportRequestProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_datanode_BlockReportRequestProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_datanode_BlockReportContextProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_datanode_BlockReportContextProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_datanode_StorageBlockReportProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_datanode_StorageBlockReportProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_datanode_BlockReportResponseProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_datanode_BlockReportResponseProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_datanode_CacheReportRequestProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_datanode_CacheReportRequestProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_datanode_CacheReportResponseProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_datanode_CacheReportResponseProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_datanode_ReceivedDeletedBlockInfoProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_datanode_ReceivedDeletedBlockInfoProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_datanode_StorageReceivedDeletedBlocksProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_datanode_StorageReceivedDeletedBlocksProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_datanode_BlockReceivedAndDeletedRequestProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_datanode_BlockReceivedAndDeletedRequestProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_datanode_BlockReceivedAndDeletedResponseProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_datanode_BlockReceivedAndDeletedResponseProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_datanode_ErrorReportRequestProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_datanode_ErrorReportRequestProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_datanode_ErrorReportResponseProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_datanode_ErrorReportResponseProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_datanode_ReportBadBlocksRequestProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_datanode_ReportBadBlocksRequestProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_datanode_ReportBadBlocksResponseProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_datanode_ReportBadBlocksResponseProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_datanode_CommitBlockSynchronizationRequestProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_datanode_CommitBlockSynchronizationRequestProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_datanode_CommitBlockSynchronizationResponseProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_datanode_CommitBlockSynchronizationResponseProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_datanode_SlowPeerReportProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_datanode_SlowPeerReportProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_datanode_SlowDiskReportProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_datanode_SlowDiskReportProto_fieldAccessorTable;

  public static org.apache.hadoop.thirdparty.protobuf.Descriptors.FileDescriptor
      getDescriptor() {
    return descriptor;
  }
  private static  org.apache.hadoop.thirdparty.protobuf.Descriptors.FileDescriptor
      descriptor;
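
  // Sketch (illustrative only): the serialized file descriptor assembled in the
  // static initializer below can be inspected at runtime, for example to list
  // the RPC methods of DatanodeProtocolService and their response types.
  //
  //   for (org.apache.hadoop.thirdparty.protobuf.Descriptors.MethodDescriptor m :
  //        DatanodeProtocolProtos.getDescriptor().getServices().get(0).getMethods()) {
  //     System.out.println(m.getName() + " -> " + m.getOutputType().getName());
  //   }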
  static {
    java.lang.String[] descriptorData = {
      "\n\026DatanodeProtocol.proto\022\024hadoop.hdfs.da" +
      "tanode\032\nhdfs.proto\032\023erasurecoding.proto\032" +
      "\020HdfsServer.proto\"\315\001\n\031DatanodeRegistrati" +
      "onProto\0220\n\ndatanodeID\030\001 \002(\0132\034.hadoop.hdf" +
      "s.DatanodeIDProto\0222\n\013storageInfo\030\002 \002(\0132\035" +
      ".hadoop.hdfs.StorageInfoProto\0221\n\004keys\030\003 " +
      "\002(\0132#.hadoop.hdfs.ExportedBlockKeysProto" +
      "\022\027\n\017softwareVersion\030\004 \002(\t\"\370\006\n\024DatanodeCo" +
      "mmandProto\022@\n\007cmdType\030\001 \002(\0162/.hadoop.hdf" +
      "s.datanode.DatanodeCommandProto.Type\022H\n\013" +
      "balancerCmd\030\002 \001(\01323.hadoop.hdfs.datanode" +
      ".BalancerBandwidthCommandProto\0227\n\006blkCmd" +
      "\030\003 \001(\0132\'.hadoop.hdfs.datanode.BlockComma" +
      "ndProto\022D\n\013recoveryCmd\030\004 \001(\0132/.hadoop.hd" +
      "fs.datanode.BlockRecoveryCommandProto\022?\n" +
      "\013finalizeCmd\030\005 \001(\0132*.hadoop.hdfs.datanod" +
      "e.FinalizeCommandProto\022A\n\014keyUpdateCmd\030\006" +
      " \001(\0132+.hadoop.hdfs.datanode.KeyUpdateCom" +
      "mandProto\022?\n\013registerCmd\030\007 \001(\0132*.hadoop." +
      "hdfs.datanode.RegisterCommandProto\022;\n\010bl" +
      "kIdCmd\030\010 \001(\0132).hadoop.hdfs.datanode.Bloc" +
      "kIdCommandProto\022W\n\026blkECReconstructionCm" +
      "d\030\t \001(\01327.hadoop.hdfs.datanode.BlockECRe" +
      "constructionCommandProto\"\371\001\n\004Type\022\034\n\030Bal" +
      "ancerBandwidthCommand\020\000\022\020\n\014BlockCommand\020" +
      "\001\022\030\n\024BlockRecoveryCommand\020\002\022\023\n\017FinalizeC" +
      "ommand\020\003\022\024\n\020KeyUpdateCommand\020\004\022\023\n\017Regist" +
      "erCommand\020\005\022\030\n\024UnusedUpgradeCommand\020\006\022\027\n" +
      "\023NullDatanodeCommand\020\007\022\022\n\016BlockIdCommand" +
      "\020\010\022 \n\034BlockECReconstructionCommand\020\t\"2\n\035" +
      "BalancerBandwidthCommandProto\022\021\n\tbandwid" +
      "th\030\001 \002(\004\"\361\002\n\021BlockCommandProto\022>\n\006action" +
      "\030\001 \002(\0162..hadoop.hdfs.datanode.BlockComma" +
      "ndProto.Action\022\023\n\013blockPoolId\030\002 \002(\t\022\'\n\006b" +
      "locks\030\003 \003(\0132\027.hadoop.hdfs.BlockProto\0220\n\007" +
      "targets\030\004 \003(\0132\037.hadoop.hdfs.DatanodeInfo" +
      "sProto\022:\n\022targetStorageUuids\030\005 \003(\0132\036.had" +
      "oop.hdfs.StorageUuidsProto\022:\n\022targetStor" +
      "ageTypes\030\006 \003(\0132\036.hadoop.hdfs.StorageType" +
      "sProto\"4\n\006Action\022\014\n\010TRANSFER\020\001\022\016\n\nINVALI" +
      "DATE\020\002\022\014\n\010SHUTDOWN\020\003\"\244\001\n\023BlockIdCommandP" +
      "roto\022@\n\006action\030\001 \002(\01620.hadoop.hdfs.datan" +
      "ode.BlockIdCommandProto.Action\022\023\n\013blockP" +
      "oolId\030\002 \002(\t\022\024\n\010blockIds\030\003 \003(\004B\002\020\001\" \n\006Act" +
      "ion\022\t\n\005CACHE\020\001\022\013\n\007UNCACHE\020\002\"N\n\031BlockReco" +
      "veryCommandProto\0221\n\006blocks\030\001 \003(\0132!.hadoo" +
      "p.hdfs.RecoveringBlockProto\"+\n\024FinalizeC" +
      "ommandProto\022\023\n\013blockPoolId\030\001 \002(\t\"J\n\025KeyU" +
      "pdateCommandProto\0221\n\004keys\030\001 \002(\0132#.hadoop" +
      ".hdfs.ExportedBlockKeysProto\"\026\n\024Register" +
      "CommandProto\"s\n!BlockECReconstructionCom" +
      "mandProto\022N\n\031blockECReconstructioninfo\030\001" +
      " \003(\0132+.hadoop.hdfs.BlockECReconstruction" +
      "InfoProto\"e\n\034RegisterDatanodeRequestProt" +
      "o\022E\n\014registration\030\001 \002(\0132/.hadoop.hdfs.da" +
      "tanode.DatanodeRegistrationProto\"f\n\035Regi" +
      "sterDatanodeResponseProto\022E\n\014registratio" +
      "n\030\001 \002(\0132/.hadoop.hdfs.datanode.DatanodeR" +
      "egistrationProto\"~\n\031VolumeFailureSummary" +
      "Proto\022\036\n\026failedStorageLocations\030\001 \003(\t\022\035\n" +
      "\025lastVolumeFailureDate\030\002 \002(\004\022\"\n\032estimate" +
      "dCapacityLostTotal\030\003 \002(\004\"\206\004\n\025HeartbeatRe" +
      "questProto\022E\n\014registration\030\001 \002(\0132/.hadoo" +
      "p.hdfs.datanode.DatanodeRegistrationProt" +
      "o\0220\n\007reports\030\002 \003(\0132\037.hadoop.hdfs.Storage" +
      "ReportProto\022\032\n\017xmitsInProgress\030\003 \001(\r:\0010\022" +
      "\027\n\014xceiverCount\030\004 \001(\r:\0010\022\030\n\rfailedVolume" +
      "s\030\005 \001(\r:\0010\022\030\n\rcacheCapacity\030\006 \001(\004:\0010\022\024\n\t" +
      "cacheUsed\030\007 \001(\004:\0010\022M\n\024volumeFailureSumma" +
      "ry\030\010 \001(\0132/.hadoop.hdfs.datanode.VolumeFa" +
      "ilureSummaryProto\022*\n\033requestFullBlockRep" +
      "ortLease\030\t \001(\010:\005false\022<\n\tslowPeers\030\n \003(\013" +
      "2).hadoop.hdfs.datanode.SlowPeerReportPr" +
      "oto\022<\n\tslowDisks\030\013 \003(\0132).hadoop.hdfs.dat" +
      "anode.SlowDiskReportProto\"\327\002\n\026HeartbeatR" +
      "esponseProto\0228\n\004cmds\030\001 \003(\0132*.hadoop.hdfs" +
      ".datanode.DatanodeCommandProto\0227\n\010haStat" +
      "us\030\002 \002(\0132%.hadoop.hdfs.NNHAStatusHeartbe" +
      "atProto\022D\n\024rollingUpgradeStatus\030\003 \001(\0132&." +
      "hadoop.hdfs.RollingUpgradeStatusProto\022F\n" +
      "\026rollingUpgradeStatusV2\030\004 \001(\0132&.hadoop.h" +
      "dfs.RollingUpgradeStatusProto\022!\n\026fullBlo" +
      "ckReportLeaseId\030\005 \001(\004:\0010\022\031\n\nisSlownode\030\006" +
      " \001(\010:\005false\"\365\001\n\027BlockReportRequestProto\022" +
      "E\n\014registration\030\001 \002(\0132/.hadoop.hdfs.data" +
      "node.DatanodeRegistrationProto\022\023\n\013blockP" +
      "oolId\030\002 \002(\t\022>\n\007reports\030\003 \003(\0132-.hadoop.hd" +
      "fs.datanode.StorageBlockReportProto\022>\n\007c" +
      "ontext\030\004 \001(\0132-.hadoop.hdfs.datanode.Bloc" +
      "kReportContextProto\"\\\n\027BlockReportContex" +
      "tProto\022\021\n\ttotalRpcs\030\001 \002(\005\022\016\n\006curRpc\030\002 \002(" +
      "\005\022\n\n\002id\030\003 \002(\003\022\022\n\007leaseId\030\004 \001(\004:\0010\"\220\001\n\027St" +
      "orageBlockReportProto\0222\n\007storage\030\001 \002(\0132!" +
      ".hadoop.hdfs.DatanodeStorageProto\022\022\n\006blo" +
      "cks\030\002 \003(\004B\002\020\001\022\026\n\016numberOfBlocks\030\003 \001(\004\022\025\n" +
      "\rblocksBuffers\030\004 \003(\014\"S\n\030BlockReportRespo" +
      "nseProto\0227\n\003cmd\030\001 \001(\0132*.hadoop.hdfs.data" +
      "node.DatanodeCommandProto\"\211\001\n\027CacheRepor" +
      "tRequestProto\022E\n\014registration\030\001 \002(\0132/.ha" +
      "doop.hdfs.datanode.DatanodeRegistrationP" +
      "roto\022\023\n\013blockPoolId\030\002 \002(\t\022\022\n\006blocks\030\003 \003(" +
      "\004B\002\020\001\"S\n\030CacheReportResponseProto\0227\n\003cmd" +
      "\030\001 \001(\0132*.hadoop.hdfs.datanode.DatanodeCo" +
      "mmandProto\"\345\001\n\035ReceivedDeletedBlockInfoP" +
      "roto\022&\n\005block\030\001 \002(\0132\027.hadoop.hdfs.BlockP" +
      "roto\022O\n\006status\030\003 \002(\0162?.hadoop.hdfs.datan" +
      "ode.ReceivedDeletedBlockInfoProto.BlockS" +
      "tatus\022\022\n\ndeleteHint\030\002 \001(\t\"7\n\013BlockStatus" +
      "\022\r\n\tRECEIVING\020\001\022\014\n\010RECEIVED\020\002\022\013\n\007DELETED" +
      "\020\003\"\265\001\n!StorageReceivedDeletedBlocksProto" +
      "\022\027\n\013storageUuid\030\001 \002(\tB\002\030\001\022C\n\006blocks\030\002 \003(" +
      "\01323.hadoop.hdfs.datanode.ReceivedDeleted" +
      "BlockInfoProto\0222\n\007storage\030\003 \001(\0132!.hadoop" +
      ".hdfs.DatanodeStorageProto\"\312\001\n#BlockRece" +
      "ivedAndDeletedRequestProto\022E\n\014registrati" +
      "on\030\001 \002(\0132/.hadoop.hdfs.datanode.Datanode" +
      "RegistrationProto\022\023\n\013blockPoolId\030\002 \002(\t\022G" +
      "\n\006blocks\030\003 \003(\01327.hadoop.hdfs.datanode.St" +
      "orageReceivedDeletedBlocksProto\"&\n$Block" +
      "ReceivedAndDeletedResponseProto\"\322\001\n\027Erro" +
      "rReportRequestProto\022E\n\014registartion\030\001 \002(" +
      "\0132/.hadoop.hdfs.datanode.DatanodeRegistr" +
      "ationProto\022\021\n\terrorCode\030\002 \002(\r\022\013\n\003msg\030\003 \002" +
      "(\t\"P\n\tErrorCode\022\n\n\006NOTIFY\020\000\022\016\n\nDISK_ERRO" +
      "R\020\001\022\021\n\rINVALID_BLOCK\020\002\022\024\n\020FATAL_DISK_ERR" +
      "OR\020\003\"\032\n\030ErrorReportResponseProto\"M\n\033Repo" +
      "rtBadBlocksRequestProto\022.\n\006blocks\030\001 \003(\0132" +
      "\036.hadoop.hdfs.LocatedBlockProto\"\036\n\034Repor" +
      "tBadBlocksResponseProto\"\366\001\n&CommitBlockS" +
      "ynchronizationRequestProto\022.\n\005block\030\001 \002(" +
      "\0132\037.hadoop.hdfs.ExtendedBlockProto\022\023\n\013ne" +
      "wGenStamp\030\002 \002(\004\022\021\n\tnewLength\030\003 \002(\004\022\021\n\tcl" +
      "oseFile\030\004 \002(\010\022\023\n\013deleteBlock\030\005 \002(\010\0221\n\013ne" +
      "wTaragets\030\006 \003(\0132\034.hadoop.hdfs.DatanodeID" +
      "Proto\022\031\n\021newTargetStorages\030\007 \003(\t\")\n\'Comm" +
      "itBlockSynchronizationResponseProto\"{\n\023S" +
      "lowPeerReportProto\022\022\n\ndataNodeId\030\001 \001(\t\022\030" +
      "\n\020aggregateLatency\030\002 \001(\001\022\016\n\006median\030\003 \001(\001" +
      "\022\013\n\003mad\030\004 \001(\001\022\031\n\021upperLimitLatency\030\005 \001(\001" +
      "\"}\n\023SlowDiskReportProto\022\020\n\010basePath\030\001 \001(" +
      "\t\022\035\n\025meanMetadataOpLatency\030\002 \001(\001\022\031\n\021mean" +
      "ReadIoLatency\030\003 \001(\001\022\032\n\022meanWriteIoLatenc" +
      "y\030\004 \001(\0012\314\010\n\027DatanodeProtocolService\022{\n\020r" +
      "egisterDatanode\0222.hadoop.hdfs.datanode.R" +
      "egisterDatanodeRequestProto\0323.hadoop.hdf" +
      "s.datanode.RegisterDatanodeResponseProto" +
      "\022j\n\rsendHeartbeat\022+.hadoop.hdfs.datanode" +
      ".HeartbeatRequestProto\032,.hadoop.hdfs.dat" +
      "anode.HeartbeatResponseProto\022l\n\013blockRep" +
      "ort\022-.hadoop.hdfs.datanode.BlockReportRe" +
      "questProto\032..hadoop.hdfs.datanode.BlockR" +
      "eportResponseProto\022l\n\013cacheReport\022-.hado" +
      "op.hdfs.datanode.CacheReportRequestProto" +
      "\032..hadoop.hdfs.datanode.CacheReportRespo" +
      "nseProto\022\220\001\n\027blockReceivedAndDeleted\0229.h" +
      "adoop.hdfs.datanode.BlockReceivedAndDele" +
      "tedRequestProto\032:.hadoop.hdfs.datanode.B" +
      "lockReceivedAndDeletedResponseProto\022l\n\013e" +
      "rrorReport\022-.hadoop.hdfs.datanode.ErrorR" +
      "eportRequestProto\032..hadoop.hdfs.datanode" +
      ".ErrorReportResponseProto\022U\n\016versionRequ" +
      "est\022 .hadoop.hdfs.VersionRequestProto\032!." +
      "hadoop.hdfs.VersionResponseProto\022x\n\017repo" +
      "rtBadBlocks\0221.hadoop.hdfs.datanode.Repor" +
      "tBadBlocksRequestProto\0322.hadoop.hdfs.dat" +
      "anode.ReportBadBlocksResponseProto\022\231\001\n\032c" +
      "ommitBlockSynchronization\022<.hadoop.hdfs." +
      "datanode.CommitBlockSynchronizationReque" +
      "stProto\032=.hadoop.hdfs.datanode.CommitBlo" +
      "ckSynchronizationResponseProtoBE\n%org.ap" +
      "ache.hadoop.hdfs.protocol.protoB\026Datanod" +
      "eProtocolProtos\210\001\001\240\001\001"
    };
    descriptor = org.apache.hadoop.thirdparty.protobuf.Descriptors.FileDescriptor
      .internalBuildGeneratedFileFrom(descriptorData,
        new org.apache.hadoop.thirdparty.protobuf.Descriptors.FileDescriptor[] {
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.getDescriptor(),
          org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.getDescriptor(),
          org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.getDescriptor(),
        });
    internal_static_hadoop_hdfs_datanode_DatanodeRegistrationProto_descriptor =
      getDescriptor().getMessageTypes().get(0);
    internal_static_hadoop_hdfs_datanode_DatanodeRegistrationProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_datanode_DatanodeRegistrationProto_descriptor,
        new java.lang.String[] { "DatanodeID", "StorageInfo", "Keys", "SoftwareVersion", });
    internal_static_hadoop_hdfs_datanode_DatanodeCommandProto_descriptor =
      getDescriptor().getMessageTypes().get(1);
    internal_static_hadoop_hdfs_datanode_DatanodeCommandProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_datanode_DatanodeCommandProto_descriptor,
        new java.lang.String[] { "CmdType", "BalancerCmd", "BlkCmd", "RecoveryCmd", "FinalizeCmd", "KeyUpdateCmd", "RegisterCmd", "BlkIdCmd", "BlkECReconstructionCmd", });
    internal_static_hadoop_hdfs_datanode_BalancerBandwidthCommandProto_descriptor =
      getDescriptor().getMessageTypes().get(2);
    internal_static_hadoop_hdfs_datanode_BalancerBandwidthCommandProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_datanode_BalancerBandwidthCommandProto_descriptor,
        new java.lang.String[] { "Bandwidth", });
    internal_static_hadoop_hdfs_datanode_BlockCommandProto_descriptor =
      getDescriptor().getMessageTypes().get(3);
    internal_static_hadoop_hdfs_datanode_BlockCommandProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_datanode_BlockCommandProto_descriptor,
        new java.lang.String[] { "Action", "BlockPoolId", "Blocks", "Targets", "TargetStorageUuids", "TargetStorageTypes", });
    internal_static_hadoop_hdfs_datanode_BlockIdCommandProto_descriptor =
      getDescriptor().getMessageTypes().get(4);
    internal_static_hadoop_hdfs_datanode_BlockIdCommandProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_datanode_BlockIdCommandProto_descriptor,
        new java.lang.String[] { "Action", "BlockPoolId", "BlockIds", });
    internal_static_hadoop_hdfs_datanode_BlockRecoveryCommandProto_descriptor =
      getDescriptor().getMessageTypes().get(5);
    internal_static_hadoop_hdfs_datanode_BlockRecoveryCommandProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_datanode_BlockRecoveryCommandProto_descriptor,
        new java.lang.String[] { "Blocks", });
    internal_static_hadoop_hdfs_datanode_FinalizeCommandProto_descriptor =
      getDescriptor().getMessageTypes().get(6);
    internal_static_hadoop_hdfs_datanode_FinalizeCommandProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_datanode_FinalizeCommandProto_descriptor,
        new java.lang.String[] { "BlockPoolId", });
    internal_static_hadoop_hdfs_datanode_KeyUpdateCommandProto_descriptor =
      getDescriptor().getMessageTypes().get(7);
    internal_static_hadoop_hdfs_datanode_KeyUpdateCommandProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_datanode_KeyUpdateCommandProto_descriptor,
        new java.lang.String[] { "Keys", });
    internal_static_hadoop_hdfs_datanode_RegisterCommandProto_descriptor =
      getDescriptor().getMessageTypes().get(8);
    internal_static_hadoop_hdfs_datanode_RegisterCommandProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_datanode_RegisterCommandProto_descriptor,
        new java.lang.String[] { });
    internal_static_hadoop_hdfs_datanode_BlockECReconstructionCommandProto_descriptor =
      getDescriptor().getMessageTypes().get(9);
    internal_static_hadoop_hdfs_datanode_BlockECReconstructionCommandProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_datanode_BlockECReconstructionCommandProto_descriptor,
        new java.lang.String[] { "BlockECReconstructioninfo", });
    internal_static_hadoop_hdfs_datanode_RegisterDatanodeRequestProto_descriptor =
      getDescriptor().getMessageTypes().get(10);
    internal_static_hadoop_hdfs_datanode_RegisterDatanodeRequestProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_datanode_RegisterDatanodeRequestProto_descriptor,
        new java.lang.String[] { "Registration", });
    internal_static_hadoop_hdfs_datanode_RegisterDatanodeResponseProto_descriptor =
      getDescriptor().getMessageTypes().get(11);
    internal_static_hadoop_hdfs_datanode_RegisterDatanodeResponseProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_datanode_RegisterDatanodeResponseProto_descriptor,
        new java.lang.String[] { "Registration", });
    internal_static_hadoop_hdfs_datanode_VolumeFailureSummaryProto_descriptor =
      getDescriptor().getMessageTypes().get(12);
    internal_static_hadoop_hdfs_datanode_VolumeFailureSummaryProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_datanode_VolumeFailureSummaryProto_descriptor,
        new java.lang.String[] { "FailedStorageLocations", "LastVolumeFailureDate", "EstimatedCapacityLostTotal", });
    internal_static_hadoop_hdfs_datanode_HeartbeatRequestProto_descriptor =
      getDescriptor().getMessageTypes().get(13);
    internal_static_hadoop_hdfs_datanode_HeartbeatRequestProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_datanode_HeartbeatRequestProto_descriptor,
        new java.lang.String[] { "Registration", "Reports", "XmitsInProgress", "XceiverCount", "FailedVolumes", "CacheCapacity", "CacheUsed", "VolumeFailureSummary", "RequestFullBlockReportLease", "SlowPeers", "SlowDisks", });
    internal_static_hadoop_hdfs_datanode_HeartbeatResponseProto_descriptor =
      getDescriptor().getMessageTypes().get(14);
    internal_static_hadoop_hdfs_datanode_HeartbeatResponseProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_datanode_HeartbeatResponseProto_descriptor,
        new java.lang.String[] { "Cmds", "HaStatus", "RollingUpgradeStatus", "RollingUpgradeStatusV2", "FullBlockReportLeaseId", "IsSlownode", });
    internal_static_hadoop_hdfs_datanode_BlockReportRequestProto_descriptor =
      getDescriptor().getMessageTypes().get(15);
    internal_static_hadoop_hdfs_datanode_BlockReportRequestProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_datanode_BlockReportRequestProto_descriptor,
        new java.lang.String[] { "Registration", "BlockPoolId", "Reports", "Context", });
    internal_static_hadoop_hdfs_datanode_BlockReportContextProto_descriptor =
      getDescriptor().getMessageTypes().get(16);
    internal_static_hadoop_hdfs_datanode_BlockReportContextProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_datanode_BlockReportContextProto_descriptor,
        new java.lang.String[] { "TotalRpcs", "CurRpc", "Id", "LeaseId", });
    internal_static_hadoop_hdfs_datanode_StorageBlockReportProto_descriptor =
      getDescriptor().getMessageTypes().get(17);
    internal_static_hadoop_hdfs_datanode_StorageBlockReportProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_datanode_StorageBlockReportProto_descriptor,
        new java.lang.String[] { "Storage", "Blocks", "NumberOfBlocks", "BlocksBuffers", });
    internal_static_hadoop_hdfs_datanode_BlockReportResponseProto_descriptor =
      getDescriptor().getMessageTypes().get(18);
    internal_static_hadoop_hdfs_datanode_BlockReportResponseProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_datanode_BlockReportResponseProto_descriptor,
        new java.lang.String[] { "Cmd", });
    internal_static_hadoop_hdfs_datanode_CacheReportRequestProto_descriptor =
      getDescriptor().getMessageTypes().get(19);
    internal_static_hadoop_hdfs_datanode_CacheReportRequestProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_datanode_CacheReportRequestProto_descriptor,
        new java.lang.String[] { "Registration", "BlockPoolId", "Blocks", });
    internal_static_hadoop_hdfs_datanode_CacheReportResponseProto_descriptor =
      getDescriptor().getMessageTypes().get(20);
    internal_static_hadoop_hdfs_datanode_CacheReportResponseProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_datanode_CacheReportResponseProto_descriptor,
        new java.lang.String[] { "Cmd", });
    internal_static_hadoop_hdfs_datanode_ReceivedDeletedBlockInfoProto_descriptor =
      getDescriptor().getMessageTypes().get(21);
    internal_static_hadoop_hdfs_datanode_ReceivedDeletedBlockInfoProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_datanode_ReceivedDeletedBlockInfoProto_descriptor,
        new java.lang.String[] { "Block", "Status", "DeleteHint", });
    internal_static_hadoop_hdfs_datanode_StorageReceivedDeletedBlocksProto_descriptor =
      getDescriptor().getMessageTypes().get(22);
    internal_static_hadoop_hdfs_datanode_StorageReceivedDeletedBlocksProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_datanode_StorageReceivedDeletedBlocksProto_descriptor,
        new java.lang.String[] { "StorageUuid", "Blocks", "Storage", });
    internal_static_hadoop_hdfs_datanode_BlockReceivedAndDeletedRequestProto_descriptor =
      getDescriptor().getMessageTypes().get(23);
    internal_static_hadoop_hdfs_datanode_BlockReceivedAndDeletedRequestProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_datanode_BlockReceivedAndDeletedRequestProto_descriptor,
        new java.lang.String[] { "Registration", "BlockPoolId", "Blocks", });
    internal_static_hadoop_hdfs_datanode_BlockReceivedAndDeletedResponseProto_descriptor =
      getDescriptor().getMessageTypes().get(24);
    internal_static_hadoop_hdfs_datanode_BlockReceivedAndDeletedResponseProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_datanode_BlockReceivedAndDeletedResponseProto_descriptor,
        new java.lang.String[] { });
    internal_static_hadoop_hdfs_datanode_ErrorReportRequestProto_descriptor =
      getDescriptor().getMessageTypes().get(25);
    internal_static_hadoop_hdfs_datanode_ErrorReportRequestProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_datanode_ErrorReportRequestProto_descriptor,
        new java.lang.String[] { "Registartion", "ErrorCode", "Msg", });
    internal_static_hadoop_hdfs_datanode_ErrorReportResponseProto_descriptor =
      getDescriptor().getMessageTypes().get(26);
    internal_static_hadoop_hdfs_datanode_ErrorReportResponseProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_datanode_ErrorReportResponseProto_descriptor,
        new java.lang.String[] { });
    internal_static_hadoop_hdfs_datanode_ReportBadBlocksRequestProto_descriptor =
      getDescriptor().getMessageTypes().get(27);
    internal_static_hadoop_hdfs_datanode_ReportBadBlocksRequestProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_datanode_ReportBadBlocksRequestProto_descriptor,
        new java.lang.String[] { "Blocks", });
    internal_static_hadoop_hdfs_datanode_ReportBadBlocksResponseProto_descriptor =
      getDescriptor().getMessageTypes().get(28);
    internal_static_hadoop_hdfs_datanode_ReportBadBlocksResponseProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_datanode_ReportBadBlocksResponseProto_descriptor,
        new java.lang.String[] { });
    internal_static_hadoop_hdfs_datanode_CommitBlockSynchronizationRequestProto_descriptor =
      getDescriptor().getMessageTypes().get(29);
    internal_static_hadoop_hdfs_datanode_CommitBlockSynchronizationRequestProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_datanode_CommitBlockSynchronizationRequestProto_descriptor,
        new java.lang.String[] { "Block", "NewGenStamp", "NewLength", "CloseFile", "DeleteBlock", "NewTaragets", "NewTargetStorages", });
    internal_static_hadoop_hdfs_datanode_CommitBlockSynchronizationResponseProto_descriptor =
      getDescriptor().getMessageTypes().get(30);
    internal_static_hadoop_hdfs_datanode_CommitBlockSynchronizationResponseProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_datanode_CommitBlockSynchronizationResponseProto_descriptor,
        new java.lang.String[] { });
    internal_static_hadoop_hdfs_datanode_SlowPeerReportProto_descriptor =
      getDescriptor().getMessageTypes().get(31);
    internal_static_hadoop_hdfs_datanode_SlowPeerReportProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_datanode_SlowPeerReportProto_descriptor,
        new java.lang.String[] { "DataNodeId", "AggregateLatency", "Median", "Mad", "UpperLimitLatency", });
    internal_static_hadoop_hdfs_datanode_SlowDiskReportProto_descriptor =
      getDescriptor().getMessageTypes().get(32);
    internal_static_hadoop_hdfs_datanode_SlowDiskReportProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_datanode_SlowDiskReportProto_descriptor,
        new java.lang.String[] { "BasePath", "MeanMetadataOpLatency", "MeanReadIoLatency", "MeanWriteIoLatency", });
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.getDescriptor();
    org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.getDescriptor();
    org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.getDescriptor();
  }
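
  // A minimal usage sketch, assuming the nested FinalizeCommandProto message
  // and its builder declared earlier in this file: once the static block above
  // has wired the descriptors and field accessor tables, callers normally go
  // through the generated builders rather than the descriptors directly. The
  // block-pool id literal below is a hypothetical placeholder, not a value
  // taken from any real cluster.
  private static FinalizeCommandProto buildExampleFinalizeCommand() {
    return FinalizeCommandProto.newBuilder()
        // required string blockPoolId = 1 (illustrative value only)
        .setBlockPoolId("BP-0-127.0.0.1-0")
        .build();
  }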

  // @@protoc_insertion_point(outer_class_scope)
}