// Generated by the protocol buffer compiler.  DO NOT EDIT!
// source: ClientAMProtocol.proto

// Protobuf Java Version: 3.25.5
package org.apache.hadoop.yarn.proto;

public final class ClientAMProtocol {
  private ClientAMProtocol() {}
  public static void registerAllExtensions(
      org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite registry) {
  }

  public static void registerAllExtensions(
      org.apache.hadoop.thirdparty.protobuf.ExtensionRegistry registry) {
    registerAllExtensions(
        (org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite) registry);
  }
  public interface FlexComponentsRequestProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.yarn.FlexComponentsRequestProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

    /**
     * <code>repeated .hadoop.yarn.ComponentCountProto components = 1;</code>
     */
    java.util.List<org.apache.hadoop.yarn.proto.ClientAMProtocol.ComponentCountProto> 
        getComponentsList();
    /**
     * <code>repeated .hadoop.yarn.ComponentCountProto components = 1;</code>
     */
    org.apache.hadoop.yarn.proto.ClientAMProtocol.ComponentCountProto getComponents(int index);
    /**
     * <code>repeated .hadoop.yarn.ComponentCountProto components = 1;</code>
     */
    int getComponentsCount();
    /**
     * <code>repeated .hadoop.yarn.ComponentCountProto components = 1;</code>
     */
    java.util.List<? extends org.apache.hadoop.yarn.proto.ClientAMProtocol.ComponentCountProtoOrBuilder> 
        getComponentsOrBuilderList();
    /**
     * <code>repeated .hadoop.yarn.ComponentCountProto components = 1;</code>
     */
    org.apache.hadoop.yarn.proto.ClientAMProtocol.ComponentCountProtoOrBuilder getComponentsOrBuilder(
        int index);
  }
  /**
   * Protobuf type {@code hadoop.yarn.FlexComponentsRequestProto}
   */
  public static final class FlexComponentsRequestProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.yarn.FlexComponentsRequestProto)
      FlexComponentsRequestProtoOrBuilder {
    private static final long serialVersionUID = 0L;
    // Use FlexComponentsRequestProto.newBuilder() to construct.
    private FlexComponentsRequestProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private FlexComponentsRequestProto() {
      components_ = java.util.Collections.emptyList();
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new FlexComponentsRequestProto();
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.yarn.proto.ClientAMProtocol.internal_static_hadoop_yarn_FlexComponentsRequestProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.yarn.proto.ClientAMProtocol.internal_static_hadoop_yarn_FlexComponentsRequestProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.yarn.proto.ClientAMProtocol.FlexComponentsRequestProto.class, org.apache.hadoop.yarn.proto.ClientAMProtocol.FlexComponentsRequestProto.Builder.class);
    }

    public static final int COMPONENTS_FIELD_NUMBER = 1;
    @SuppressWarnings("serial")
    private java.util.List<org.apache.hadoop.yarn.proto.ClientAMProtocol.ComponentCountProto> components_;
    /**
     * <code>repeated .hadoop.yarn.ComponentCountProto components = 1;</code>
     */
    @java.lang.Override
    public java.util.List<org.apache.hadoop.yarn.proto.ClientAMProtocol.ComponentCountProto> getComponentsList() {
      return components_;
    }
    /**
     * <code>repeated .hadoop.yarn.ComponentCountProto components = 1;</code>
     */
    @java.lang.Override
    public java.util.List<? extends org.apache.hadoop.yarn.proto.ClientAMProtocol.ComponentCountProtoOrBuilder> 
        getComponentsOrBuilderList() {
      return components_;
    }
    /**
     * <code>repeated .hadoop.yarn.ComponentCountProto components = 1;</code>
     */
    @java.lang.Override
    public int getComponentsCount() {
      return components_.size();
    }
    /**
     * <code>repeated .hadoop.yarn.ComponentCountProto components = 1;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.yarn.proto.ClientAMProtocol.ComponentCountProto getComponents(int index) {
      return components_.get(index);
    }
    /**
     * <code>repeated .hadoop.yarn.ComponentCountProto components = 1;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.yarn.proto.ClientAMProtocol.ComponentCountProtoOrBuilder getComponentsOrBuilder(
        int index) {
      return components_.get(index);
    }

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      memoizedIsInitialized = 1;
      return true;
    }

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      for (int i = 0; i < components_.size(); i++) {
        output.writeMessage(1, components_.get(i));
      }
      getUnknownFields().writeTo(output);
    }
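    // On the wire, each element of components_ above is emitted as field 1
    // with wire type 2 (length-delimited): tag = (1 << 3) | 2 = 10, which is
    // why the builder's mergeFrom(CodedInputStream) below matches "case 10".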

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      for (int i = 0; i < components_.size(); i++) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeMessageSize(1, components_.get(i));
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
      if (!(obj instanceof org.apache.hadoop.yarn.proto.ClientAMProtocol.FlexComponentsRequestProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.yarn.proto.ClientAMProtocol.FlexComponentsRequestProto other = (org.apache.hadoop.yarn.proto.ClientAMProtocol.FlexComponentsRequestProto) obj;

      if (!getComponentsList()
          .equals(other.getComponentsList())) return false;
      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      if (getComponentsCount() > 0) {
        hash = (37 * hash) + COMPONENTS_FIELD_NUMBER;
        hash = (53 * hash) + getComponentsList().hashCode();
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.FlexComponentsRequestProto parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.FlexComponentsRequestProto parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.FlexComponentsRequestProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.FlexComponentsRequestProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.FlexComponentsRequestProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.FlexComponentsRequestProto parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.FlexComponentsRequestProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.FlexComponentsRequestProto parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.FlexComponentsRequestProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }

    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.FlexComponentsRequestProto parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.FlexComponentsRequestProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.FlexComponentsRequestProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }
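    // Illustrative round trip using the parse methods above (a sketch, not
    // part of the generated API; toByteArray() is inherited from the protobuf
    // runtime's AbstractMessageLite):
    //
    //   byte[] bytes = request.toByteArray();
    //   FlexComponentsRequestProto copy = FlexComponentsRequestProto.parseFrom(bytes);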

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.yarn.proto.ClientAMProtocol.FlexComponentsRequestProto prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * Protobuf type {@code hadoop.yarn.FlexComponentsRequestProto}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.yarn.FlexComponentsRequestProto)
        org.apache.hadoop.yarn.proto.ClientAMProtocol.FlexComponentsRequestProtoOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.yarn.proto.ClientAMProtocol.internal_static_hadoop_yarn_FlexComponentsRequestProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.yarn.proto.ClientAMProtocol.internal_static_hadoop_yarn_FlexComponentsRequestProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.yarn.proto.ClientAMProtocol.FlexComponentsRequestProto.class, org.apache.hadoop.yarn.proto.ClientAMProtocol.FlexComponentsRequestProto.Builder.class);
      }

      // Construct using org.apache.hadoop.yarn.proto.ClientAMProtocol.FlexComponentsRequestProto.newBuilder()
      private Builder() {

      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);

      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        bitField0_ = 0;
        if (componentsBuilder_ == null) {
          components_ = java.util.Collections.emptyList();
        } else {
          components_ = null;
          componentsBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000001);
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.yarn.proto.ClientAMProtocol.internal_static_hadoop_yarn_FlexComponentsRequestProto_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.yarn.proto.ClientAMProtocol.FlexComponentsRequestProto getDefaultInstanceForType() {
        return org.apache.hadoop.yarn.proto.ClientAMProtocol.FlexComponentsRequestProto.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.yarn.proto.ClientAMProtocol.FlexComponentsRequestProto build() {
        org.apache.hadoop.yarn.proto.ClientAMProtocol.FlexComponentsRequestProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.yarn.proto.ClientAMProtocol.FlexComponentsRequestProto buildPartial() {
        org.apache.hadoop.yarn.proto.ClientAMProtocol.FlexComponentsRequestProto result = new org.apache.hadoop.yarn.proto.ClientAMProtocol.FlexComponentsRequestProto(this);
        buildPartialRepeatedFields(result);
        if (bitField0_ != 0) { buildPartial0(result); }
        onBuilt();
        return result;
      }

      private void buildPartialRepeatedFields(org.apache.hadoop.yarn.proto.ClientAMProtocol.FlexComponentsRequestProto result) {
        if (componentsBuilder_ == null) {
          if (((bitField0_ & 0x00000001) != 0)) {
            components_ = java.util.Collections.unmodifiableList(components_);
            bitField0_ = (bitField0_ & ~0x00000001);
          }
          result.components_ = components_;
        } else {
          result.components_ = componentsBuilder_.build();
        }
      }

      private void buildPartial0(org.apache.hadoop.yarn.proto.ClientAMProtocol.FlexComponentsRequestProto result) {
        // No singular fields on this message; the repeated components field is
        // copied in buildPartialRepeatedFields above.
      }

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.yarn.proto.ClientAMProtocol.FlexComponentsRequestProto) {
          return mergeFrom((org.apache.hadoop.yarn.proto.ClientAMProtocol.FlexComponentsRequestProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.yarn.proto.ClientAMProtocol.FlexComponentsRequestProto other) {
        if (other == org.apache.hadoop.yarn.proto.ClientAMProtocol.FlexComponentsRequestProto.getDefaultInstance()) return this;
        if (componentsBuilder_ == null) {
          if (!other.components_.isEmpty()) {
            if (components_.isEmpty()) {
              components_ = other.components_;
              bitField0_ = (bitField0_ & ~0x00000001);
            } else {
              ensureComponentsIsMutable();
              components_.addAll(other.components_);
            }
            onChanged();
          }
        } else {
          if (!other.components_.isEmpty()) {
            if (componentsBuilder_.isEmpty()) {
              componentsBuilder_.dispose();
              componentsBuilder_ = null;
              components_ = other.components_;
              bitField0_ = (bitField0_ & ~0x00000001);
              componentsBuilder_ = 
                org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ?
                   getComponentsFieldBuilder() : null;
            } else {
              componentsBuilder_.addAllMessages(other.components_);
            }
          }
        }
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        return true;
      }

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              case 10: {
                org.apache.hadoop.yarn.proto.ClientAMProtocol.ComponentCountProto m =
                    input.readMessage(
                        org.apache.hadoop.yarn.proto.ClientAMProtocol.ComponentCountProto.PARSER,
                        extensionRegistry);
                if (componentsBuilder_ == null) {
                  ensureComponentsIsMutable();
                  components_.add(m);
                } else {
                  componentsBuilder_.addMessage(m);
                }
                break;
              } // case 10
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
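      // Tag decoding above: a protobuf tag is (field_number << 3) | wire_type.
      // Field 1 (components) is length-delimited (wire type 2), so its tag is
      // (1 << 3) | 2 = 10; tag 0 means end of input.
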
      private int bitField0_;

      private java.util.List<org.apache.hadoop.yarn.proto.ClientAMProtocol.ComponentCountProto> components_ =
        java.util.Collections.emptyList();
      private void ensureComponentsIsMutable() {
        if (!((bitField0_ & 0x00000001) != 0)) {
          components_ = new java.util.ArrayList<org.apache.hadoop.yarn.proto.ClientAMProtocol.ComponentCountProto>(components_);
          bitField0_ |= 0x00000001;
        }
      }

      private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
          org.apache.hadoop.yarn.proto.ClientAMProtocol.ComponentCountProto, org.apache.hadoop.yarn.proto.ClientAMProtocol.ComponentCountProto.Builder, org.apache.hadoop.yarn.proto.ClientAMProtocol.ComponentCountProtoOrBuilder> componentsBuilder_;

      /**
       * <code>repeated .hadoop.yarn.ComponentCountProto components = 1;</code>
       */
      public java.util.List<org.apache.hadoop.yarn.proto.ClientAMProtocol.ComponentCountProto> getComponentsList() {
        if (componentsBuilder_ == null) {
          return java.util.Collections.unmodifiableList(components_);
        } else {
          return componentsBuilder_.getMessageList();
        }
      }
      /**
       * <code>repeated .hadoop.yarn.ComponentCountProto components = 1;</code>
       */
      public int getComponentsCount() {
        if (componentsBuilder_ == null) {
          return components_.size();
        } else {
          return componentsBuilder_.getCount();
        }
      }
      /**
       * <code>repeated .hadoop.yarn.ComponentCountProto components = 1;</code>
       */
      public org.apache.hadoop.yarn.proto.ClientAMProtocol.ComponentCountProto getComponents(int index) {
        if (componentsBuilder_ == null) {
          return components_.get(index);
        } else {
          return componentsBuilder_.getMessage(index);
        }
      }
      /**
       * <code>repeated .hadoop.yarn.ComponentCountProto components = 1;</code>
       */
      public Builder setComponents(
          int index, org.apache.hadoop.yarn.proto.ClientAMProtocol.ComponentCountProto value) {
        if (componentsBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          ensureComponentsIsMutable();
          components_.set(index, value);
          onChanged();
        } else {
          componentsBuilder_.setMessage(index, value);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.yarn.ComponentCountProto components = 1;</code>
       */
      public Builder setComponents(
          int index, org.apache.hadoop.yarn.proto.ClientAMProtocol.ComponentCountProto.Builder builderForValue) {
        if (componentsBuilder_ == null) {
          ensureComponentsIsMutable();
          components_.set(index, builderForValue.build());
          onChanged();
        } else {
          componentsBuilder_.setMessage(index, builderForValue.build());
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.yarn.ComponentCountProto components = 1;</code>
       */
      public Builder addComponents(org.apache.hadoop.yarn.proto.ClientAMProtocol.ComponentCountProto value) {
        if (componentsBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          ensureComponentsIsMutable();
          components_.add(value);
          onChanged();
        } else {
          componentsBuilder_.addMessage(value);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.yarn.ComponentCountProto components = 1;</code>
       */
      public Builder addComponents(
          int index, org.apache.hadoop.yarn.proto.ClientAMProtocol.ComponentCountProto value) {
        if (componentsBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          ensureComponentsIsMutable();
          components_.add(index, value);
          onChanged();
        } else {
          componentsBuilder_.addMessage(index, value);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.yarn.ComponentCountProto components = 1;</code>
       */
      public Builder addComponents(
          org.apache.hadoop.yarn.proto.ClientAMProtocol.ComponentCountProto.Builder builderForValue) {
        if (componentsBuilder_ == null) {
          ensureComponentsIsMutable();
          components_.add(builderForValue.build());
          onChanged();
        } else {
          componentsBuilder_.addMessage(builderForValue.build());
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.yarn.ComponentCountProto components = 1;</code>
       */
      public Builder addComponents(
          int index, org.apache.hadoop.yarn.proto.ClientAMProtocol.ComponentCountProto.Builder builderForValue) {
        if (componentsBuilder_ == null) {
          ensureComponentsIsMutable();
          components_.add(index, builderForValue.build());
          onChanged();
        } else {
          componentsBuilder_.addMessage(index, builderForValue.build());
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.yarn.ComponentCountProto components = 1;</code>
       */
      public Builder addAllComponents(
          java.lang.Iterable<? extends org.apache.hadoop.yarn.proto.ClientAMProtocol.ComponentCountProto> values) {
        if (componentsBuilder_ == null) {
          ensureComponentsIsMutable();
          org.apache.hadoop.thirdparty.protobuf.AbstractMessageLite.Builder.addAll(
              values, components_);
          onChanged();
        } else {
          componentsBuilder_.addAllMessages(values);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.yarn.ComponentCountProto components = 1;</code>
       */
      public Builder clearComponents() {
        if (componentsBuilder_ == null) {
          components_ = java.util.Collections.emptyList();
          bitField0_ = (bitField0_ & ~0x00000001);
          onChanged();
        } else {
          componentsBuilder_.clear();
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.yarn.ComponentCountProto components = 1;</code>
       */
      public Builder removeComponents(int index) {
        if (componentsBuilder_ == null) {
          ensureComponentsIsMutable();
          components_.remove(index);
          onChanged();
        } else {
          componentsBuilder_.remove(index);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.yarn.ComponentCountProto components = 1;</code>
       */
      public org.apache.hadoop.yarn.proto.ClientAMProtocol.ComponentCountProto.Builder getComponentsBuilder(
          int index) {
        return getComponentsFieldBuilder().getBuilder(index);
      }
      /**
       * <code>repeated .hadoop.yarn.ComponentCountProto components = 1;</code>
       */
      public org.apache.hadoop.yarn.proto.ClientAMProtocol.ComponentCountProtoOrBuilder getComponentsOrBuilder(
          int index) {
        if (componentsBuilder_ == null) {
          return components_.get(index);
        } else {
          return componentsBuilder_.getMessageOrBuilder(index);
        }
      }
      /**
       * <code>repeated .hadoop.yarn.ComponentCountProto components = 1;</code>
       */
      public java.util.List<? extends org.apache.hadoop.yarn.proto.ClientAMProtocol.ComponentCountProtoOrBuilder> 
           getComponentsOrBuilderList() {
        if (componentsBuilder_ != null) {
          return componentsBuilder_.getMessageOrBuilderList();
        } else {
          return java.util.Collections.unmodifiableList(components_);
        }
      }
      /**
       * <code>repeated .hadoop.yarn.ComponentCountProto components = 1;</code>
       */
      public org.apache.hadoop.yarn.proto.ClientAMProtocol.ComponentCountProto.Builder addComponentsBuilder() {
        return getComponentsFieldBuilder().addBuilder(
            org.apache.hadoop.yarn.proto.ClientAMProtocol.ComponentCountProto.getDefaultInstance());
      }
      /**
       * <code>repeated .hadoop.yarn.ComponentCountProto components = 1;</code>
       */
      public org.apache.hadoop.yarn.proto.ClientAMProtocol.ComponentCountProto.Builder addComponentsBuilder(
          int index) {
        return getComponentsFieldBuilder().addBuilder(
            index, org.apache.hadoop.yarn.proto.ClientAMProtocol.ComponentCountProto.getDefaultInstance());
      }
      /**
       * <code>repeated .hadoop.yarn.ComponentCountProto components = 1;</code>
       */
      public java.util.List<org.apache.hadoop.yarn.proto.ClientAMProtocol.ComponentCountProto.Builder> 
           getComponentsBuilderList() {
        return getComponentsFieldBuilder().getBuilderList();
      }
      private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
          org.apache.hadoop.yarn.proto.ClientAMProtocol.ComponentCountProto, org.apache.hadoop.yarn.proto.ClientAMProtocol.ComponentCountProto.Builder, org.apache.hadoop.yarn.proto.ClientAMProtocol.ComponentCountProtoOrBuilder> 
          getComponentsFieldBuilder() {
        if (componentsBuilder_ == null) {
          componentsBuilder_ = new org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
              org.apache.hadoop.yarn.proto.ClientAMProtocol.ComponentCountProto, org.apache.hadoop.yarn.proto.ClientAMProtocol.ComponentCountProto.Builder, org.apache.hadoop.yarn.proto.ClientAMProtocol.ComponentCountProtoOrBuilder>(
                  components_,
                  ((bitField0_ & 0x00000001) != 0),
                  getParentForChildren(),
                  isClean());
          components_ = null;
        }
        return componentsBuilder_;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.yarn.FlexComponentsRequestProto)
    }

    // @@protoc_insertion_point(class_scope:hadoop.yarn.FlexComponentsRequestProto)
    private static final org.apache.hadoop.yarn.proto.ClientAMProtocol.FlexComponentsRequestProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.yarn.proto.ClientAMProtocol.FlexComponentsRequestProto();
    }

    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.FlexComponentsRequestProto getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<FlexComponentsRequestProto>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<FlexComponentsRequestProto>() {
      @java.lang.Override
      public FlexComponentsRequestProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<FlexComponentsRequestProto> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<FlexComponentsRequestProto> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.yarn.proto.ClientAMProtocol.FlexComponentsRequestProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }
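  // Illustrative usage (a sketch, not generated code): a client flexing a
  // hypothetical component named "worker" to 3 containers would typically
  // build the request like this:
  //
  //   ClientAMProtocol.FlexComponentsRequestProto request =
  //       ClientAMProtocol.FlexComponentsRequestProto.newBuilder()
  //           .addComponents(
  //               ClientAMProtocol.ComponentCountProto.newBuilder()
  //                   .setName("worker")
  //                   .setNumberOfContainers(3L))
  //           .build();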

  public interface ComponentCountProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.yarn.ComponentCountProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

    /**
     * <code>optional string name = 1;</code>
     * @return Whether the name field is set.
     */
    boolean hasName();
    /**
     * <code>optional string name = 1;</code>
     * @return The name.
     */
    java.lang.String getName();
    /**
     * <code>optional string name = 1;</code>
     * @return The bytes for name.
     */
    org.apache.hadoop.thirdparty.protobuf.ByteString
        getNameBytes();

    /**
     * <code>optional int64 numberOfContainers = 2;</code>
     * @return Whether the numberOfContainers field is set.
     */
    boolean hasNumberOfContainers();
    /**
     * <code>optional int64 numberOfContainers = 2;</code>
     * @return The numberOfContainers.
     */
    long getNumberOfContainers();
  }
  /**
   * Protobuf type {@code hadoop.yarn.ComponentCountProto}
   */
  public static final class ComponentCountProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.yarn.ComponentCountProto)
      ComponentCountProtoOrBuilder {
    private static final long serialVersionUID = 0L;
    // Use ComponentCountProto.newBuilder() to construct.
    private ComponentCountProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private ComponentCountProto() {
      name_ = "";
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new ComponentCountProto();
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.yarn.proto.ClientAMProtocol.internal_static_hadoop_yarn_ComponentCountProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.yarn.proto.ClientAMProtocol.internal_static_hadoop_yarn_ComponentCountProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.yarn.proto.ClientAMProtocol.ComponentCountProto.class, org.apache.hadoop.yarn.proto.ClientAMProtocol.ComponentCountProto.Builder.class);
    }

    private int bitField0_;
    public static final int NAME_FIELD_NUMBER = 1;
    @SuppressWarnings("serial")
    private volatile java.lang.Object name_ = "";
    /**
     * <code>optional string name = 1;</code>
     * @return Whether the name field is set.
     */
    @java.lang.Override
    public boolean hasName() {
      return ((bitField0_ & 0x00000001) != 0);
    }
    /**
     * <code>optional string name = 1;</code>
     * @return The name.
     */
    @java.lang.Override
    public java.lang.String getName() {
      java.lang.Object ref = name_;
      if (ref instanceof java.lang.String) {
        return (java.lang.String) ref;
      } else {
        org.apache.hadoop.thirdparty.protobuf.ByteString bs = 
            (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        java.lang.String s = bs.toStringUtf8();
        if (bs.isValidUtf8()) {
          name_ = s;
        }
        return s;
      }
    }
    /**
     * <code>optional string name = 1;</code>
     * @return The bytes for name.
     */
    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.ByteString
        getNameBytes() {
      java.lang.Object ref = name_;
      if (ref instanceof java.lang.String) {
        org.apache.hadoop.thirdparty.protobuf.ByteString b = 
            org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                (java.lang.String) ref);
        name_ = b;
        return b;
      } else {
        return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
      }
    }
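    // name_ is stored as either a java.lang.String or a ByteString; getName()
    // and getNameBytes() above lazily convert between the two and cache the
    // result (the String form is cached only when the bytes are valid UTF-8).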

    public static final int NUMBEROFCONTAINERS_FIELD_NUMBER = 2;
    private long numberOfContainers_ = 0L;
    /**
     * <code>optional int64 numberOfContainers = 2;</code>
     * @return Whether the numberOfContainers field is set.
     */
    @java.lang.Override
    public boolean hasNumberOfContainers() {
      return ((bitField0_ & 0x00000002) != 0);
    }
    /**
     * <code>optional int64 numberOfContainers = 2;</code>
     * @return The numberOfContainers.
     */
    @java.lang.Override
    public long getNumberOfContainers() {
      return numberOfContainers_;
    }

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      memoizedIsInitialized = 1;
      return true;
    }

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      if (((bitField0_ & 0x00000001) != 0)) {
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 1, name_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        output.writeInt64(2, numberOfContainers_);
      }
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(1, name_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeInt64Size(2, numberOfContainers_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
      if (!(obj instanceof org.apache.hadoop.yarn.proto.ClientAMProtocol.ComponentCountProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.yarn.proto.ClientAMProtocol.ComponentCountProto other = (org.apache.hadoop.yarn.proto.ClientAMProtocol.ComponentCountProto) obj;

      if (hasName() != other.hasName()) return false;
      if (hasName()) {
        if (!getName()
            .equals(other.getName())) return false;
      }
      if (hasNumberOfContainers() != other.hasNumberOfContainers()) return false;
      if (hasNumberOfContainers()) {
        if (getNumberOfContainers()
            != other.getNumberOfContainers()) return false;
      }
      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      if (hasName()) {
        hash = (37 * hash) + NAME_FIELD_NUMBER;
        hash = (53 * hash) + getName().hashCode();
      }
      if (hasNumberOfContainers()) {
        hash = (37 * hash) + NUMBEROFCONTAINERS_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
            getNumberOfContainers());
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.ComponentCountProto parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.ComponentCountProto parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.ComponentCountProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.ComponentCountProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.ComponentCountProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.ComponentCountProto parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.ComponentCountProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.ComponentCountProto parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.ComponentCountProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }

    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.ComponentCountProto parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.ComponentCountProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.ComponentCountProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.yarn.proto.ClientAMProtocol.ComponentCountProto prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * Protobuf type {@code hadoop.yarn.ComponentCountProto}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.yarn.ComponentCountProto)
        org.apache.hadoop.yarn.proto.ClientAMProtocol.ComponentCountProtoOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.yarn.proto.ClientAMProtocol.internal_static_hadoop_yarn_ComponentCountProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.yarn.proto.ClientAMProtocol.internal_static_hadoop_yarn_ComponentCountProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.yarn.proto.ClientAMProtocol.ComponentCountProto.class, org.apache.hadoop.yarn.proto.ClientAMProtocol.ComponentCountProto.Builder.class);
      }

      // Construct using org.apache.hadoop.yarn.proto.ClientAMProtocol.ComponentCountProto.newBuilder()
      private Builder() {

      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);

      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        bitField0_ = 0;
        name_ = "";
        numberOfContainers_ = 0L;
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.yarn.proto.ClientAMProtocol.internal_static_hadoop_yarn_ComponentCountProto_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.yarn.proto.ClientAMProtocol.ComponentCountProto getDefaultInstanceForType() {
        return org.apache.hadoop.yarn.proto.ClientAMProtocol.ComponentCountProto.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.yarn.proto.ClientAMProtocol.ComponentCountProto build() {
        org.apache.hadoop.yarn.proto.ClientAMProtocol.ComponentCountProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.yarn.proto.ClientAMProtocol.ComponentCountProto buildPartial() {
        org.apache.hadoop.yarn.proto.ClientAMProtocol.ComponentCountProto result = new org.apache.hadoop.yarn.proto.ClientAMProtocol.ComponentCountProto(this);
        if (bitField0_ != 0) { buildPartial0(result); }
        onBuilt();
        return result;
      }

      private void buildPartial0(org.apache.hadoop.yarn.proto.ClientAMProtocol.ComponentCountProto result) {
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) != 0)) {
          result.name_ = name_;
          to_bitField0_ |= 0x00000001;
        }
        if (((from_bitField0_ & 0x00000002) != 0)) {
          result.numberOfContainers_ = numberOfContainers_;
          to_bitField0_ |= 0x00000002;
        }
        result.bitField0_ |= to_bitField0_;
      }
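      // Presence tracking: bit 0x00000001 of bitField0_ marks name as set and
      // bit 0x00000002 marks numberOfContainers; buildPartial0 copies only the
      // set fields and ORs their presence bits into the message's bitField0_.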

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.yarn.proto.ClientAMProtocol.ComponentCountProto) {
          return mergeFrom((org.apache.hadoop.yarn.proto.ClientAMProtocol.ComponentCountProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.yarn.proto.ClientAMProtocol.ComponentCountProto other) {
        if (other == org.apache.hadoop.yarn.proto.ClientAMProtocol.ComponentCountProto.getDefaultInstance()) return this;
        if (other.hasName()) {
          name_ = other.name_;
          bitField0_ |= 0x00000001;
          onChanged();
        }
        if (other.hasNumberOfContainers()) {
          setNumberOfContainers(other.getNumberOfContainers());
        }
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        return true;
      }

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              case 10: {
                name_ = input.readBytes();
                bitField0_ |= 0x00000001;
                break;
              } // case 10
              case 16: {
                numberOfContainers_ = input.readInt64();
                bitField0_ |= 0x00000002;
                break;
              } // case 16
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
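      // Tags above: field 1 is a length-delimited string, (1 << 3) | 2 = 10;
      // field 2 is a varint int64, (2 << 3) | 0 = 16.
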
      private int bitField0_;

      private java.lang.Object name_ = "";
      /**
       * <code>optional string name = 1;</code>
       * @return Whether the name field is set.
       */
      public boolean hasName() {
        return ((bitField0_ & 0x00000001) != 0);
      }
      /**
       * <code>optional string name = 1;</code>
       * @return The name.
       */
      public java.lang.String getName() {
        java.lang.Object ref = name_;
        if (!(ref instanceof java.lang.String)) {
          org.apache.hadoop.thirdparty.protobuf.ByteString bs =
              (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
          java.lang.String s = bs.toStringUtf8();
          if (bs.isValidUtf8()) {
            name_ = s;
          }
          return s;
        } else {
          return (java.lang.String) ref;
        }
      }
      /**
       * <code>optional string name = 1;</code>
       * @return The bytes for name.
       */
      public org.apache.hadoop.thirdparty.protobuf.ByteString
          getNameBytes() {
        java.lang.Object ref = name_;
        if (ref instanceof java.lang.String) {
          org.apache.hadoop.thirdparty.protobuf.ByteString b = 
              org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                  (java.lang.String) ref);
          name_ = b;
          return b;
        } else {
          return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        }
      }
      /**
       * <code>optional string name = 1;</code>
       * @param value The name to set.
       * @return This builder for chaining.
       */
      public Builder setName(
          java.lang.String value) {
        if (value == null) { throw new NullPointerException(); }
        name_ = value;
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * <code>optional string name = 1;</code>
       * @return This builder for chaining.
       */
      public Builder clearName() {
        name_ = getDefaultInstance().getName();
        bitField0_ = (bitField0_ & ~0x00000001);
        onChanged();
        return this;
      }
      /**
       * <code>optional string name = 1;</code>
       * @param value The bytes for name to set.
       * @return This builder for chaining.
       */
      public Builder setNameBytes(
          org.apache.hadoop.thirdparty.protobuf.ByteString value) {
        if (value == null) { throw new NullPointerException(); }
        name_ = value;
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }

      private long numberOfContainers_;
      /**
       * <code>optional int64 numberOfContainers = 2;</code>
       * @return Whether the numberOfContainers field is set.
       */
      @java.lang.Override
      public boolean hasNumberOfContainers() {
        return ((bitField0_ & 0x00000002) != 0);
      }
      /**
       * <code>optional int64 numberOfContainers = 2;</code>
       * @return The numberOfContainers.
       */
      @java.lang.Override
      public long getNumberOfContainers() {
        return numberOfContainers_;
      }
      /**
       * <code>optional int64 numberOfContainers = 2;</code>
       * @param value The numberOfContainers to set.
       * @return This builder for chaining.
       */
      public Builder setNumberOfContainers(long value) {
        numberOfContainers_ = value;
        bitField0_ |= 0x00000002;
        onChanged();
        return this;
      }
      /**
       * <code>optional int64 numberOfContainers = 2;</code>
       * @return This builder for chaining.
       */
      public Builder clearNumberOfContainers() {
        bitField0_ = (bitField0_ & ~0x00000002);
        numberOfContainers_ = 0L;
        onChanged();
        return this;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.yarn.ComponentCountProto)
    }

    // @@protoc_insertion_point(class_scope:hadoop.yarn.ComponentCountProto)
    private static final org.apache.hadoop.yarn.proto.ClientAMProtocol.ComponentCountProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.yarn.proto.ClientAMProtocol.ComponentCountProto();
    }

    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.ComponentCountProto getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<ComponentCountProto>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<ComponentCountProto>() {
      @java.lang.Override
      public ComponentCountProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };
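    // Editorial note: PARSER is @Deprecated to discourage direct field
    // access; parser() below and the static parseFrom(...) overloads are the
    // supported entry points and delegate to this same instance.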

    public static org.apache.hadoop.thirdparty.protobuf.Parser<ComponentCountProto> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<ComponentCountProto> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.yarn.proto.ClientAMProtocol.ComponentCountProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }
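  // Editorial sketch (illustrative, not generated): building a
  // ComponentCountProto with the builder above. The name "worker" and the
  // count 3 are made-up example values.
  //
  //   ComponentCountProto count = ComponentCountProto.newBuilder()
  //       .setName("worker")
  //       .setNumberOfContainers(3L)
  //       .build();
  //
  // Entries like this populate the repeated components field of
  // FlexComponentsRequestProto.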

  public interface FlexComponentsResponseProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.yarn.FlexComponentsResponseProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {
  }
  /**
   * Protobuf type {@code hadoop.yarn.FlexComponentsResponseProto}
   */
  public static final class FlexComponentsResponseProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.yarn.FlexComponentsResponseProto)
      FlexComponentsResponseProtoOrBuilder {
    private static final long serialVersionUID = 0L;
    // Use FlexComponentsResponseProto.newBuilder() to construct.
    private FlexComponentsResponseProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private FlexComponentsResponseProto() {
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new FlexComponentsResponseProto();
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.yarn.proto.ClientAMProtocol.internal_static_hadoop_yarn_FlexComponentsResponseProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.yarn.proto.ClientAMProtocol.internal_static_hadoop_yarn_FlexComponentsResponseProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.yarn.proto.ClientAMProtocol.FlexComponentsResponseProto.class, org.apache.hadoop.yarn.proto.ClientAMProtocol.FlexComponentsResponseProto.Builder.class);
    }

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      memoizedIsInitialized = 1;
      return true;
    }
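    // Editorial note: memoizedIsInitialized caches the required-field check
    // with -1 meaning "not yet computed", 0 meaning false and 1 meaning true.
    // This message declares no required fields, so the check always succeeds.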

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
      if (!(obj instanceof org.apache.hadoop.yarn.proto.ClientAMProtocol.FlexComponentsResponseProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.yarn.proto.ClientAMProtocol.FlexComponentsResponseProto other = (org.apache.hadoop.yarn.proto.ClientAMProtocol.FlexComponentsResponseProto) obj;

      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.FlexComponentsResponseProto parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.FlexComponentsResponseProto parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.FlexComponentsResponseProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.FlexComponentsResponseProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.FlexComponentsResponseProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.FlexComponentsResponseProto parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.FlexComponentsResponseProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.FlexComponentsResponseProto parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.FlexComponentsResponseProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }

    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.FlexComponentsResponseProto parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.FlexComponentsResponseProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.FlexComponentsResponseProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.yarn.proto.ClientAMProtocol.FlexComponentsResponseProto prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * Protobuf type {@code hadoop.yarn.FlexComponentsResponseProto}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.yarn.FlexComponentsResponseProto)
        org.apache.hadoop.yarn.proto.ClientAMProtocol.FlexComponentsResponseProtoOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.yarn.proto.ClientAMProtocol.internal_static_hadoop_yarn_FlexComponentsResponseProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.yarn.proto.ClientAMProtocol.internal_static_hadoop_yarn_FlexComponentsResponseProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.yarn.proto.ClientAMProtocol.FlexComponentsResponseProto.class, org.apache.hadoop.yarn.proto.ClientAMProtocol.FlexComponentsResponseProto.Builder.class);
      }

      // Construct using org.apache.hadoop.yarn.proto.ClientAMProtocol.FlexComponentsResponseProto.newBuilder()
      private Builder() {
      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);
      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.yarn.proto.ClientAMProtocol.internal_static_hadoop_yarn_FlexComponentsResponseProto_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.yarn.proto.ClientAMProtocol.FlexComponentsResponseProto getDefaultInstanceForType() {
        return org.apache.hadoop.yarn.proto.ClientAMProtocol.FlexComponentsResponseProto.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.yarn.proto.ClientAMProtocol.FlexComponentsResponseProto build() {
        org.apache.hadoop.yarn.proto.ClientAMProtocol.FlexComponentsResponseProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.yarn.proto.ClientAMProtocol.FlexComponentsResponseProto buildPartial() {
        org.apache.hadoop.yarn.proto.ClientAMProtocol.FlexComponentsResponseProto result = new org.apache.hadoop.yarn.proto.ClientAMProtocol.FlexComponentsResponseProto(this);
        onBuilt();
        return result;
      }

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.yarn.proto.ClientAMProtocol.FlexComponentsResponseProto) {
          return mergeFrom((org.apache.hadoop.yarn.proto.ClientAMProtocol.FlexComponentsResponseProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.yarn.proto.ClientAMProtocol.FlexComponentsResponseProto other) {
        if (other == org.apache.hadoop.yarn.proto.ClientAMProtocol.FlexComponentsResponseProto.getDefaultInstance()) return this;
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        return true;
      }

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.yarn.FlexComponentsResponseProto)
    }

    // @@protoc_insertion_point(class_scope:hadoop.yarn.FlexComponentsResponseProto)
    private static final org.apache.hadoop.yarn.proto.ClientAMProtocol.FlexComponentsResponseProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.yarn.proto.ClientAMProtocol.FlexComponentsResponseProto();
    }

    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.FlexComponentsResponseProto getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<FlexComponentsResponseProto>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<FlexComponentsResponseProto>() {
      @java.lang.Override
      public FlexComponentsResponseProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<FlexComponentsResponseProto> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<FlexComponentsResponseProto> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.yarn.proto.ClientAMProtocol.FlexComponentsResponseProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }
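  // Editorial sketch (illustrative, not generated): FlexComponentsResponseProto
  // declares no fields, yet it round-trips through the wire format like any
  // other message.
  //
  //   byte[] bytes =
  //       FlexComponentsResponseProto.getDefaultInstance().toByteArray();
  //   FlexComponentsResponseProto parsed =
  //       FlexComponentsResponseProto.parseFrom(bytes);
  //
  // bytes is empty for the default instance, and parsed equals it.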

  public interface GetStatusRequestProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.yarn.GetStatusRequestProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {
  }
  /**
   * Protobuf type {@code hadoop.yarn.GetStatusRequestProto}
   */
  public static final class GetStatusRequestProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.yarn.GetStatusRequestProto)
      GetStatusRequestProtoOrBuilder {
    private static final long serialVersionUID = 0L;
    // Use GetStatusRequestProto.newBuilder() to construct.
    private GetStatusRequestProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private GetStatusRequestProto() {
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new GetStatusRequestProto();
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.yarn.proto.ClientAMProtocol.internal_static_hadoop_yarn_GetStatusRequestProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.yarn.proto.ClientAMProtocol.internal_static_hadoop_yarn_GetStatusRequestProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.yarn.proto.ClientAMProtocol.GetStatusRequestProto.class, org.apache.hadoop.yarn.proto.ClientAMProtocol.GetStatusRequestProto.Builder.class);
    }

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      memoizedIsInitialized = 1;
      return true;
    }

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
      if (!(obj instanceof org.apache.hadoop.yarn.proto.ClientAMProtocol.GetStatusRequestProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.yarn.proto.ClientAMProtocol.GetStatusRequestProto other = (org.apache.hadoop.yarn.proto.ClientAMProtocol.GetStatusRequestProto) obj;

      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.GetStatusRequestProto parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.GetStatusRequestProto parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.GetStatusRequestProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.GetStatusRequestProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.GetStatusRequestProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.GetStatusRequestProto parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.GetStatusRequestProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.GetStatusRequestProto parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.GetStatusRequestProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }

    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.GetStatusRequestProto parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.GetStatusRequestProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.GetStatusRequestProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.yarn.proto.ClientAMProtocol.GetStatusRequestProto prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * Protobuf type {@code hadoop.yarn.GetStatusRequestProto}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.yarn.GetStatusRequestProto)
        org.apache.hadoop.yarn.proto.ClientAMProtocol.GetStatusRequestProtoOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.yarn.proto.ClientAMProtocol.internal_static_hadoop_yarn_GetStatusRequestProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.yarn.proto.ClientAMProtocol.internal_static_hadoop_yarn_GetStatusRequestProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.yarn.proto.ClientAMProtocol.GetStatusRequestProto.class, org.apache.hadoop.yarn.proto.ClientAMProtocol.GetStatusRequestProto.Builder.class);
      }

      // Construct using org.apache.hadoop.yarn.proto.ClientAMProtocol.GetStatusRequestProto.newBuilder()
      private Builder() {
      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);
      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.yarn.proto.ClientAMProtocol.internal_static_hadoop_yarn_GetStatusRequestProto_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.yarn.proto.ClientAMProtocol.GetStatusRequestProto getDefaultInstanceForType() {
        return org.apache.hadoop.yarn.proto.ClientAMProtocol.GetStatusRequestProto.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.yarn.proto.ClientAMProtocol.GetStatusRequestProto build() {
        org.apache.hadoop.yarn.proto.ClientAMProtocol.GetStatusRequestProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.yarn.proto.ClientAMProtocol.GetStatusRequestProto buildPartial() {
        org.apache.hadoop.yarn.proto.ClientAMProtocol.GetStatusRequestProto result = new org.apache.hadoop.yarn.proto.ClientAMProtocol.GetStatusRequestProto(this);
        onBuilt();
        return result;
      }

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.yarn.proto.ClientAMProtocol.GetStatusRequestProto) {
          return mergeFrom((org.apache.hadoop.yarn.proto.ClientAMProtocol.GetStatusRequestProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.yarn.proto.ClientAMProtocol.GetStatusRequestProto other) {
        if (other == org.apache.hadoop.yarn.proto.ClientAMProtocol.GetStatusRequestProto.getDefaultInstance()) return this;
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        return true;
      }

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.yarn.GetStatusRequestProto)
    }

    // @@protoc_insertion_point(class_scope:hadoop.yarn.GetStatusRequestProto)
    private static final org.apache.hadoop.yarn.proto.ClientAMProtocol.GetStatusRequestProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.yarn.proto.ClientAMProtocol.GetStatusRequestProto();
    }

    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.GetStatusRequestProto getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<GetStatusRequestProto>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<GetStatusRequestProto>() {
      @java.lang.Override
      public GetStatusRequestProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<GetStatusRequestProto> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<GetStatusRequestProto> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.yarn.proto.ClientAMProtocol.GetStatusRequestProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }
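  // Editorial note: empty request messages such as GetStatusRequestProto are
  // a common protobuf convention. Wrapping the call in a message lets fields
  // be added later without changing the RPC signature, and the parsing loop
  // above already preserves unknown fields sent by newer clients.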

  public interface GetStatusResponseProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.yarn.GetStatusResponseProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

    /**
     * <code>optional string status = 1;</code>
     * @return Whether the status field is set.
     */
    boolean hasStatus();
    /**
     * <code>optional string status = 1;</code>
     * @return The status.
     */
    java.lang.String getStatus();
    /**
     * <code>optional string status = 1;</code>
     * @return The bytes for status.
     */
    org.apache.hadoop.thirdparty.protobuf.ByteString
        getStatusBytes();
  }
  /**
   * Protobuf type {@code hadoop.yarn.GetStatusResponseProto}
   */
  public static final class GetStatusResponseProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.yarn.GetStatusResponseProto)
      GetStatusResponseProtoOrBuilder {
    private static final long serialVersionUID = 0L;
    // Use GetStatusResponseProto.newBuilder() to construct.
    private GetStatusResponseProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private GetStatusResponseProto() {
      status_ = "";
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new GetStatusResponseProto();
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.yarn.proto.ClientAMProtocol.internal_static_hadoop_yarn_GetStatusResponseProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.yarn.proto.ClientAMProtocol.internal_static_hadoop_yarn_GetStatusResponseProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.yarn.proto.ClientAMProtocol.GetStatusResponseProto.class, org.apache.hadoop.yarn.proto.ClientAMProtocol.GetStatusResponseProto.Builder.class);
    }

    private int bitField0_;
    public static final int STATUS_FIELD_NUMBER = 1;
    @SuppressWarnings("serial")
    private volatile java.lang.Object status_ = "";
    /**
     * <code>optional string status = 1;</code>
     * @return Whether the status field is set.
     */
    @java.lang.Override
    public boolean hasStatus() {
      return ((bitField0_ & 0x00000001) != 0);
    }
    /**
     * <code>optional string status = 1;</code>
     * @return The status.
     */
    @java.lang.Override
    public java.lang.String getStatus() {
      java.lang.Object ref = status_;
      if (ref instanceof java.lang.String) {
        return (java.lang.String) ref;
      } else {
        org.apache.hadoop.thirdparty.protobuf.ByteString bs =
            (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        java.lang.String s = bs.toStringUtf8();
        if (bs.isValidUtf8()) {
          status_ = s;
        }
        return s;
      }
    }
    /**
     * <code>optional string status = 1;</code>
     * @return The bytes for status.
     */
    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.ByteString
        getStatusBytes() {
      java.lang.Object ref = status_;
      if (ref instanceof java.lang.String) {
        org.apache.hadoop.thirdparty.protobuf.ByteString b =
            org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                (java.lang.String) ref);
        status_ = b;
        return b;
      } else {
        return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
      }
    }
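    // Editorial note: status_ is volatile so the lazily cached
    // String/ByteString conversion done by the two accessors above is
    // published safely across threads; both representations decode to the
    // same value, so the message stays logically immutable.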

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      memoizedIsInitialized = 1;
      return true;
    }

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      if (((bitField0_ & 0x00000001) != 0)) {
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 1, status_);
      }
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(1, status_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
      if (!(obj instanceof org.apache.hadoop.yarn.proto.ClientAMProtocol.GetStatusResponseProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.yarn.proto.ClientAMProtocol.GetStatusResponseProto other = (org.apache.hadoop.yarn.proto.ClientAMProtocol.GetStatusResponseProto) obj;

      if (hasStatus() != other.hasStatus()) return false;
      if (hasStatus()) {
        if (!getStatus()
            .equals(other.getStatus())) return false;
      }
      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      if (hasStatus()) {
        hash = (37 * hash) + STATUS_FIELD_NUMBER;
        hash = (53 * hash) + getStatus().hashCode();
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.GetStatusResponseProto parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.GetStatusResponseProto parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.GetStatusResponseProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.GetStatusResponseProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.GetStatusResponseProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.GetStatusResponseProto parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.GetStatusResponseProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.GetStatusResponseProto parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.GetStatusResponseProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }

    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.GetStatusResponseProto parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.GetStatusResponseProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.GetStatusResponseProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.yarn.proto.ClientAMProtocol.GetStatusResponseProto prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * Protobuf type {@code hadoop.yarn.GetStatusResponseProto}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.yarn.GetStatusResponseProto)
        org.apache.hadoop.yarn.proto.ClientAMProtocol.GetStatusResponseProtoOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.yarn.proto.ClientAMProtocol.internal_static_hadoop_yarn_GetStatusResponseProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.yarn.proto.ClientAMProtocol.internal_static_hadoop_yarn_GetStatusResponseProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.yarn.proto.ClientAMProtocol.GetStatusResponseProto.class, org.apache.hadoop.yarn.proto.ClientAMProtocol.GetStatusResponseProto.Builder.class);
      }

      // Construct using org.apache.hadoop.yarn.proto.ClientAMProtocol.GetStatusResponseProto.newBuilder()
      private Builder() {
      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);
      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        bitField0_ = 0;
        status_ = "";
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.yarn.proto.ClientAMProtocol.internal_static_hadoop_yarn_GetStatusResponseProto_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.yarn.proto.ClientAMProtocol.GetStatusResponseProto getDefaultInstanceForType() {
        return org.apache.hadoop.yarn.proto.ClientAMProtocol.GetStatusResponseProto.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.yarn.proto.ClientAMProtocol.GetStatusResponseProto build() {
        org.apache.hadoop.yarn.proto.ClientAMProtocol.GetStatusResponseProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.yarn.proto.ClientAMProtocol.GetStatusResponseProto buildPartial() {
        org.apache.hadoop.yarn.proto.ClientAMProtocol.GetStatusResponseProto result = new org.apache.hadoop.yarn.proto.ClientAMProtocol.GetStatusResponseProto(this);
        if (bitField0_ != 0) { buildPartial0(result); }
        onBuilt();
        return result;
      }

      private void buildPartial0(org.apache.hadoop.yarn.proto.ClientAMProtocol.GetStatusResponseProto result) {
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) != 0)) {
          result.status_ = status_;
          to_bitField0_ |= 0x00000001;
        }
        result.bitField0_ |= to_bitField0_;
      }
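      // Editorial note: buildPartial() copies only the fields whose presence
      // bits are set, via buildPartial0(), and then ORs those bits into the
      // result so the has-state survives the builder-to-message hand-off.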

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.yarn.proto.ClientAMProtocol.GetStatusResponseProto) {
          return mergeFrom((org.apache.hadoop.yarn.proto.ClientAMProtocol.GetStatusResponseProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.yarn.proto.ClientAMProtocol.GetStatusResponseProto other) {
        if (other == org.apache.hadoop.yarn.proto.ClientAMProtocol.GetStatusResponseProto.getDefaultInstance()) return this;
        if (other.hasStatus()) {
          status_ = other.status_;
          bitField0_ |= 0x00000001;
          onChanged();
        }
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        return true;
      }

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              case 10: {
                status_ = input.readBytes();
                bitField0_ |= 0x00000001;
                break;
              } // case 10
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
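      // Editorial note: a wire tag is (field_number << 3) | wire_type, so
      // case 10 above is field 1 (status) with wire type 2
      // (length-delimited), and readTag() returns 0 at end of input.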
      private int bitField0_;

      private java.lang.Object status_ = "";
      /**
       * <code>optional string status = 1;</code>
       * @return Whether the status field is set.
       */
      public boolean hasStatus() {
        return ((bitField0_ & 0x00000001) != 0);
      }
      /**
       * <code>optional string status = 1;</code>
       * @return The status.
       */
      public java.lang.String getStatus() {
        java.lang.Object ref = status_;
        if (!(ref instanceof java.lang.String)) {
          org.apache.hadoop.thirdparty.protobuf.ByteString bs =
              (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
          java.lang.String s = bs.toStringUtf8();
          if (bs.isValidUtf8()) {
            status_ = s;
          }
          return s;
        } else {
          return (java.lang.String) ref;
        }
      }
      /**
       * <code>optional string status = 1;</code>
       * @return The bytes for status.
       */
      public org.apache.hadoop.thirdparty.protobuf.ByteString
          getStatusBytes() {
        java.lang.Object ref = status_;
        if (ref instanceof java.lang.String) {
          org.apache.hadoop.thirdparty.protobuf.ByteString b =
              org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                  (java.lang.String) ref);
          status_ = b;
          return b;
        } else {
          return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        }
      }
      /**
       * <code>optional string status = 1;</code>
       * @param value The status to set.
       * @return This builder for chaining.
       */
      public Builder setStatus(
          java.lang.String value) {
        if (value == null) { throw new NullPointerException(); }
        status_ = value;
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * <code>optional string status = 1;</code>
       * @return This builder for chaining.
       */
      public Builder clearStatus() {
        status_ = getDefaultInstance().getStatus();
        bitField0_ = (bitField0_ & ~0x00000001);
        onChanged();
        return this;
      }
      /**
       * <code>optional string status = 1;</code>
       * @param value The bytes for status to set.
       * @return This builder for chaining.
       */
      public Builder setStatusBytes(
          org.apache.hadoop.thirdparty.protobuf.ByteString value) {
        if (value == null) { throw new NullPointerException(); }
        status_ = value;
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.yarn.GetStatusResponseProto)
    }

    // @@protoc_insertion_point(class_scope:hadoop.yarn.GetStatusResponseProto)
    private static final org.apache.hadoop.yarn.proto.ClientAMProtocol.GetStatusResponseProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.yarn.proto.ClientAMProtocol.GetStatusResponseProto();
    }

    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.GetStatusResponseProto getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<GetStatusResponseProto>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<GetStatusResponseProto>() {
      @java.lang.Override
      public GetStatusResponseProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };
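    // PARSER is kept as a deprecated field for compatibility; parser() is
    // the supported accessor. parsePartialFrom attaches the partially built
    // message to any InvalidProtocolBufferException via setUnfinishedMessage,
    // so callers can inspect whatever was decoded before the failure.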

    public static org.apache.hadoop.thirdparty.protobuf.Parser<GetStatusResponseProto> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<GetStatusResponseProto> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.yarn.proto.ClientAMProtocol.GetStatusResponseProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }
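  // Illustrative round trip for GetStatusResponseProto (a sketch, not part
  // of the generated API; the status payload shown is hypothetical):
  //
  //   GetStatusResponseProto resp = GetStatusResponseProto.newBuilder()
  //       .setStatus("RUNNING")
  //       .build();
  //   byte[] wire = resp.toByteArray();
  //   GetStatusResponseProto parsed = GetStatusResponseProto.parseFrom(wire);
  //   assert parsed.hasStatus() && "RUNNING".equals(parsed.getStatus());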

  public interface StopRequestProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.yarn.StopRequestProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {
  }
  /**
   * Protobuf type {@code hadoop.yarn.StopRequestProto}
   */
  public static final class StopRequestProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.yarn.StopRequestProto)
      StopRequestProtoOrBuilder {
    private static final long serialVersionUID = 0L;
    // Use StopRequestProto.newBuilder() to construct.
    private StopRequestProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private StopRequestProto() {
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new StopRequestProto();
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.yarn.proto.ClientAMProtocol.internal_static_hadoop_yarn_StopRequestProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.yarn.proto.ClientAMProtocol.internal_static_hadoop_yarn_StopRequestProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.yarn.proto.ClientAMProtocol.StopRequestProto.class, org.apache.hadoop.yarn.proto.ClientAMProtocol.StopRequestProto.Builder.class);
    }

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      memoizedIsInitialized = 1;
      return true;
    }
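    // memoizedIsInitialized caches the result: -1 = not computed, 1 = true,
    // 0 = false. StopRequestProto declares no required fields, so the check
    // always resolves to true and is computed at most once.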

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
      if (!(obj instanceof org.apache.hadoop.yarn.proto.ClientAMProtocol.StopRequestProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.yarn.proto.ClientAMProtocol.StopRequestProto other = (org.apache.hadoop.yarn.proto.ClientAMProtocol.StopRequestProto) obj;

      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.StopRequestProto parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.StopRequestProto parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.StopRequestProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.StopRequestProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.StopRequestProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.StopRequestProto parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.StopRequestProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.StopRequestProto parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.StopRequestProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }

    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.StopRequestProto parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.StopRequestProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.StopRequestProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.yarn.proto.ClientAMProtocol.StopRequestProto prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * Protobuf type {@code hadoop.yarn.StopRequestProto}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.yarn.StopRequestProto)
        org.apache.hadoop.yarn.proto.ClientAMProtocol.StopRequestProtoOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.yarn.proto.ClientAMProtocol.internal_static_hadoop_yarn_StopRequestProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.yarn.proto.ClientAMProtocol.internal_static_hadoop_yarn_StopRequestProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.yarn.proto.ClientAMProtocol.StopRequestProto.class, org.apache.hadoop.yarn.proto.ClientAMProtocol.StopRequestProto.Builder.class);
      }

      // Construct using org.apache.hadoop.yarn.proto.ClientAMProtocol.StopRequestProto.newBuilder()
      private Builder() {
      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);
      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.yarn.proto.ClientAMProtocol.internal_static_hadoop_yarn_StopRequestProto_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.yarn.proto.ClientAMProtocol.StopRequestProto getDefaultInstanceForType() {
        return org.apache.hadoop.yarn.proto.ClientAMProtocol.StopRequestProto.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.yarn.proto.ClientAMProtocol.StopRequestProto build() {
        org.apache.hadoop.yarn.proto.ClientAMProtocol.StopRequestProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.yarn.proto.ClientAMProtocol.StopRequestProto buildPartial() {
        org.apache.hadoop.yarn.proto.ClientAMProtocol.StopRequestProto result = new org.apache.hadoop.yarn.proto.ClientAMProtocol.StopRequestProto(this);
        onBuilt();
        return result;
      }

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.yarn.proto.ClientAMProtocol.StopRequestProto) {
          return mergeFrom((org.apache.hadoop.yarn.proto.ClientAMProtocol.StopRequestProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.yarn.proto.ClientAMProtocol.StopRequestProto other) {
        if (other == org.apache.hadoop.yarn.proto.ClientAMProtocol.StopRequestProto.getDefaultInstance()) return this;
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        return true;
      }

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.yarn.StopRequestProto)
    }

    // @@protoc_insertion_point(class_scope:hadoop.yarn.StopRequestProto)
    private static final org.apache.hadoop.yarn.proto.ClientAMProtocol.StopRequestProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.yarn.proto.ClientAMProtocol.StopRequestProto();
    }

    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.StopRequestProto getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<StopRequestProto>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<StopRequestProto>() {
      @java.lang.Override
      public StopRequestProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<StopRequestProto> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<StopRequestProto> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.yarn.proto.ClientAMProtocol.StopRequestProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }
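  // StopRequestProto carries no fields, so every instance serializes to zero
  // bytes (plus any unknown fields). A minimal sketch: callers can reuse the
  // singleton rather than building a new instance:
  //
  //   StopRequestProto req = StopRequestProto.getDefaultInstance();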

  public interface StopResponseProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.yarn.StopResponseProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {
  }
  /**
   * Protobuf type {@code hadoop.yarn.StopResponseProto}
   */
  public static final class StopResponseProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.yarn.StopResponseProto)
      StopResponseProtoOrBuilder {
    private static final long serialVersionUID = 0L;
    // Use StopResponseProto.newBuilder() to construct.
    private StopResponseProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private StopResponseProto() {
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new StopResponseProto();
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.yarn.proto.ClientAMProtocol.internal_static_hadoop_yarn_StopResponseProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.yarn.proto.ClientAMProtocol.internal_static_hadoop_yarn_StopResponseProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.yarn.proto.ClientAMProtocol.StopResponseProto.class, org.apache.hadoop.yarn.proto.ClientAMProtocol.StopResponseProto.Builder.class);
    }

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      memoizedIsInitialized = 1;
      return true;
    }

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
      if (!(obj instanceof org.apache.hadoop.yarn.proto.ClientAMProtocol.StopResponseProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.yarn.proto.ClientAMProtocol.StopResponseProto other = (org.apache.hadoop.yarn.proto.ClientAMProtocol.StopResponseProto) obj;

      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.StopResponseProto parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.StopResponseProto parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.StopResponseProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.StopResponseProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.StopResponseProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.StopResponseProto parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.StopResponseProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.StopResponseProto parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.StopResponseProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }

    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.StopResponseProto parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.StopResponseProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.StopResponseProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.yarn.proto.ClientAMProtocol.StopResponseProto prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * Protobuf type {@code hadoop.yarn.StopResponseProto}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.yarn.StopResponseProto)
        org.apache.hadoop.yarn.proto.ClientAMProtocol.StopResponseProtoOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.yarn.proto.ClientAMProtocol.internal_static_hadoop_yarn_StopResponseProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.yarn.proto.ClientAMProtocol.internal_static_hadoop_yarn_StopResponseProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.yarn.proto.ClientAMProtocol.StopResponseProto.class, org.apache.hadoop.yarn.proto.ClientAMProtocol.StopResponseProto.Builder.class);
      }

      // Construct using org.apache.hadoop.yarn.proto.ClientAMProtocol.StopResponseProto.newBuilder()
      private Builder() {
      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);
      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.yarn.proto.ClientAMProtocol.internal_static_hadoop_yarn_StopResponseProto_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.yarn.proto.ClientAMProtocol.StopResponseProto getDefaultInstanceForType() {
        return org.apache.hadoop.yarn.proto.ClientAMProtocol.StopResponseProto.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.yarn.proto.ClientAMProtocol.StopResponseProto build() {
        org.apache.hadoop.yarn.proto.ClientAMProtocol.StopResponseProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.yarn.proto.ClientAMProtocol.StopResponseProto buildPartial() {
        org.apache.hadoop.yarn.proto.ClientAMProtocol.StopResponseProto result = new org.apache.hadoop.yarn.proto.ClientAMProtocol.StopResponseProto(this);
        onBuilt();
        return result;
      }

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.yarn.proto.ClientAMProtocol.StopResponseProto) {
          return mergeFrom((org.apache.hadoop.yarn.proto.ClientAMProtocol.StopResponseProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.yarn.proto.ClientAMProtocol.StopResponseProto other) {
        if (other == org.apache.hadoop.yarn.proto.ClientAMProtocol.StopResponseProto.getDefaultInstance()) return this;
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        return true;
      }

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.yarn.StopResponseProto)
    }

    // @@protoc_insertion_point(class_scope:hadoop.yarn.StopResponseProto)
    private static final org.apache.hadoop.yarn.proto.ClientAMProtocol.StopResponseProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.yarn.proto.ClientAMProtocol.StopResponseProto();
    }

    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.StopResponseProto getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<StopResponseProto>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<StopResponseProto>() {
      @java.lang.Override
      public StopResponseProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<StopResponseProto> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<StopResponseProto> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.yarn.proto.ClientAMProtocol.StopResponseProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }

  public interface UpgradeServiceRequestProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.yarn.UpgradeServiceRequestProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

    /**
     * <code>optional string version = 1;</code>
     * @return Whether the version field is set.
     */
    boolean hasVersion();
    /**
     * <code>optional string version = 1;</code>
     * @return The version.
     */
    java.lang.String getVersion();
    /**
     * <code>optional string version = 1;</code>
     * @return The bytes for version.
     */
    org.apache.hadoop.thirdparty.protobuf.ByteString
        getVersionBytes();

    /**
     * <code>optional bool autoFinalize = 2;</code>
     * @return Whether the autoFinalize field is set.
     */
    boolean hasAutoFinalize();
    /**
     * <code>optional bool autoFinalize = 2;</code>
     * @return The autoFinalize.
     */
    boolean getAutoFinalize();

    /**
     * <code>optional bool expressUpgrade = 3;</code>
     * @return Whether the expressUpgrade field is set.
     */
    boolean hasExpressUpgrade();
    /**
     * <code>optional bool expressUpgrade = 3;</code>
     * @return The expressUpgrade.
     */
    boolean getExpressUpgrade();
  }
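  // For proto2 optional fields the generated interface pairs each getter
  // with a hazzer: when hasVersion() is false, getVersion() returns the
  // field default ("" here); the bool getters likewise default to false.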
  /**
   * Protobuf type {@code hadoop.yarn.UpgradeServiceRequestProto}
   */
  public static final class UpgradeServiceRequestProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.yarn.UpgradeServiceRequestProto)
      UpgradeServiceRequestProtoOrBuilder {
    private static final long serialVersionUID = 0L;
    // Use UpgradeServiceRequestProto.newBuilder() to construct.
    private UpgradeServiceRequestProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private UpgradeServiceRequestProto() {
      version_ = "";
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new UpgradeServiceRequestProto();
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.yarn.proto.ClientAMProtocol.internal_static_hadoop_yarn_UpgradeServiceRequestProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.yarn.proto.ClientAMProtocol.internal_static_hadoop_yarn_UpgradeServiceRequestProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.yarn.proto.ClientAMProtocol.UpgradeServiceRequestProto.class, org.apache.hadoop.yarn.proto.ClientAMProtocol.UpgradeServiceRequestProto.Builder.class);
    }

    private int bitField0_;
    public static final int VERSION_FIELD_NUMBER = 1;
    @SuppressWarnings("serial")
    private volatile java.lang.Object version_ = "";
    /**
     * <code>optional string version = 1;</code>
     * @return Whether the version field is set.
     */
    @java.lang.Override
    public boolean hasVersion() {
      return ((bitField0_ & 0x00000001) != 0);
    }
    /**
     * <code>optional string version = 1;</code>
     * @return The version.
     */
    @java.lang.Override
    public java.lang.String getVersion() {
      java.lang.Object ref = version_;
      if (ref instanceof java.lang.String) {
        return (java.lang.String) ref;
      } else {
        org.apache.hadoop.thirdparty.protobuf.ByteString bs = 
            (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        java.lang.String s = bs.toStringUtf8();
        if (bs.isValidUtf8()) {
          version_ = s;
        }
        return s;
      }
    }
    /**
     * <code>optional string version = 1;</code>
     * @return The bytes for version.
     */
    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.ByteString
        getVersionBytes() {
      java.lang.Object ref = version_;
      if (ref instanceof java.lang.String) {
        org.apache.hadoop.thirdparty.protobuf.ByteString b = 
            org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                (java.lang.String) ref);
        version_ = b;
        return b;
      } else {
        return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
      }
    }

    public static final int AUTOFINALIZE_FIELD_NUMBER = 2;
    private boolean autoFinalize_ = false;
    /**
     * <code>optional bool autoFinalize = 2;</code>
     * @return Whether the autoFinalize field is set.
     */
    @java.lang.Override
    public boolean hasAutoFinalize() {
      return ((bitField0_ & 0x00000002) != 0);
    }
    /**
     * <code>optional bool autoFinalize = 2;</code>
     * @return The autoFinalize.
     */
    @java.lang.Override
    public boolean getAutoFinalize() {
      return autoFinalize_;
    }

    public static final int EXPRESSUPGRADE_FIELD_NUMBER = 3;
    private boolean expressUpgrade_ = false;
    /**
     * <code>optional bool expressUpgrade = 3;</code>
     * @return Whether the expressUpgrade field is set.
     */
    @java.lang.Override
    public boolean hasExpressUpgrade() {
      return ((bitField0_ & 0x00000004) != 0);
    }
    /**
     * <code>optional bool expressUpgrade = 3;</code>
     * @return The expressUpgrade.
     */
    @java.lang.Override
    public boolean getExpressUpgrade() {
      return expressUpgrade_;
    }

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      memoizedIsInitialized = 1;
      return true;
    }

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      if (((bitField0_ & 0x00000001) != 0)) {
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 1, version_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        output.writeBool(2, autoFinalize_);
      }
      if (((bitField0_ & 0x00000004) != 0)) {
        output.writeBool(3, expressUpgrade_);
      }
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(1, version_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeBoolSize(2, autoFinalize_);
      }
      if (((bitField0_ & 0x00000004) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeBoolSize(3, expressUpgrade_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
      if (!(obj instanceof org.apache.hadoop.yarn.proto.ClientAMProtocol.UpgradeServiceRequestProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.yarn.proto.ClientAMProtocol.UpgradeServiceRequestProto other = (org.apache.hadoop.yarn.proto.ClientAMProtocol.UpgradeServiceRequestProto) obj;

      if (hasVersion() != other.hasVersion()) return false;
      if (hasVersion()) {
        if (!getVersion()
            .equals(other.getVersion())) return false;
      }
      if (hasAutoFinalize() != other.hasAutoFinalize()) return false;
      if (hasAutoFinalize()) {
        if (getAutoFinalize()
            != other.getAutoFinalize()) return false;
      }
      if (hasExpressUpgrade() != other.hasExpressUpgrade()) return false;
      if (hasExpressUpgrade()) {
        if (getExpressUpgrade()
            != other.getExpressUpgrade()) return false;
      }
      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      if (hasVersion()) {
        hash = (37 * hash) + VERSION_FIELD_NUMBER;
        hash = (53 * hash) + getVersion().hashCode();
      }
      if (hasAutoFinalize()) {
        hash = (37 * hash) + AUTOFINALIZE_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashBoolean(
            getAutoFinalize());
      }
      if (hasExpressUpgrade()) {
        hash = (37 * hash) + EXPRESSUPGRADE_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashBoolean(
            getExpressUpgrade());
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }
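    // The hash mixes the descriptor hash with each *set* field, mirroring
    // equals(). memoizedHashCode caches the result (0 is the "not yet
    // computed" sentinel), which is safe because messages are immutable.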

    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.UpgradeServiceRequestProto parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.UpgradeServiceRequestProto parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.UpgradeServiceRequestProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.UpgradeServiceRequestProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.UpgradeServiceRequestProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.UpgradeServiceRequestProto parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.UpgradeServiceRequestProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.UpgradeServiceRequestProto parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.UpgradeServiceRequestProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }

    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.UpgradeServiceRequestProto parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.UpgradeServiceRequestProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.UpgradeServiceRequestProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.yarn.proto.ClientAMProtocol.UpgradeServiceRequestProto prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * Protobuf type {@code hadoop.yarn.UpgradeServiceRequestProto}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.yarn.UpgradeServiceRequestProto)
        org.apache.hadoop.yarn.proto.ClientAMProtocol.UpgradeServiceRequestProtoOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.yarn.proto.ClientAMProtocol.internal_static_hadoop_yarn_UpgradeServiceRequestProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.yarn.proto.ClientAMProtocol.internal_static_hadoop_yarn_UpgradeServiceRequestProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.yarn.proto.ClientAMProtocol.UpgradeServiceRequestProto.class, org.apache.hadoop.yarn.proto.ClientAMProtocol.UpgradeServiceRequestProto.Builder.class);
      }

      // Construct using org.apache.hadoop.yarn.proto.ClientAMProtocol.UpgradeServiceRequestProto.newBuilder()
      private Builder() {
      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);
      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        bitField0_ = 0;
        version_ = "";
        autoFinalize_ = false;
        expressUpgrade_ = false;
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.yarn.proto.ClientAMProtocol.internal_static_hadoop_yarn_UpgradeServiceRequestProto_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.yarn.proto.ClientAMProtocol.UpgradeServiceRequestProto getDefaultInstanceForType() {
        return org.apache.hadoop.yarn.proto.ClientAMProtocol.UpgradeServiceRequestProto.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.yarn.proto.ClientAMProtocol.UpgradeServiceRequestProto build() {
        org.apache.hadoop.yarn.proto.ClientAMProtocol.UpgradeServiceRequestProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.yarn.proto.ClientAMProtocol.UpgradeServiceRequestProto buildPartial() {
        org.apache.hadoop.yarn.proto.ClientAMProtocol.UpgradeServiceRequestProto result = new org.apache.hadoop.yarn.proto.ClientAMProtocol.UpgradeServiceRequestProto(this);
        if (bitField0_ != 0) { buildPartial0(result); }
        onBuilt();
        return result;
      }

      private void buildPartial0(org.apache.hadoop.yarn.proto.ClientAMProtocol.UpgradeServiceRequestProto result) {
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) != 0)) {
          result.version_ = version_;
          to_bitField0_ |= 0x00000001;
        }
        if (((from_bitField0_ & 0x00000002) != 0)) {
          result.autoFinalize_ = autoFinalize_;
          to_bitField0_ |= 0x00000002;
        }
        if (((from_bitField0_ & 0x00000004) != 0)) {
          result.expressUpgrade_ = expressUpgrade_;
          to_bitField0_ |= 0x00000004;
        }
        result.bitField0_ |= to_bitField0_;
      }
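      // buildPartial0 copies only fields whose presence bit is set in the
      // builder and ORs the same mask into the message: 0x00000001 tracks
      // version, 0x00000002 autoFinalize, 0x00000004 expressUpgrade.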

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.yarn.proto.ClientAMProtocol.UpgradeServiceRequestProto) {
          return mergeFrom((org.apache.hadoop.yarn.proto.ClientAMProtocol.UpgradeServiceRequestProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.yarn.proto.ClientAMProtocol.UpgradeServiceRequestProto other) {
        if (other == org.apache.hadoop.yarn.proto.ClientAMProtocol.UpgradeServiceRequestProto.getDefaultInstance()) return this;
        if (other.hasVersion()) {
          version_ = other.version_;
          bitField0_ |= 0x00000001;
          onChanged();
        }
        if (other.hasAutoFinalize()) {
          setAutoFinalize(other.getAutoFinalize());
        }
        if (other.hasExpressUpgrade()) {
          setExpressUpgrade(other.getExpressUpgrade());
        }
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        return true;
      }

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              case 10: {
                version_ = input.readBytes();
                bitField0_ |= 0x00000001;
                break;
              } // case 10
              case 16: {
                autoFinalize_ = input.readBool();
                bitField0_ |= 0x00000002;
                break;
              } // case 16
              case 24: {
                expressUpgrade_ = input.readBool();
                bitField0_ |= 0x00000004;
                break;
              } // case 24
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
      private int bitField0_;

      private java.lang.Object version_ = "";
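      // version_ holds either a String or a ByteString. getVersion() lazily
      // converts a ByteString to a String (caching it when the bytes are valid
      // UTF-8), and getVersionBytes() converts and caches in the opposite
      // direction, matching the length-delimited wire representation.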
      /**
       * <code>optional string version = 1;</code>
       * @return Whether the version field is set.
       */
      public boolean hasVersion() {
        return ((bitField0_ & 0x00000001) != 0);
      }
      /**
       * <code>optional string version = 1;</code>
       * @return The version.
       */
      public java.lang.String getVersion() {
        java.lang.Object ref = version_;
        if (!(ref instanceof java.lang.String)) {
          org.apache.hadoop.thirdparty.protobuf.ByteString bs =
              (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
          java.lang.String s = bs.toStringUtf8();
          if (bs.isValidUtf8()) {
            version_ = s;
          }
          return s;
        } else {
          return (java.lang.String) ref;
        }
      }
      /**
       * <code>optional string version = 1;</code>
       * @return The bytes for version.
       */
      public org.apache.hadoop.thirdparty.protobuf.ByteString
          getVersionBytes() {
        java.lang.Object ref = version_;
        if (ref instanceof String) {
          org.apache.hadoop.thirdparty.protobuf.ByteString b = 
              org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                  (java.lang.String) ref);
          version_ = b;
          return b;
        } else {
          return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        }
      }
      /**
       * <code>optional string version = 1;</code>
       * @param value The version to set.
       * @return This builder for chaining.
       */
      public Builder setVersion(
          java.lang.String value) {
        if (value == null) { throw new NullPointerException(); }
        version_ = value;
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * <code>optional string version = 1;</code>
       * @return This builder for chaining.
       */
      public Builder clearVersion() {
        version_ = getDefaultInstance().getVersion();
        bitField0_ = (bitField0_ & ~0x00000001);
        onChanged();
        return this;
      }
      /**
       * <code>optional string version = 1;</code>
       * @param value The bytes for version to set.
       * @return This builder for chaining.
       */
      public Builder setVersionBytes(
          org.apache.hadoop.thirdparty.protobuf.ByteString value) {
        if (value == null) { throw new NullPointerException(); }
        version_ = value;
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }

      private boolean autoFinalize_ ;
      /**
       * <code>optional bool autoFinalize = 2;</code>
       * @return Whether the autoFinalize field is set.
       */
      @java.lang.Override
      public boolean hasAutoFinalize() {
        return ((bitField0_ & 0x00000002) != 0);
      }
      /**
       * <code>optional bool autoFinalize = 2;</code>
       * @return The autoFinalize.
       */
      @java.lang.Override
      public boolean getAutoFinalize() {
        return autoFinalize_;
      }
      /**
       * <code>optional bool autoFinalize = 2;</code>
       * @param value The autoFinalize to set.
       * @return This builder for chaining.
       */
      public Builder setAutoFinalize(boolean value) {
        autoFinalize_ = value;
        bitField0_ |= 0x00000002;
        onChanged();
        return this;
      }
      /**
       * <code>optional bool autoFinalize = 2;</code>
       * @return This builder for chaining.
       */
      public Builder clearAutoFinalize() {
        bitField0_ = (bitField0_ & ~0x00000002);
        autoFinalize_ = false;
        onChanged();
        return this;
      }

      private boolean expressUpgrade_ ;
      /**
       * <code>optional bool expressUpgrade = 3;</code>
       * @return Whether the expressUpgrade field is set.
       */
      @java.lang.Override
      public boolean hasExpressUpgrade() {
        return ((bitField0_ & 0x00000004) != 0);
      }
      /**
       * <code>optional bool expressUpgrade = 3;</code>
       * @return The expressUpgrade.
       */
      @java.lang.Override
      public boolean getExpressUpgrade() {
        return expressUpgrade_;
      }
      /**
       * <code>optional bool expressUpgrade = 3;</code>
       * @param value The expressUpgrade to set.
       * @return This builder for chaining.
       */
      public Builder setExpressUpgrade(boolean value) {
        expressUpgrade_ = value;
        bitField0_ |= 0x00000004;
        onChanged();
        return this;
      }
      /**
       * <code>optional bool expressUpgrade = 3;</code>
       * @return This builder for chaining.
       */
      public Builder clearExpressUpgrade() {
        bitField0_ = (bitField0_ & ~0x00000004);
        expressUpgrade_ = false;
        onChanged();
        return this;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.yarn.UpgradeServiceRequestProto)
    }

    // @@protoc_insertion_point(class_scope:hadoop.yarn.UpgradeServiceRequestProto)
    private static final org.apache.hadoop.yarn.proto.ClientAMProtocol.UpgradeServiceRequestProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.yarn.proto.ClientAMProtocol.UpgradeServiceRequestProto();
    }

    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.UpgradeServiceRequestProto getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<UpgradeServiceRequestProto>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<UpgradeServiceRequestProto>() {
      @java.lang.Override
      public UpgradeServiceRequestProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<UpgradeServiceRequestProto> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<UpgradeServiceRequestProto> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.yarn.proto.ClientAMProtocol.UpgradeServiceRequestProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }
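
  // Illustrative usage sketch (not part of the generated API): building,
  // serializing, and re-parsing an UpgradeServiceRequestProto. The version
  // string below is a made-up example value.
  //
  //   UpgradeServiceRequestProto req = UpgradeServiceRequestProto.newBuilder()
  //       .setVersion("1.0.1")
  //       .setAutoFinalize(true)
  //       .setExpressUpgrade(false)
  //       .build();
  //   byte[] bytes = req.toByteArray();
  //   UpgradeServiceRequestProto parsed =
  //       UpgradeServiceRequestProto.parseFrom(bytes);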

  public interface UpgradeServiceResponseProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.yarn.UpgradeServiceResponseProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

    /**
     * <code>optional string error = 1;</code>
     * @return Whether the error field is set.
     */
    boolean hasError();
    /**
     * <code>optional string error = 1;</code>
     * @return The error.
     */
    java.lang.String getError();
    /**
     * <code>optional string error = 1;</code>
     * @return The bytes for error.
     */
    org.apache.hadoop.thirdparty.protobuf.ByteString
        getErrorBytes();
  }
  /**
   * Protobuf type {@code hadoop.yarn.UpgradeServiceResponseProto}
   */
  public static final class UpgradeServiceResponseProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.yarn.UpgradeServiceResponseProto)
      UpgradeServiceResponseProtoOrBuilder {
    private static final long serialVersionUID = 0L;
    // Use UpgradeServiceResponseProto.newBuilder() to construct.
    private UpgradeServiceResponseProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private UpgradeServiceResponseProto() {
      error_ = "";
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new UpgradeServiceResponseProto();
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.yarn.proto.ClientAMProtocol.internal_static_hadoop_yarn_UpgradeServiceResponseProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.yarn.proto.ClientAMProtocol.internal_static_hadoop_yarn_UpgradeServiceResponseProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.yarn.proto.ClientAMProtocol.UpgradeServiceResponseProto.class, org.apache.hadoop.yarn.proto.ClientAMProtocol.UpgradeServiceResponseProto.Builder.class);
    }

    private int bitField0_;
    public static final int ERROR_FIELD_NUMBER = 1;
    @SuppressWarnings("serial")
    private volatile java.lang.Object error_ = "";
    /**
     * <code>optional string error = 1;</code>
     * @return Whether the error field is set.
     */
    @java.lang.Override
    public boolean hasError() {
      return ((bitField0_ & 0x00000001) != 0);
    }
    /**
     * <code>optional string error = 1;</code>
     * @return The error.
     */
    @java.lang.Override
    public java.lang.String getError() {
      java.lang.Object ref = error_;
      if (ref instanceof java.lang.String) {
        return (java.lang.String) ref;
      } else {
        org.apache.hadoop.thirdparty.protobuf.ByteString bs = 
            (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        java.lang.String s = bs.toStringUtf8();
        if (bs.isValidUtf8()) {
          error_ = s;
        }
        return s;
      }
    }
    /**
     * <code>optional string error = 1;</code>
     * @return The bytes for error.
     */
    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.ByteString
        getErrorBytes() {
      java.lang.Object ref = error_;
      if (ref instanceof java.lang.String) {
        org.apache.hadoop.thirdparty.protobuf.ByteString b = 
            org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                (java.lang.String) ref);
        error_ = b;
        return b;
      } else {
        return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
      }
    }

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      memoizedIsInitialized = 1;
      return true;
    }

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      if (((bitField0_ & 0x00000001) != 0)) {
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 1, error_);
      }
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(1, error_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
      if (!(obj instanceof org.apache.hadoop.yarn.proto.ClientAMProtocol.UpgradeServiceResponseProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.yarn.proto.ClientAMProtocol.UpgradeServiceResponseProto other = (org.apache.hadoop.yarn.proto.ClientAMProtocol.UpgradeServiceResponseProto) obj;

      if (hasError() != other.hasError()) return false;
      if (hasError()) {
        if (!getError()
            .equals(other.getError())) return false;
      }
      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      if (hasError()) {
        hash = (37 * hash) + ERROR_FIELD_NUMBER;
        hash = (53 * hash) + getError().hashCode();
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }
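
    // hashCode() is memoized; 0 is used as the "not yet computed" sentinel.
    // The 41/19/37/53/29 mixing scheme is the one protoc emits for every
    // message, so equal messages produce equal hash codes.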

    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.UpgradeServiceResponseProto parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.UpgradeServiceResponseProto parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.UpgradeServiceResponseProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.UpgradeServiceResponseProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.UpgradeServiceResponseProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.UpgradeServiceResponseProto parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.UpgradeServiceResponseProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.UpgradeServiceResponseProto parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.UpgradeServiceResponseProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }

    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.UpgradeServiceResponseProto parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.UpgradeServiceResponseProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.UpgradeServiceResponseProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.yarn.proto.ClientAMProtocol.UpgradeServiceResponseProto prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * Protobuf type {@code hadoop.yarn.UpgradeServiceResponseProto}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.yarn.UpgradeServiceResponseProto)
        org.apache.hadoop.yarn.proto.ClientAMProtocol.UpgradeServiceResponseProtoOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.yarn.proto.ClientAMProtocol.internal_static_hadoop_yarn_UpgradeServiceResponseProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.yarn.proto.ClientAMProtocol.internal_static_hadoop_yarn_UpgradeServiceResponseProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.yarn.proto.ClientAMProtocol.UpgradeServiceResponseProto.class, org.apache.hadoop.yarn.proto.ClientAMProtocol.UpgradeServiceResponseProto.Builder.class);
      }

      // Construct using org.apache.hadoop.yarn.proto.ClientAMProtocol.UpgradeServiceResponseProto.newBuilder()
      private Builder() {
      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);
      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        bitField0_ = 0;
        error_ = "";
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.yarn.proto.ClientAMProtocol.internal_static_hadoop_yarn_UpgradeServiceResponseProto_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.yarn.proto.ClientAMProtocol.UpgradeServiceResponseProto getDefaultInstanceForType() {
        return org.apache.hadoop.yarn.proto.ClientAMProtocol.UpgradeServiceResponseProto.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.yarn.proto.ClientAMProtocol.UpgradeServiceResponseProto build() {
        org.apache.hadoop.yarn.proto.ClientAMProtocol.UpgradeServiceResponseProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.yarn.proto.ClientAMProtocol.UpgradeServiceResponseProto buildPartial() {
        org.apache.hadoop.yarn.proto.ClientAMProtocol.UpgradeServiceResponseProto result = new org.apache.hadoop.yarn.proto.ClientAMProtocol.UpgradeServiceResponseProto(this);
        if (bitField0_ != 0) { buildPartial0(result); }
        onBuilt();
        return result;
      }

      private void buildPartial0(org.apache.hadoop.yarn.proto.ClientAMProtocol.UpgradeServiceResponseProto result) {
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) != 0)) {
          result.error_ = error_;
          to_bitField0_ |= 0x00000001;
        }
        result.bitField0_ |= to_bitField0_;
      }

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.yarn.proto.ClientAMProtocol.UpgradeServiceResponseProto) {
          return mergeFrom((org.apache.hadoop.yarn.proto.ClientAMProtocol.UpgradeServiceResponseProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.yarn.proto.ClientAMProtocol.UpgradeServiceResponseProto other) {
        if (other == org.apache.hadoop.yarn.proto.ClientAMProtocol.UpgradeServiceResponseProto.getDefaultInstance()) return this;
        if (other.hasError()) {
          error_ = other.error_;
          bitField0_ |= 0x00000001;
          onChanged();
        }
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        return true;
      }

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              case 10: {
                error_ = input.readBytes();
                bitField0_ |= 0x00000001;
                break;
              } // case 10
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
      private int bitField0_;

      private java.lang.Object error_ = "";
      /**
       * <code>optional string error = 1;</code>
       * @return Whether the error field is set.
       */
      public boolean hasError() {
        return ((bitField0_ & 0x00000001) != 0);
      }
      /**
       * <code>optional string error = 1;</code>
       * @return The error.
       */
      public java.lang.String getError() {
        java.lang.Object ref = error_;
        if (!(ref instanceof java.lang.String)) {
          org.apache.hadoop.thirdparty.protobuf.ByteString bs =
              (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
          java.lang.String s = bs.toStringUtf8();
          if (bs.isValidUtf8()) {
            error_ = s;
          }
          return s;
        } else {
          return (java.lang.String) ref;
        }
      }
      /**
       * <code>optional string error = 1;</code>
       * @return The bytes for error.
       */
      public org.apache.hadoop.thirdparty.protobuf.ByteString
          getErrorBytes() {
        java.lang.Object ref = error_;
        if (ref instanceof String) {
          org.apache.hadoop.thirdparty.protobuf.ByteString b = 
              org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                  (java.lang.String) ref);
          error_ = b;
          return b;
        } else {
          return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        }
      }
      /**
       * <code>optional string error = 1;</code>
       * @param value The error to set.
       * @return This builder for chaining.
       */
      public Builder setError(
          java.lang.String value) {
        if (value == null) { throw new NullPointerException(); }
        error_ = value;
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * <code>optional string error = 1;</code>
       * @return This builder for chaining.
       */
      public Builder clearError() {
        error_ = getDefaultInstance().getError();
        bitField0_ = (bitField0_ & ~0x00000001);
        onChanged();
        return this;
      }
      /**
       * <code>optional string error = 1;</code>
       * @param value The bytes for error to set.
       * @return This builder for chaining.
       */
      public Builder setErrorBytes(
          org.apache.hadoop.thirdparty.protobuf.ByteString value) {
        if (value == null) { throw new NullPointerException(); }
        error_ = value;
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.yarn.UpgradeServiceResponseProto)
    }

    // @@protoc_insertion_point(class_scope:hadoop.yarn.UpgradeServiceResponseProto)
    private static final org.apache.hadoop.yarn.proto.ClientAMProtocol.UpgradeServiceResponseProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.yarn.proto.ClientAMProtocol.UpgradeServiceResponseProto();
    }

    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.UpgradeServiceResponseProto getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<UpgradeServiceResponseProto>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<UpgradeServiceResponseProto>() {
      @java.lang.Override
      public UpgradeServiceResponseProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<UpgradeServiceResponseProto> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<UpgradeServiceResponseProto> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.yarn.proto.ClientAMProtocol.UpgradeServiceResponseProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }
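
  // Illustrative sketch (not part of the generated API): reading the optional
  // error field of a response. Check hasError() before getError(), since
  // getError() returns the proto2 default "" when the field is unset.
  //
  //   UpgradeServiceResponseProto resp = ...; // e.g. returned by the AM RPC
  //   if (resp.hasError()) {
  //     System.err.println("upgrade request failed: " + resp.getError());
  //   }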

  public interface CancelUpgradeRequestProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.yarn.CancelUpgradeRequestProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {
  }
  /**
   * Protobuf type {@code hadoop.yarn.CancelUpgradeRequestProto}
   */
  public static final class CancelUpgradeRequestProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.yarn.CancelUpgradeRequestProto)
      CancelUpgradeRequestProtoOrBuilder {
    private static final long serialVersionUID = 0L;
    // Use CancelUpgradeRequestProto.newBuilder() to construct.
    private CancelUpgradeRequestProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private CancelUpgradeRequestProto() {
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new CancelUpgradeRequestProto();
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.yarn.proto.ClientAMProtocol.internal_static_hadoop_yarn_CancelUpgradeRequestProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.yarn.proto.ClientAMProtocol.internal_static_hadoop_yarn_CancelUpgradeRequestProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.yarn.proto.ClientAMProtocol.CancelUpgradeRequestProto.class, org.apache.hadoop.yarn.proto.ClientAMProtocol.CancelUpgradeRequestProto.Builder.class);
    }

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      memoizedIsInitialized = 1;
      return true;
    }

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
      if (!(obj instanceof org.apache.hadoop.yarn.proto.ClientAMProtocol.CancelUpgradeRequestProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.yarn.proto.ClientAMProtocol.CancelUpgradeRequestProto other = (org.apache.hadoop.yarn.proto.ClientAMProtocol.CancelUpgradeRequestProto) obj;

      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.CancelUpgradeRequestProto parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.CancelUpgradeRequestProto parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.CancelUpgradeRequestProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.CancelUpgradeRequestProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.CancelUpgradeRequestProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.CancelUpgradeRequestProto parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.CancelUpgradeRequestProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.CancelUpgradeRequestProto parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.CancelUpgradeRequestProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }

    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.CancelUpgradeRequestProto parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.CancelUpgradeRequestProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.CancelUpgradeRequestProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.yarn.proto.ClientAMProtocol.CancelUpgradeRequestProto prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * Protobuf type {@code hadoop.yarn.CancelUpgradeRequestProto}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.yarn.CancelUpgradeRequestProto)
        org.apache.hadoop.yarn.proto.ClientAMProtocol.CancelUpgradeRequestProtoOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.yarn.proto.ClientAMProtocol.internal_static_hadoop_yarn_CancelUpgradeRequestProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.yarn.proto.ClientAMProtocol.internal_static_hadoop_yarn_CancelUpgradeRequestProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.yarn.proto.ClientAMProtocol.CancelUpgradeRequestProto.class, org.apache.hadoop.yarn.proto.ClientAMProtocol.CancelUpgradeRequestProto.Builder.class);
      }

      // Construct using org.apache.hadoop.yarn.proto.ClientAMProtocol.CancelUpgradeRequestProto.newBuilder()
      private Builder() {
      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);
      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.yarn.proto.ClientAMProtocol.internal_static_hadoop_yarn_CancelUpgradeRequestProto_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.yarn.proto.ClientAMProtocol.CancelUpgradeRequestProto getDefaultInstanceForType() {
        return org.apache.hadoop.yarn.proto.ClientAMProtocol.CancelUpgradeRequestProto.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.yarn.proto.ClientAMProtocol.CancelUpgradeRequestProto build() {
        org.apache.hadoop.yarn.proto.ClientAMProtocol.CancelUpgradeRequestProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.yarn.proto.ClientAMProtocol.CancelUpgradeRequestProto buildPartial() {
        org.apache.hadoop.yarn.proto.ClientAMProtocol.CancelUpgradeRequestProto result = new org.apache.hadoop.yarn.proto.ClientAMProtocol.CancelUpgradeRequestProto(this);
        onBuilt();
        return result;
      }

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.yarn.proto.ClientAMProtocol.CancelUpgradeRequestProto) {
          return mergeFrom((org.apache.hadoop.yarn.proto.ClientAMProtocol.CancelUpgradeRequestProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.yarn.proto.ClientAMProtocol.CancelUpgradeRequestProto other) {
        if (other == org.apache.hadoop.yarn.proto.ClientAMProtocol.CancelUpgradeRequestProto.getDefaultInstance()) return this;
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        return true;
      }

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.yarn.CancelUpgradeRequestProto)
    }

    // @@protoc_insertion_point(class_scope:hadoop.yarn.CancelUpgradeRequestProto)
    private static final org.apache.hadoop.yarn.proto.ClientAMProtocol.CancelUpgradeRequestProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.yarn.proto.ClientAMProtocol.CancelUpgradeRequestProto();
    }

    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.CancelUpgradeRequestProto getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<CancelUpgradeRequestProto>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<CancelUpgradeRequestProto>() {
      @java.lang.Override
      public CancelUpgradeRequestProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<CancelUpgradeRequestProto> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<CancelUpgradeRequestProto> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.yarn.proto.ClientAMProtocol.CancelUpgradeRequestProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }
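
  // Illustrative note: CancelUpgradeRequestProto declares no fields, so all
  // instances are interchangeable and the shared default instance suffices:
  //
  //   CancelUpgradeRequestProto cancel =
  //       CancelUpgradeRequestProto.getDefaultInstance();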

  public interface CancelUpgradeResponseProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.yarn.CancelUpgradeResponseProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {
  }
  /**
   * Protobuf type {@code hadoop.yarn.CancelUpgradeResponseProto}
   */
  public static final class CancelUpgradeResponseProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.yarn.CancelUpgradeResponseProto)
      CancelUpgradeResponseProtoOrBuilder {
    private static final long serialVersionUID = 0L;
    // Use CancelUpgradeResponseProto.newBuilder() to construct.
    private CancelUpgradeResponseProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private CancelUpgradeResponseProto() {
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new CancelUpgradeResponseProto();
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.yarn.proto.ClientAMProtocol.internal_static_hadoop_yarn_CancelUpgradeResponseProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.yarn.proto.ClientAMProtocol.internal_static_hadoop_yarn_CancelUpgradeResponseProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.yarn.proto.ClientAMProtocol.CancelUpgradeResponseProto.class, org.apache.hadoop.yarn.proto.ClientAMProtocol.CancelUpgradeResponseProto.Builder.class);
    }

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      memoizedIsInitialized = 1;
      return true;
    }

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
      if (!(obj instanceof org.apache.hadoop.yarn.proto.ClientAMProtocol.CancelUpgradeResponseProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.yarn.proto.ClientAMProtocol.CancelUpgradeResponseProto other = (org.apache.hadoop.yarn.proto.ClientAMProtocol.CancelUpgradeResponseProto) obj;

      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.CancelUpgradeResponseProto parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.CancelUpgradeResponseProto parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.CancelUpgradeResponseProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.CancelUpgradeResponseProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.CancelUpgradeResponseProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.CancelUpgradeResponseProto parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.CancelUpgradeResponseProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.CancelUpgradeResponseProto parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.CancelUpgradeResponseProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }

    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.CancelUpgradeResponseProto parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.CancelUpgradeResponseProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.CancelUpgradeResponseProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.yarn.proto.ClientAMProtocol.CancelUpgradeResponseProto prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * Protobuf type {@code hadoop.yarn.CancelUpgradeResponseProto}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.yarn.CancelUpgradeResponseProto)
        org.apache.hadoop.yarn.proto.ClientAMProtocol.CancelUpgradeResponseProtoOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.yarn.proto.ClientAMProtocol.internal_static_hadoop_yarn_CancelUpgradeResponseProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.yarn.proto.ClientAMProtocol.internal_static_hadoop_yarn_CancelUpgradeResponseProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.yarn.proto.ClientAMProtocol.CancelUpgradeResponseProto.class, org.apache.hadoop.yarn.proto.ClientAMProtocol.CancelUpgradeResponseProto.Builder.class);
      }

      // Construct using org.apache.hadoop.yarn.proto.ClientAMProtocol.CancelUpgradeResponseProto.newBuilder()
      private Builder() {

      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);

      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.yarn.proto.ClientAMProtocol.internal_static_hadoop_yarn_CancelUpgradeResponseProto_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.yarn.proto.ClientAMProtocol.CancelUpgradeResponseProto getDefaultInstanceForType() {
        return org.apache.hadoop.yarn.proto.ClientAMProtocol.CancelUpgradeResponseProto.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.yarn.proto.ClientAMProtocol.CancelUpgradeResponseProto build() {
        org.apache.hadoop.yarn.proto.ClientAMProtocol.CancelUpgradeResponseProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.yarn.proto.ClientAMProtocol.CancelUpgradeResponseProto buildPartial() {
        org.apache.hadoop.yarn.proto.ClientAMProtocol.CancelUpgradeResponseProto result = new org.apache.hadoop.yarn.proto.ClientAMProtocol.CancelUpgradeResponseProto(this);
        onBuilt();
        return result;
      }

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.yarn.proto.ClientAMProtocol.CancelUpgradeResponseProto) {
          return mergeFrom((org.apache.hadoop.yarn.proto.ClientAMProtocol.CancelUpgradeResponseProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.yarn.proto.ClientAMProtocol.CancelUpgradeResponseProto other) {
        if (other == org.apache.hadoop.yarn.proto.ClientAMProtocol.CancelUpgradeResponseProto.getDefaultInstance()) return this;
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        return true;
      }

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.yarn.CancelUpgradeResponseProto)
    }

    // @@protoc_insertion_point(class_scope:hadoop.yarn.CancelUpgradeResponseProto)
    private static final org.apache.hadoop.yarn.proto.ClientAMProtocol.CancelUpgradeResponseProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.yarn.proto.ClientAMProtocol.CancelUpgradeResponseProto();
    }

    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.CancelUpgradeResponseProto getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<CancelUpgradeResponseProto>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<CancelUpgradeResponseProto>() {
      @java.lang.Override
      public CancelUpgradeResponseProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<CancelUpgradeResponseProto> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<CancelUpgradeResponseProto> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.yarn.proto.ClientAMProtocol.CancelUpgradeResponseProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }
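
  // Usage sketch (editorial comment, not protoc output): CancelUpgradeResponseProto
  // carries no fields, so a round trip is just build / serialize / parse. Variable
  // names are illustrative.
  //
  //   CancelUpgradeResponseProto resp =
  //       CancelUpgradeResponseProto.newBuilder().build();
  //   byte[] wire = resp.toByteArray();
  //   CancelUpgradeResponseProto parsed =
  //       CancelUpgradeResponseProto.parseFrom(wire);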

  public interface RestartServiceRequestProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.yarn.RestartServiceRequestProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {
  }
  /**
   * Protobuf type {@code hadoop.yarn.RestartServiceRequestProto}
   */
  public static final class RestartServiceRequestProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.yarn.RestartServiceRequestProto)
      RestartServiceRequestProtoOrBuilder {
    private static final long serialVersionUID = 0L;
    // Use RestartServiceRequestProto.newBuilder() to construct.
    private RestartServiceRequestProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private RestartServiceRequestProto() {
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new RestartServiceRequestProto();
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.yarn.proto.ClientAMProtocol.internal_static_hadoop_yarn_RestartServiceRequestProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.yarn.proto.ClientAMProtocol.internal_static_hadoop_yarn_RestartServiceRequestProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.yarn.proto.ClientAMProtocol.RestartServiceRequestProto.class, org.apache.hadoop.yarn.proto.ClientAMProtocol.RestartServiceRequestProto.Builder.class);
    }

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      memoizedIsInitialized = 1;
      return true;
    }

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
      if (!(obj instanceof org.apache.hadoop.yarn.proto.ClientAMProtocol.RestartServiceRequestProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.yarn.proto.ClientAMProtocol.RestartServiceRequestProto other = (org.apache.hadoop.yarn.proto.ClientAMProtocol.RestartServiceRequestProto) obj;

      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.RestartServiceRequestProto parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.RestartServiceRequestProto parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.RestartServiceRequestProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.RestartServiceRequestProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.RestartServiceRequestProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.RestartServiceRequestProto parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.RestartServiceRequestProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.RestartServiceRequestProto parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.RestartServiceRequestProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }

    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.RestartServiceRequestProto parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.RestartServiceRequestProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.RestartServiceRequestProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.yarn.proto.ClientAMProtocol.RestartServiceRequestProto prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * Protobuf type {@code hadoop.yarn.RestartServiceRequestProto}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.yarn.RestartServiceRequestProto)
        org.apache.hadoop.yarn.proto.ClientAMProtocol.RestartServiceRequestProtoOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.yarn.proto.ClientAMProtocol.internal_static_hadoop_yarn_RestartServiceRequestProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.yarn.proto.ClientAMProtocol.internal_static_hadoop_yarn_RestartServiceRequestProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.yarn.proto.ClientAMProtocol.RestartServiceRequestProto.class, org.apache.hadoop.yarn.proto.ClientAMProtocol.RestartServiceRequestProto.Builder.class);
      }

      // Construct using org.apache.hadoop.yarn.proto.ClientAMProtocol.RestartServiceRequestProto.newBuilder()
      private Builder() {

      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);

      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.yarn.proto.ClientAMProtocol.internal_static_hadoop_yarn_RestartServiceRequestProto_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.yarn.proto.ClientAMProtocol.RestartServiceRequestProto getDefaultInstanceForType() {
        return org.apache.hadoop.yarn.proto.ClientAMProtocol.RestartServiceRequestProto.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.yarn.proto.ClientAMProtocol.RestartServiceRequestProto build() {
        org.apache.hadoop.yarn.proto.ClientAMProtocol.RestartServiceRequestProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.yarn.proto.ClientAMProtocol.RestartServiceRequestProto buildPartial() {
        org.apache.hadoop.yarn.proto.ClientAMProtocol.RestartServiceRequestProto result = new org.apache.hadoop.yarn.proto.ClientAMProtocol.RestartServiceRequestProto(this);
        onBuilt();
        return result;
      }

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.yarn.proto.ClientAMProtocol.RestartServiceRequestProto) {
          return mergeFrom((org.apache.hadoop.yarn.proto.ClientAMProtocol.RestartServiceRequestProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.yarn.proto.ClientAMProtocol.RestartServiceRequestProto other) {
        if (other == org.apache.hadoop.yarn.proto.ClientAMProtocol.RestartServiceRequestProto.getDefaultInstance()) return this;
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        return true;
      }

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.yarn.RestartServiceRequestProto)
    }

    // @@protoc_insertion_point(class_scope:hadoop.yarn.RestartServiceRequestProto)
    private static final org.apache.hadoop.yarn.proto.ClientAMProtocol.RestartServiceRequestProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.yarn.proto.ClientAMProtocol.RestartServiceRequestProto();
    }

    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.RestartServiceRequestProto getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<RestartServiceRequestProto>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<RestartServiceRequestProto>() {
      @java.lang.Override
      public RestartServiceRequestProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<RestartServiceRequestProto> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<RestartServiceRequestProto> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.yarn.proto.ClientAMProtocol.RestartServiceRequestProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }
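
  // Usage sketch (editorial, an assumed client-side flow): RestartServiceRequestProto
  // is an empty marker message, so callers can send the shared default instance
  // rather than building a fresh one; both lines below yield equal messages.
  //
  //   RestartServiceRequestProto a = RestartServiceRequestProto.getDefaultInstance();
  //   RestartServiceRequestProto b = RestartServiceRequestProto.newBuilder().build();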

  public interface RestartServiceResponseProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.yarn.RestartServiceResponseProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {
  }
  /**
   * Protobuf type {@code hadoop.yarn.RestartServiceResponseProto}
   */
  public static final class RestartServiceResponseProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.yarn.RestartServiceResponseProto)
      RestartServiceResponseProtoOrBuilder {
    private static final long serialVersionUID = 0L;
    // Use RestartServiceResponseProto.newBuilder() to construct.
    private RestartServiceResponseProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private RestartServiceResponseProto() {
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new RestartServiceResponseProto();
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.yarn.proto.ClientAMProtocol.internal_static_hadoop_yarn_RestartServiceResponseProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.yarn.proto.ClientAMProtocol.internal_static_hadoop_yarn_RestartServiceResponseProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.yarn.proto.ClientAMProtocol.RestartServiceResponseProto.class, org.apache.hadoop.yarn.proto.ClientAMProtocol.RestartServiceResponseProto.Builder.class);
    }

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      memoizedIsInitialized = 1;
      return true;
    }

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
      if (!(obj instanceof org.apache.hadoop.yarn.proto.ClientAMProtocol.RestartServiceResponseProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.yarn.proto.ClientAMProtocol.RestartServiceResponseProto other = (org.apache.hadoop.yarn.proto.ClientAMProtocol.RestartServiceResponseProto) obj;

      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.RestartServiceResponseProto parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.RestartServiceResponseProto parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.RestartServiceResponseProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.RestartServiceResponseProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.RestartServiceResponseProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.RestartServiceResponseProto parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.RestartServiceResponseProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.RestartServiceResponseProto parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.RestartServiceResponseProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }

    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.RestartServiceResponseProto parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.RestartServiceResponseProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.RestartServiceResponseProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.yarn.proto.ClientAMProtocol.RestartServiceResponseProto prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * Protobuf type {@code hadoop.yarn.RestartServiceResponseProto}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.yarn.RestartServiceResponseProto)
        org.apache.hadoop.yarn.proto.ClientAMProtocol.RestartServiceResponseProtoOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.yarn.proto.ClientAMProtocol.internal_static_hadoop_yarn_RestartServiceResponseProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.yarn.proto.ClientAMProtocol.internal_static_hadoop_yarn_RestartServiceResponseProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.yarn.proto.ClientAMProtocol.RestartServiceResponseProto.class, org.apache.hadoop.yarn.proto.ClientAMProtocol.RestartServiceResponseProto.Builder.class);
      }

      // Construct using org.apache.hadoop.yarn.proto.ClientAMProtocol.RestartServiceResponseProto.newBuilder()
      private Builder() {

      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);

      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.yarn.proto.ClientAMProtocol.internal_static_hadoop_yarn_RestartServiceResponseProto_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.yarn.proto.ClientAMProtocol.RestartServiceResponseProto getDefaultInstanceForType() {
        return org.apache.hadoop.yarn.proto.ClientAMProtocol.RestartServiceResponseProto.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.yarn.proto.ClientAMProtocol.RestartServiceResponseProto build() {
        org.apache.hadoop.yarn.proto.ClientAMProtocol.RestartServiceResponseProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.yarn.proto.ClientAMProtocol.RestartServiceResponseProto buildPartial() {
        org.apache.hadoop.yarn.proto.ClientAMProtocol.RestartServiceResponseProto result = new org.apache.hadoop.yarn.proto.ClientAMProtocol.RestartServiceResponseProto(this);
        onBuilt();
        return result;
      }

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.yarn.proto.ClientAMProtocol.RestartServiceResponseProto) {
          return mergeFrom((org.apache.hadoop.yarn.proto.ClientAMProtocol.RestartServiceResponseProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.yarn.proto.ClientAMProtocol.RestartServiceResponseProto other) {
        if (other == org.apache.hadoop.yarn.proto.ClientAMProtocol.RestartServiceResponseProto.getDefaultInstance()) return this;
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        return true;
      }

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.yarn.RestartServiceResponseProto)
    }

    // @@protoc_insertion_point(class_scope:hadoop.yarn.RestartServiceResponseProto)
    private static final org.apache.hadoop.yarn.proto.ClientAMProtocol.RestartServiceResponseProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.yarn.proto.ClientAMProtocol.RestartServiceResponseProto();
    }

    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.RestartServiceResponseProto getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<RestartServiceResponseProto>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<RestartServiceResponseProto>() {
      @java.lang.Override
      public RestartServiceResponseProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<RestartServiceResponseProto> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<RestartServiceResponseProto> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.yarn.proto.ClientAMProtocol.RestartServiceResponseProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }
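
  // Usage sketch (editorial): when a caller only holds a Parser<T> reference, the
  // static parser() accessor (or getParserForType() on an instance) exposes the
  // same PARSER used by the parseFrom overloads above. wireBytes is an
  // illustrative assumption.
  //
  //   org.apache.hadoop.thirdparty.protobuf.Parser<RestartServiceResponseProto> p =
  //       RestartServiceResponseProto.parser();
  //   RestartServiceResponseProto resp = p.parseFrom(wireBytes);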

  public interface CompInstancesUpgradeRequestProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.yarn.CompInstancesUpgradeRequestProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

    /**
     * <code>repeated string containerIds = 1;</code>
     * @return A list containing the containerIds.
     */
    java.util.List<java.lang.String>
        getContainerIdsList();
    /**
     * <code>repeated string containerIds = 1;</code>
     * @return The count of containerIds.
     */
    int getContainerIdsCount();
    /**
     * <code>repeated string containerIds = 1;</code>
     * @param index The index of the element to return.
     * @return The containerIds at the given index.
     */
    java.lang.String getContainerIds(int index);
    /**
     * <code>repeated string containerIds = 1;</code>
     * @param index The index of the value to return.
     * @return The bytes of the containerIds at the given index.
     */
    org.apache.hadoop.thirdparty.protobuf.ByteString
        getContainerIdsBytes(int index);
  }
  /**
   * Protobuf type {@code hadoop.yarn.CompInstancesUpgradeRequestProto}
   */
  public static final class CompInstancesUpgradeRequestProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.yarn.CompInstancesUpgradeRequestProto)
      CompInstancesUpgradeRequestProtoOrBuilder {
    private static final long serialVersionUID = 0L;
    // Use CompInstancesUpgradeRequestProto.newBuilder() to construct.
    private CompInstancesUpgradeRequestProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private CompInstancesUpgradeRequestProto() {
      containerIds_ =
          org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList.emptyList();
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new CompInstancesUpgradeRequestProto();
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.yarn.proto.ClientAMProtocol.internal_static_hadoop_yarn_CompInstancesUpgradeRequestProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.yarn.proto.ClientAMProtocol.internal_static_hadoop_yarn_CompInstancesUpgradeRequestProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.yarn.proto.ClientAMProtocol.CompInstancesUpgradeRequestProto.class, org.apache.hadoop.yarn.proto.ClientAMProtocol.CompInstancesUpgradeRequestProto.Builder.class);
    }

    public static final int CONTAINERIDS_FIELD_NUMBER = 1;
    @SuppressWarnings("serial")
    private org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList containerIds_ =
        org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList.emptyList();
    /**
     * <code>repeated string containerIds = 1;</code>
     * @return A list containing the containerIds.
     */
    public org.apache.hadoop.thirdparty.protobuf.ProtocolStringList
        getContainerIdsList() {
      return containerIds_;
    }
    /**
     * <code>repeated string containerIds = 1;</code>
     * @return The count of containerIds.
     */
    public int getContainerIdsCount() {
      return containerIds_.size();
    }
    /**
     * <code>repeated string containerIds = 1;</code>
     * @param index The index of the element to return.
     * @return The containerIds at the given index.
     */
    public java.lang.String getContainerIds(int index) {
      return containerIds_.get(index);
    }
    /**
     * <code>repeated string containerIds = 1;</code>
     * @param index The index of the value to return.
     * @return The bytes of the containerIds at the given index.
     */
    public org.apache.hadoop.thirdparty.protobuf.ByteString
        getContainerIdsBytes(int index) {
      return containerIds_.getByteString(index);
    }
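
    // Usage sketch (editorial): reading the repeated containerIds field through
    // the accessors above; msg is an illustrative parsed instance, not part of
    // this API.
    //
    //   for (int i = 0; i < msg.getContainerIdsCount(); i++) {
    //     String id = msg.getContainerIds(i);   // one container ID per entry
    //   }
    //   java.util.List<String> all = msg.getContainerIdsList();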

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      memoizedIsInitialized = 1;
      return true;
    }

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      for (int i = 0; i < containerIds_.size(); i++) {
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 1, containerIds_.getRaw(i));
      }
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      {
        int dataSize = 0;
        for (int i = 0; i < containerIds_.size(); i++) {
          dataSize += computeStringSizeNoTag(containerIds_.getRaw(i));
        }
        size += dataSize;
        size += 1 * getContainerIdsList().size();
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
      if (!(obj instanceof org.apache.hadoop.yarn.proto.ClientAMProtocol.CompInstancesUpgradeRequestProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.yarn.proto.ClientAMProtocol.CompInstancesUpgradeRequestProto other = (org.apache.hadoop.yarn.proto.ClientAMProtocol.CompInstancesUpgradeRequestProto) obj;

      if (!getContainerIdsList()
          .equals(other.getContainerIdsList())) return false;
      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      if (getContainerIdsCount() > 0) {
        hash = (37 * hash) + CONTAINERIDS_FIELD_NUMBER;
        hash = (53 * hash) + getContainerIdsList().hashCode();
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.CompInstancesUpgradeRequestProto parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.CompInstancesUpgradeRequestProto parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.CompInstancesUpgradeRequestProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.CompInstancesUpgradeRequestProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.CompInstancesUpgradeRequestProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.CompInstancesUpgradeRequestProto parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.CompInstancesUpgradeRequestProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.CompInstancesUpgradeRequestProto parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.CompInstancesUpgradeRequestProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }

    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.CompInstancesUpgradeRequestProto parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.CompInstancesUpgradeRequestProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.CompInstancesUpgradeRequestProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.yarn.proto.ClientAMProtocol.CompInstancesUpgradeRequestProto prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * Protobuf type {@code hadoop.yarn.CompInstancesUpgradeRequestProto}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.yarn.CompInstancesUpgradeRequestProto)
        org.apache.hadoop.yarn.proto.ClientAMProtocol.CompInstancesUpgradeRequestProtoOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.yarn.proto.ClientAMProtocol.internal_static_hadoop_yarn_CompInstancesUpgradeRequestProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.yarn.proto.ClientAMProtocol.internal_static_hadoop_yarn_CompInstancesUpgradeRequestProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.yarn.proto.ClientAMProtocol.CompInstancesUpgradeRequestProto.class, org.apache.hadoop.yarn.proto.ClientAMProtocol.CompInstancesUpgradeRequestProto.Builder.class);
      }

      // Construct using org.apache.hadoop.yarn.proto.ClientAMProtocol.CompInstancesUpgradeRequestProto.newBuilder()
      private Builder() {

      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);

      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        bitField0_ = 0;
        containerIds_ =
            org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList.emptyList();
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.yarn.proto.ClientAMProtocol.internal_static_hadoop_yarn_CompInstancesUpgradeRequestProto_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.yarn.proto.ClientAMProtocol.CompInstancesUpgradeRequestProto getDefaultInstanceForType() {
        return org.apache.hadoop.yarn.proto.ClientAMProtocol.CompInstancesUpgradeRequestProto.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.yarn.proto.ClientAMProtocol.CompInstancesUpgradeRequestProto build() {
        org.apache.hadoop.yarn.proto.ClientAMProtocol.CompInstancesUpgradeRequestProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.yarn.proto.ClientAMProtocol.CompInstancesUpgradeRequestProto buildPartial() {
        org.apache.hadoop.yarn.proto.ClientAMProtocol.CompInstancesUpgradeRequestProto result = new org.apache.hadoop.yarn.proto.ClientAMProtocol.CompInstancesUpgradeRequestProto(this);
        if (bitField0_ != 0) { buildPartial0(result); }
        onBuilt();
        return result;
      }

      private void buildPartial0(org.apache.hadoop.yarn.proto.ClientAMProtocol.CompInstancesUpgradeRequestProto result) {
        int from_bitField0_ = bitField0_;
        if (((from_bitField0_ & 0x00000001) != 0)) {
          containerIds_.makeImmutable();
          result.containerIds_ = containerIds_;
        }
      }

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.yarn.proto.ClientAMProtocol.CompInstancesUpgradeRequestProto) {
          return mergeFrom((org.apache.hadoop.yarn.proto.ClientAMProtocol.CompInstancesUpgradeRequestProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.yarn.proto.ClientAMProtocol.CompInstancesUpgradeRequestProto other) {
        if (other == org.apache.hadoop.yarn.proto.ClientAMProtocol.CompInstancesUpgradeRequestProto.getDefaultInstance()) return this;
        if (!other.containerIds_.isEmpty()) {
          if (containerIds_.isEmpty()) {
            containerIds_ = other.containerIds_;
            bitField0_ |= 0x00000001;
          } else {
            ensureContainerIdsIsMutable();
            containerIds_.addAll(other.containerIds_);
          }
          onChanged();
        }
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        return true;
      }

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
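              // Note: tag 10 = (field number 1 << 3) | wire type 2
              // (length-delimited), i.e. one containerIds string entry.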
              case 10: {
                org.apache.hadoop.thirdparty.protobuf.ByteString bs = input.readBytes();
                ensureContainerIdsIsMutable();
                containerIds_.add(bs);
                break;
              } // case 10
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
      private int bitField0_;

      private org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList containerIds_ =
          org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList.emptyList();
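      // Note: containerIds_ starts as a shared immutable empty list; the
      // helper below copies it on first write and sets bit 0x1 in bitField0_
      // so buildPartial0 knows the field was populated.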
      private void ensureContainerIdsIsMutable() {
        if (!containerIds_.isModifiable()) {
          containerIds_ = new org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList(containerIds_);
        }
        bitField0_ |= 0x00000001;
      }
      /**
       * <code>repeated string containerIds = 1;</code>
       * @return A list containing the containerIds.
       */
      public org.apache.hadoop.thirdparty.protobuf.ProtocolStringList
          getContainerIdsList() {
        containerIds_.makeImmutable();
        return containerIds_;
      }
      /**
       * <code>repeated string containerIds = 1;</code>
       * @return The count of containerIds.
       */
      public int getContainerIdsCount() {
        return containerIds_.size();
      }
      /**
       * <code>repeated string containerIds = 1;</code>
       * @param index The index of the element to return.
       * @return The containerIds at the given index.
       */
      public java.lang.String getContainerIds(int index) {
        return containerIds_.get(index);
      }
      /**
       * <code>repeated string containerIds = 1;</code>
       * @param index The index of the value to return.
       * @return The bytes of the containerIds at the given index.
       */
      public org.apache.hadoop.thirdparty.protobuf.ByteString
          getContainerIdsBytes(int index) {
        return containerIds_.getByteString(index);
      }
      /**
       * <code>repeated string containerIds = 1;</code>
       * @param index The index to set the value at.
       * @param value The containerIds to set.
       * @return This builder for chaining.
       */
      public Builder setContainerIds(
          int index, java.lang.String value) {
        if (value == null) { throw new NullPointerException(); }
        ensureContainerIdsIsMutable();
        containerIds_.set(index, value);
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * <code>repeated string containerIds = 1;</code>
       * @param value The containerIds to add.
       * @return This builder for chaining.
       */
      public Builder addContainerIds(
          java.lang.String value) {
        if (value == null) { throw new NullPointerException(); }
        ensureContainerIdsIsMutable();
        containerIds_.add(value);
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * <code>repeated string containerIds = 1;</code>
       * @param values The containerIds to add.
       * @return This builder for chaining.
       */
      public Builder addAllContainerIds(
          java.lang.Iterable<java.lang.String> values) {
        ensureContainerIdsIsMutable();
        org.apache.hadoop.thirdparty.protobuf.AbstractMessageLite.Builder.addAll(
            values, containerIds_);
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * <code>repeated string containerIds = 1;</code>
       * @return This builder for chaining.
       */
      public Builder clearContainerIds() {
        containerIds_ =
          org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList.emptyList();
        bitField0_ = (bitField0_ & ~0x00000001);
        onChanged();
        return this;
      }
      /**
       * <code>repeated string containerIds = 1;</code>
       * @param value The bytes of the containerIds to add.
       * @return This builder for chaining.
       */
      public Builder addContainerIdsBytes(
          org.apache.hadoop.thirdparty.protobuf.ByteString value) {
        if (value == null) { throw new NullPointerException(); }
        ensureContainerIdsIsMutable();
        containerIds_.add(value);
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.yarn.CompInstancesUpgradeRequestProto)
    }

    // @@protoc_insertion_point(class_scope:hadoop.yarn.CompInstancesUpgradeRequestProto)
    private static final org.apache.hadoop.yarn.proto.ClientAMProtocol.CompInstancesUpgradeRequestProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.yarn.proto.ClientAMProtocol.CompInstancesUpgradeRequestProto();
    }

    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.CompInstancesUpgradeRequestProto getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<CompInstancesUpgradeRequestProto>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<CompInstancesUpgradeRequestProto>() {
      @java.lang.Override
      public CompInstancesUpgradeRequestProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };
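
    // Note: parsePartialFrom returns buildPartial() rather than build(), and
    // on failure attaches the partially read message to the thrown
    // InvalidProtocolBufferException via setUnfinishedMessage.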

    public static org.apache.hadoop.thirdparty.protobuf.Parser<CompInstancesUpgradeRequestProto> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<CompInstancesUpgradeRequestProto> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.yarn.proto.ClientAMProtocol.CompInstancesUpgradeRequestProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }
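
  // Illustrative usage sketch (editorial; the container id below is a
  // hypothetical placeholder, and toByteArray()/parseFrom are the standard
  // GeneratedMessageV3 entry points):
  //
  //   CompInstancesUpgradeRequestProto request =
  //       CompInstancesUpgradeRequestProto.newBuilder()
  //           .addContainerIds("container_e01_0001_01_000002")
  //           .build();
  //   byte[] wire = request.toByteArray();
  //   CompInstancesUpgradeRequestProto parsed =
  //       CompInstancesUpgradeRequestProto.parseFrom(wire);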

  public interface CompInstancesUpgradeResponseProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.yarn.CompInstancesUpgradeResponseProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {
  }
  /**
   * Protobuf type {@code hadoop.yarn.CompInstancesUpgradeResponseProto}
   */
  public static final class CompInstancesUpgradeResponseProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.yarn.CompInstancesUpgradeResponseProto)
      CompInstancesUpgradeResponseProtoOrBuilder {
  private static final long serialVersionUID = 0L;
    // Use CompInstancesUpgradeResponseProto.newBuilder() to construct.
    private CompInstancesUpgradeResponseProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private CompInstancesUpgradeResponseProto() {
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new CompInstancesUpgradeResponseProto();
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.yarn.proto.ClientAMProtocol.internal_static_hadoop_yarn_CompInstancesUpgradeResponseProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.yarn.proto.ClientAMProtocol.internal_static_hadoop_yarn_CompInstancesUpgradeResponseProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.yarn.proto.ClientAMProtocol.CompInstancesUpgradeResponseProto.class, org.apache.hadoop.yarn.proto.ClientAMProtocol.CompInstancesUpgradeResponseProto.Builder.class);
    }

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      memoizedIsInitialized = 1;
      return true;
    }

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
      if (!(obj instanceof org.apache.hadoop.yarn.proto.ClientAMProtocol.CompInstancesUpgradeResponseProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.yarn.proto.ClientAMProtocol.CompInstancesUpgradeResponseProto other = (org.apache.hadoop.yarn.proto.ClientAMProtocol.CompInstancesUpgradeResponseProto) obj;

      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.CompInstancesUpgradeResponseProto parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.CompInstancesUpgradeResponseProto parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.CompInstancesUpgradeResponseProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.CompInstancesUpgradeResponseProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.CompInstancesUpgradeResponseProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.CompInstancesUpgradeResponseProto parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.CompInstancesUpgradeResponseProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.CompInstancesUpgradeResponseProto parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.CompInstancesUpgradeResponseProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }

    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.CompInstancesUpgradeResponseProto parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.CompInstancesUpgradeResponseProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.CompInstancesUpgradeResponseProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.yarn.proto.ClientAMProtocol.CompInstancesUpgradeResponseProto prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * Protobuf type {@code hadoop.yarn.CompInstancesUpgradeResponseProto}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.yarn.CompInstancesUpgradeResponseProto)
        org.apache.hadoop.yarn.proto.ClientAMProtocol.CompInstancesUpgradeResponseProtoOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.yarn.proto.ClientAMProtocol.internal_static_hadoop_yarn_CompInstancesUpgradeResponseProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.yarn.proto.ClientAMProtocol.internal_static_hadoop_yarn_CompInstancesUpgradeResponseProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.yarn.proto.ClientAMProtocol.CompInstancesUpgradeResponseProto.class, org.apache.hadoop.yarn.proto.ClientAMProtocol.CompInstancesUpgradeResponseProto.Builder.class);
      }

      // Construct using org.apache.hadoop.yarn.proto.ClientAMProtocol.CompInstancesUpgradeResponseProto.newBuilder()
      private Builder() {

      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);

      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.yarn.proto.ClientAMProtocol.internal_static_hadoop_yarn_CompInstancesUpgradeResponseProto_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.yarn.proto.ClientAMProtocol.CompInstancesUpgradeResponseProto getDefaultInstanceForType() {
        return org.apache.hadoop.yarn.proto.ClientAMProtocol.CompInstancesUpgradeResponseProto.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.yarn.proto.ClientAMProtocol.CompInstancesUpgradeResponseProto build() {
        org.apache.hadoop.yarn.proto.ClientAMProtocol.CompInstancesUpgradeResponseProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.yarn.proto.ClientAMProtocol.CompInstancesUpgradeResponseProto buildPartial() {
        org.apache.hadoop.yarn.proto.ClientAMProtocol.CompInstancesUpgradeResponseProto result = new org.apache.hadoop.yarn.proto.ClientAMProtocol.CompInstancesUpgradeResponseProto(this);
        onBuilt();
        return result;
      }

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.yarn.proto.ClientAMProtocol.CompInstancesUpgradeResponseProto) {
          return mergeFrom((org.apache.hadoop.yarn.proto.ClientAMProtocol.CompInstancesUpgradeResponseProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.yarn.proto.ClientAMProtocol.CompInstancesUpgradeResponseProto other) {
        if (other == org.apache.hadoop.yarn.proto.ClientAMProtocol.CompInstancesUpgradeResponseProto.getDefaultInstance()) return this;
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        return true;
      }

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.yarn.CompInstancesUpgradeResponseProto)
    }

    // @@protoc_insertion_point(class_scope:hadoop.yarn.CompInstancesUpgradeResponseProto)
    private static final org.apache.hadoop.yarn.proto.ClientAMProtocol.CompInstancesUpgradeResponseProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.yarn.proto.ClientAMProtocol.CompInstancesUpgradeResponseProto();
    }

    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.CompInstancesUpgradeResponseProto getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<CompInstancesUpgradeResponseProto>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<CompInstancesUpgradeResponseProto>() {
      @java.lang.Override
      public CompInstancesUpgradeResponseProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<CompInstancesUpgradeResponseProto> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<CompInstancesUpgradeResponseProto> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.yarn.proto.ClientAMProtocol.CompInstancesUpgradeResponseProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }
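
  // Note: CompInstancesUpgradeResponseProto declares no fields, so it
  // serializes to zero bytes apart from unknown fields; callers can reuse
  // getDefaultInstance() instead of building new instances.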

  public interface GetCompInstancesRequestProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.yarn.GetCompInstancesRequestProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

    /**
     * <code>repeated string componentNames = 1;</code>
     * @return A list containing the componentNames.
     */
    java.util.List<java.lang.String>
        getComponentNamesList();
    /**
     * <code>repeated string componentNames = 1;</code>
     * @return The count of componentNames.
     */
    int getComponentNamesCount();
    /**
     * <code>repeated string componentNames = 1;</code>
     * @param index The index of the element to return.
     * @return The componentNames at the given index.
     */
    java.lang.String getComponentNames(int index);
    /**
     * <code>repeated string componentNames = 1;</code>
     * @param index The index of the value to return.
     * @return The bytes of the componentNames at the given index.
     */
    org.apache.hadoop.thirdparty.protobuf.ByteString
        getComponentNamesBytes(int index);

    /**
     * <code>optional string version = 2;</code>
     * @return Whether the version field is set.
     */
    boolean hasVersion();
    /**
     * <code>optional string version = 2;</code>
     * @return The version.
     */
    java.lang.String getVersion();
    /**
     * <code>optional string version = 2;</code>
     * @return The bytes for version.
     */
    org.apache.hadoop.thirdparty.protobuf.ByteString
        getVersionBytes();

    /**
     * <code>repeated string containerStates = 3;</code>
     * @return A list containing the containerStates.
     */
    java.util.List<java.lang.String>
        getContainerStatesList();
    /**
     * <code>repeated string containerStates = 3;</code>
     * @return The count of containerStates.
     */
    int getContainerStatesCount();
    /**
     * <code>repeated string containerStates = 3;</code>
     * @param index The index of the element to return.
     * @return The containerStates at the given index.
     */
    java.lang.String getContainerStates(int index);
    /**
     * <code>repeated string containerStates = 3;</code>
     * @param index The index of the value to return.
     * @return The bytes of the containerStates at the given index.
     */
    org.apache.hadoop.thirdparty.protobuf.ByteString
        getContainerStatesBytes(int index);
  }
  /**
   * Protobuf type {@code hadoop.yarn.GetCompInstancesRequestProto}
   */
  public static final class GetCompInstancesRequestProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.yarn.GetCompInstancesRequestProto)
      GetCompInstancesRequestProtoOrBuilder {
  private static final long serialVersionUID = 0L;
    // Use GetCompInstancesRequestProto.newBuilder() to construct.
    private GetCompInstancesRequestProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private GetCompInstancesRequestProto() {
      componentNames_ =
          org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList.emptyList();
      version_ = "";
      containerStates_ =
          org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList.emptyList();
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new GetCompInstancesRequestProto();
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.yarn.proto.ClientAMProtocol.internal_static_hadoop_yarn_GetCompInstancesRequestProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.yarn.proto.ClientAMProtocol.internal_static_hadoop_yarn_GetCompInstancesRequestProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.yarn.proto.ClientAMProtocol.GetCompInstancesRequestProto.class, org.apache.hadoop.yarn.proto.ClientAMProtocol.GetCompInstancesRequestProto.Builder.class);
    }

    private int bitField0_;
    public static final int COMPONENTNAMES_FIELD_NUMBER = 1;
    @SuppressWarnings("serial")
    private org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList componentNames_ =
        org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList.emptyList();
    /**
     * <code>repeated string componentNames = 1;</code>
     * @return A list containing the componentNames.
     */
    public org.apache.hadoop.thirdparty.protobuf.ProtocolStringList
        getComponentNamesList() {
      return componentNames_;
    }
    /**
     * <code>repeated string componentNames = 1;</code>
     * @return The count of componentNames.
     */
    public int getComponentNamesCount() {
      return componentNames_.size();
    }
    /**
     * <code>repeated string componentNames = 1;</code>
     * @param index The index of the element to return.
     * @return The componentNames at the given index.
     */
    public java.lang.String getComponentNames(int index) {
      return componentNames_.get(index);
    }
    /**
     * <code>repeated string componentNames = 1;</code>
     * @param index The index of the value to return.
     * @return The bytes of the componentNames at the given index.
     */
    public org.apache.hadoop.thirdparty.protobuf.ByteString
        getComponentNamesBytes(int index) {
      return componentNames_.getByteString(index);
    }

    public static final int VERSION_FIELD_NUMBER = 2;
    @SuppressWarnings("serial")
    private volatile java.lang.Object version_ = "";
    /**
     * <code>optional string version = 2;</code>
     * @return Whether the version field is set.
     */
    @java.lang.Override
    public boolean hasVersion() {
      return ((bitField0_ & 0x00000001) != 0);
    }
    /**
     * <code>optional string version = 2;</code>
     * @return The version.
     */
    @java.lang.Override
    public java.lang.String getVersion() {
      java.lang.Object ref = version_;
      if (ref instanceof java.lang.String) {
        return (java.lang.String) ref;
      } else {
        org.apache.hadoop.thirdparty.protobuf.ByteString bs = 
            (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        java.lang.String s = bs.toStringUtf8();
        if (bs.isValidUtf8()) {
          version_ = s;
        }
        return s;
      }
    }
    /**
     * <code>optional string version = 2;</code>
     * @return The bytes for version.
     */
    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.ByteString
        getVersionBytes() {
      java.lang.Object ref = version_;
      if (ref instanceof java.lang.String) {
        org.apache.hadoop.thirdparty.protobuf.ByteString b = 
            org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                (java.lang.String) ref);
        version_ = b;
        return b;
      } else {
        return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
      }
    }

    public static final int CONTAINERSTATES_FIELD_NUMBER = 3;
    @SuppressWarnings("serial")
    private org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList containerStates_ =
        org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList.emptyList();
    /**
     * <code>repeated string containerStates = 3;</code>
     * @return A list containing the containerStates.
     */
    public org.apache.hadoop.thirdparty.protobuf.ProtocolStringList
        getContainerStatesList() {
      return containerStates_;
    }
    /**
     * <code>repeated string containerStates = 3;</code>
     * @return The count of containerStates.
     */
    public int getContainerStatesCount() {
      return containerStates_.size();
    }
    /**
     * <code>repeated string containerStates = 3;</code>
     * @param index The index of the element to return.
     * @return The containerStates at the given index.
     */
    public java.lang.String getContainerStates(int index) {
      return containerStates_.get(index);
    }
    /**
     * <code>repeated string containerStates = 3;</code>
     * @param index The index of the value to return.
     * @return The bytes of the containerStates at the given index.
     */
    public org.apache.hadoop.thirdparty.protobuf.ByteString
        getContainerStatesBytes(int index) {
      return containerStates_.getByteString(index);
    }

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      memoizedIsInitialized = 1;
      return true;
    }

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      for (int i = 0; i < componentNames_.size(); i++) {
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 1, componentNames_.getRaw(i));
      }
      if (((bitField0_ & 0x00000001) != 0)) {
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 2, version_);
      }
      for (int i = 0; i < containerStates_.size(); i++) {
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 3, containerStates_.getRaw(i));
      }
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      {
        int dataSize = 0;
        for (int i = 0; i < componentNames_.size(); i++) {
          dataSize += computeStringSizeNoTag(componentNames_.getRaw(i));
        }
        size += dataSize;
        size += 1 * getComponentNamesList().size();
      }
      if (((bitField0_ & 0x00000001) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(2, version_);
      }
      {
        int dataSize = 0;
        for (int i = 0; i < containerStates_.size(); i++) {
          dataSize += computeStringSizeNoTag(containerStates_.getRaw(i));
        }
        size += dataSize;
        size += 1 * getContainerStatesList().size();
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }
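
    // Note: the "1 * size()" terms above charge one tag byte per element;
    // one byte suffices because field numbers 1-15 with wire type 2 encode
    // to single-byte tags.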

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
      if (!(obj instanceof org.apache.hadoop.yarn.proto.ClientAMProtocol.GetCompInstancesRequestProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.yarn.proto.ClientAMProtocol.GetCompInstancesRequestProto other = (org.apache.hadoop.yarn.proto.ClientAMProtocol.GetCompInstancesRequestProto) obj;

      if (!getComponentNamesList()
          .equals(other.getComponentNamesList())) return false;
      if (hasVersion() != other.hasVersion()) return false;
      if (hasVersion()) {
        if (!getVersion()
            .equals(other.getVersion())) return false;
      }
      if (!getContainerStatesList()
          .equals(other.getContainerStatesList())) return false;
      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      if (getComponentNamesCount() > 0) {
        hash = (37 * hash) + COMPONENTNAMES_FIELD_NUMBER;
        hash = (53 * hash) + getComponentNamesList().hashCode();
      }
      if (hasVersion()) {
        hash = (37 * hash) + VERSION_FIELD_NUMBER;
        hash = (53 * hash) + getVersion().hashCode();
      }
      if (getContainerStatesCount() > 0) {
        hash = (37 * hash) + CONTAINERSTATES_FIELD_NUMBER;
        hash = (53 * hash) + getContainerStatesList().hashCode();
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.GetCompInstancesRequestProto parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.GetCompInstancesRequestProto parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.GetCompInstancesRequestProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.GetCompInstancesRequestProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.GetCompInstancesRequestProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.GetCompInstancesRequestProto parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.GetCompInstancesRequestProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.GetCompInstancesRequestProto parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.GetCompInstancesRequestProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }

    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.GetCompInstancesRequestProto parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.GetCompInstancesRequestProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.GetCompInstancesRequestProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.yarn.proto.ClientAMProtocol.GetCompInstancesRequestProto prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * Protobuf type {@code hadoop.yarn.GetCompInstancesRequestProto}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.yarn.GetCompInstancesRequestProto)
        org.apache.hadoop.yarn.proto.ClientAMProtocol.GetCompInstancesRequestProtoOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.yarn.proto.ClientAMProtocol.internal_static_hadoop_yarn_GetCompInstancesRequestProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.yarn.proto.ClientAMProtocol.internal_static_hadoop_yarn_GetCompInstancesRequestProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.yarn.proto.ClientAMProtocol.GetCompInstancesRequestProto.class, org.apache.hadoop.yarn.proto.ClientAMProtocol.GetCompInstancesRequestProto.Builder.class);
      }

      // Construct using org.apache.hadoop.yarn.proto.ClientAMProtocol.GetCompInstancesRequestProto.newBuilder()
      private Builder() {

      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);

      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        bitField0_ = 0;
        componentNames_ =
            org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList.emptyList();
        version_ = "";
        containerStates_ =
            org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList.emptyList();
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.yarn.proto.ClientAMProtocol.internal_static_hadoop_yarn_GetCompInstancesRequestProto_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.yarn.proto.ClientAMProtocol.GetCompInstancesRequestProto getDefaultInstanceForType() {
        return org.apache.hadoop.yarn.proto.ClientAMProtocol.GetCompInstancesRequestProto.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.yarn.proto.ClientAMProtocol.GetCompInstancesRequestProto build() {
        org.apache.hadoop.yarn.proto.ClientAMProtocol.GetCompInstancesRequestProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.yarn.proto.ClientAMProtocol.GetCompInstancesRequestProto buildPartial() {
        org.apache.hadoop.yarn.proto.ClientAMProtocol.GetCompInstancesRequestProto result = new org.apache.hadoop.yarn.proto.ClientAMProtocol.GetCompInstancesRequestProto(this);
        if (bitField0_ != 0) { buildPartial0(result); }
        onBuilt();
        return result;
      }

      private void buildPartial0(org.apache.hadoop.yarn.proto.ClientAMProtocol.GetCompInstancesRequestProto result) {
        int from_bitField0_ = bitField0_;
        if (((from_bitField0_ & 0x00000001) != 0)) {
          componentNames_.makeImmutable();
          result.componentNames_ = componentNames_;
        }
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000002) != 0)) {
          result.version_ = version_;
          to_bitField0_ |= 0x00000001;
        }
        if (((from_bitField0_ & 0x00000004) != 0)) {
          containerStates_.makeImmutable();
          result.containerStates_ = containerStates_;
        }
        result.bitField0_ |= to_bitField0_;
      }

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.yarn.proto.ClientAMProtocol.GetCompInstancesRequestProto) {
          return mergeFrom((org.apache.hadoop.yarn.proto.ClientAMProtocol.GetCompInstancesRequestProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.yarn.proto.ClientAMProtocol.GetCompInstancesRequestProto other) {
        if (other == org.apache.hadoop.yarn.proto.ClientAMProtocol.GetCompInstancesRequestProto.getDefaultInstance()) return this;
        if (!other.componentNames_.isEmpty()) {
          if (componentNames_.isEmpty()) {
            componentNames_ = other.componentNames_;
            bitField0_ |= 0x00000001;
          } else {
            ensureComponentNamesIsMutable();
            componentNames_.addAll(other.componentNames_);
          }
          onChanged();
        }
        if (other.hasVersion()) {
          version_ = other.version_;
          bitField0_ |= 0x00000002;
          onChanged();
        }
        if (!other.containerStates_.isEmpty()) {
          if (containerStates_.isEmpty()) {
            containerStates_ = other.containerStates_;
            bitField0_ |= 0x00000004;
          } else {
            ensureContainerStatesIsMutable();
            containerStates_.addAll(other.containerStates_);
          }
          onChanged();
        }
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        return true;
      }

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
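              // Note: tags 10, 18, 26 decode fields 1-3 (componentNames,
              // version, containerStates); each is length-delimited, with
              // tag = (field number << 3) | 2.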
              case 10: {
                org.apache.hadoop.thirdparty.protobuf.ByteString bs = input.readBytes();
                ensureComponentNamesIsMutable();
                componentNames_.add(bs);
                break;
              } // case 10
              case 18: {
                version_ = input.readBytes();
                bitField0_ |= 0x00000002;
                break;
              } // case 18
              case 26: {
                org.apache.hadoop.thirdparty.protobuf.ByteString bs = input.readBytes();
                ensureContainerStatesIsMutable();
                containerStates_.add(bs);
                break;
              } // case 26
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
      private int bitField0_;

      private org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList componentNames_ =
          org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList.emptyList();
      private void ensureComponentNamesIsMutable() {
        if (!componentNames_.isModifiable()) {
          componentNames_ = new org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList(componentNames_);
        }
        bitField0_ |= 0x00000001;
      }
      /**
       * <code>repeated string componentNames = 1;</code>
       * @return A list containing the componentNames.
       */
      public org.apache.hadoop.thirdparty.protobuf.ProtocolStringList
          getComponentNamesList() {
        componentNames_.makeImmutable();
        return componentNames_;
      }
      /**
       * <code>repeated string componentNames = 1;</code>
       * @return The count of componentNames.
       */
      public int getComponentNamesCount() {
        return componentNames_.size();
      }
      /**
       * <code>repeated string componentNames = 1;</code>
       * @param index The index of the element to return.
       * @return The componentNames at the given index.
       */
      public java.lang.String getComponentNames(int index) {
        return componentNames_.get(index);
      }
      /**
       * <code>repeated string componentNames = 1;</code>
       * @param index The index of the value to return.
       * @return The bytes of the componentNames at the given index.
       */
      public org.apache.hadoop.thirdparty.protobuf.ByteString
          getComponentNamesBytes(int index) {
        return componentNames_.getByteString(index);
      }
      /**
       * <code>repeated string componentNames = 1;</code>
       * @param index The index to set the value at.
       * @param value The componentNames to set.
       * @return This builder for chaining.
       */
      public Builder setComponentNames(
          int index, java.lang.String value) {
        if (value == null) { throw new NullPointerException(); }
        ensureComponentNamesIsMutable();
        componentNames_.set(index, value);
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * <code>repeated string componentNames = 1;</code>
       * @param value The componentNames to add.
       * @return This builder for chaining.
       */
      public Builder addComponentNames(
          java.lang.String value) {
        if (value == null) { throw new NullPointerException(); }
        ensureComponentNamesIsMutable();
        componentNames_.add(value);
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * <code>repeated string componentNames = 1;</code>
       * @param values The componentNames to add.
       * @return This builder for chaining.
       */
      public Builder addAllComponentNames(
          java.lang.Iterable<java.lang.String> values) {
        ensureComponentNamesIsMutable();
        org.apache.hadoop.thirdparty.protobuf.AbstractMessageLite.Builder.addAll(
            values, componentNames_);
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * <code>repeated string componentNames = 1;</code>
       * @return This builder for chaining.
       */
      public Builder clearComponentNames() {
        componentNames_ =
          org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList.emptyList();
        bitField0_ = (bitField0_ & ~0x00000001);
        onChanged();
        return this;
      }
      /**
       * <code>repeated string componentNames = 1;</code>
       * @param value The bytes of the componentNames to add.
       * @return This builder for chaining.
       */
      public Builder addComponentNamesBytes(
          org.apache.hadoop.thirdparty.protobuf.ByteString value) {
        if (value == null) { throw new NullPointerException(); }
        ensureComponentNamesIsMutable();
        componentNames_.add(value);
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }

      private java.lang.Object version_ = "";
      /**
       * <code>optional string version = 2;</code>
       * @return Whether the version field is set.
       */
      public boolean hasVersion() {
        return ((bitField0_ & 0x00000002) != 0);
      }
      /**
       * <code>optional string version = 2;</code>
       * @return The version.
       */
      public java.lang.String getVersion() {
        java.lang.Object ref = version_;
        if (!(ref instanceof java.lang.String)) {
          org.apache.hadoop.thirdparty.protobuf.ByteString bs =
              (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
          java.lang.String s = bs.toStringUtf8();
          if (bs.isValidUtf8()) {
            version_ = s;
          }
          return s;
        } else {
          return (java.lang.String) ref;
        }
      }
      /**
       * <code>optional string version = 2;</code>
       * @return The bytes for version.
       */
      public org.apache.hadoop.thirdparty.protobuf.ByteString
          getVersionBytes() {
        java.lang.Object ref = version_;
        if (ref instanceof String) {
          org.apache.hadoop.thirdparty.protobuf.ByteString b = 
              org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                  (java.lang.String) ref);
          version_ = b;
          return b;
        } else {
          return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        }
      }
      /**
       * <code>optional string version = 2;</code>
       * @param value The version to set.
       * @return This builder for chaining.
       */
      public Builder setVersion(
          java.lang.String value) {
        if (value == null) { throw new NullPointerException(); }
        version_ = value;
        bitField0_ |= 0x00000002;
        onChanged();
        return this;
      }
      /**
       * <code>optional string version = 2;</code>
       * @return This builder for chaining.
       */
      public Builder clearVersion() {
        version_ = getDefaultInstance().getVersion();
        bitField0_ = (bitField0_ & ~0x00000002);
        onChanged();
        return this;
      }
      /**
       * <code>optional string version = 2;</code>
       * @param value The bytes for version to set.
       * @return This builder for chaining.
       */
      public Builder setVersionBytes(
          org.apache.hadoop.thirdparty.protobuf.ByteString value) {
        if (value == null) { throw new NullPointerException(); }
        version_ = value;
        bitField0_ |= 0x00000002;
        onChanged();
        return this;
      }

      private org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList containerStates_ =
          org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList.emptyList();
      private void ensureContainerStatesIsMutable() {
        if (!containerStates_.isModifiable()) {
          containerStates_ = new org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList(containerStates_);
        }
        bitField0_ |= 0x00000004;
      }
      /**
       * <code>repeated string containerStates = 3;</code>
       * @return A list containing the containerStates.
       */
      public org.apache.hadoop.thirdparty.protobuf.ProtocolStringList
          getContainerStatesList() {
        containerStates_.makeImmutable();
        return containerStates_;
      }
      /**
       * <code>repeated string containerStates = 3;</code>
       * @return The count of containerStates.
       */
      public int getContainerStatesCount() {
        return containerStates_.size();
      }
      /**
       * <code>repeated string containerStates = 3;</code>
       * @param index The index of the element to return.
       * @return The containerStates at the given index.
       */
      public java.lang.String getContainerStates(int index) {
        return containerStates_.get(index);
      }
      /**
       * <code>repeated string containerStates = 3;</code>
       * @param index The index of the value to return.
       * @return The bytes of the containerStates at the given index.
       */
      public org.apache.hadoop.thirdparty.protobuf.ByteString
          getContainerStatesBytes(int index) {
        return containerStates_.getByteString(index);
      }
      /**
       * <code>repeated string containerStates = 3;</code>
       * @param index The index to set the value at.
       * @param value The containerStates to set.
       * @return This builder for chaining.
       */
      public Builder setContainerStates(
          int index, java.lang.String value) {
        if (value == null) { throw new NullPointerException(); }
        ensureContainerStatesIsMutable();
        containerStates_.set(index, value);
        bitField0_ |= 0x00000004;
        onChanged();
        return this;
      }
      /**
       * <code>repeated string containerStates = 3;</code>
       * @param value The containerStates to add.
       * @return This builder for chaining.
       */
      public Builder addContainerStates(
          java.lang.String value) {
        if (value == null) { throw new NullPointerException(); }
        ensureContainerStatesIsMutable();
        containerStates_.add(value);
        bitField0_ |= 0x00000004;
        onChanged();
        return this;
      }
      /**
       * <code>repeated string containerStates = 3;</code>
       * @param values The containerStates to add.
       * @return This builder for chaining.
       */
      public Builder addAllContainerStates(
          java.lang.Iterable<java.lang.String> values) {
        ensureContainerStatesIsMutable();
        org.apache.hadoop.thirdparty.protobuf.AbstractMessageLite.Builder.addAll(
            values, containerStates_);
        bitField0_ |= 0x00000004;
        onChanged();
        return this;
      }
      /**
       * <code>repeated string containerStates = 3;</code>
       * @return This builder for chaining.
       */
      public Builder clearContainerStates() {
        containerStates_ =
          org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList.emptyList();
        bitField0_ = (bitField0_ & ~0x00000004);
        onChanged();
        return this;
      }
      /**
       * <code>repeated string containerStates = 3;</code>
       * @param value The bytes of the containerStates to add.
       * @return This builder for chaining.
       */
      public Builder addContainerStatesBytes(
          org.apache.hadoop.thirdparty.protobuf.ByteString value) {
        if (value == null) { throw new NullPointerException(); }
        ensureContainerStatesIsMutable();
        containerStates_.add(value);
        bitField0_ |= 0x00000004;
        onChanged();
        return this;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.yarn.GetCompInstancesRequestProto)
    }

    // @@protoc_insertion_point(class_scope:hadoop.yarn.GetCompInstancesRequestProto)
    private static final org.apache.hadoop.yarn.proto.ClientAMProtocol.GetCompInstancesRequestProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.yarn.proto.ClientAMProtocol.GetCompInstancesRequestProto();
    }

    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.GetCompInstancesRequestProto getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<GetCompInstancesRequestProto>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<GetCompInstancesRequestProto>() {
      @java.lang.Override
      public GetCompInstancesRequestProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<GetCompInstancesRequestProto> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<GetCompInstancesRequestProto> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.yarn.proto.ClientAMProtocol.GetCompInstancesRequestProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }
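
  // Illustrative round-trip sketch for GetCompInstancesRequestProto; the
  // component, version and state values below are assumed examples only:
  //
  //   GetCompInstancesRequestProto req = GetCompInstancesRequestProto.newBuilder()
  //       .addComponentNames("worker")   // assumed component name
  //       .setVersion("1.0.0")           // assumed version string
  //       .addContainerStates("READY")   // assumed container state
  //       .build();
  //   GetCompInstancesRequestProto roundTrip =
  //       GetCompInstancesRequestProto.parseFrom(req.toByteArray());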

  public interface GetCompInstancesResponseProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.yarn.GetCompInstancesResponseProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

    /**
     * <code>optional string compInstances = 1;</code>
     * @return Whether the compInstances field is set.
     */
    boolean hasCompInstances();
    /**
     * <code>optional string compInstances = 1;</code>
     * @return The compInstances.
     */
    java.lang.String getCompInstances();
    /**
     * <code>optional string compInstances = 1;</code>
     * @return The bytes for compInstances.
     */
    org.apache.hadoop.thirdparty.protobuf.ByteString
        getCompInstancesBytes();
  }
  /**
   * Protobuf type {@code hadoop.yarn.GetCompInstancesResponseProto}
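   *
   * <p>Illustrative usage sketch only; the payload string below is an assumed
   * example, not dictated by the .proto definition:
   * <pre>{@code
   * GetCompInstancesResponseProto resp = GetCompInstancesResponseProto.newBuilder()
   *     .setCompInstances("[{\"name\":\"worker-0\"}]") // assumed payload
   *     .build();
   * String payload = resp.hasCompInstances() ? resp.getCompInstances() : "";
   * }</pre>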
   */
  public static final class GetCompInstancesResponseProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.yarn.GetCompInstancesResponseProto)
      GetCompInstancesResponseProtoOrBuilder {
    private static final long serialVersionUID = 0L;
    // Use GetCompInstancesResponseProto.newBuilder() to construct.
    private GetCompInstancesResponseProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private GetCompInstancesResponseProto() {
      compInstances_ = "";
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new GetCompInstancesResponseProto();
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.yarn.proto.ClientAMProtocol.internal_static_hadoop_yarn_GetCompInstancesResponseProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.yarn.proto.ClientAMProtocol.internal_static_hadoop_yarn_GetCompInstancesResponseProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.yarn.proto.ClientAMProtocol.GetCompInstancesResponseProto.class, org.apache.hadoop.yarn.proto.ClientAMProtocol.GetCompInstancesResponseProto.Builder.class);
    }

    private int bitField0_;
    public static final int COMPINSTANCES_FIELD_NUMBER = 1;
    @SuppressWarnings("serial")
    private volatile java.lang.Object compInstances_ = "";
    /**
     * <code>optional string compInstances = 1;</code>
     * @return Whether the compInstances field is set.
     */
    @java.lang.Override
    public boolean hasCompInstances() {
      return ((bitField0_ & 0x00000001) != 0);
    }
    /**
     * <code>optional string compInstances = 1;</code>
     * @return The compInstances.
     */
    @java.lang.Override
    public java.lang.String getCompInstances() {
      java.lang.Object ref = compInstances_;
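      // Lazy decode: the field is held as a ByteString until first read and
      // cached back as a String only when the bytes are valid UTF-8.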
      if (ref instanceof java.lang.String) {
        return (java.lang.String) ref;
      } else {
        org.apache.hadoop.thirdparty.protobuf.ByteString bs = 
            (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        java.lang.String s = bs.toStringUtf8();
        if (bs.isValidUtf8()) {
          compInstances_ = s;
        }
        return s;
      }
    }
    /**
     * <code>optional string compInstances = 1;</code>
     * @return The bytes for compInstances.
     */
    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.ByteString
        getCompInstancesBytes() {
      java.lang.Object ref = compInstances_;
      if (ref instanceof java.lang.String) {
        org.apache.hadoop.thirdparty.protobuf.ByteString b = 
            org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                (java.lang.String) ref);
        compInstances_ = b;
        return b;
      } else {
        return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
      }
    }

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      memoizedIsInitialized = 1;
      return true;
    }

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      if (((bitField0_ & 0x00000001) != 0)) {
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 1, compInstances_);
      }
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(1, compInstances_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
      if (!(obj instanceof org.apache.hadoop.yarn.proto.ClientAMProtocol.GetCompInstancesResponseProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.yarn.proto.ClientAMProtocol.GetCompInstancesResponseProto other = (org.apache.hadoop.yarn.proto.ClientAMProtocol.GetCompInstancesResponseProto) obj;

      if (hasCompInstances() != other.hasCompInstances()) return false;
      if (hasCompInstances()) {
        if (!getCompInstances()
            .equals(other.getCompInstances())) return false;
      }
      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      if (hasCompInstances()) {
        hash = (37 * hash) + COMPINSTANCES_FIELD_NUMBER;
        hash = (53 * hash) + getCompInstances().hashCode();
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.GetCompInstancesResponseProto parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.GetCompInstancesResponseProto parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.GetCompInstancesResponseProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.GetCompInstancesResponseProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.GetCompInstancesResponseProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.GetCompInstancesResponseProto parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.GetCompInstancesResponseProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.GetCompInstancesResponseProto parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.GetCompInstancesResponseProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }

    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.GetCompInstancesResponseProto parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.GetCompInstancesResponseProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.GetCompInstancesResponseProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.yarn.proto.ClientAMProtocol.GetCompInstancesResponseProto prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * Protobuf type {@code hadoop.yarn.GetCompInstancesResponseProto}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.yarn.GetCompInstancesResponseProto)
        org.apache.hadoop.yarn.proto.ClientAMProtocol.GetCompInstancesResponseProtoOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.yarn.proto.ClientAMProtocol.internal_static_hadoop_yarn_GetCompInstancesResponseProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.yarn.proto.ClientAMProtocol.internal_static_hadoop_yarn_GetCompInstancesResponseProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.yarn.proto.ClientAMProtocol.GetCompInstancesResponseProto.class, org.apache.hadoop.yarn.proto.ClientAMProtocol.GetCompInstancesResponseProto.Builder.class);
      }

      // Construct using org.apache.hadoop.yarn.proto.ClientAMProtocol.GetCompInstancesResponseProto.newBuilder()
      private Builder() {

      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);

      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        bitField0_ = 0;
        compInstances_ = "";
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.yarn.proto.ClientAMProtocol.internal_static_hadoop_yarn_GetCompInstancesResponseProto_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.yarn.proto.ClientAMProtocol.GetCompInstancesResponseProto getDefaultInstanceForType() {
        return org.apache.hadoop.yarn.proto.ClientAMProtocol.GetCompInstancesResponseProto.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.yarn.proto.ClientAMProtocol.GetCompInstancesResponseProto build() {
        org.apache.hadoop.yarn.proto.ClientAMProtocol.GetCompInstancesResponseProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.yarn.proto.ClientAMProtocol.GetCompInstancesResponseProto buildPartial() {
        org.apache.hadoop.yarn.proto.ClientAMProtocol.GetCompInstancesResponseProto result = new org.apache.hadoop.yarn.proto.ClientAMProtocol.GetCompInstancesResponseProto(this);
        if (bitField0_ != 0) { buildPartial0(result); }
        onBuilt();
        return result;
      }

      private void buildPartial0(org.apache.hadoop.yarn.proto.ClientAMProtocol.GetCompInstancesResponseProto result) {
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) != 0)) {
          result.compInstances_ = compInstances_;
          to_bitField0_ |= 0x00000001;
        }
        result.bitField0_ |= to_bitField0_;
      }

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.yarn.proto.ClientAMProtocol.GetCompInstancesResponseProto) {
          return mergeFrom((org.apache.hadoop.yarn.proto.ClientAMProtocol.GetCompInstancesResponseProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.yarn.proto.ClientAMProtocol.GetCompInstancesResponseProto other) {
        if (other == org.apache.hadoop.yarn.proto.ClientAMProtocol.GetCompInstancesResponseProto.getDefaultInstance()) return this;
        if (other.hasCompInstances()) {
          compInstances_ = other.compInstances_;
          bitField0_ |= 0x00000001;
          onChanged();
        }
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        return true;
      }

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              case 10: {
                compInstances_ = input.readBytes();
                bitField0_ |= 0x00000001;
                break;
              } // case 10
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
      private int bitField0_;

      private java.lang.Object compInstances_ = "";
      /**
       * <code>optional string compInstances = 1;</code>
       * @return Whether the compInstances field is set.
       */
      public boolean hasCompInstances() {
        return ((bitField0_ & 0x00000001) != 0);
      }
      /**
       * <code>optional string compInstances = 1;</code>
       * @return The compInstances.
       */
      public java.lang.String getCompInstances() {
        java.lang.Object ref = compInstances_;
        if (!(ref instanceof java.lang.String)) {
          org.apache.hadoop.thirdparty.protobuf.ByteString bs =
              (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
          java.lang.String s = bs.toStringUtf8();
          if (bs.isValidUtf8()) {
            compInstances_ = s;
          }
          return s;
        } else {
          return (java.lang.String) ref;
        }
      }
      /**
       * <code>optional string compInstances = 1;</code>
       * @return The bytes for compInstances.
       */
      public org.apache.hadoop.thirdparty.protobuf.ByteString
          getCompInstancesBytes() {
        java.lang.Object ref = compInstances_;
        if (ref instanceof String) {
          org.apache.hadoop.thirdparty.protobuf.ByteString b = 
              org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                  (java.lang.String) ref);
          compInstances_ = b;
          return b;
        } else {
          return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        }
      }
      /**
       * <code>optional string compInstances = 1;</code>
       * @param value The compInstances to set.
       * @return This builder for chaining.
       */
      public Builder setCompInstances(
          java.lang.String value) {
        if (value == null) { throw new NullPointerException(); }
        compInstances_ = value;
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * <code>optional string compInstances = 1;</code>
       * @return This builder for chaining.
       */
      public Builder clearCompInstances() {
        compInstances_ = getDefaultInstance().getCompInstances();
        bitField0_ = (bitField0_ & ~0x00000001);
        onChanged();
        return this;
      }
      /**
       * <code>optional string compInstances = 1;</code>
       * @param value The bytes for compInstances to set.
       * @return This builder for chaining.
       */
      public Builder setCompInstancesBytes(
          org.apache.hadoop.thirdparty.protobuf.ByteString value) {
        if (value == null) { throw new NullPointerException(); }
        compInstances_ = value;
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.yarn.GetCompInstancesResponseProto)
    }

    // @@protoc_insertion_point(class_scope:hadoop.yarn.GetCompInstancesResponseProto)
    private static final org.apache.hadoop.yarn.proto.ClientAMProtocol.GetCompInstancesResponseProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.yarn.proto.ClientAMProtocol.GetCompInstancesResponseProto();
    }

    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.GetCompInstancesResponseProto getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<GetCompInstancesResponseProto>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<GetCompInstancesResponseProto>() {
      @java.lang.Override
      public GetCompInstancesResponseProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<GetCompInstancesResponseProto> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<GetCompInstancesResponseProto> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.yarn.proto.ClientAMProtocol.GetCompInstancesResponseProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }

  public interface DecommissionCompInstancesRequestProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.yarn.DecommissionCompInstancesRequestProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

    /**
     * <code>repeated string compInstances = 1;</code>
     * @return A list containing the compInstances.
     */
    java.util.List<java.lang.String>
        getCompInstancesList();
    /**
     * <code>repeated string compInstances = 1;</code>
     * @return The count of compInstances.
     */
    int getCompInstancesCount();
    /**
     * <code>repeated string compInstances = 1;</code>
     * @param index The index of the element to return.
     * @return The compInstances at the given index.
     */
    java.lang.String getCompInstances(int index);
    /**
     * <code>repeated string compInstances = 1;</code>
     * @param index The index of the value to return.
     * @return The bytes of the compInstances at the given index.
     */
    org.apache.hadoop.thirdparty.protobuf.ByteString
        getCompInstancesBytes(int index);
  }
  /**
   * Protobuf type {@code hadoop.yarn.DecommissionCompInstancesRequestProto}
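   *
   * <p>Illustrative builder sketch; the instance names are assumed examples:
   * <pre>{@code
   * DecommissionCompInstancesRequestProto req =
   *     DecommissionCompInstancesRequestProto.newBuilder()
   *         .addCompInstances("worker-0") // assumed instance name
   *         .addCompInstances("worker-3") // assumed instance name
   *         .build();
   * }</pre>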
   */
  public static final class DecommissionCompInstancesRequestProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.yarn.DecommissionCompInstancesRequestProto)
      DecommissionCompInstancesRequestProtoOrBuilder {
    private static final long serialVersionUID = 0L;
    // Use DecommissionCompInstancesRequestProto.newBuilder() to construct.
    private DecommissionCompInstancesRequestProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private DecommissionCompInstancesRequestProto() {
      compInstances_ =
          org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList.emptyList();
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new DecommissionCompInstancesRequestProto();
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.yarn.proto.ClientAMProtocol.internal_static_hadoop_yarn_DecommissionCompInstancesRequestProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.yarn.proto.ClientAMProtocol.internal_static_hadoop_yarn_DecommissionCompInstancesRequestProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.yarn.proto.ClientAMProtocol.DecommissionCompInstancesRequestProto.class, org.apache.hadoop.yarn.proto.ClientAMProtocol.DecommissionCompInstancesRequestProto.Builder.class);
    }

    public static final int COMPINSTANCES_FIELD_NUMBER = 1;
    @SuppressWarnings("serial")
    private org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList compInstances_ =
        org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList.emptyList();
    /**
     * <code>repeated string compInstances = 1;</code>
     * @return A list containing the compInstances.
     */
    public org.apache.hadoop.thirdparty.protobuf.ProtocolStringList
        getCompInstancesList() {
      return compInstances_;
    }
    /**
     * <code>repeated string compInstances = 1;</code>
     * @return The count of compInstances.
     */
    public int getCompInstancesCount() {
      return compInstances_.size();
    }
    /**
     * <code>repeated string compInstances = 1;</code>
     * @param index The index of the element to return.
     * @return The compInstances at the given index.
     */
    public java.lang.String getCompInstances(int index) {
      return compInstances_.get(index);
    }
    /**
     * <code>repeated string compInstances = 1;</code>
     * @param index The index of the value to return.
     * @return The bytes of the compInstances at the given index.
     */
    public org.apache.hadoop.thirdparty.protobuf.ByteString
        getCompInstancesBytes(int index) {
      return compInstances_.getByteString(index);
    }

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      memoizedIsInitialized = 1;
      return true;
    }

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
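      // Each element is length-delimited and written with its own field-1 tag.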
      for (int i = 0; i < compInstances_.size(); i++) {
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 1, compInstances_.getRaw(i));
      }
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      {
        int dataSize = 0;
        for (int i = 0; i < compInstances_.size(); i++) {
          dataSize += computeStringSizeNoTag(compInstances_.getRaw(i));
        }
        size += dataSize;
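        // Plus one byte of tag overhead per element (field 1, wire type 2).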
        size += 1 * getCompInstancesList().size();
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
      if (!(obj instanceof org.apache.hadoop.yarn.proto.ClientAMProtocol.DecommissionCompInstancesRequestProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.yarn.proto.ClientAMProtocol.DecommissionCompInstancesRequestProto other = (org.apache.hadoop.yarn.proto.ClientAMProtocol.DecommissionCompInstancesRequestProto) obj;

      if (!getCompInstancesList()
          .equals(other.getCompInstancesList())) return false;
      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      if (getCompInstancesCount() > 0) {
        hash = (37 * hash) + COMPINSTANCES_FIELD_NUMBER;
        hash = (53 * hash) + getCompInstancesList().hashCode();
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.DecommissionCompInstancesRequestProto parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.DecommissionCompInstancesRequestProto parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.DecommissionCompInstancesRequestProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.DecommissionCompInstancesRequestProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.DecommissionCompInstancesRequestProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.DecommissionCompInstancesRequestProto parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.DecommissionCompInstancesRequestProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.DecommissionCompInstancesRequestProto parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.DecommissionCompInstancesRequestProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }

    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.DecommissionCompInstancesRequestProto parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.DecommissionCompInstancesRequestProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.DecommissionCompInstancesRequestProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.yarn.proto.ClientAMProtocol.DecommissionCompInstancesRequestProto prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * Protobuf type {@code hadoop.yarn.DecommissionCompInstancesRequestProto}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.yarn.DecommissionCompInstancesRequestProto)
        org.apache.hadoop.yarn.proto.ClientAMProtocol.DecommissionCompInstancesRequestProtoOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.yarn.proto.ClientAMProtocol.internal_static_hadoop_yarn_DecommissionCompInstancesRequestProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.yarn.proto.ClientAMProtocol.internal_static_hadoop_yarn_DecommissionCompInstancesRequestProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.yarn.proto.ClientAMProtocol.DecommissionCompInstancesRequestProto.class, org.apache.hadoop.yarn.proto.ClientAMProtocol.DecommissionCompInstancesRequestProto.Builder.class);
      }

      // Construct using org.apache.hadoop.yarn.proto.ClientAMProtocol.DecommissionCompInstancesRequestProto.newBuilder()
      private Builder() {

      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);

      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        bitField0_ = 0;
        compInstances_ =
            org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList.emptyList();
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.yarn.proto.ClientAMProtocol.internal_static_hadoop_yarn_DecommissionCompInstancesRequestProto_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.yarn.proto.ClientAMProtocol.DecommissionCompInstancesRequestProto getDefaultInstanceForType() {
        return org.apache.hadoop.yarn.proto.ClientAMProtocol.DecommissionCompInstancesRequestProto.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.yarn.proto.ClientAMProtocol.DecommissionCompInstancesRequestProto build() {
        org.apache.hadoop.yarn.proto.ClientAMProtocol.DecommissionCompInstancesRequestProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.yarn.proto.ClientAMProtocol.DecommissionCompInstancesRequestProto buildPartial() {
        org.apache.hadoop.yarn.proto.ClientAMProtocol.DecommissionCompInstancesRequestProto result = new org.apache.hadoop.yarn.proto.ClientAMProtocol.DecommissionCompInstancesRequestProto(this);
        if (bitField0_ != 0) { buildPartial0(result); }
        onBuilt();
        return result;
      }

      private void buildPartial0(org.apache.hadoop.yarn.proto.ClientAMProtocol.DecommissionCompInstancesRequestProto result) {
        int from_bitField0_ = bitField0_;
        if (((from_bitField0_ & 0x00000001) != 0)) {
          compInstances_.makeImmutable();
          result.compInstances_ = compInstances_;
        }
      }

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.yarn.proto.ClientAMProtocol.DecommissionCompInstancesRequestProto) {
          return mergeFrom((org.apache.hadoop.yarn.proto.ClientAMProtocol.DecommissionCompInstancesRequestProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.yarn.proto.ClientAMProtocol.DecommissionCompInstancesRequestProto other) {
        if (other == org.apache.hadoop.yarn.proto.ClientAMProtocol.DecommissionCompInstancesRequestProto.getDefaultInstance()) return this;
        if (!other.compInstances_.isEmpty()) {
          if (compInstances_.isEmpty()) {
            compInstances_ = other.compInstances_;
            bitField0_ |= 0x00000001;
          } else {
            ensureCompInstancesIsMutable();
            compInstances_.addAll(other.compInstances_);
          }
          onChanged();
        }
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        return true;
      }

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              case 10: {
                org.apache.hadoop.thirdparty.protobuf.ByteString bs = input.readBytes();
                ensureCompInstancesIsMutable();
                compInstances_.add(bs);
                break;
              } // case 10
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
      private int bitField0_;

      private org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList compInstances_ =
          org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList.emptyList();
      private void ensureCompInstancesIsMutable() {
        if (!compInstances_.isModifiable()) {
          compInstances_ = new org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList(compInstances_);
        }
        bitField0_ |= 0x00000001;
      }
      /**
       * <code>repeated string compInstances = 1;</code>
       * @return A list containing the compInstances.
       */
      public org.apache.hadoop.thirdparty.protobuf.ProtocolStringList
          getCompInstancesList() {
        compInstances_.makeImmutable();
        return compInstances_;
      }
      /**
       * <code>repeated string compInstances = 1;</code>
       * @return The count of compInstances.
       */
      public int getCompInstancesCount() {
        return compInstances_.size();
      }
      /**
       * <code>repeated string compInstances = 1;</code>
       * @param index The index of the element to return.
       * @return The compInstances at the given index.
       */
      public java.lang.String getCompInstances(int index) {
        return compInstances_.get(index);
      }
      /**
       * <code>repeated string compInstances = 1;</code>
       * @param index The index of the value to return.
       * @return The bytes of the compInstances at the given index.
       */
      public org.apache.hadoop.thirdparty.protobuf.ByteString
          getCompInstancesBytes(int index) {
        return compInstances_.getByteString(index);
      }
      /**
       * <code>repeated string compInstances = 1;</code>
       * @param index The index to set the value at.
       * @param value The compInstances to set.
       * @return This builder for chaining.
       */
      public Builder setCompInstances(
          int index, java.lang.String value) {
        if (value == null) { throw new NullPointerException(); }
        ensureCompInstancesIsMutable();
        compInstances_.set(index, value);
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * <code>repeated string compInstances = 1;</code>
       * @param value The compInstances to add.
       * @return This builder for chaining.
       */
      public Builder addCompInstances(
          java.lang.String value) {
        if (value == null) { throw new NullPointerException(); }
        ensureCompInstancesIsMutable();
        compInstances_.add(value);
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * <code>repeated string compInstances = 1;</code>
       * @param values The compInstances to add.
       * @return This builder for chaining.
       */
      public Builder addAllCompInstances(
          java.lang.Iterable<java.lang.String> values) {
        ensureCompInstancesIsMutable();
        org.apache.hadoop.thirdparty.protobuf.AbstractMessageLite.Builder.addAll(
            values, compInstances_);
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * <code>repeated string compInstances = 1;</code>
       * @return This builder for chaining.
       */
      public Builder clearCompInstances() {
        compInstances_ =
          org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList.emptyList();
        bitField0_ = (bitField0_ & ~0x00000001);
        onChanged();
        return this;
      }
      /**
       * <code>repeated string compInstances = 1;</code>
       * @param value The bytes of the compInstances to add.
       * @return This builder for chaining.
       */
      public Builder addCompInstancesBytes(
          org.apache.hadoop.thirdparty.protobuf.ByteString value) {
        if (value == null) { throw new NullPointerException(); }
        ensureCompInstancesIsMutable();
        compInstances_.add(value);
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.yarn.DecommissionCompInstancesRequestProto)
    }

    // @@protoc_insertion_point(class_scope:hadoop.yarn.DecommissionCompInstancesRequestProto)
    private static final org.apache.hadoop.yarn.proto.ClientAMProtocol.DecommissionCompInstancesRequestProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.yarn.proto.ClientAMProtocol.DecommissionCompInstancesRequestProto();
    }

    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.DecommissionCompInstancesRequestProto getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<DecommissionCompInstancesRequestProto>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<DecommissionCompInstancesRequestProto>() {
      @java.lang.Override
      public DecommissionCompInstancesRequestProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<DecommissionCompInstancesRequestProto> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<DecommissionCompInstancesRequestProto> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.yarn.proto.ClientAMProtocol.DecommissionCompInstancesRequestProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }
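
  /*
   * Usage sketch (hand-written illustration, not protoc output): building a
   * DecommissionCompInstancesRequestProto and round-tripping it through the
   * wire format. The instance names below ("worker-0", "worker-1") are
   * hypothetical.
   *
   *   DecommissionCompInstancesRequestProto req =
   *       DecommissionCompInstancesRequestProto.newBuilder()
   *           .addCompInstances("worker-0")
   *           .addCompInstances("worker-1")
   *           .build();
   *   byte[] bytes = req.toByteArray();
   *   DecommissionCompInstancesRequestProto parsed =
   *       DecommissionCompInstancesRequestProto.parseFrom(bytes);
   *   // parsed.getCompInstancesCount() == 2
   */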

  public interface DecommissionCompInstancesResponseProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.yarn.DecommissionCompInstancesResponseProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {
  }
  /**
   * Protobuf type {@code hadoop.yarn.DecommissionCompInstancesResponseProto}
   */
  public static final class DecommissionCompInstancesResponseProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.yarn.DecommissionCompInstancesResponseProto)
      DecommissionCompInstancesResponseProtoOrBuilder {
  private static final long serialVersionUID = 0L;
    // Use DecommissionCompInstancesResponseProto.newBuilder() to construct.
    private DecommissionCompInstancesResponseProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private DecommissionCompInstancesResponseProto() {
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new DecommissionCompInstancesResponseProto();
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.yarn.proto.ClientAMProtocol.internal_static_hadoop_yarn_DecommissionCompInstancesResponseProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.yarn.proto.ClientAMProtocol.internal_static_hadoop_yarn_DecommissionCompInstancesResponseProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.yarn.proto.ClientAMProtocol.DecommissionCompInstancesResponseProto.class, org.apache.hadoop.yarn.proto.ClientAMProtocol.DecommissionCompInstancesResponseProto.Builder.class);
    }

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      memoizedIsInitialized = 1;
      return true;
    }

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
      if (!(obj instanceof org.apache.hadoop.yarn.proto.ClientAMProtocol.DecommissionCompInstancesResponseProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.yarn.proto.ClientAMProtocol.DecommissionCompInstancesResponseProto other = (org.apache.hadoop.yarn.proto.ClientAMProtocol.DecommissionCompInstancesResponseProto) obj;

      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.DecommissionCompInstancesResponseProto parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.DecommissionCompInstancesResponseProto parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.DecommissionCompInstancesResponseProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.DecommissionCompInstancesResponseProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.DecommissionCompInstancesResponseProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.DecommissionCompInstancesResponseProto parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.DecommissionCompInstancesResponseProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.DecommissionCompInstancesResponseProto parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.DecommissionCompInstancesResponseProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }

    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.DecommissionCompInstancesResponseProto parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.DecommissionCompInstancesResponseProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.DecommissionCompInstancesResponseProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.yarn.proto.ClientAMProtocol.DecommissionCompInstancesResponseProto prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * Protobuf type {@code hadoop.yarn.DecommissionCompInstancesResponseProto}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.yarn.DecommissionCompInstancesResponseProto)
        org.apache.hadoop.yarn.proto.ClientAMProtocol.DecommissionCompInstancesResponseProtoOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.yarn.proto.ClientAMProtocol.internal_static_hadoop_yarn_DecommissionCompInstancesResponseProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.yarn.proto.ClientAMProtocol.internal_static_hadoop_yarn_DecommissionCompInstancesResponseProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.yarn.proto.ClientAMProtocol.DecommissionCompInstancesResponseProto.class, org.apache.hadoop.yarn.proto.ClientAMProtocol.DecommissionCompInstancesResponseProto.Builder.class);
      }

      // Construct using org.apache.hadoop.yarn.proto.ClientAMProtocol.DecommissionCompInstancesResponseProto.newBuilder()
      private Builder() {

      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);

      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.yarn.proto.ClientAMProtocol.internal_static_hadoop_yarn_DecommissionCompInstancesResponseProto_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.yarn.proto.ClientAMProtocol.DecommissionCompInstancesResponseProto getDefaultInstanceForType() {
        return org.apache.hadoop.yarn.proto.ClientAMProtocol.DecommissionCompInstancesResponseProto.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.yarn.proto.ClientAMProtocol.DecommissionCompInstancesResponseProto build() {
        org.apache.hadoop.yarn.proto.ClientAMProtocol.DecommissionCompInstancesResponseProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.yarn.proto.ClientAMProtocol.DecommissionCompInstancesResponseProto buildPartial() {
        org.apache.hadoop.yarn.proto.ClientAMProtocol.DecommissionCompInstancesResponseProto result = new org.apache.hadoop.yarn.proto.ClientAMProtocol.DecommissionCompInstancesResponseProto(this);
        onBuilt();
        return result;
      }

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.yarn.proto.ClientAMProtocol.DecommissionCompInstancesResponseProto) {
          return mergeFrom((org.apache.hadoop.yarn.proto.ClientAMProtocol.DecommissionCompInstancesResponseProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.yarn.proto.ClientAMProtocol.DecommissionCompInstancesResponseProto other) {
        if (other == org.apache.hadoop.yarn.proto.ClientAMProtocol.DecommissionCompInstancesResponseProto.getDefaultInstance()) return this;
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        return true;
      }

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.yarn.DecommissionCompInstancesResponseProto)
    }

    // @@protoc_insertion_point(class_scope:hadoop.yarn.DecommissionCompInstancesResponseProto)
    private static final org.apache.hadoop.yarn.proto.ClientAMProtocol.DecommissionCompInstancesResponseProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.yarn.proto.ClientAMProtocol.DecommissionCompInstancesResponseProto();
    }

    public static org.apache.hadoop.yarn.proto.ClientAMProtocol.DecommissionCompInstancesResponseProto getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<DecommissionCompInstancesResponseProto>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<DecommissionCompInstancesResponseProto>() {
      @java.lang.Override
      public DecommissionCompInstancesResponseProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<DecommissionCompInstancesResponseProto> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<DecommissionCompInstancesResponseProto> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.yarn.proto.ClientAMProtocol.DecommissionCompInstancesResponseProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }
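
  // DecommissionCompInstancesResponseProto declares no fields; a freshly
  // built instance serializes to zero bytes, e.g.
  //   DecommissionCompInstancesResponseProto.getDefaultInstance()
  //       .getSerializedSize() == 0
  // Any bytes that do appear on the wire land in the unknown-field set.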

  /**
   * Protobuf service {@code hadoop.yarn.ClientAMProtocolService}
   */
  public static abstract class ClientAMProtocolService
      implements org.apache.hadoop.thirdparty.protobuf.Service {
    protected ClientAMProtocolService() {}

    public interface Interface {
      /**
       * <code>rpc flexComponents(.hadoop.yarn.FlexComponentsRequestProto) returns (.hadoop.yarn.FlexComponentsResponseProto);</code>
       */
      public abstract void flexComponents(
          org.apache.hadoop.thirdparty.protobuf.RpcController controller,
          org.apache.hadoop.yarn.proto.ClientAMProtocol.FlexComponentsRequestProto request,
          org.apache.hadoop.thirdparty.protobuf.RpcCallback<org.apache.hadoop.yarn.proto.ClientAMProtocol.FlexComponentsResponseProto> done);

      /**
       * <code>rpc getStatus(.hadoop.yarn.GetStatusRequestProto) returns (.hadoop.yarn.GetStatusResponseProto);</code>
       */
      public abstract void getStatus(
          org.apache.hadoop.thirdparty.protobuf.RpcController controller,
          org.apache.hadoop.yarn.proto.ClientAMProtocol.GetStatusRequestProto request,
          org.apache.hadoop.thirdparty.protobuf.RpcCallback<org.apache.hadoop.yarn.proto.ClientAMProtocol.GetStatusResponseProto> done);

      /**
       * <code>rpc stop(.hadoop.yarn.StopRequestProto) returns (.hadoop.yarn.StopResponseProto);</code>
       */
      public abstract void stop(
          org.apache.hadoop.thirdparty.protobuf.RpcController controller,
          org.apache.hadoop.yarn.proto.ClientAMProtocol.StopRequestProto request,
          org.apache.hadoop.thirdparty.protobuf.RpcCallback<org.apache.hadoop.yarn.proto.ClientAMProtocol.StopResponseProto> done);

      /**
       * <code>rpc upgradeService(.hadoop.yarn.UpgradeServiceRequestProto) returns (.hadoop.yarn.UpgradeServiceResponseProto);</code>
       */
      public abstract void upgradeService(
          org.apache.hadoop.thirdparty.protobuf.RpcController controller,
          org.apache.hadoop.yarn.proto.ClientAMProtocol.UpgradeServiceRequestProto request,
          org.apache.hadoop.thirdparty.protobuf.RpcCallback<org.apache.hadoop.yarn.proto.ClientAMProtocol.UpgradeServiceResponseProto> done);

      /**
       * <code>rpc cancelUpgrade(.hadoop.yarn.CancelUpgradeRequestProto) returns (.hadoop.yarn.CancelUpgradeResponseProto);</code>
       */
      public abstract void cancelUpgrade(
          org.apache.hadoop.thirdparty.protobuf.RpcController controller,
          org.apache.hadoop.yarn.proto.ClientAMProtocol.CancelUpgradeRequestProto request,
          org.apache.hadoop.thirdparty.protobuf.RpcCallback<org.apache.hadoop.yarn.proto.ClientAMProtocol.CancelUpgradeResponseProto> done);

      /**
       * <code>rpc restartService(.hadoop.yarn.RestartServiceRequestProto) returns (.hadoop.yarn.RestartServiceResponseProto);</code>
       */
      public abstract void restartService(
          org.apache.hadoop.thirdparty.protobuf.RpcController controller,
          org.apache.hadoop.yarn.proto.ClientAMProtocol.RestartServiceRequestProto request,
          org.apache.hadoop.thirdparty.protobuf.RpcCallback<org.apache.hadoop.yarn.proto.ClientAMProtocol.RestartServiceResponseProto> done);

      /**
       * <code>rpc upgrade(.hadoop.yarn.CompInstancesUpgradeRequestProto) returns (.hadoop.yarn.CompInstancesUpgradeResponseProto);</code>
       */
      public abstract void upgrade(
          org.apache.hadoop.thirdparty.protobuf.RpcController controller,
          org.apache.hadoop.yarn.proto.ClientAMProtocol.CompInstancesUpgradeRequestProto request,
          org.apache.hadoop.thirdparty.protobuf.RpcCallback<org.apache.hadoop.yarn.proto.ClientAMProtocol.CompInstancesUpgradeResponseProto> done);

      /**
       * <code>rpc getCompInstances(.hadoop.yarn.GetCompInstancesRequestProto) returns (.hadoop.yarn.GetCompInstancesResponseProto);</code>
       */
      public abstract void getCompInstances(
          org.apache.hadoop.thirdparty.protobuf.RpcController controller,
          org.apache.hadoop.yarn.proto.ClientAMProtocol.GetCompInstancesRequestProto request,
          org.apache.hadoop.thirdparty.protobuf.RpcCallback<org.apache.hadoop.yarn.proto.ClientAMProtocol.GetCompInstancesResponseProto> done);

      /**
       * <code>rpc decommissionCompInstances(.hadoop.yarn.DecommissionCompInstancesRequestProto) returns (.hadoop.yarn.DecommissionCompInstancesResponseProto);</code>
       */
      public abstract void decommissionCompInstances(
          org.apache.hadoop.thirdparty.protobuf.RpcController controller,
          org.apache.hadoop.yarn.proto.ClientAMProtocol.DecommissionCompInstancesRequestProto request,
          org.apache.hadoop.thirdparty.protobuf.RpcCallback<org.apache.hadoop.yarn.proto.ClientAMProtocol.DecommissionCompInstancesResponseProto> done);

    }

    public static org.apache.hadoop.thirdparty.protobuf.Service newReflectiveService(
        final Interface impl) {
      return new ClientAMProtocolService() {
        @java.lang.Override
        public  void flexComponents(
            org.apache.hadoop.thirdparty.protobuf.RpcController controller,
            org.apache.hadoop.yarn.proto.ClientAMProtocol.FlexComponentsRequestProto request,
            org.apache.hadoop.thirdparty.protobuf.RpcCallback<org.apache.hadoop.yarn.proto.ClientAMProtocol.FlexComponentsResponseProto> done) {
          impl.flexComponents(controller, request, done);
        }

        @java.lang.Override
        public  void getStatus(
            org.apache.hadoop.thirdparty.protobuf.RpcController controller,
            org.apache.hadoop.yarn.proto.ClientAMProtocol.GetStatusRequestProto request,
            org.apache.hadoop.thirdparty.protobuf.RpcCallback<org.apache.hadoop.yarn.proto.ClientAMProtocol.GetStatusResponseProto> done) {
          impl.getStatus(controller, request, done);
        }

        @java.lang.Override
        public  void stop(
            org.apache.hadoop.thirdparty.protobuf.RpcController controller,
            org.apache.hadoop.yarn.proto.ClientAMProtocol.StopRequestProto request,
            org.apache.hadoop.thirdparty.protobuf.RpcCallback<org.apache.hadoop.yarn.proto.ClientAMProtocol.StopResponseProto> done) {
          impl.stop(controller, request, done);
        }

        @java.lang.Override
        public  void upgradeService(
            org.apache.hadoop.thirdparty.protobuf.RpcController controller,
            org.apache.hadoop.yarn.proto.ClientAMProtocol.UpgradeServiceRequestProto request,
            org.apache.hadoop.thirdparty.protobuf.RpcCallback<org.apache.hadoop.yarn.proto.ClientAMProtocol.UpgradeServiceResponseProto> done) {
          impl.upgradeService(controller, request, done);
        }

        @java.lang.Override
        public  void cancelUpgrade(
            org.apache.hadoop.thirdparty.protobuf.RpcController controller,
            org.apache.hadoop.yarn.proto.ClientAMProtocol.CancelUpgradeRequestProto request,
            org.apache.hadoop.thirdparty.protobuf.RpcCallback<org.apache.hadoop.yarn.proto.ClientAMProtocol.CancelUpgradeResponseProto> done) {
          impl.cancelUpgrade(controller, request, done);
        }

        @java.lang.Override
        public  void restartService(
            org.apache.hadoop.thirdparty.protobuf.RpcController controller,
            org.apache.hadoop.yarn.proto.ClientAMProtocol.RestartServiceRequestProto request,
            org.apache.hadoop.thirdparty.protobuf.RpcCallback<org.apache.hadoop.yarn.proto.ClientAMProtocol.RestartServiceResponseProto> done) {
          impl.restartService(controller, request, done);
        }

        @java.lang.Override
        public  void upgrade(
            org.apache.hadoop.thirdparty.protobuf.RpcController controller,
            org.apache.hadoop.yarn.proto.ClientAMProtocol.CompInstancesUpgradeRequestProto request,
            org.apache.hadoop.thirdparty.protobuf.RpcCallback<org.apache.hadoop.yarn.proto.ClientAMProtocol.CompInstancesUpgradeResponseProto> done) {
          impl.upgrade(controller, request, done);
        }

        @java.lang.Override
        public  void getCompInstances(
            org.apache.hadoop.thirdparty.protobuf.RpcController controller,
            org.apache.hadoop.yarn.proto.ClientAMProtocol.GetCompInstancesRequestProto request,
            org.apache.hadoop.thirdparty.protobuf.RpcCallback<org.apache.hadoop.yarn.proto.ClientAMProtocol.GetCompInstancesResponseProto> done) {
          impl.getCompInstances(controller, request, done);
        }

        @java.lang.Override
        public  void decommissionCompInstances(
            org.apache.hadoop.thirdparty.protobuf.RpcController controller,
            org.apache.hadoop.yarn.proto.ClientAMProtocol.DecommissionCompInstancesRequestProto request,
            org.apache.hadoop.thirdparty.protobuf.RpcCallback<org.apache.hadoop.yarn.proto.ClientAMProtocol.DecommissionCompInstancesResponseProto> done) {
          impl.decommissionCompInstances(controller, request, done);
        }

      };
    }
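
    /*
     * Usage sketch (hand-written, hypothetical impl): exposing an Interface
     * implementation as a generic protobuf Service. Only one of the nine RPC
     * methods is shown; the others follow the same shape.
     *
     *   ClientAMProtocolService.Interface impl =
     *       new ClientAMProtocolService.Interface() {
     *         public void flexComponents(
     *             org.apache.hadoop.thirdparty.protobuf.RpcController controller,
     *             FlexComponentsRequestProto request,
     *             org.apache.hadoop.thirdparty.protobuf.RpcCallback<FlexComponentsResponseProto> done) {
     *           done.run(FlexComponentsResponseProto.getDefaultInstance());
     *         }
     *         // ... remaining eight methods elided ...
     *       };
     *   org.apache.hadoop.thirdparty.protobuf.Service service =
     *       ClientAMProtocolService.newReflectiveService(impl);
     */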

    public static org.apache.hadoop.thirdparty.protobuf.BlockingService
        newReflectiveBlockingService(final BlockingInterface impl) {
      return new org.apache.hadoop.thirdparty.protobuf.BlockingService() {
        public final org.apache.hadoop.thirdparty.protobuf.Descriptors.ServiceDescriptor
            getDescriptorForType() {
          return getDescriptor();
        }

        public final org.apache.hadoop.thirdparty.protobuf.Message callBlockingMethod(
            org.apache.hadoop.thirdparty.protobuf.Descriptors.MethodDescriptor method,
            org.apache.hadoop.thirdparty.protobuf.RpcController controller,
            org.apache.hadoop.thirdparty.protobuf.Message request)
            throws org.apache.hadoop.thirdparty.protobuf.ServiceException {
          if (method.getService() != getDescriptor()) {
            throw new java.lang.IllegalArgumentException(
              "Service.callBlockingMethod() given method descriptor for " +
              "wrong service type.");
          }
          switch(method.getIndex()) {
            case 0:
              return impl.flexComponents(controller, (org.apache.hadoop.yarn.proto.ClientAMProtocol.FlexComponentsRequestProto)request);
            case 1:
              return impl.getStatus(controller, (org.apache.hadoop.yarn.proto.ClientAMProtocol.GetStatusRequestProto)request);
            case 2:
              return impl.stop(controller, (org.apache.hadoop.yarn.proto.ClientAMProtocol.StopRequestProto)request);
            case 3:
              return impl.upgradeService(controller, (org.apache.hadoop.yarn.proto.ClientAMProtocol.UpgradeServiceRequestProto)request);
            case 4:
              return impl.cancelUpgrade(controller, (org.apache.hadoop.yarn.proto.ClientAMProtocol.CancelUpgradeRequestProto)request);
            case 5:
              return impl.restartService(controller, (org.apache.hadoop.yarn.proto.ClientAMProtocol.RestartServiceRequestProto)request);
            case 6:
              return impl.upgrade(controller, (org.apache.hadoop.yarn.proto.ClientAMProtocol.CompInstancesUpgradeRequestProto)request);
            case 7:
              return impl.getCompInstances(controller, (org.apache.hadoop.yarn.proto.ClientAMProtocol.GetCompInstancesRequestProto)request);
            case 8:
              return impl.decommissionCompInstances(controller, (org.apache.hadoop.yarn.proto.ClientAMProtocol.DecommissionCompInstancesRequestProto)request);
            default:
              throw new java.lang.AssertionError("Can't get here.");
          }
        }

        public final org.apache.hadoop.thirdparty.protobuf.Message
            getRequestPrototype(
            org.apache.hadoop.thirdparty.protobuf.Descriptors.MethodDescriptor method) {
          if (method.getService() != getDescriptor()) {
            throw new java.lang.IllegalArgumentException(
              "Service.getRequestPrototype() given method " +
              "descriptor for wrong service type.");
          }
          switch(method.getIndex()) {
            case 0:
              return org.apache.hadoop.yarn.proto.ClientAMProtocol.FlexComponentsRequestProto.getDefaultInstance();
            case 1:
              return org.apache.hadoop.yarn.proto.ClientAMProtocol.GetStatusRequestProto.getDefaultInstance();
            case 2:
              return org.apache.hadoop.yarn.proto.ClientAMProtocol.StopRequestProto.getDefaultInstance();
            case 3:
              return org.apache.hadoop.yarn.proto.ClientAMProtocol.UpgradeServiceRequestProto.getDefaultInstance();
            case 4:
              return org.apache.hadoop.yarn.proto.ClientAMProtocol.CancelUpgradeRequestProto.getDefaultInstance();
            case 5:
              return org.apache.hadoop.yarn.proto.ClientAMProtocol.RestartServiceRequestProto.getDefaultInstance();
            case 6:
              return org.apache.hadoop.yarn.proto.ClientAMProtocol.CompInstancesUpgradeRequestProto.getDefaultInstance();
            case 7:
              return org.apache.hadoop.yarn.proto.ClientAMProtocol.GetCompInstancesRequestProto.getDefaultInstance();
            case 8:
              return org.apache.hadoop.yarn.proto.ClientAMProtocol.DecommissionCompInstancesRequestProto.getDefaultInstance();
            default:
              throw new java.lang.AssertionError("Can't get here.");
          }
        }

        public final org.apache.hadoop.thirdparty.protobuf.Message
            getResponsePrototype(
            org.apache.hadoop.thirdparty.protobuf.Descriptors.MethodDescriptor method) {
          if (method.getService() != getDescriptor()) {
            throw new java.lang.IllegalArgumentException(
              "Service.getResponsePrototype() given method " +
              "descriptor for wrong service type.");
          }
          switch(method.getIndex()) {
            case 0:
              return org.apache.hadoop.yarn.proto.ClientAMProtocol.FlexComponentsResponseProto.getDefaultInstance();
            case 1:
              return org.apache.hadoop.yarn.proto.ClientAMProtocol.GetStatusResponseProto.getDefaultInstance();
            case 2:
              return org.apache.hadoop.yarn.proto.ClientAMProtocol.StopResponseProto.getDefaultInstance();
            case 3:
              return org.apache.hadoop.yarn.proto.ClientAMProtocol.UpgradeServiceResponseProto.getDefaultInstance();
            case 4:
              return org.apache.hadoop.yarn.proto.ClientAMProtocol.CancelUpgradeResponseProto.getDefaultInstance();
            case 5:
              return org.apache.hadoop.yarn.proto.ClientAMProtocol.RestartServiceResponseProto.getDefaultInstance();
            case 6:
              return org.apache.hadoop.yarn.proto.ClientAMProtocol.CompInstancesUpgradeResponseProto.getDefaultInstance();
            case 7:
              return org.apache.hadoop.yarn.proto.ClientAMProtocol.GetCompInstancesResponseProto.getDefaultInstance();
            case 8:
              return org.apache.hadoop.yarn.proto.ClientAMProtocol.DecommissionCompInstancesResponseProto.getDefaultInstance();
            default:
              throw new java.lang.AssertionError("Can't get here.");
          }
        }

      };
    }
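
    /*
     * Usage sketch (hand-written): the blocking variant is what a server
     * typically registers with an RPC engine. Given a BlockingInterface
     * implementation (hypothetical name blockingImpl):
     *
     *   org.apache.hadoop.thirdparty.protobuf.BlockingService svc =
     *       ClientAMProtocolService.newReflectiveBlockingService(blockingImpl);
     *   // svc.callBlockingMethod(method, controller, request) then
     *   // dispatches on method.getIndex(), as in the switch above.
     */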

    /**
     * <code>rpc flexComponents(.hadoop.yarn.FlexComponentsRequestProto) returns (.hadoop.yarn.FlexComponentsResponseProto);</code>
     */
    public abstract void flexComponents(
        org.apache.hadoop.thirdparty.protobuf.RpcController controller,
        org.apache.hadoop.yarn.proto.ClientAMProtocol.FlexComponentsRequestProto request,
        org.apache.hadoop.thirdparty.protobuf.RpcCallback<org.apache.hadoop.yarn.proto.ClientAMProtocol.FlexComponentsResponseProto> done);

    /**
     * <code>rpc getStatus(.hadoop.yarn.GetStatusRequestProto) returns (.hadoop.yarn.GetStatusResponseProto);</code>
     */
    public abstract void getStatus(
        org.apache.hadoop.thirdparty.protobuf.RpcController controller,
        org.apache.hadoop.yarn.proto.ClientAMProtocol.GetStatusRequestProto request,
        org.apache.hadoop.thirdparty.protobuf.RpcCallback<org.apache.hadoop.yarn.proto.ClientAMProtocol.GetStatusResponseProto> done);

    /**
     * <code>rpc stop(.hadoop.yarn.StopRequestProto) returns (.hadoop.yarn.StopResponseProto);</code>
     */
    public abstract void stop(
        org.apache.hadoop.thirdparty.protobuf.RpcController controller,
        org.apache.hadoop.yarn.proto.ClientAMProtocol.StopRequestProto request,
        org.apache.hadoop.thirdparty.protobuf.RpcCallback<org.apache.hadoop.yarn.proto.ClientAMProtocol.StopResponseProto> done);

    /**
     * <code>rpc upgradeService(.hadoop.yarn.UpgradeServiceRequestProto) returns (.hadoop.yarn.UpgradeServiceResponseProto);</code>
     */
    public abstract void upgradeService(
        org.apache.hadoop.thirdparty.protobuf.RpcController controller,
        org.apache.hadoop.yarn.proto.ClientAMProtocol.UpgradeServiceRequestProto request,
        org.apache.hadoop.thirdparty.protobuf.RpcCallback<org.apache.hadoop.yarn.proto.ClientAMProtocol.UpgradeServiceResponseProto> done);

    /**
     * <code>rpc cancelUpgrade(.hadoop.yarn.CancelUpgradeRequestProto) returns (.hadoop.yarn.CancelUpgradeResponseProto);</code>
     */
    public abstract void cancelUpgrade(
        org.apache.hadoop.thirdparty.protobuf.RpcController controller,
        org.apache.hadoop.yarn.proto.ClientAMProtocol.CancelUpgradeRequestProto request,
        org.apache.hadoop.thirdparty.protobuf.RpcCallback<org.apache.hadoop.yarn.proto.ClientAMProtocol.CancelUpgradeResponseProto> done);

    /**
     * <code>rpc restartService(.hadoop.yarn.RestartServiceRequestProto) returns (.hadoop.yarn.RestartServiceResponseProto);</code>
     */
    public abstract void restartService(
        org.apache.hadoop.thirdparty.protobuf.RpcController controller,
        org.apache.hadoop.yarn.proto.ClientAMProtocol.RestartServiceRequestProto request,
        org.apache.hadoop.thirdparty.protobuf.RpcCallback<org.apache.hadoop.yarn.proto.ClientAMProtocol.RestartServiceResponseProto> done);

    /**
     * <code>rpc upgrade(.hadoop.yarn.CompInstancesUpgradeRequestProto) returns (.hadoop.yarn.CompInstancesUpgradeResponseProto);</code>
     */
    public abstract void upgrade(
        org.apache.hadoop.thirdparty.protobuf.RpcController controller,
        org.apache.hadoop.yarn.proto.ClientAMProtocol.CompInstancesUpgradeRequestProto request,
        org.apache.hadoop.thirdparty.protobuf.RpcCallback<org.apache.hadoop.yarn.proto.ClientAMProtocol.CompInstancesUpgradeResponseProto> done);

    /**
     * <code>rpc getCompInstances(.hadoop.yarn.GetCompInstancesRequestProto) returns (.hadoop.yarn.GetCompInstancesResponseProto);</code>
     */
    public abstract void getCompInstances(
        org.apache.hadoop.thirdparty.protobuf.RpcController controller,
        org.apache.hadoop.yarn.proto.ClientAMProtocol.GetCompInstancesRequestProto request,
        org.apache.hadoop.thirdparty.protobuf.RpcCallback<org.apache.hadoop.yarn.proto.ClientAMProtocol.GetCompInstancesResponseProto> done);

    /**
     * <code>rpc decommissionCompInstances(.hadoop.yarn.DecommissionCompInstancesRequestProto) returns (.hadoop.yarn.DecommissionCompInstancesResponseProto);</code>
     */
    public abstract void decommissionCompInstances(
        org.apache.hadoop.thirdparty.protobuf.RpcController controller,
        org.apache.hadoop.yarn.proto.ClientAMProtocol.DecommissionCompInstancesRequestProto request,
        org.apache.hadoop.thirdparty.protobuf.RpcCallback<org.apache.hadoop.yarn.proto.ClientAMProtocol.DecommissionCompInstancesResponseProto> done);

    public static final
        org.apache.hadoop.thirdparty.protobuf.Descriptors.ServiceDescriptor
        getDescriptor() {
      return org.apache.hadoop.yarn.proto.ClientAMProtocol.getDescriptor().getServices().get(0);
    }
    public final org.apache.hadoop.thirdparty.protobuf.Descriptors.ServiceDescriptor
        getDescriptorForType() {
      return getDescriptor();
    }

    public final void callMethod(
        org.apache.hadoop.thirdparty.protobuf.Descriptors.MethodDescriptor method,
        org.apache.hadoop.thirdparty.protobuf.RpcController controller,
        org.apache.hadoop.thirdparty.protobuf.Message request,
        org.apache.hadoop.thirdparty.protobuf.RpcCallback<
          org.apache.hadoop.thirdparty.protobuf.Message> done) {
      if (method.getService() != getDescriptor()) {
        throw new java.lang.IllegalArgumentException(
          "Service.callMethod() given method descriptor for wrong " +
          "service type.");
      }
      switch(method.getIndex()) {
        case 0:
          this.flexComponents(controller, (org.apache.hadoop.yarn.proto.ClientAMProtocol.FlexComponentsRequestProto)request,
            org.apache.hadoop.thirdparty.protobuf.RpcUtil.<org.apache.hadoop.yarn.proto.ClientAMProtocol.FlexComponentsResponseProto>specializeCallback(
              done));
          return;
        case 1:
          this.getStatus(controller, (org.apache.hadoop.yarn.proto.ClientAMProtocol.GetStatusRequestProto)request,
            org.apache.hadoop.thirdparty.protobuf.RpcUtil.<org.apache.hadoop.yarn.proto.ClientAMProtocol.GetStatusResponseProto>specializeCallback(
              done));
          return;
        case 2:
          this.stop(controller, (org.apache.hadoop.yarn.proto.ClientAMProtocol.StopRequestProto)request,
            org.apache.hadoop.thirdparty.protobuf.RpcUtil.<org.apache.hadoop.yarn.proto.ClientAMProtocol.StopResponseProto>specializeCallback(
              done));
          return;
        case 3:
          this.upgradeService(controller, (org.apache.hadoop.yarn.proto.ClientAMProtocol.UpgradeServiceRequestProto)request,
            org.apache.hadoop.thirdparty.protobuf.RpcUtil.<org.apache.hadoop.yarn.proto.ClientAMProtocol.UpgradeServiceResponseProto>specializeCallback(
              done));
          return;
        case 4:
          this.cancelUpgrade(controller, (org.apache.hadoop.yarn.proto.ClientAMProtocol.CancelUpgradeRequestProto)request,
            org.apache.hadoop.thirdparty.protobuf.RpcUtil.<org.apache.hadoop.yarn.proto.ClientAMProtocol.CancelUpgradeResponseProto>specializeCallback(
              done));
          return;
        case 5:
          this.restartService(controller, (org.apache.hadoop.yarn.proto.ClientAMProtocol.RestartServiceRequestProto)request,
            org.apache.hadoop.thirdparty.protobuf.RpcUtil.<org.apache.hadoop.yarn.proto.ClientAMProtocol.RestartServiceResponseProto>specializeCallback(
              done));
          return;
        case 6:
          this.upgrade(controller, (org.apache.hadoop.yarn.proto.ClientAMProtocol.CompInstancesUpgradeRequestProto)request,
            org.apache.hadoop.thirdparty.protobuf.RpcUtil.<org.apache.hadoop.yarn.proto.ClientAMProtocol.CompInstancesUpgradeResponseProto>specializeCallback(
              done));
          return;
        case 7:
          this.getCompInstances(controller, (org.apache.hadoop.yarn.proto.ClientAMProtocol.GetCompInstancesRequestProto)request,
            org.apache.hadoop.thirdparty.protobuf.RpcUtil.<org.apache.hadoop.yarn.proto.ClientAMProtocol.GetCompInstancesResponseProto>specializeCallback(
              done));
          return;
        case 8:
          this.decommissionCompInstances(controller, (org.apache.hadoop.yarn.proto.ClientAMProtocol.DecommissionCompInstancesRequestProto)request,
            org.apache.hadoop.thirdparty.protobuf.RpcUtil.<org.apache.hadoop.yarn.proto.ClientAMProtocol.DecommissionCompInstancesResponseProto>specializeCallback(
              done));
          return;
        default:
          throw new java.lang.AssertionError("Can't get here.");
      }
    }
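
    // Dispatch note: method.getIndex() values 0..8 follow the rpc declaration
    // order in ClientAMProtocol.proto (flexComponents through
    // decommissionCompInstances); the same ordering is used by
    // getRequestPrototype and getResponsePrototype below.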

    public final org.apache.hadoop.thirdparty.protobuf.Message
        getRequestPrototype(
        org.apache.hadoop.thirdparty.protobuf.Descriptors.MethodDescriptor method) {
      if (method.getService() != getDescriptor()) {
        throw new java.lang.IllegalArgumentException(
          "Service.getRequestPrototype() given method " +
          "descriptor for wrong service type.");
      }
      switch(method.getIndex()) {
        case 0:
          return org.apache.hadoop.yarn.proto.ClientAMProtocol.FlexComponentsRequestProto.getDefaultInstance();
        case 1:
          return org.apache.hadoop.yarn.proto.ClientAMProtocol.GetStatusRequestProto.getDefaultInstance();
        case 2:
          return org.apache.hadoop.yarn.proto.ClientAMProtocol.StopRequestProto.getDefaultInstance();
        case 3:
          return org.apache.hadoop.yarn.proto.ClientAMProtocol.UpgradeServiceRequestProto.getDefaultInstance();
        case 4:
          return org.apache.hadoop.yarn.proto.ClientAMProtocol.CancelUpgradeRequestProto.getDefaultInstance();
        case 5:
          return org.apache.hadoop.yarn.proto.ClientAMProtocol.RestartServiceRequestProto.getDefaultInstance();
        case 6:
          return org.apache.hadoop.yarn.proto.ClientAMProtocol.CompInstancesUpgradeRequestProto.getDefaultInstance();
        case 7:
          return org.apache.hadoop.yarn.proto.ClientAMProtocol.GetCompInstancesRequestProto.getDefaultInstance();
        case 8:
          return org.apache.hadoop.yarn.proto.ClientAMProtocol.DecommissionCompInstancesRequestProto.getDefaultInstance();
        default:
          throw new java.lang.AssertionError("Can't get here.");
      }
    }

    public final org.apache.hadoop.thirdparty.protobuf.Message
        getResponsePrototype(
        org.apache.hadoop.thirdparty.protobuf.Descriptors.MethodDescriptor method) {
      if (method.getService() != getDescriptor()) {
        throw new java.lang.IllegalArgumentException(
          "Service.getResponsePrototype() given method " +
          "descriptor for wrong service type.");
      }
      switch(method.getIndex()) {
        case 0:
          return org.apache.hadoop.yarn.proto.ClientAMProtocol.FlexComponentsResponseProto.getDefaultInstance();
        case 1:
          return org.apache.hadoop.yarn.proto.ClientAMProtocol.GetStatusResponseProto.getDefaultInstance();
        case 2:
          return org.apache.hadoop.yarn.proto.ClientAMProtocol.StopResponseProto.getDefaultInstance();
        case 3:
          return org.apache.hadoop.yarn.proto.ClientAMProtocol.UpgradeServiceResponseProto.getDefaultInstance();
        case 4:
          return org.apache.hadoop.yarn.proto.ClientAMProtocol.CancelUpgradeResponseProto.getDefaultInstance();
        case 5:
          return org.apache.hadoop.yarn.proto.ClientAMProtocol.RestartServiceResponseProto.getDefaultInstance();
        case 6:
          return org.apache.hadoop.yarn.proto.ClientAMProtocol.CompInstancesUpgradeResponseProto.getDefaultInstance();
        case 7:
          return org.apache.hadoop.yarn.proto.ClientAMProtocol.GetCompInstancesResponseProto.getDefaultInstance();
        case 8:
          return org.apache.hadoop.yarn.proto.ClientAMProtocol.DecommissionCompInstancesResponseProto.getDefaultInstance();
        default:
          throw new java.lang.AssertionError("Can't get here.");
      }
    }

    public static Stub newStub(
        org.apache.hadoop.thirdparty.protobuf.RpcChannel channel) {
      return new Stub(channel);
    }

    public static final class Stub extends org.apache.hadoop.yarn.proto.ClientAMProtocol.ClientAMProtocolService implements Interface {
      private Stub(org.apache.hadoop.thirdparty.protobuf.RpcChannel channel) {
        this.channel = channel;
      }

      private final org.apache.hadoop.thirdparty.protobuf.RpcChannel channel;

      public org.apache.hadoop.thirdparty.protobuf.RpcChannel getChannel() {
        return channel;
      }

      public  void flexComponents(
          org.apache.hadoop.thirdparty.protobuf.RpcController controller,
          org.apache.hadoop.yarn.proto.ClientAMProtocol.FlexComponentsRequestProto request,
          org.apache.hadoop.thirdparty.protobuf.RpcCallback<org.apache.hadoop.yarn.proto.ClientAMProtocol.FlexComponentsResponseProto> done) {
        channel.callMethod(
          getDescriptor().getMethods().get(0),
          controller,
          request,
          org.apache.hadoop.yarn.proto.ClientAMProtocol.FlexComponentsResponseProto.getDefaultInstance(),
          org.apache.hadoop.thirdparty.protobuf.RpcUtil.generalizeCallback(
            done,
            org.apache.hadoop.yarn.proto.ClientAMProtocol.FlexComponentsResponseProto.class,
            org.apache.hadoop.yarn.proto.ClientAMProtocol.FlexComponentsResponseProto.getDefaultInstance()));
      }

      public  void getStatus(
          org.apache.hadoop.thirdparty.protobuf.RpcController controller,
          org.apache.hadoop.yarn.proto.ClientAMProtocol.GetStatusRequestProto request,
          org.apache.hadoop.thirdparty.protobuf.RpcCallback<org.apache.hadoop.yarn.proto.ClientAMProtocol.GetStatusResponseProto> done) {
        channel.callMethod(
          getDescriptor().getMethods().get(1),
          controller,
          request,
          org.apache.hadoop.yarn.proto.ClientAMProtocol.GetStatusResponseProto.getDefaultInstance(),
          org.apache.hadoop.thirdparty.protobuf.RpcUtil.generalizeCallback(
            done,
            org.apache.hadoop.yarn.proto.ClientAMProtocol.GetStatusResponseProto.class,
            org.apache.hadoop.yarn.proto.ClientAMProtocol.GetStatusResponseProto.getDefaultInstance()));
      }

      public  void stop(
          org.apache.hadoop.thirdparty.protobuf.RpcController controller,
          org.apache.hadoop.yarn.proto.ClientAMProtocol.StopRequestProto request,
          org.apache.hadoop.thirdparty.protobuf.RpcCallback<org.apache.hadoop.yarn.proto.ClientAMProtocol.StopResponseProto> done) {
        channel.callMethod(
          getDescriptor().getMethods().get(2),
          controller,
          request,
          org.apache.hadoop.yarn.proto.ClientAMProtocol.StopResponseProto.getDefaultInstance(),
          org.apache.hadoop.thirdparty.protobuf.RpcUtil.generalizeCallback(
            done,
            org.apache.hadoop.yarn.proto.ClientAMProtocol.StopResponseProto.class,
            org.apache.hadoop.yarn.proto.ClientAMProtocol.StopResponseProto.getDefaultInstance()));
      }

      public  void upgradeService(
          org.apache.hadoop.thirdparty.protobuf.RpcController controller,
          org.apache.hadoop.yarn.proto.ClientAMProtocol.UpgradeServiceRequestProto request,
          org.apache.hadoop.thirdparty.protobuf.RpcCallback<org.apache.hadoop.yarn.proto.ClientAMProtocol.UpgradeServiceResponseProto> done) {
        channel.callMethod(
          getDescriptor().getMethods().get(3),
          controller,
          request,
          org.apache.hadoop.yarn.proto.ClientAMProtocol.UpgradeServiceResponseProto.getDefaultInstance(),
          org.apache.hadoop.thirdparty.protobuf.RpcUtil.generalizeCallback(
            done,
            org.apache.hadoop.yarn.proto.ClientAMProtocol.UpgradeServiceResponseProto.class,
            org.apache.hadoop.yarn.proto.ClientAMProtocol.UpgradeServiceResponseProto.getDefaultInstance()));
      }

      public  void cancelUpgrade(
          org.apache.hadoop.thirdparty.protobuf.RpcController controller,
          org.apache.hadoop.yarn.proto.ClientAMProtocol.CancelUpgradeRequestProto request,
          org.apache.hadoop.thirdparty.protobuf.RpcCallback<org.apache.hadoop.yarn.proto.ClientAMProtocol.CancelUpgradeResponseProto> done) {
        channel.callMethod(
          getDescriptor().getMethods().get(4),
          controller,
          request,
          org.apache.hadoop.yarn.proto.ClientAMProtocol.CancelUpgradeResponseProto.getDefaultInstance(),
          org.apache.hadoop.thirdparty.protobuf.RpcUtil.generalizeCallback(
            done,
            org.apache.hadoop.yarn.proto.ClientAMProtocol.CancelUpgradeResponseProto.class,
            org.apache.hadoop.yarn.proto.ClientAMProtocol.CancelUpgradeResponseProto.getDefaultInstance()));
      }

      public  void restartService(
          org.apache.hadoop.thirdparty.protobuf.RpcController controller,
          org.apache.hadoop.yarn.proto.ClientAMProtocol.RestartServiceRequestProto request,
          org.apache.hadoop.thirdparty.protobuf.RpcCallback<org.apache.hadoop.yarn.proto.ClientAMProtocol.RestartServiceResponseProto> done) {
        channel.callMethod(
          getDescriptor().getMethods().get(5),
          controller,
          request,
          org.apache.hadoop.yarn.proto.ClientAMProtocol.RestartServiceResponseProto.getDefaultInstance(),
          org.apache.hadoop.thirdparty.protobuf.RpcUtil.generalizeCallback(
            done,
            org.apache.hadoop.yarn.proto.ClientAMProtocol.RestartServiceResponseProto.class,
            org.apache.hadoop.yarn.proto.ClientAMProtocol.RestartServiceResponseProto.getDefaultInstance()));
      }

      public  void upgrade(
          org.apache.hadoop.thirdparty.protobuf.RpcController controller,
          org.apache.hadoop.yarn.proto.ClientAMProtocol.CompInstancesUpgradeRequestProto request,
          org.apache.hadoop.thirdparty.protobuf.RpcCallback<org.apache.hadoop.yarn.proto.ClientAMProtocol.CompInstancesUpgradeResponseProto> done) {
        channel.callMethod(
          getDescriptor().getMethods().get(6),
          controller,
          request,
          org.apache.hadoop.yarn.proto.ClientAMProtocol.CompInstancesUpgradeResponseProto.getDefaultInstance(),
          org.apache.hadoop.thirdparty.protobuf.RpcUtil.generalizeCallback(
            done,
            org.apache.hadoop.yarn.proto.ClientAMProtocol.CompInstancesUpgradeResponseProto.class,
            org.apache.hadoop.yarn.proto.ClientAMProtocol.CompInstancesUpgradeResponseProto.getDefaultInstance()));
      }

      public void getCompInstances(
          org.apache.hadoop.thirdparty.protobuf.RpcController controller,
          org.apache.hadoop.yarn.proto.ClientAMProtocol.GetCompInstancesRequestProto request,
          org.apache.hadoop.thirdparty.protobuf.RpcCallback<org.apache.hadoop.yarn.proto.ClientAMProtocol.GetCompInstancesResponseProto> done) {
        channel.callMethod(
          getDescriptor().getMethods().get(7),
          controller,
          request,
          org.apache.hadoop.yarn.proto.ClientAMProtocol.GetCompInstancesResponseProto.getDefaultInstance(),
          org.apache.hadoop.thirdparty.protobuf.RpcUtil.generalizeCallback(
            done,
            org.apache.hadoop.yarn.proto.ClientAMProtocol.GetCompInstancesResponseProto.class,
            org.apache.hadoop.yarn.proto.ClientAMProtocol.GetCompInstancesResponseProto.getDefaultInstance()));
      }

      public void decommissionCompInstances(
          org.apache.hadoop.thirdparty.protobuf.RpcController controller,
          org.apache.hadoop.yarn.proto.ClientAMProtocol.DecommissionCompInstancesRequestProto request,
          org.apache.hadoop.thirdparty.protobuf.RpcCallback<org.apache.hadoop.yarn.proto.ClientAMProtocol.DecommissionCompInstancesResponseProto> done) {
        channel.callMethod(
          getDescriptor().getMethods().get(8),
          controller,
          request,
          org.apache.hadoop.yarn.proto.ClientAMProtocol.DecommissionCompInstancesResponseProto.getDefaultInstance(),
          org.apache.hadoop.thirdparty.protobuf.RpcUtil.generalizeCallback(
            done,
            org.apache.hadoop.yarn.proto.ClientAMProtocol.DecommissionCompInstancesResponseProto.class,
            org.apache.hadoop.yarn.proto.ClientAMProtocol.DecommissionCompInstancesResponseProto.getDefaultInstance()));
      }
    }
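
    /**
     * Illustrative usage sketch (hand-written; not part of the protoc
     * output): drives the callback-based stub above. It assumes the caller
     * already holds an {@code RpcChannel} and {@code RpcController} from the
     * RPC layer, and that {@code Stub} and {@code newStub(RpcChannel)} are
     * the asynchronous stub type and factory generated for this service,
     * mirroring {@code BlockingStub}/{@code newBlockingStub} below.
     */
    private static void exampleAsyncGetStatus(
        org.apache.hadoop.thirdparty.protobuf.RpcChannel channel,
        org.apache.hadoop.thirdparty.protobuf.RpcController controller) {
      Stub stub = newStub(channel);
      stub.getStatus(
          controller,
          org.apache.hadoop.yarn.proto.ClientAMProtocol.GetStatusRequestProto.getDefaultInstance(),
          response -> {
            // The channel invokes this callback once the AM responds; a null
            // response with controller.failed() set signals an RPC failure.
            if (response != null) {
              System.out.println("service status: " + response.getStatus());
            }
          });
    }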

    public static BlockingInterface newBlockingStub(
        org.apache.hadoop.thirdparty.protobuf.BlockingRpcChannel channel) {
      return new BlockingStub(channel);
    }

    /**
     * Blocking counterpart of the asynchronous service interface: each RPC
     * returns its response directly and reports failures by throwing
     * ServiceException.
     */
    public interface BlockingInterface {
      public org.apache.hadoop.yarn.proto.ClientAMProtocol.FlexComponentsResponseProto flexComponents(
          org.apache.hadoop.thirdparty.protobuf.RpcController controller,
          org.apache.hadoop.yarn.proto.ClientAMProtocol.FlexComponentsRequestProto request)
          throws org.apache.hadoop.thirdparty.protobuf.ServiceException;

      public org.apache.hadoop.yarn.proto.ClientAMProtocol.GetStatusResponseProto getStatus(
          org.apache.hadoop.thirdparty.protobuf.RpcController controller,
          org.apache.hadoop.yarn.proto.ClientAMProtocol.GetStatusRequestProto request)
          throws org.apache.hadoop.thirdparty.protobuf.ServiceException;

      public org.apache.hadoop.yarn.proto.ClientAMProtocol.StopResponseProto stop(
          org.apache.hadoop.thirdparty.protobuf.RpcController controller,
          org.apache.hadoop.yarn.proto.ClientAMProtocol.StopRequestProto request)
          throws org.apache.hadoop.thirdparty.protobuf.ServiceException;

      public org.apache.hadoop.yarn.proto.ClientAMProtocol.UpgradeServiceResponseProto upgradeService(
          org.apache.hadoop.thirdparty.protobuf.RpcController controller,
          org.apache.hadoop.yarn.proto.ClientAMProtocol.UpgradeServiceRequestProto request)
          throws org.apache.hadoop.thirdparty.protobuf.ServiceException;

      public org.apache.hadoop.yarn.proto.ClientAMProtocol.CancelUpgradeResponseProto cancelUpgrade(
          org.apache.hadoop.thirdparty.protobuf.RpcController controller,
          org.apache.hadoop.yarn.proto.ClientAMProtocol.CancelUpgradeRequestProto request)
          throws org.apache.hadoop.thirdparty.protobuf.ServiceException;

      public org.apache.hadoop.yarn.proto.ClientAMProtocol.RestartServiceResponseProto restartService(
          org.apache.hadoop.thirdparty.protobuf.RpcController controller,
          org.apache.hadoop.yarn.proto.ClientAMProtocol.RestartServiceRequestProto request)
          throws org.apache.hadoop.thirdparty.protobuf.ServiceException;

      public org.apache.hadoop.yarn.proto.ClientAMProtocol.CompInstancesUpgradeResponseProto upgrade(
          org.apache.hadoop.thirdparty.protobuf.RpcController controller,
          org.apache.hadoop.yarn.proto.ClientAMProtocol.CompInstancesUpgradeRequestProto request)
          throws org.apache.hadoop.thirdparty.protobuf.ServiceException;

      public org.apache.hadoop.yarn.proto.ClientAMProtocol.GetCompInstancesResponseProto getCompInstances(
          org.apache.hadoop.thirdparty.protobuf.RpcController controller,
          org.apache.hadoop.yarn.proto.ClientAMProtocol.GetCompInstancesRequestProto request)
          throws org.apache.hadoop.thirdparty.protobuf.ServiceException;

      public org.apache.hadoop.yarn.proto.ClientAMProtocol.DecommissionCompInstancesResponseProto decommissionCompInstances(
          org.apache.hadoop.thirdparty.protobuf.RpcController controller,
          org.apache.hadoop.yarn.proto.ClientAMProtocol.DecommissionCompInstancesRequestProto request)
          throws org.apache.hadoop.thirdparty.protobuf.ServiceException;
    }

    // Routes each blocking call through the underlying BlockingRpcChannel,
    // dispatching by method index on the service descriptor.
    private static final class BlockingStub implements BlockingInterface {
      private BlockingStub(org.apache.hadoop.thirdparty.protobuf.BlockingRpcChannel channel) {
        this.channel = channel;
      }

      private final org.apache.hadoop.thirdparty.protobuf.BlockingRpcChannel channel;

      public org.apache.hadoop.yarn.proto.ClientAMProtocol.FlexComponentsResponseProto flexComponents(
          org.apache.hadoop.thirdparty.protobuf.RpcController controller,
          org.apache.hadoop.yarn.proto.ClientAMProtocol.FlexComponentsRequestProto request)
          throws org.apache.hadoop.thirdparty.protobuf.ServiceException {
        return (org.apache.hadoop.yarn.proto.ClientAMProtocol.FlexComponentsResponseProto) channel.callBlockingMethod(
          getDescriptor().getMethods().get(0),
          controller,
          request,
          org.apache.hadoop.yarn.proto.ClientAMProtocol.FlexComponentsResponseProto.getDefaultInstance());
      }


      public org.apache.hadoop.yarn.proto.ClientAMProtocol.GetStatusResponseProto getStatus(
          org.apache.hadoop.thirdparty.protobuf.RpcController controller,
          org.apache.hadoop.yarn.proto.ClientAMProtocol.GetStatusRequestProto request)
          throws org.apache.hadoop.thirdparty.protobuf.ServiceException {
        return (org.apache.hadoop.yarn.proto.ClientAMProtocol.GetStatusResponseProto) channel.callBlockingMethod(
          getDescriptor().getMethods().get(1),
          controller,
          request,
          org.apache.hadoop.yarn.proto.ClientAMProtocol.GetStatusResponseProto.getDefaultInstance());
      }


      public org.apache.hadoop.yarn.proto.ClientAMProtocol.StopResponseProto stop(
          org.apache.hadoop.thirdparty.protobuf.RpcController controller,
          org.apache.hadoop.yarn.proto.ClientAMProtocol.StopRequestProto request)
          throws org.apache.hadoop.thirdparty.protobuf.ServiceException {
        return (org.apache.hadoop.yarn.proto.ClientAMProtocol.StopResponseProto) channel.callBlockingMethod(
          getDescriptor().getMethods().get(2),
          controller,
          request,
          org.apache.hadoop.yarn.proto.ClientAMProtocol.StopResponseProto.getDefaultInstance());
      }


      public org.apache.hadoop.yarn.proto.ClientAMProtocol.UpgradeServiceResponseProto upgradeService(
          org.apache.hadoop.thirdparty.protobuf.RpcController controller,
          org.apache.hadoop.yarn.proto.ClientAMProtocol.UpgradeServiceRequestProto request)
          throws org.apache.hadoop.thirdparty.protobuf.ServiceException {
        return (org.apache.hadoop.yarn.proto.ClientAMProtocol.UpgradeServiceResponseProto) channel.callBlockingMethod(
          getDescriptor().getMethods().get(3),
          controller,
          request,
          org.apache.hadoop.yarn.proto.ClientAMProtocol.UpgradeServiceResponseProto.getDefaultInstance());
      }


      public org.apache.hadoop.yarn.proto.ClientAMProtocol.CancelUpgradeResponseProto cancelUpgrade(
          org.apache.hadoop.thirdparty.protobuf.RpcController controller,
          org.apache.hadoop.yarn.proto.ClientAMProtocol.CancelUpgradeRequestProto request)
          throws org.apache.hadoop.thirdparty.protobuf.ServiceException {
        return (org.apache.hadoop.yarn.proto.ClientAMProtocol.CancelUpgradeResponseProto) channel.callBlockingMethod(
          getDescriptor().getMethods().get(4),
          controller,
          request,
          org.apache.hadoop.yarn.proto.ClientAMProtocol.CancelUpgradeResponseProto.getDefaultInstance());
      }


      public org.apache.hadoop.yarn.proto.ClientAMProtocol.RestartServiceResponseProto restartService(
          org.apache.hadoop.thirdparty.protobuf.RpcController controller,
          org.apache.hadoop.yarn.proto.ClientAMProtocol.RestartServiceRequestProto request)
          throws org.apache.hadoop.thirdparty.protobuf.ServiceException {
        return (org.apache.hadoop.yarn.proto.ClientAMProtocol.RestartServiceResponseProto) channel.callBlockingMethod(
          getDescriptor().getMethods().get(5),
          controller,
          request,
          org.apache.hadoop.yarn.proto.ClientAMProtocol.RestartServiceResponseProto.getDefaultInstance());
      }


      public org.apache.hadoop.yarn.proto.ClientAMProtocol.CompInstancesUpgradeResponseProto upgrade(
          org.apache.hadoop.thirdparty.protobuf.RpcController controller,
          org.apache.hadoop.yarn.proto.ClientAMProtocol.CompInstancesUpgradeRequestProto request)
          throws org.apache.hadoop.thirdparty.protobuf.ServiceException {
        return (org.apache.hadoop.yarn.proto.ClientAMProtocol.CompInstancesUpgradeResponseProto) channel.callBlockingMethod(
          getDescriptor().getMethods().get(6),
          controller,
          request,
          org.apache.hadoop.yarn.proto.ClientAMProtocol.CompInstancesUpgradeResponseProto.getDefaultInstance());
      }


      public org.apache.hadoop.yarn.proto.ClientAMProtocol.GetCompInstancesResponseProto getCompInstances(
          org.apache.hadoop.thirdparty.protobuf.RpcController controller,
          org.apache.hadoop.yarn.proto.ClientAMProtocol.GetCompInstancesRequestProto request)
          throws org.apache.hadoop.thirdparty.protobuf.ServiceException {
        return (org.apache.hadoop.yarn.proto.ClientAMProtocol.GetCompInstancesResponseProto) channel.callBlockingMethod(
          getDescriptor().getMethods().get(7),
          controller,
          request,
          org.apache.hadoop.yarn.proto.ClientAMProtocol.GetCompInstancesResponseProto.getDefaultInstance());
      }


      public org.apache.hadoop.yarn.proto.ClientAMProtocol.DecommissionCompInstancesResponseProto decommissionCompInstances(
          org.apache.hadoop.thirdparty.protobuf.RpcController controller,
          org.apache.hadoop.yarn.proto.ClientAMProtocol.DecommissionCompInstancesRequestProto request)
          throws org.apache.hadoop.thirdparty.protobuf.ServiceException {
        return (org.apache.hadoop.yarn.proto.ClientAMProtocol.DecommissionCompInstancesResponseProto) channel.callBlockingMethod(
          getDescriptor().getMethods().get(8),
          controller,
          request,
          org.apache.hadoop.yarn.proto.ClientAMProtocol.DecommissionCompInstancesResponseProto.getDefaultInstance());
      }

    }
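
    /**
     * Illustrative usage sketch (hand-written; not part of the protoc
     * output): a synchronous stop() call through the blocking stub. The
     * {@code BlockingRpcChannel} and {@code RpcController} are assumed to be
     * supplied by the RPC layer; transport and remote failures surface as
     * ServiceException rather than through the controller.
     */
    private static org.apache.hadoop.yarn.proto.ClientAMProtocol.StopResponseProto
        exampleBlockingStop(
        org.apache.hadoop.thirdparty.protobuf.BlockingRpcChannel channel,
        org.apache.hadoop.thirdparty.protobuf.RpcController controller)
        throws org.apache.hadoop.thirdparty.protobuf.ServiceException {
      BlockingInterface stub = newBlockingStub(channel);
      // Blocks until the AM answers or the channel fails.
      return stub.stop(controller,
          org.apache.hadoop.yarn.proto.ClientAMProtocol.StopRequestProto.getDefaultInstance());
    }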

    // @@protoc_insertion_point(class_scope:hadoop.yarn.ClientAMProtocolService)
  }

  // One Descriptor/FieldAccessorTable pair per message type declared in
  // ClientAMProtocol.proto; all of these are assigned by the static
  // initializer at the bottom of this file.
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_yarn_FlexComponentsRequestProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_yarn_FlexComponentsRequestProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_yarn_ComponentCountProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_yarn_ComponentCountProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_yarn_FlexComponentsResponseProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_yarn_FlexComponentsResponseProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_yarn_GetStatusRequestProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_yarn_GetStatusRequestProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_yarn_GetStatusResponseProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_yarn_GetStatusResponseProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_yarn_StopRequestProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_yarn_StopRequestProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_yarn_StopResponseProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_yarn_StopResponseProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_yarn_UpgradeServiceRequestProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_yarn_UpgradeServiceRequestProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_yarn_UpgradeServiceResponseProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_yarn_UpgradeServiceResponseProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_yarn_CancelUpgradeRequestProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_yarn_CancelUpgradeRequestProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_yarn_CancelUpgradeResponseProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_yarn_CancelUpgradeResponseProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_yarn_RestartServiceRequestProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_yarn_RestartServiceRequestProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_yarn_RestartServiceResponseProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_yarn_RestartServiceResponseProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_yarn_CompInstancesUpgradeRequestProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_yarn_CompInstancesUpgradeRequestProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_yarn_CompInstancesUpgradeResponseProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_yarn_CompInstancesUpgradeResponseProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_yarn_GetCompInstancesRequestProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_yarn_GetCompInstancesRequestProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_yarn_GetCompInstancesResponseProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_yarn_GetCompInstancesResponseProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_yarn_DecommissionCompInstancesRequestProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_yarn_DecommissionCompInstancesRequestProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_yarn_DecommissionCompInstancesResponseProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_yarn_DecommissionCompInstancesResponseProto_fieldAccessorTable;

  public static org.apache.hadoop.thirdparty.protobuf.Descriptors.FileDescriptor
      getDescriptor() {
    return descriptor;
  }
  private static org.apache.hadoop.thirdparty.protobuf.Descriptors.FileDescriptor
      descriptor;
  static {
    // Serialized FileDescriptorProto for ClientAMProtocol.proto, embedded as
    // escaped string literals and parsed below into the runtime descriptor.
    java.lang.String[] descriptorData = {
      "\n\026ClientAMProtocol.proto\022\013hadoop.yarn\"R\n" +
      "\032FlexComponentsRequestProto\0224\n\ncomponent" +
      "s\030\001 \003(\0132 .hadoop.yarn.ComponentCountProt" +
      "o\"?\n\023ComponentCountProto\022\014\n\004name\030\001 \001(\t\022\032" +
      "\n\022numberOfContainers\030\002 \001(\003\"\035\n\033FlexCompon" +
      "entsResponseProto\"\027\n\025GetStatusRequestPro" +
      "to\"(\n\026GetStatusResponseProto\022\016\n\006status\030\001" +
      " \001(\t\"\022\n\020StopRequestProto\"\023\n\021StopResponse" +
      "Proto\"[\n\032UpgradeServiceRequestProto\022\017\n\007v" +
      "ersion\030\001 \001(\t\022\024\n\014autoFinalize\030\002 \001(\010\022\026\n\016ex" +
      "pressUpgrade\030\003 \001(\010\",\n\033UpgradeServiceResp" +
      "onseProto\022\r\n\005error\030\001 \001(\t\"\033\n\031CancelUpgrad" +
      "eRequestProto\"\034\n\032CancelUpgradeResponsePr" +
      "oto\"\034\n\032RestartServiceRequestProto\"\035\n\033Res" +
      "tartServiceResponseProto\"8\n CompInstance" +
      "sUpgradeRequestProto\022\024\n\014containerIds\030\001 \003" +
      "(\t\"#\n!CompInstancesUpgradeResponseProto\"" +
      "`\n\034GetCompInstancesRequestProto\022\026\n\016compo" +
      "nentNames\030\001 \003(\t\022\017\n\007version\030\002 \001(\t\022\027\n\017cont" +
      "ainerStates\030\003 \003(\t\"6\n\035GetCompInstancesRes" +
      "ponseProto\022\025\n\rcompInstances\030\001 \001(\t\">\n%Dec" +
      "ommissionCompInstancesRequestProto\022\025\n\rco" +
      "mpInstances\030\001 \003(\t\"(\n&DecommissionCompIns" +
      "tancesResponseProto2\243\007\n\027ClientAMProtocol" +
      "Service\022c\n\016flexComponents\022\'.hadoop.yarn." +
      "FlexComponentsRequestProto\032(.hadoop.yarn" +
      ".FlexComponentsResponseProto\022T\n\tgetStatu" +
      "s\022\".hadoop.yarn.GetStatusRequestProto\032#." +
      "hadoop.yarn.GetStatusResponseProto\022E\n\004st" +
      "op\022\035.hadoop.yarn.StopRequestProto\032\036.hado" +
      "op.yarn.StopResponseProto\022c\n\016upgradeServ" +
      "ice\022\'.hadoop.yarn.UpgradeServiceRequestP" +
      "roto\032(.hadoop.yarn.UpgradeServiceRespons" +
      "eProto\022`\n\rcancelUpgrade\022&.hadoop.yarn.Ca" +
      "ncelUpgradeRequestProto\032\'.hadoop.yarn.Ca" +
      "ncelUpgradeResponseProto\022c\n\016restartServi" +
      "ce\022\'.hadoop.yarn.RestartServiceRequestPr" +
      "oto\032(.hadoop.yarn.RestartServiceResponse" +
      "Proto\022h\n\007upgrade\022-.hadoop.yarn.CompInsta" +
      "ncesUpgradeRequestProto\032..hadoop.yarn.Co" +
      "mpInstancesUpgradeResponseProto\022i\n\020getCo" +
      "mpInstances\022).hadoop.yarn.GetCompInstanc" +
      "esRequestProto\032*.hadoop.yarn.GetCompInst" +
      "ancesResponseProto\022\204\001\n\031decommissionCompI" +
      "nstances\0222.hadoop.yarn.DecommissionCompI" +
      "nstancesRequestProto\0323.hadoop.yarn.Decom" +
      "missionCompInstancesResponseProtoB6\n\034org" +
      ".apache.hadoop.yarn.protoB\020ClientAMProto" +
      "col\210\001\001\240\001\001"
    };
    descriptor = org.apache.hadoop.thirdparty.protobuf.Descriptors.FileDescriptor
      .internalBuildGeneratedFileFrom(descriptorData,
        new org.apache.hadoop.thirdparty.protobuf.Descriptors.FileDescriptor[] {
        });
    internal_static_hadoop_yarn_FlexComponentsRequestProto_descriptor =
      getDescriptor().getMessageTypes().get(0);
    internal_static_hadoop_yarn_FlexComponentsRequestProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_yarn_FlexComponentsRequestProto_descriptor,
        new java.lang.String[] { "Components", });
    internal_static_hadoop_yarn_ComponentCountProto_descriptor =
      getDescriptor().getMessageTypes().get(1);
    internal_static_hadoop_yarn_ComponentCountProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_yarn_ComponentCountProto_descriptor,
        new java.lang.String[] { "Name", "NumberOfContainers", });
    internal_static_hadoop_yarn_FlexComponentsResponseProto_descriptor =
      getDescriptor().getMessageTypes().get(2);
    internal_static_hadoop_yarn_FlexComponentsResponseProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_yarn_FlexComponentsResponseProto_descriptor,
        new java.lang.String[] { });
    internal_static_hadoop_yarn_GetStatusRequestProto_descriptor =
      getDescriptor().getMessageTypes().get(3);
    internal_static_hadoop_yarn_GetStatusRequestProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_yarn_GetStatusRequestProto_descriptor,
        new java.lang.String[] { });
    internal_static_hadoop_yarn_GetStatusResponseProto_descriptor =
      getDescriptor().getMessageTypes().get(4);
    internal_static_hadoop_yarn_GetStatusResponseProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_yarn_GetStatusResponseProto_descriptor,
        new java.lang.String[] { "Status", });
    internal_static_hadoop_yarn_StopRequestProto_descriptor =
      getDescriptor().getMessageTypes().get(5);
    internal_static_hadoop_yarn_StopRequestProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_yarn_StopRequestProto_descriptor,
        new java.lang.String[] { });
    internal_static_hadoop_yarn_StopResponseProto_descriptor =
      getDescriptor().getMessageTypes().get(6);
    internal_static_hadoop_yarn_StopResponseProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_yarn_StopResponseProto_descriptor,
        new java.lang.String[] { });
    internal_static_hadoop_yarn_UpgradeServiceRequestProto_descriptor =
      getDescriptor().getMessageTypes().get(7);
    internal_static_hadoop_yarn_UpgradeServiceRequestProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_yarn_UpgradeServiceRequestProto_descriptor,
        new java.lang.String[] { "Version", "AutoFinalize", "ExpressUpgrade", });
    internal_static_hadoop_yarn_UpgradeServiceResponseProto_descriptor =
      getDescriptor().getMessageTypes().get(8);
    internal_static_hadoop_yarn_UpgradeServiceResponseProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_yarn_UpgradeServiceResponseProto_descriptor,
        new java.lang.String[] { "Error", });
    internal_static_hadoop_yarn_CancelUpgradeRequestProto_descriptor =
      getDescriptor().getMessageTypes().get(9);
    internal_static_hadoop_yarn_CancelUpgradeRequestProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_yarn_CancelUpgradeRequestProto_descriptor,
        new java.lang.String[] { });
    internal_static_hadoop_yarn_CancelUpgradeResponseProto_descriptor =
      getDescriptor().getMessageTypes().get(10);
    internal_static_hadoop_yarn_CancelUpgradeResponseProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_yarn_CancelUpgradeResponseProto_descriptor,
        new java.lang.String[] { });
    internal_static_hadoop_yarn_RestartServiceRequestProto_descriptor =
      getDescriptor().getMessageTypes().get(11);
    internal_static_hadoop_yarn_RestartServiceRequestProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_yarn_RestartServiceRequestProto_descriptor,
        new java.lang.String[] { });
    internal_static_hadoop_yarn_RestartServiceResponseProto_descriptor =
      getDescriptor().getMessageTypes().get(12);
    internal_static_hadoop_yarn_RestartServiceResponseProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_yarn_RestartServiceResponseProto_descriptor,
        new java.lang.String[] { });
    internal_static_hadoop_yarn_CompInstancesUpgradeRequestProto_descriptor =
      getDescriptor().getMessageTypes().get(13);
    internal_static_hadoop_yarn_CompInstancesUpgradeRequestProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_yarn_CompInstancesUpgradeRequestProto_descriptor,
        new java.lang.String[] { "ContainerIds", });
    internal_static_hadoop_yarn_CompInstancesUpgradeResponseProto_descriptor =
      getDescriptor().getMessageTypes().get(14);
    internal_static_hadoop_yarn_CompInstancesUpgradeResponseProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_yarn_CompInstancesUpgradeResponseProto_descriptor,
        new java.lang.String[] { });
    internal_static_hadoop_yarn_GetCompInstancesRequestProto_descriptor =
      getDescriptor().getMessageTypes().get(15);
    internal_static_hadoop_yarn_GetCompInstancesRequestProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_yarn_GetCompInstancesRequestProto_descriptor,
        new java.lang.String[] { "ComponentNames", "Version", "ContainerStates", });
    internal_static_hadoop_yarn_GetCompInstancesResponseProto_descriptor =
      getDescriptor().getMessageTypes().get(16);
    internal_static_hadoop_yarn_GetCompInstancesResponseProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_yarn_GetCompInstancesResponseProto_descriptor,
        new java.lang.String[] { "CompInstances", });
    internal_static_hadoop_yarn_DecommissionCompInstancesRequestProto_descriptor =
      getDescriptor().getMessageTypes().get(17);
    internal_static_hadoop_yarn_DecommissionCompInstancesRequestProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_yarn_DecommissionCompInstancesRequestProto_descriptor,
        new java.lang.String[] { "CompInstances", });
    internal_static_hadoop_yarn_DecommissionCompInstancesResponseProto_descriptor =
      getDescriptor().getMessageTypes().get(18);
    internal_static_hadoop_yarn_DecommissionCompInstancesResponseProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_yarn_DecommissionCompInstancesResponseProto_descriptor,
        new java.lang.String[] { });
  }
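
  /**
   * Illustrative sketch (hand-written; not part of the protoc output):
   * message descriptors are exposed in .proto declaration order, which is
   * why the static initializer above can bind each one by index through
   * {@code getMessageTypes().get(n)}.
   */
  private static void exampleListMessageTypes() {
    for (org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor type
        : getDescriptor().getMessageTypes()) {
      // Prints e.g. "hadoop.yarn.FlexComponentsRequestProto" for index 0.
      System.out.println(type.getFullName());
    }
  }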

  // @@protoc_insertion_point(outer_class_scope)
}