@@ -1,10690 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-// Generated by the protocol buffer compiler. DO NOT EDIT!
-// source: datatransfer.proto
-
-package org.apache.hadoop.hdfs.protocol.proto;
-
-public final class DataTransferProtos {
-  private DataTransferProtos() {}
-  public static void registerAllExtensions(
-      com.google.protobuf.ExtensionRegistry registry) {
-  }
-  public enum Status
-      implements com.google.protobuf.ProtocolMessageEnum {
-    SUCCESS(0, 0),
-    ERROR(1, 1),
-    ERROR_CHECKSUM(2, 2),
-    ERROR_INVALID(3, 3),
-    ERROR_EXISTS(4, 4),
-    ERROR_ACCESS_TOKEN(5, 5),
-    CHECKSUM_OK(6, 6),
-    ;
-
-    public static final int SUCCESS_VALUE = 0;
-    public static final int ERROR_VALUE = 1;
-    public static final int ERROR_CHECKSUM_VALUE = 2;
-    public static final int ERROR_INVALID_VALUE = 3;
-    public static final int ERROR_EXISTS_VALUE = 4;
-    public static final int ERROR_ACCESS_TOKEN_VALUE = 5;
-    public static final int CHECKSUM_OK_VALUE = 6;
-
-
-    public final int getNumber() { return value; }
-
-    public static Status valueOf(int value) {
-      switch (value) {
-        case 0: return SUCCESS;
-        case 1: return ERROR;
-        case 2: return ERROR_CHECKSUM;
-        case 3: return ERROR_INVALID;
-        case 4: return ERROR_EXISTS;
-        case 5: return ERROR_ACCESS_TOKEN;
-        case 6: return CHECKSUM_OK;
-        default: return null;
-      }
-    }
-
-    public static com.google.protobuf.Internal.EnumLiteMap<Status>
-        internalGetValueMap() {
-      return internalValueMap;
-    }
-    private static com.google.protobuf.Internal.EnumLiteMap<Status>
-        internalValueMap =
-          new com.google.protobuf.Internal.EnumLiteMap<Status>() {
-            public Status findValueByNumber(int number) {
-              return Status.valueOf(number);
-            }
-          };
-
-    public final com.google.protobuf.Descriptors.EnumValueDescriptor
-        getValueDescriptor() {
-      return getDescriptor().getValues().get(index);
-    }
-    public final com.google.protobuf.Descriptors.EnumDescriptor
-        getDescriptorForType() {
-      return getDescriptor();
-    }
-    public static final com.google.protobuf.Descriptors.EnumDescriptor
-        getDescriptor() {
-      return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.getDescriptor().getEnumTypes().get(0);
-    }
-
-    private static final Status[] VALUES = {
-      SUCCESS, ERROR, ERROR_CHECKSUM, ERROR_INVALID, ERROR_EXISTS, ERROR_ACCESS_TOKEN, CHECKSUM_OK,
-    };
-
-    public static Status valueOf(
-        com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
-      if (desc.getType() != getDescriptor()) {
-        throw new java.lang.IllegalArgumentException(
-          "EnumValueDescriptor is not for this type.");
-      }
-      return VALUES[desc.getIndex()];
-    }
-
-    private final int index;
-    private final int value;
-
-    private Status(int index, int value) {
-      this.index = index;
-      this.value = value;
-    }
-
-    // @@protoc_insertion_point(enum_scope:Status)
-  }
-
-  public interface BaseHeaderProtoOrBuilder
-      extends com.google.protobuf.MessageOrBuilder {
-
-    // required .ExtendedBlockProto block = 1;
-    boolean hasBlock();
-    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getBlock();
-    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder getBlockOrBuilder();
-
-    // optional .BlockTokenIdentifierProto token = 2;
-    boolean hasToken();
-    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto getToken();
-    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProtoOrBuilder getTokenOrBuilder();
-  }
- public static final class BaseHeaderProto extends
|
|
|
- com.google.protobuf.GeneratedMessage
|
|
|
- implements BaseHeaderProtoOrBuilder {
|
|
|
- // Use BaseHeaderProto.newBuilder() to construct.
|
|
|
- private BaseHeaderProto(Builder builder) {
|
|
|
- super(builder);
|
|
|
- }
|
|
|
- private BaseHeaderProto(boolean noInit) {}
|
|
|
-
|
|
|
- private static final BaseHeaderProto defaultInstance;
|
|
|
- public static BaseHeaderProto getDefaultInstance() {
|
|
|
- return defaultInstance;
|
|
|
- }
|
|
|
-
|
|
|
- public BaseHeaderProto getDefaultInstanceForType() {
|
|
|
- return defaultInstance;
|
|
|
- }
|
|
|
-
|
|
|
- public static final com.google.protobuf.Descriptors.Descriptor
|
|
|
- getDescriptor() {
|
|
|
- return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_BaseHeaderProto_descriptor;
|
|
|
- }
|
|
|
-
|
|
|
- protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
|
|
- internalGetFieldAccessorTable() {
|
|
|
- return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_BaseHeaderProto_fieldAccessorTable;
|
|
|
- }
|
|
|
-
|
|
|
- private int bitField0_;
|
|
|
- // required .ExtendedBlockProto block = 1;
|
|
|
- public static final int BLOCK_FIELD_NUMBER = 1;
|
|
|
- private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto block_;
|
|
|
- public boolean hasBlock() {
|
|
|
- return ((bitField0_ & 0x00000001) == 0x00000001);
|
|
|
- }
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getBlock() {
|
|
|
- return block_;
|
|
|
- }
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder getBlockOrBuilder() {
|
|
|
- return block_;
|
|
|
- }
|
|
|
-
|
|
|
- // optional .BlockTokenIdentifierProto token = 2;
|
|
|
- public static final int TOKEN_FIELD_NUMBER = 2;
|
|
|
- private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto token_;
|
|
|
- public boolean hasToken() {
|
|
|
- return ((bitField0_ & 0x00000002) == 0x00000002);
|
|
|
- }
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto getToken() {
|
|
|
- return token_;
|
|
|
- }
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProtoOrBuilder getTokenOrBuilder() {
|
|
|
- return token_;
|
|
|
- }
|
|
|
-
|
|
|
- private void initFields() {
|
|
|
- block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance();
|
|
|
- token_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto.getDefaultInstance();
|
|
|
- }
|
|
|
- private byte memoizedIsInitialized = -1;
|
|
|
- public final boolean isInitialized() {
|
|
|
- byte isInitialized = memoizedIsInitialized;
|
|
|
- if (isInitialized != -1) return isInitialized == 1;
|
|
|
-
|
|
|
- if (!hasBlock()) {
|
|
|
- memoizedIsInitialized = 0;
|
|
|
- return false;
|
|
|
- }
|
|
|
- if (!getBlock().isInitialized()) {
|
|
|
- memoizedIsInitialized = 0;
|
|
|
- return false;
|
|
|
- }
|
|
|
- if (hasToken()) {
|
|
|
- if (!getToken().isInitialized()) {
|
|
|
- memoizedIsInitialized = 0;
|
|
|
- return false;
|
|
|
- }
|
|
|
- }
|
|
|
- memoizedIsInitialized = 1;
|
|
|
- return true;
|
|
|
- }
|
|
|
-
|
|
|
- public void writeTo(com.google.protobuf.CodedOutputStream output)
|
|
|
- throws java.io.IOException {
|
|
|
- getSerializedSize();
|
|
|
- if (((bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
- output.writeMessage(1, block_);
|
|
|
- }
|
|
|
- if (((bitField0_ & 0x00000002) == 0x00000002)) {
|
|
|
- output.writeMessage(2, token_);
|
|
|
- }
|
|
|
- getUnknownFields().writeTo(output);
|
|
|
- }
|
|
|
-
|
|
|
- private int memoizedSerializedSize = -1;
|
|
|
- public int getSerializedSize() {
|
|
|
- int size = memoizedSerializedSize;
|
|
|
- if (size != -1) return size;
|
|
|
-
|
|
|
- size = 0;
|
|
|
- if (((bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
- size += com.google.protobuf.CodedOutputStream
|
|
|
- .computeMessageSize(1, block_);
|
|
|
- }
|
|
|
- if (((bitField0_ & 0x00000002) == 0x00000002)) {
|
|
|
- size += com.google.protobuf.CodedOutputStream
|
|
|
- .computeMessageSize(2, token_);
|
|
|
- }
|
|
|
- size += getUnknownFields().getSerializedSize();
|
|
|
- memoizedSerializedSize = size;
|
|
|
- return size;
|
|
|
- }
|
|
|
-
|
|
|
- private static final long serialVersionUID = 0L;
|
|
|
- @java.lang.Override
|
|
|
- protected java.lang.Object writeReplace()
|
|
|
- throws java.io.ObjectStreamException {
|
|
|
- return super.writeReplace();
|
|
|
- }
|
|
|
-
|
|
|
- @java.lang.Override
|
|
|
- public boolean equals(final java.lang.Object obj) {
|
|
|
- if (obj == this) {
|
|
|
- return true;
|
|
|
- }
|
|
|
- if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto)) {
|
|
|
- return super.equals(obj);
|
|
|
- }
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto other = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto) obj;
|
|
|
-
|
|
|
- boolean result = true;
|
|
|
- result = result && (hasBlock() == other.hasBlock());
|
|
|
- if (hasBlock()) {
|
|
|
- result = result && getBlock()
|
|
|
- .equals(other.getBlock());
|
|
|
- }
|
|
|
- result = result && (hasToken() == other.hasToken());
|
|
|
- if (hasToken()) {
|
|
|
- result = result && getToken()
|
|
|
- .equals(other.getToken());
|
|
|
- }
|
|
|
- result = result &&
|
|
|
- getUnknownFields().equals(other.getUnknownFields());
|
|
|
- return result;
|
|
|
- }
|
|
|
-
|
|
|
- @java.lang.Override
|
|
|
- public int hashCode() {
|
|
|
- int hash = 41;
|
|
|
- hash = (19 * hash) + getDescriptorForType().hashCode();
|
|
|
- if (hasBlock()) {
|
|
|
- hash = (37 * hash) + BLOCK_FIELD_NUMBER;
|
|
|
- hash = (53 * hash) + getBlock().hashCode();
|
|
|
- }
|
|
|
- if (hasToken()) {
|
|
|
- hash = (37 * hash) + TOKEN_FIELD_NUMBER;
|
|
|
- hash = (53 * hash) + getToken().hashCode();
|
|
|
- }
|
|
|
- hash = (29 * hash) + getUnknownFields().hashCode();
|
|
|
- return hash;
|
|
|
- }
|
|
|
-
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto parseFrom(
|
|
|
- com.google.protobuf.ByteString data)
|
|
|
- throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
- return newBuilder().mergeFrom(data).buildParsed();
|
|
|
- }
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto parseFrom(
|
|
|
- com.google.protobuf.ByteString data,
|
|
|
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
- throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
- return newBuilder().mergeFrom(data, extensionRegistry)
|
|
|
- .buildParsed();
|
|
|
- }
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto parseFrom(byte[] data)
|
|
|
- throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
- return newBuilder().mergeFrom(data).buildParsed();
|
|
|
- }
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto parseFrom(
|
|
|
- byte[] data,
|
|
|
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
- throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
- return newBuilder().mergeFrom(data, extensionRegistry)
|
|
|
- .buildParsed();
|
|
|
- }
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto parseFrom(java.io.InputStream input)
|
|
|
- throws java.io.IOException {
|
|
|
- return newBuilder().mergeFrom(input).buildParsed();
|
|
|
- }
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto parseFrom(
|
|
|
- java.io.InputStream input,
|
|
|
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
- throws java.io.IOException {
|
|
|
- return newBuilder().mergeFrom(input, extensionRegistry)
|
|
|
- .buildParsed();
|
|
|
- }
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto parseDelimitedFrom(java.io.InputStream input)
|
|
|
- throws java.io.IOException {
|
|
|
- Builder builder = newBuilder();
|
|
|
- if (builder.mergeDelimitedFrom(input)) {
|
|
|
- return builder.buildParsed();
|
|
|
- } else {
|
|
|
- return null;
|
|
|
- }
|
|
|
- }
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto parseDelimitedFrom(
|
|
|
- java.io.InputStream input,
|
|
|
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
- throws java.io.IOException {
|
|
|
- Builder builder = newBuilder();
|
|
|
- if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
|
|
|
- return builder.buildParsed();
|
|
|
- } else {
|
|
|
- return null;
|
|
|
- }
|
|
|
- }
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto parseFrom(
|
|
|
- com.google.protobuf.CodedInputStream input)
|
|
|
- throws java.io.IOException {
|
|
|
- return newBuilder().mergeFrom(input).buildParsed();
|
|
|
- }
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto parseFrom(
|
|
|
- com.google.protobuf.CodedInputStream input,
|
|
|
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
- throws java.io.IOException {
|
|
|
- return newBuilder().mergeFrom(input, extensionRegistry)
|
|
|
- .buildParsed();
|
|
|
- }
|
|
|
-
|
|
|
- public static Builder newBuilder() { return Builder.create(); }
|
|
|
- public Builder newBuilderForType() { return newBuilder(); }
|
|
|
- public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto prototype) {
|
|
|
- return newBuilder().mergeFrom(prototype);
|
|
|
- }
|
|
|
- public Builder toBuilder() { return newBuilder(this); }
|
|
|
-
|
|
|
- @java.lang.Override
|
|
|
- protected Builder newBuilderForType(
|
|
|
- com.google.protobuf.GeneratedMessage.BuilderParent parent) {
|
|
|
- Builder builder = new Builder(parent);
|
|
|
- return builder;
|
|
|
- }
|
|
|
- public static final class Builder extends
|
|
|
- com.google.protobuf.GeneratedMessage.Builder<Builder>
|
|
|
- implements org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder {
|
|
|
- public static final com.google.protobuf.Descriptors.Descriptor
|
|
|
- getDescriptor() {
|
|
|
- return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_BaseHeaderProto_descriptor;
|
|
|
- }
|
|
|
-
|
|
|
- protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
|
|
- internalGetFieldAccessorTable() {
|
|
|
- return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_BaseHeaderProto_fieldAccessorTable;
|
|
|
- }
|
|
|
-
|
|
|
- // Construct using org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.newBuilder()
|
|
|
- private Builder() {
|
|
|
- maybeForceBuilderInitialization();
|
|
|
- }
|
|
|
-
|
|
|
- private Builder(BuilderParent parent) {
|
|
|
- super(parent);
|
|
|
- maybeForceBuilderInitialization();
|
|
|
- }
|
|
|
- private void maybeForceBuilderInitialization() {
|
|
|
- if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
|
|
|
- getBlockFieldBuilder();
|
|
|
- getTokenFieldBuilder();
|
|
|
- }
|
|
|
- }
|
|
|
- private static Builder create() {
|
|
|
- return new Builder();
|
|
|
- }
|
|
|
-
|
|
|
- public Builder clear() {
|
|
|
- super.clear();
|
|
|
- if (blockBuilder_ == null) {
|
|
|
- block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance();
|
|
|
- } else {
|
|
|
- blockBuilder_.clear();
|
|
|
- }
|
|
|
- bitField0_ = (bitField0_ & ~0x00000001);
|
|
|
- if (tokenBuilder_ == null) {
|
|
|
- token_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto.getDefaultInstance();
|
|
|
- } else {
|
|
|
- tokenBuilder_.clear();
|
|
|
- }
|
|
|
- bitField0_ = (bitField0_ & ~0x00000002);
|
|
|
- return this;
|
|
|
- }
|
|
|
-
|
|
|
- public Builder clone() {
|
|
|
- return create().mergeFrom(buildPartial());
|
|
|
- }
|
|
|
-
|
|
|
- public com.google.protobuf.Descriptors.Descriptor
|
|
|
- getDescriptorForType() {
|
|
|
- return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDescriptor();
|
|
|
- }
|
|
|
-
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto getDefaultInstanceForType() {
|
|
|
- return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance();
|
|
|
- }
|
|
|
-
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto build() {
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto result = buildPartial();
|
|
|
- if (!result.isInitialized()) {
|
|
|
- throw newUninitializedMessageException(result);
|
|
|
- }
|
|
|
- return result;
|
|
|
- }
|
|
|
-
|
|
|
- private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto buildParsed()
|
|
|
- throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto result = buildPartial();
|
|
|
- if (!result.isInitialized()) {
|
|
|
- throw newUninitializedMessageException(
|
|
|
- result).asInvalidProtocolBufferException();
|
|
|
- }
|
|
|
- return result;
|
|
|
- }
|
|
|
-
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto buildPartial() {
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto result = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto(this);
|
|
|
- int from_bitField0_ = bitField0_;
|
|
|
- int to_bitField0_ = 0;
|
|
|
- if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
- to_bitField0_ |= 0x00000001;
|
|
|
- }
|
|
|
- if (blockBuilder_ == null) {
|
|
|
- result.block_ = block_;
|
|
|
- } else {
|
|
|
- result.block_ = blockBuilder_.build();
|
|
|
- }
|
|
|
- if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
|
|
|
- to_bitField0_ |= 0x00000002;
|
|
|
- }
|
|
|
- if (tokenBuilder_ == null) {
|
|
|
- result.token_ = token_;
|
|
|
- } else {
|
|
|
- result.token_ = tokenBuilder_.build();
|
|
|
- }
|
|
|
- result.bitField0_ = to_bitField0_;
|
|
|
- onBuilt();
|
|
|
- return result;
|
|
|
- }
|
|
|
-
|
|
|
- public Builder mergeFrom(com.google.protobuf.Message other) {
|
|
|
- if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto) {
|
|
|
- return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto)other);
|
|
|
- } else {
|
|
|
- super.mergeFrom(other);
|
|
|
- return this;
|
|
|
- }
|
|
|
- }
|
|
|
-
|
|
|
- public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto other) {
|
|
|
- if (other == org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance()) return this;
|
|
|
- if (other.hasBlock()) {
|
|
|
- mergeBlock(other.getBlock());
|
|
|
- }
|
|
|
- if (other.hasToken()) {
|
|
|
- mergeToken(other.getToken());
|
|
|
- }
|
|
|
- this.mergeUnknownFields(other.getUnknownFields());
|
|
|
- return this;
|
|
|
- }
|
|
|
-
|
|
|
- public final boolean isInitialized() {
|
|
|
- if (!hasBlock()) {
|
|
|
-
|
|
|
- return false;
|
|
|
- }
|
|
|
- if (!getBlock().isInitialized()) {
|
|
|
-
|
|
|
- return false;
|
|
|
- }
|
|
|
- if (hasToken()) {
|
|
|
- if (!getToken().isInitialized()) {
|
|
|
-
|
|
|
- return false;
|
|
|
- }
|
|
|
- }
|
|
|
- return true;
|
|
|
- }
|
|
|
-
|
|
|
- public Builder mergeFrom(
|
|
|
- com.google.protobuf.CodedInputStream input,
|
|
|
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
- throws java.io.IOException {
|
|
|
- com.google.protobuf.UnknownFieldSet.Builder unknownFields =
|
|
|
- com.google.protobuf.UnknownFieldSet.newBuilder(
|
|
|
- this.getUnknownFields());
|
|
|
- while (true) {
|
|
|
- int tag = input.readTag();
|
|
|
- switch (tag) {
|
|
|
- case 0:
|
|
|
- this.setUnknownFields(unknownFields.build());
|
|
|
- onChanged();
|
|
|
- return this;
|
|
|
- default: {
|
|
|
- if (!parseUnknownField(input, unknownFields,
|
|
|
- extensionRegistry, tag)) {
|
|
|
- this.setUnknownFields(unknownFields.build());
|
|
|
- onChanged();
|
|
|
- return this;
|
|
|
- }
|
|
|
- break;
|
|
|
- }
|
|
|
- case 10: {
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.newBuilder();
|
|
|
- if (hasBlock()) {
|
|
|
- subBuilder.mergeFrom(getBlock());
|
|
|
- }
|
|
|
- input.readMessage(subBuilder, extensionRegistry);
|
|
|
- setBlock(subBuilder.buildPartial());
|
|
|
- break;
|
|
|
- }
|
|
|
- case 18: {
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto.newBuilder();
|
|
|
- if (hasToken()) {
|
|
|
- subBuilder.mergeFrom(getToken());
|
|
|
- }
|
|
|
- input.readMessage(subBuilder, extensionRegistry);
|
|
|
- setToken(subBuilder.buildPartial());
|
|
|
- break;
|
|
|
- }
|
|
|
- }
|
|
|
- }
|
|
|
- }
|
|
|
-
|
|
|
- private int bitField0_;
|
|
|
-
|
|
|
- // required .ExtendedBlockProto block = 1;
|
|
|
- private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance();
|
|
|
- private com.google.protobuf.SingleFieldBuilder<
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder> blockBuilder_;
|
|
|
- public boolean hasBlock() {
|
|
|
- return ((bitField0_ & 0x00000001) == 0x00000001);
|
|
|
- }
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getBlock() {
|
|
|
- if (blockBuilder_ == null) {
|
|
|
- return block_;
|
|
|
- } else {
|
|
|
- return blockBuilder_.getMessage();
|
|
|
- }
|
|
|
- }
|
|
|
- public Builder setBlock(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto value) {
|
|
|
- if (blockBuilder_ == null) {
|
|
|
- if (value == null) {
|
|
|
- throw new NullPointerException();
|
|
|
- }
|
|
|
- block_ = value;
|
|
|
- onChanged();
|
|
|
- } else {
|
|
|
- blockBuilder_.setMessage(value);
|
|
|
- }
|
|
|
- bitField0_ |= 0x00000001;
|
|
|
- return this;
|
|
|
- }
|
|
|
- public Builder setBlock(
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder builderForValue) {
|
|
|
- if (blockBuilder_ == null) {
|
|
|
- block_ = builderForValue.build();
|
|
|
- onChanged();
|
|
|
- } else {
|
|
|
- blockBuilder_.setMessage(builderForValue.build());
|
|
|
- }
|
|
|
- bitField0_ |= 0x00000001;
|
|
|
- return this;
|
|
|
- }
|
|
|
- public Builder mergeBlock(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto value) {
|
|
|
- if (blockBuilder_ == null) {
|
|
|
- if (((bitField0_ & 0x00000001) == 0x00000001) &&
|
|
|
- block_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance()) {
|
|
|
- block_ =
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.newBuilder(block_).mergeFrom(value).buildPartial();
|
|
|
- } else {
|
|
|
- block_ = value;
|
|
|
- }
|
|
|
- onChanged();
|
|
|
- } else {
|
|
|
- blockBuilder_.mergeFrom(value);
|
|
|
- }
|
|
|
- bitField0_ |= 0x00000001;
|
|
|
- return this;
|
|
|
- }
|
|
|
- public Builder clearBlock() {
|
|
|
- if (blockBuilder_ == null) {
|
|
|
- block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance();
|
|
|
- onChanged();
|
|
|
- } else {
|
|
|
- blockBuilder_.clear();
|
|
|
- }
|
|
|
- bitField0_ = (bitField0_ & ~0x00000001);
|
|
|
- return this;
|
|
|
- }
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder getBlockBuilder() {
|
|
|
- bitField0_ |= 0x00000001;
|
|
|
- onChanged();
|
|
|
- return getBlockFieldBuilder().getBuilder();
|
|
|
- }
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder getBlockOrBuilder() {
|
|
|
- if (blockBuilder_ != null) {
|
|
|
- return blockBuilder_.getMessageOrBuilder();
|
|
|
- } else {
|
|
|
- return block_;
|
|
|
- }
|
|
|
- }
|
|
|
- private com.google.protobuf.SingleFieldBuilder<
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder>
|
|
|
- getBlockFieldBuilder() {
|
|
|
- if (blockBuilder_ == null) {
|
|
|
- blockBuilder_ = new com.google.protobuf.SingleFieldBuilder<
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder>(
|
|
|
- block_,
|
|
|
- getParentForChildren(),
|
|
|
- isClean());
|
|
|
- block_ = null;
|
|
|
- }
|
|
|
- return blockBuilder_;
|
|
|
- }
|
|
|
-
|
|
|
- // optional .BlockTokenIdentifierProto token = 2;
|
|
|
- private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto token_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto.getDefaultInstance();
|
|
|
- private com.google.protobuf.SingleFieldBuilder<
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProtoOrBuilder> tokenBuilder_;
|
|
|
- public boolean hasToken() {
|
|
|
- return ((bitField0_ & 0x00000002) == 0x00000002);
|
|
|
- }
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto getToken() {
|
|
|
- if (tokenBuilder_ == null) {
|
|
|
- return token_;
|
|
|
- } else {
|
|
|
- return tokenBuilder_.getMessage();
|
|
|
- }
|
|
|
- }
|
|
|
- public Builder setToken(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto value) {
|
|
|
- if (tokenBuilder_ == null) {
|
|
|
- if (value == null) {
|
|
|
- throw new NullPointerException();
|
|
|
- }
|
|
|
- token_ = value;
|
|
|
- onChanged();
|
|
|
- } else {
|
|
|
- tokenBuilder_.setMessage(value);
|
|
|
- }
|
|
|
- bitField0_ |= 0x00000002;
|
|
|
- return this;
|
|
|
- }
|
|
|
- public Builder setToken(
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto.Builder builderForValue) {
|
|
|
- if (tokenBuilder_ == null) {
|
|
|
- token_ = builderForValue.build();
|
|
|
- onChanged();
|
|
|
- } else {
|
|
|
- tokenBuilder_.setMessage(builderForValue.build());
|
|
|
- }
|
|
|
- bitField0_ |= 0x00000002;
|
|
|
- return this;
|
|
|
- }
|
|
|
- public Builder mergeToken(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto value) {
|
|
|
- if (tokenBuilder_ == null) {
|
|
|
- if (((bitField0_ & 0x00000002) == 0x00000002) &&
|
|
|
- token_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto.getDefaultInstance()) {
|
|
|
- token_ =
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto.newBuilder(token_).mergeFrom(value).buildPartial();
|
|
|
- } else {
|
|
|
- token_ = value;
|
|
|
- }
|
|
|
- onChanged();
|
|
|
- } else {
|
|
|
- tokenBuilder_.mergeFrom(value);
|
|
|
- }
|
|
|
- bitField0_ |= 0x00000002;
|
|
|
- return this;
|
|
|
- }
|
|
|
- public Builder clearToken() {
|
|
|
- if (tokenBuilder_ == null) {
|
|
|
- token_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto.getDefaultInstance();
|
|
|
- onChanged();
|
|
|
- } else {
|
|
|
- tokenBuilder_.clear();
|
|
|
- }
|
|
|
- bitField0_ = (bitField0_ & ~0x00000002);
|
|
|
- return this;
|
|
|
- }
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto.Builder getTokenBuilder() {
|
|
|
- bitField0_ |= 0x00000002;
|
|
|
- onChanged();
|
|
|
- return getTokenFieldBuilder().getBuilder();
|
|
|
- }
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProtoOrBuilder getTokenOrBuilder() {
|
|
|
- if (tokenBuilder_ != null) {
|
|
|
- return tokenBuilder_.getMessageOrBuilder();
|
|
|
- } else {
|
|
|
- return token_;
|
|
|
- }
|
|
|
- }
|
|
|
- private com.google.protobuf.SingleFieldBuilder<
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProtoOrBuilder>
|
|
|
- getTokenFieldBuilder() {
|
|
|
- if (tokenBuilder_ == null) {
|
|
|
- tokenBuilder_ = new com.google.protobuf.SingleFieldBuilder<
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProtoOrBuilder>(
|
|
|
- token_,
|
|
|
- getParentForChildren(),
|
|
|
- isClean());
|
|
|
- token_ = null;
|
|
|
- }
|
|
|
- return tokenBuilder_;
|
|
|
- }
|
|
|
-
|
|
|
- // @@protoc_insertion_point(builder_scope:BaseHeaderProto)
|
|
|
- }
|
|
|
-
|
|
|
- static {
|
|
|
- defaultInstance = new BaseHeaderProto(true);
|
|
|
- defaultInstance.initFields();
|
|
|
- }
|
|
|
-
|
|
|
- // @@protoc_insertion_point(class_scope:BaseHeaderProto)
|
|
|
- }
|
|
|
-
|
|
|
- public interface ClientOperationHeaderProtoOrBuilder
|
|
|
- extends com.google.protobuf.MessageOrBuilder {
|
|
|
-
|
|
|
- // required .BaseHeaderProto baseHeader = 1;
|
|
|
- boolean hasBaseHeader();
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto getBaseHeader();
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder getBaseHeaderOrBuilder();
|
|
|
-
|
|
|
- // required string clientName = 2;
|
|
|
- boolean hasClientName();
|
|
|
- String getClientName();
|
|
|
- }
|
|
|
- public static final class ClientOperationHeaderProto extends
|
|
|
- com.google.protobuf.GeneratedMessage
|
|
|
- implements ClientOperationHeaderProtoOrBuilder {
|
|
|
- // Use ClientOperationHeaderProto.newBuilder() to construct.
|
|
|
- private ClientOperationHeaderProto(Builder builder) {
|
|
|
- super(builder);
|
|
|
- }
|
|
|
- private ClientOperationHeaderProto(boolean noInit) {}
|
|
|
-
|
|
|
- private static final ClientOperationHeaderProto defaultInstance;
|
|
|
- public static ClientOperationHeaderProto getDefaultInstance() {
|
|
|
- return defaultInstance;
|
|
|
- }
|
|
|
-
|
|
|
- public ClientOperationHeaderProto getDefaultInstanceForType() {
|
|
|
- return defaultInstance;
|
|
|
- }
|
|
|
-
|
|
|
- public static final com.google.protobuf.Descriptors.Descriptor
|
|
|
- getDescriptor() {
|
|
|
- return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_ClientOperationHeaderProto_descriptor;
|
|
|
- }
|
|
|
-
|
|
|
- protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
|
|
- internalGetFieldAccessorTable() {
|
|
|
- return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_ClientOperationHeaderProto_fieldAccessorTable;
|
|
|
- }
|
|
|
-
|
|
|
- private int bitField0_;
|
|
|
- // required .BaseHeaderProto baseHeader = 1;
|
|
|
- public static final int BASEHEADER_FIELD_NUMBER = 1;
|
|
|
- private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto baseHeader_;
|
|
|
- public boolean hasBaseHeader() {
|
|
|
- return ((bitField0_ & 0x00000001) == 0x00000001);
|
|
|
- }
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto getBaseHeader() {
|
|
|
- return baseHeader_;
|
|
|
- }
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder getBaseHeaderOrBuilder() {
|
|
|
- return baseHeader_;
|
|
|
- }
|
|
|
-
|
|
|
- // required string clientName = 2;
|
|
|
- public static final int CLIENTNAME_FIELD_NUMBER = 2;
|
|
|
- private java.lang.Object clientName_;
|
|
|
- public boolean hasClientName() {
|
|
|
- return ((bitField0_ & 0x00000002) == 0x00000002);
|
|
|
- }
|
|
|
- public String getClientName() {
|
|
|
- java.lang.Object ref = clientName_;
|
|
|
- if (ref instanceof String) {
|
|
|
- return (String) ref;
|
|
|
- } else {
|
|
|
- com.google.protobuf.ByteString bs =
|
|
|
- (com.google.protobuf.ByteString) ref;
|
|
|
- String s = bs.toStringUtf8();
|
|
|
- if (com.google.protobuf.Internal.isValidUtf8(bs)) {
|
|
|
- clientName_ = s;
|
|
|
- }
|
|
|
- return s;
|
|
|
- }
|
|
|
- }
|
|
|
- private com.google.protobuf.ByteString getClientNameBytes() {
|
|
|
- java.lang.Object ref = clientName_;
|
|
|
- if (ref instanceof String) {
|
|
|
- com.google.protobuf.ByteString b =
|
|
|
- com.google.protobuf.ByteString.copyFromUtf8((String) ref);
|
|
|
- clientName_ = b;
|
|
|
- return b;
|
|
|
- } else {
|
|
|
- return (com.google.protobuf.ByteString) ref;
|
|
|
- }
|
|
|
- }
|
|
|
-
|
|
|
- private void initFields() {
|
|
|
- baseHeader_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance();
|
|
|
- clientName_ = "";
|
|
|
- }
|
|
|
- private byte memoizedIsInitialized = -1;
|
|
|
- public final boolean isInitialized() {
|
|
|
- byte isInitialized = memoizedIsInitialized;
|
|
|
- if (isInitialized != -1) return isInitialized == 1;
|
|
|
-
|
|
|
- if (!hasBaseHeader()) {
|
|
|
- memoizedIsInitialized = 0;
|
|
|
- return false;
|
|
|
- }
|
|
|
- if (!hasClientName()) {
|
|
|
- memoizedIsInitialized = 0;
|
|
|
- return false;
|
|
|
- }
|
|
|
- if (!getBaseHeader().isInitialized()) {
|
|
|
- memoizedIsInitialized = 0;
|
|
|
- return false;
|
|
|
- }
|
|
|
- memoizedIsInitialized = 1;
|
|
|
- return true;
|
|
|
- }
|
|
|
-
|
|
|
- public void writeTo(com.google.protobuf.CodedOutputStream output)
|
|
|
- throws java.io.IOException {
|
|
|
- getSerializedSize();
|
|
|
- if (((bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
- output.writeMessage(1, baseHeader_);
|
|
|
- }
|
|
|
- if (((bitField0_ & 0x00000002) == 0x00000002)) {
|
|
|
- output.writeBytes(2, getClientNameBytes());
|
|
|
- }
|
|
|
- getUnknownFields().writeTo(output);
|
|
|
- }
|
|
|
-
|
|
|
- private int memoizedSerializedSize = -1;
|
|
|
- public int getSerializedSize() {
|
|
|
- int size = memoizedSerializedSize;
|
|
|
- if (size != -1) return size;
|
|
|
-
|
|
|
- size = 0;
|
|
|
- if (((bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
- size += com.google.protobuf.CodedOutputStream
|
|
|
- .computeMessageSize(1, baseHeader_);
|
|
|
- }
|
|
|
- if (((bitField0_ & 0x00000002) == 0x00000002)) {
|
|
|
- size += com.google.protobuf.CodedOutputStream
|
|
|
- .computeBytesSize(2, getClientNameBytes());
|
|
|
- }
|
|
|
- size += getUnknownFields().getSerializedSize();
|
|
|
- memoizedSerializedSize = size;
|
|
|
- return size;
|
|
|
- }
|
|
|
-
|
|
|
- private static final long serialVersionUID = 0L;
|
|
|
- @java.lang.Override
|
|
|
- protected java.lang.Object writeReplace()
|
|
|
- throws java.io.ObjectStreamException {
|
|
|
- return super.writeReplace();
|
|
|
- }
|
|
|
-
|
|
|
- @java.lang.Override
|
|
|
- public boolean equals(final java.lang.Object obj) {
|
|
|
- if (obj == this) {
|
|
|
- return true;
|
|
|
- }
|
|
|
- if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto)) {
|
|
|
- return super.equals(obj);
|
|
|
- }
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto other = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto) obj;
|
|
|
-
|
|
|
- boolean result = true;
|
|
|
- result = result && (hasBaseHeader() == other.hasBaseHeader());
|
|
|
- if (hasBaseHeader()) {
|
|
|
- result = result && getBaseHeader()
|
|
|
- .equals(other.getBaseHeader());
|
|
|
- }
|
|
|
- result = result && (hasClientName() == other.hasClientName());
|
|
|
- if (hasClientName()) {
|
|
|
- result = result && getClientName()
|
|
|
- .equals(other.getClientName());
|
|
|
- }
|
|
|
- result = result &&
|
|
|
- getUnknownFields().equals(other.getUnknownFields());
|
|
|
- return result;
|
|
|
- }
|
|
|
-
|
|
|
- @java.lang.Override
|
|
|
- public int hashCode() {
|
|
|
- int hash = 41;
|
|
|
- hash = (19 * hash) + getDescriptorForType().hashCode();
|
|
|
- if (hasBaseHeader()) {
|
|
|
- hash = (37 * hash) + BASEHEADER_FIELD_NUMBER;
|
|
|
- hash = (53 * hash) + getBaseHeader().hashCode();
|
|
|
- }
|
|
|
- if (hasClientName()) {
|
|
|
- hash = (37 * hash) + CLIENTNAME_FIELD_NUMBER;
|
|
|
- hash = (53 * hash) + getClientName().hashCode();
|
|
|
- }
|
|
|
- hash = (29 * hash) + getUnknownFields().hashCode();
|
|
|
- return hash;
|
|
|
- }
|
|
|
-
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto parseFrom(
|
|
|
- com.google.protobuf.ByteString data)
|
|
|
- throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
- return newBuilder().mergeFrom(data).buildParsed();
|
|
|
- }
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto parseFrom(
|
|
|
- com.google.protobuf.ByteString data,
|
|
|
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
- throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
- return newBuilder().mergeFrom(data, extensionRegistry)
|
|
|
- .buildParsed();
|
|
|
- }
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto parseFrom(byte[] data)
|
|
|
- throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
- return newBuilder().mergeFrom(data).buildParsed();
|
|
|
- }
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto parseFrom(
|
|
|
- byte[] data,
|
|
|
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
- throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
- return newBuilder().mergeFrom(data, extensionRegistry)
|
|
|
- .buildParsed();
|
|
|
- }
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto parseFrom(java.io.InputStream input)
|
|
|
- throws java.io.IOException {
|
|
|
- return newBuilder().mergeFrom(input).buildParsed();
|
|
|
- }
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto parseFrom(
|
|
|
- java.io.InputStream input,
|
|
|
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
- throws java.io.IOException {
|
|
|
- return newBuilder().mergeFrom(input, extensionRegistry)
|
|
|
- .buildParsed();
|
|
|
- }
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto parseDelimitedFrom(java.io.InputStream input)
|
|
|
- throws java.io.IOException {
|
|
|
- Builder builder = newBuilder();
|
|
|
- if (builder.mergeDelimitedFrom(input)) {
|
|
|
- return builder.buildParsed();
|
|
|
- } else {
|
|
|
- return null;
|
|
|
- }
|
|
|
- }
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto parseDelimitedFrom(
|
|
|
- java.io.InputStream input,
|
|
|
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
- throws java.io.IOException {
|
|
|
- Builder builder = newBuilder();
|
|
|
- if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
|
|
|
- return builder.buildParsed();
|
|
|
- } else {
|
|
|
- return null;
|
|
|
- }
|
|
|
- }
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto parseFrom(
|
|
|
- com.google.protobuf.CodedInputStream input)
|
|
|
- throws java.io.IOException {
|
|
|
- return newBuilder().mergeFrom(input).buildParsed();
|
|
|
- }
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto parseFrom(
|
|
|
- com.google.protobuf.CodedInputStream input,
|
|
|
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
- throws java.io.IOException {
|
|
|
- return newBuilder().mergeFrom(input, extensionRegistry)
|
|
|
- .buildParsed();
|
|
|
- }
|
|
|
-
|
|
|
- public static Builder newBuilder() { return Builder.create(); }
|
|
|
- public Builder newBuilderForType() { return newBuilder(); }
|
|
|
- public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto prototype) {
|
|
|
- return newBuilder().mergeFrom(prototype);
|
|
|
- }
|
|
|
- public Builder toBuilder() { return newBuilder(this); }
|
|
|
-
|
|
|
- @java.lang.Override
|
|
|
- protected Builder newBuilderForType(
|
|
|
- com.google.protobuf.GeneratedMessage.BuilderParent parent) {
|
|
|
- Builder builder = new Builder(parent);
|
|
|
- return builder;
|
|
|
- }
|
|
|
- public static final class Builder extends
|
|
|
- com.google.protobuf.GeneratedMessage.Builder<Builder>
|
|
|
- implements org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProtoOrBuilder {
|
|
|
- public static final com.google.protobuf.Descriptors.Descriptor
|
|
|
- getDescriptor() {
|
|
|
- return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_ClientOperationHeaderProto_descriptor;
|
|
|
- }
|
|
|
-
|
|
|
- protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
|
|
- internalGetFieldAccessorTable() {
|
|
|
- return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_ClientOperationHeaderProto_fieldAccessorTable;
|
|
|
- }
|
|
|
-
|
|
|
- // Construct using org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.newBuilder()
|
|
|
- private Builder() {
|
|
|
- maybeForceBuilderInitialization();
|
|
|
- }
|
|
|
-
|
|
|
- private Builder(BuilderParent parent) {
|
|
|
- super(parent);
|
|
|
- maybeForceBuilderInitialization();
|
|
|
- }
|
|
|
- private void maybeForceBuilderInitialization() {
|
|
|
- if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
|
|
|
- getBaseHeaderFieldBuilder();
|
|
|
- }
|
|
|
- }
|
|
|
- private static Builder create() {
|
|
|
- return new Builder();
|
|
|
- }
|
|
|
-
|
|
|
- public Builder clear() {
|
|
|
- super.clear();
|
|
|
- if (baseHeaderBuilder_ == null) {
|
|
|
- baseHeader_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance();
|
|
|
- } else {
|
|
|
- baseHeaderBuilder_.clear();
|
|
|
- }
|
|
|
- bitField0_ = (bitField0_ & ~0x00000001);
|
|
|
- clientName_ = "";
|
|
|
- bitField0_ = (bitField0_ & ~0x00000002);
|
|
|
- return this;
|
|
|
- }
|
|
|
-
|
|
|
- public Builder clone() {
|
|
|
- return create().mergeFrom(buildPartial());
|
|
|
- }
|
|
|
-
|
|
|
- public com.google.protobuf.Descriptors.Descriptor
|
|
|
- getDescriptorForType() {
|
|
|
- return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.getDescriptor();
|
|
|
- }
|
|
|
-
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto getDefaultInstanceForType() {
|
|
|
- return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.getDefaultInstance();
|
|
|
- }
|
|
|
-
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto build() {
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto result = buildPartial();
|
|
|
- if (!result.isInitialized()) {
|
|
|
- throw newUninitializedMessageException(result);
|
|
|
- }
|
|
|
- return result;
|
|
|
- }
|
|
|
-
|
|
|
- private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto buildParsed()
|
|
|
- throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto result = buildPartial();
|
|
|
- if (!result.isInitialized()) {
|
|
|
- throw newUninitializedMessageException(
|
|
|
- result).asInvalidProtocolBufferException();
|
|
|
- }
|
|
|
- return result;
|
|
|
- }
|
|
|
-
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto buildPartial() {
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto result = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto(this);
|
|
|
- int from_bitField0_ = bitField0_;
|
|
|
- int to_bitField0_ = 0;
|
|
|
- if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
- to_bitField0_ |= 0x00000001;
|
|
|
- }
|
|
|
- if (baseHeaderBuilder_ == null) {
|
|
|
- result.baseHeader_ = baseHeader_;
|
|
|
- } else {
|
|
|
- result.baseHeader_ = baseHeaderBuilder_.build();
|
|
|
- }
|
|
|
- if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
|
|
|
- to_bitField0_ |= 0x00000002;
|
|
|
- }
|
|
|
- result.clientName_ = clientName_;
|
|
|
- result.bitField0_ = to_bitField0_;
|
|
|
- onBuilt();
|
|
|
- return result;
|
|
|
- }
|
|
|
-
|
|
|
- public Builder mergeFrom(com.google.protobuf.Message other) {
|
|
|
- if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto) {
|
|
|
- return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto)other);
|
|
|
- } else {
|
|
|
- super.mergeFrom(other);
|
|
|
- return this;
|
|
|
- }
|
|
|
- }
|
|
|
-
|
|
|
- public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto other) {
|
|
|
- if (other == org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.getDefaultInstance()) return this;
|
|
|
- if (other.hasBaseHeader()) {
|
|
|
- mergeBaseHeader(other.getBaseHeader());
|
|
|
- }
|
|
|
- if (other.hasClientName()) {
|
|
|
- setClientName(other.getClientName());
|
|
|
- }
|
|
|
- this.mergeUnknownFields(other.getUnknownFields());
|
|
|
- return this;
|
|
|
- }
|
|
|
-
|
|
|
- public final boolean isInitialized() {
|
|
|
- if (!hasBaseHeader()) {
|
|
|
-
|
|
|
- return false;
|
|
|
- }
|
|
|
- if (!hasClientName()) {
|
|
|
-
|
|
|
- return false;
|
|
|
- }
|
|
|
- if (!getBaseHeader().isInitialized()) {
|
|
|
-
|
|
|
- return false;
|
|
|
- }
|
|
|
- return true;
|
|
|
- }
|
|
|
-
|
|
|
- public Builder mergeFrom(
|
|
|
- com.google.protobuf.CodedInputStream input,
|
|
|
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
- throws java.io.IOException {
|
|
|
- com.google.protobuf.UnknownFieldSet.Builder unknownFields =
|
|
|
- com.google.protobuf.UnknownFieldSet.newBuilder(
|
|
|
- this.getUnknownFields());
|
|
|
- while (true) {
|
|
|
- int tag = input.readTag();
|
|
|
- switch (tag) {
|
|
|
- case 0:
|
|
|
- this.setUnknownFields(unknownFields.build());
|
|
|
- onChanged();
|
|
|
- return this;
|
|
|
- default: {
|
|
|
- if (!parseUnknownField(input, unknownFields,
|
|
|
- extensionRegistry, tag)) {
|
|
|
- this.setUnknownFields(unknownFields.build());
|
|
|
- onChanged();
|
|
|
- return this;
|
|
|
- }
|
|
|
- break;
|
|
|
- }
|
|
|
- case 10: {
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.newBuilder();
|
|
|
- if (hasBaseHeader()) {
|
|
|
- subBuilder.mergeFrom(getBaseHeader());
|
|
|
- }
|
|
|
- input.readMessage(subBuilder, extensionRegistry);
|
|
|
- setBaseHeader(subBuilder.buildPartial());
|
|
|
- break;
|
|
|
- }
|
|
|
- case 18: {
|
|
|
- bitField0_ |= 0x00000002;
|
|
|
- clientName_ = input.readBytes();
|
|
|
- break;
|
|
|
- }
|
|
|
- }
|
|
|
- }
|
|
|
- }
|
|
|
-
|
|
|
- private int bitField0_;
|
|
|
-
|
|
|
- // required .BaseHeaderProto baseHeader = 1;
|
|
|
- private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto baseHeader_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance();
|
|
|
- private com.google.protobuf.SingleFieldBuilder<
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder> baseHeaderBuilder_;
|
|
|
- public boolean hasBaseHeader() {
|
|
|
- return ((bitField0_ & 0x00000001) == 0x00000001);
|
|
|
- }
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto getBaseHeader() {
|
|
|
- if (baseHeaderBuilder_ == null) {
|
|
|
- return baseHeader_;
|
|
|
- } else {
|
|
|
- return baseHeaderBuilder_.getMessage();
|
|
|
- }
|
|
|
- }
|
|
|
- public Builder setBaseHeader(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto value) {
|
|
|
- if (baseHeaderBuilder_ == null) {
|
|
|
- if (value == null) {
|
|
|
- throw new NullPointerException();
|
|
|
- }
|
|
|
- baseHeader_ = value;
|
|
|
- onChanged();
|
|
|
- } else {
|
|
|
- baseHeaderBuilder_.setMessage(value);
|
|
|
- }
|
|
|
- bitField0_ |= 0x00000001;
|
|
|
- return this;
|
|
|
- }
|
|
|
- public Builder setBaseHeader(
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder builderForValue) {
|
|
|
- if (baseHeaderBuilder_ == null) {
|
|
|
- baseHeader_ = builderForValue.build();
|
|
|
- onChanged();
|
|
|
- } else {
|
|
|
- baseHeaderBuilder_.setMessage(builderForValue.build());
|
|
|
- }
|
|
|
- bitField0_ |= 0x00000001;
|
|
|
- return this;
|
|
|
- }
|
|
|
- public Builder mergeBaseHeader(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto value) {
|
|
|
- if (baseHeaderBuilder_ == null) {
|
|
|
- if (((bitField0_ & 0x00000001) == 0x00000001) &&
|
|
|
- baseHeader_ != org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance()) {
|
|
|
- baseHeader_ =
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.newBuilder(baseHeader_).mergeFrom(value).buildPartial();
|
|
|
- } else {
|
|
|
- baseHeader_ = value;
|
|
|
- }
|
|
|
- onChanged();
|
|
|
- } else {
|
|
|
- baseHeaderBuilder_.mergeFrom(value);
|
|
|
- }
|
|
|
- bitField0_ |= 0x00000001;
|
|
|
- return this;
|
|
|
- }
|
|
|
- public Builder clearBaseHeader() {
|
|
|
- if (baseHeaderBuilder_ == null) {
|
|
|
- baseHeader_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance();
|
|
|
- onChanged();
|
|
|
- } else {
|
|
|
- baseHeaderBuilder_.clear();
|
|
|
- }
|
|
|
- bitField0_ = (bitField0_ & ~0x00000001);
|
|
|
- return this;
|
|
|
- }
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder getBaseHeaderBuilder() {
|
|
|
- bitField0_ |= 0x00000001;
|
|
|
- onChanged();
|
|
|
- return getBaseHeaderFieldBuilder().getBuilder();
|
|
|
- }
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder getBaseHeaderOrBuilder() {
|
|
|
- if (baseHeaderBuilder_ != null) {
|
|
|
- return baseHeaderBuilder_.getMessageOrBuilder();
|
|
|
- } else {
|
|
|
- return baseHeader_;
|
|
|
- }
|
|
|
- }
|
|
|
- private com.google.protobuf.SingleFieldBuilder<
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder>
|
|
|
- getBaseHeaderFieldBuilder() {
|
|
|
- if (baseHeaderBuilder_ == null) {
|
|
|
- baseHeaderBuilder_ = new com.google.protobuf.SingleFieldBuilder<
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder>(
|
|
|
- baseHeader_,
|
|
|
- getParentForChildren(),
|
|
|
- isClean());
|
|
|
- baseHeader_ = null;
|
|
|
- }
|
|
|
- return baseHeaderBuilder_;
|
|
|
- }
|
|
|
-
|
|
|
- // required string clientName = 2;
|
|
|
- private java.lang.Object clientName_ = "";
|
|
|
- public boolean hasClientName() {
|
|
|
- return ((bitField0_ & 0x00000002) == 0x00000002);
|
|
|
- }
|
|
|
- public String getClientName() {
|
|
|
- java.lang.Object ref = clientName_;
|
|
|
- if (!(ref instanceof String)) {
|
|
|
- String s = ((com.google.protobuf.ByteString) ref).toStringUtf8();
|
|
|
- clientName_ = s;
|
|
|
- return s;
|
|
|
- } else {
|
|
|
- return (String) ref;
|
|
|
- }
|
|
|
- }
|
|
|
- public Builder setClientName(String value) {
|
|
|
- if (value == null) {
|
|
|
- throw new NullPointerException();
|
|
|
- }
|
|
|
- bitField0_ |= 0x00000002;
|
|
|
- clientName_ = value;
|
|
|
- onChanged();
|
|
|
- return this;
|
|
|
- }
|
|
|
- public Builder clearClientName() {
|
|
|
- bitField0_ = (bitField0_ & ~0x00000002);
|
|
|
- clientName_ = getDefaultInstance().getClientName();
|
|
|
- onChanged();
|
|
|
- return this;
|
|
|
- }
|
|
|
- void setClientName(com.google.protobuf.ByteString value) {
|
|
|
- bitField0_ |= 0x00000002;
|
|
|
- clientName_ = value;
|
|
|
- onChanged();
|
|
|
- }
|
|
|
-
|
|
|
- // @@protoc_insertion_point(builder_scope:ClientOperationHeaderProto)
|
|
|
- }
|
|
|
-
|
|
|
- static {
|
|
|
- defaultInstance = new ClientOperationHeaderProto(true);
|
|
|
- defaultInstance.initFields();
|
|
|
- }
|
|
|
-
|
|
|
- // @@protoc_insertion_point(class_scope:ClientOperationHeaderProto)
|
|
|
- }
|
|
|
-
|
|
|
- public interface OpReadBlockProtoOrBuilder
|
|
|
- extends com.google.protobuf.MessageOrBuilder {
|
|
|
-
|
|
|
- // required .ClientOperationHeaderProto header = 1;
|
|
|
- boolean hasHeader();
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto getHeader();
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProtoOrBuilder getHeaderOrBuilder();
|
|
|
-
|
|
|
- // required uint64 offset = 2;
|
|
|
- boolean hasOffset();
|
|
|
- long getOffset();
|
|
|
-
|
|
|
- // required uint64 len = 3;
|
|
|
- boolean hasLen();
|
|
|
- long getLen();
|
|
|
- }
|
|
|
- public static final class OpReadBlockProto extends
|
|
|
- com.google.protobuf.GeneratedMessage
|
|
|
- implements OpReadBlockProtoOrBuilder {
|
|
|
- // Use OpReadBlockProto.newBuilder() to construct.
|
|
|
- private OpReadBlockProto(Builder builder) {
|
|
|
- super(builder);
|
|
|
- }
|
|
|
- private OpReadBlockProto(boolean noInit) {}
|
|
|
-
|
|
|
- private static final OpReadBlockProto defaultInstance;
|
|
|
- public static OpReadBlockProto getDefaultInstance() {
|
|
|
- return defaultInstance;
|
|
|
- }
|
|
|
-
|
|
|
- public OpReadBlockProto getDefaultInstanceForType() {
|
|
|
- return defaultInstance;
|
|
|
- }
|
|
|
-
|
|
|
- public static final com.google.protobuf.Descriptors.Descriptor
|
|
|
- getDescriptor() {
|
|
|
- return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_OpReadBlockProto_descriptor;
|
|
|
- }
|
|
|
-
|
|
|
- protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
|
|
- internalGetFieldAccessorTable() {
|
|
|
- return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_OpReadBlockProto_fieldAccessorTable;
|
|
|
- }
|
|
|
-
|
|
|
- private int bitField0_;
|
|
|
- // required .ClientOperationHeaderProto header = 1;
|
|
|
- public static final int HEADER_FIELD_NUMBER = 1;
|
|
|
- private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto header_;
|
|
|
- public boolean hasHeader() {
|
|
|
- return ((bitField0_ & 0x00000001) == 0x00000001);
|
|
|
- }
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto getHeader() {
|
|
|
- return header_;
|
|
|
- }
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProtoOrBuilder getHeaderOrBuilder() {
|
|
|
- return header_;
|
|
|
- }
|
|
|
-
- // required uint64 offset = 2;
- public static final int OFFSET_FIELD_NUMBER = 2;
- private long offset_;
- public boolean hasOffset() {
- return ((bitField0_ & 0x00000002) == 0x00000002);
- }
- public long getOffset() {
- return offset_;
- }
-
- // required uint64 len = 3;
- public static final int LEN_FIELD_NUMBER = 3;
- private long len_;
- public boolean hasLen() {
- return ((bitField0_ & 0x00000004) == 0x00000004);
- }
- public long getLen() {
- return len_;
- }
-
- private void initFields() {
- header_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.getDefaultInstance();
- offset_ = 0L;
- len_ = 0L;
- }
- private byte memoizedIsInitialized = -1;
|
|
|
- public final boolean isInitialized() {
|
|
|
- byte isInitialized = memoizedIsInitialized;
|
|
|
- if (isInitialized != -1) return isInitialized == 1;
|
|
|
-
|
|
|
- if (!hasHeader()) {
|
|
|
- memoizedIsInitialized = 0;
|
|
|
- return false;
|
|
|
- }
|
|
|
- if (!hasOffset()) {
|
|
|
- memoizedIsInitialized = 0;
|
|
|
- return false;
|
|
|
- }
|
|
|
- if (!hasLen()) {
|
|
|
- memoizedIsInitialized = 0;
|
|
|
- return false;
|
|
|
- }
|
|
|
- if (!getHeader().isInitialized()) {
|
|
|
- memoizedIsInitialized = 0;
|
|
|
- return false;
|
|
|
- }
|
|
|
- memoizedIsInitialized = 1;
|
|
|
- return true;
|
|
|
- }
|
|
|
-
|
|
|
- public void writeTo(com.google.protobuf.CodedOutputStream output)
|
|
|
- throws java.io.IOException {
|
|
|
- getSerializedSize();
|
|
|
- if (((bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
- output.writeMessage(1, header_);
|
|
|
- }
|
|
|
- if (((bitField0_ & 0x00000002) == 0x00000002)) {
|
|
|
- output.writeUInt64(2, offset_);
|
|
|
- }
|
|
|
- if (((bitField0_ & 0x00000004) == 0x00000004)) {
|
|
|
- output.writeUInt64(3, len_);
|
|
|
- }
|
|
|
- getUnknownFields().writeTo(output);
|
|
|
- }
|
|
|
-
|
|
|
- private int memoizedSerializedSize = -1;
|
|
|
- public int getSerializedSize() {
|
|
|
- int size = memoizedSerializedSize;
|
|
|
- if (size != -1) return size;
|
|
|
-
|
|
|
- size = 0;
|
|
|
- if (((bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
- size += com.google.protobuf.CodedOutputStream
|
|
|
- .computeMessageSize(1, header_);
|
|
|
- }
|
|
|
- if (((bitField0_ & 0x00000002) == 0x00000002)) {
|
|
|
- size += com.google.protobuf.CodedOutputStream
|
|
|
- .computeUInt64Size(2, offset_);
|
|
|
- }
|
|
|
- if (((bitField0_ & 0x00000004) == 0x00000004)) {
|
|
|
- size += com.google.protobuf.CodedOutputStream
|
|
|
- .computeUInt64Size(3, len_);
|
|
|
- }
|
|
|
- size += getUnknownFields().getSerializedSize();
|
|
|
- memoizedSerializedSize = size;
|
|
|
- return size;
|
|
|
- }
|
|
|
-
|
|
|
- private static final long serialVersionUID = 0L;
|
|
|
- @java.lang.Override
|
|
|
- protected java.lang.Object writeReplace()
|
|
|
- throws java.io.ObjectStreamException {
|
|
|
- return super.writeReplace();
|
|
|
- }
|
|
|
-
|
|
|
- @java.lang.Override
|
|
|
- public boolean equals(final java.lang.Object obj) {
|
|
|
- if (obj == this) {
|
|
|
- return true;
|
|
|
- }
|
|
|
- if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto)) {
|
|
|
- return super.equals(obj);
|
|
|
- }
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto other = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto) obj;
|
|
|
-
|
|
|
- boolean result = true;
|
|
|
- result = result && (hasHeader() == other.hasHeader());
|
|
|
- if (hasHeader()) {
|
|
|
- result = result && getHeader()
|
|
|
- .equals(other.getHeader());
|
|
|
- }
|
|
|
- result = result && (hasOffset() == other.hasOffset());
|
|
|
- if (hasOffset()) {
|
|
|
- result = result && (getOffset()
|
|
|
- == other.getOffset());
|
|
|
- }
|
|
|
- result = result && (hasLen() == other.hasLen());
|
|
|
- if (hasLen()) {
|
|
|
- result = result && (getLen()
|
|
|
- == other.getLen());
|
|
|
- }
|
|
|
- result = result &&
|
|
|
- getUnknownFields().equals(other.getUnknownFields());
|
|
|
- return result;
|
|
|
- }
|
|
|
-
|
|
|
- @java.lang.Override
|
|
|
- public int hashCode() {
|
|
|
- int hash = 41;
|
|
|
- hash = (19 * hash) + getDescriptorForType().hashCode();
|
|
|
- if (hasHeader()) {
|
|
|
- hash = (37 * hash) + HEADER_FIELD_NUMBER;
|
|
|
- hash = (53 * hash) + getHeader().hashCode();
|
|
|
- }
|
|
|
- if (hasOffset()) {
|
|
|
- hash = (37 * hash) + OFFSET_FIELD_NUMBER;
|
|
|
- hash = (53 * hash) + hashLong(getOffset());
|
|
|
- }
|
|
|
- if (hasLen()) {
|
|
|
- hash = (37 * hash) + LEN_FIELD_NUMBER;
|
|
|
- hash = (53 * hash) + hashLong(getLen());
|
|
|
- }
|
|
|
- hash = (29 * hash) + getUnknownFields().hashCode();
|
|
|
- return hash;
|
|
|
- }
|
|
|
-
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto parseFrom(
|
|
|
- com.google.protobuf.ByteString data)
|
|
|
- throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
- return newBuilder().mergeFrom(data).buildParsed();
|
|
|
- }
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto parseFrom(
|
|
|
- com.google.protobuf.ByteString data,
|
|
|
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
- throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
- return newBuilder().mergeFrom(data, extensionRegistry)
|
|
|
- .buildParsed();
|
|
|
- }
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto parseFrom(byte[] data)
|
|
|
- throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
- return newBuilder().mergeFrom(data).buildParsed();
|
|
|
- }
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto parseFrom(
|
|
|
- byte[] data,
|
|
|
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
- throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
- return newBuilder().mergeFrom(data, extensionRegistry)
|
|
|
- .buildParsed();
|
|
|
- }
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto parseFrom(java.io.InputStream input)
|
|
|
- throws java.io.IOException {
|
|
|
- return newBuilder().mergeFrom(input).buildParsed();
|
|
|
- }
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto parseFrom(
|
|
|
- java.io.InputStream input,
|
|
|
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
- throws java.io.IOException {
|
|
|
- return newBuilder().mergeFrom(input, extensionRegistry)
|
|
|
- .buildParsed();
|
|
|
- }
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto parseDelimitedFrom(java.io.InputStream input)
|
|
|
- throws java.io.IOException {
|
|
|
- Builder builder = newBuilder();
|
|
|
- if (builder.mergeDelimitedFrom(input)) {
|
|
|
- return builder.buildParsed();
|
|
|
- } else {
|
|
|
- return null;
|
|
|
- }
|
|
|
- }
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto parseDelimitedFrom(
|
|
|
- java.io.InputStream input,
|
|
|
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
- throws java.io.IOException {
|
|
|
- Builder builder = newBuilder();
|
|
|
- if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
|
|
|
- return builder.buildParsed();
|
|
|
- } else {
|
|
|
- return null;
|
|
|
- }
|
|
|
- }
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto parseFrom(
|
|
|
- com.google.protobuf.CodedInputStream input)
|
|
|
- throws java.io.IOException {
|
|
|
- return newBuilder().mergeFrom(input).buildParsed();
|
|
|
- }
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto parseFrom(
|
|
|
- com.google.protobuf.CodedInputStream input,
|
|
|
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
- throws java.io.IOException {
|
|
|
- return newBuilder().mergeFrom(input, extensionRegistry)
|
|
|
- .buildParsed();
|
|
|
- }
|
|
|
-
|
|
|
- public static Builder newBuilder() { return Builder.create(); }
|
|
|
- public Builder newBuilderForType() { return newBuilder(); }
|
|
|
- public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto prototype) {
|
|
|
- return newBuilder().mergeFrom(prototype);
|
|
|
- }
|
|
|
- public Builder toBuilder() { return newBuilder(this); }
|
|
|
-
|
|
|
- @java.lang.Override
|
|
|
- protected Builder newBuilderForType(
|
|
|
- com.google.protobuf.GeneratedMessage.BuilderParent parent) {
|
|
|
- Builder builder = new Builder(parent);
|
|
|
- return builder;
|
|
|
- }
|
|
|
- public static final class Builder extends
|
|
|
- com.google.protobuf.GeneratedMessage.Builder<Builder>
|
|
|
- implements org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProtoOrBuilder {
|
|
|
- public static final com.google.protobuf.Descriptors.Descriptor
|
|
|
- getDescriptor() {
|
|
|
- return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_OpReadBlockProto_descriptor;
|
|
|
- }
|
|
|
-
|
|
|
- protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
|
|
- internalGetFieldAccessorTable() {
|
|
|
- return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_OpReadBlockProto_fieldAccessorTable;
|
|
|
- }
|
|
|
-
|
|
|
- // Construct using org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto.newBuilder()
|
|
|
- private Builder() {
|
|
|
- maybeForceBuilderInitialization();
|
|
|
- }
|
|
|
-
|
|
|
- private Builder(BuilderParent parent) {
|
|
|
- super(parent);
|
|
|
- maybeForceBuilderInitialization();
|
|
|
- }
|
|
|
- private void maybeForceBuilderInitialization() {
|
|
|
- if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
|
|
|
- getHeaderFieldBuilder();
|
|
|
- }
|
|
|
- }
|
|
|
- private static Builder create() {
|
|
|
- return new Builder();
|
|
|
- }
|
|
|
-
|
|
|
- public Builder clear() {
|
|
|
- super.clear();
|
|
|
- if (headerBuilder_ == null) {
|
|
|
- header_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.getDefaultInstance();
|
|
|
- } else {
|
|
|
- headerBuilder_.clear();
|
|
|
- }
|
|
|
- bitField0_ = (bitField0_ & ~0x00000001);
|
|
|
- offset_ = 0L;
|
|
|
- bitField0_ = (bitField0_ & ~0x00000002);
|
|
|
- len_ = 0L;
|
|
|
- bitField0_ = (bitField0_ & ~0x00000004);
|
|
|
- return this;
|
|
|
- }
|
|
|
-
|
|
|
- public Builder clone() {
|
|
|
- return create().mergeFrom(buildPartial());
|
|
|
- }
|
|
|
-
|
|
|
- public com.google.protobuf.Descriptors.Descriptor
|
|
|
- getDescriptorForType() {
|
|
|
- return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto.getDescriptor();
|
|
|
- }
|
|
|
-
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto getDefaultInstanceForType() {
|
|
|
- return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto.getDefaultInstance();
|
|
|
- }
|
|
|
-
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto build() {
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto result = buildPartial();
|
|
|
- if (!result.isInitialized()) {
|
|
|
- throw newUninitializedMessageException(result);
|
|
|
- }
|
|
|
- return result;
|
|
|
- }
|
|
|
-
|
|
|
- private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto buildParsed()
|
|
|
- throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto result = buildPartial();
|
|
|
- if (!result.isInitialized()) {
|
|
|
- throw newUninitializedMessageException(
|
|
|
- result).asInvalidProtocolBufferException();
|
|
|
- }
|
|
|
- return result;
|
|
|
- }
|
|
|
-
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto buildPartial() {
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto result = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto(this);
|
|
|
- int from_bitField0_ = bitField0_;
|
|
|
- int to_bitField0_ = 0;
|
|
|
- if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
- to_bitField0_ |= 0x00000001;
|
|
|
- }
|
|
|
- if (headerBuilder_ == null) {
|
|
|
- result.header_ = header_;
|
|
|
- } else {
|
|
|
- result.header_ = headerBuilder_.build();
|
|
|
- }
|
|
|
- if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
|
|
|
- to_bitField0_ |= 0x00000002;
|
|
|
- }
|
|
|
- result.offset_ = offset_;
|
|
|
- if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
|
|
|
- to_bitField0_ |= 0x00000004;
|
|
|
- }
|
|
|
- result.len_ = len_;
|
|
|
- result.bitField0_ = to_bitField0_;
|
|
|
- onBuilt();
|
|
|
- return result;
|
|
|
- }
|
|
|
-
|
|
|
- public Builder mergeFrom(com.google.protobuf.Message other) {
|
|
|
- if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto) {
|
|
|
- return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto)other);
|
|
|
- } else {
|
|
|
- super.mergeFrom(other);
|
|
|
- return this;
|
|
|
- }
|
|
|
- }
|
|
|
-
|
|
|
- public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto other) {
|
|
|
- if (other == org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto.getDefaultInstance()) return this;
|
|
|
- if (other.hasHeader()) {
|
|
|
- mergeHeader(other.getHeader());
|
|
|
- }
|
|
|
- if (other.hasOffset()) {
|
|
|
- setOffset(other.getOffset());
|
|
|
- }
|
|
|
- if (other.hasLen()) {
|
|
|
- setLen(other.getLen());
|
|
|
- }
|
|
|
- this.mergeUnknownFields(other.getUnknownFields());
|
|
|
- return this;
|
|
|
- }
|
|
|
-
|
|
|
- public final boolean isInitialized() {
|
|
|
- if (!hasHeader()) {
|
|
|
-
|
|
|
- return false;
|
|
|
- }
|
|
|
- if (!hasOffset()) {
|
|
|
-
|
|
|
- return false;
|
|
|
- }
|
|
|
- if (!hasLen()) {
|
|
|
-
|
|
|
- return false;
|
|
|
- }
|
|
|
- if (!getHeader().isInitialized()) {
|
|
|
-
|
|
|
- return false;
|
|
|
- }
|
|
|
- return true;
|
|
|
- }
|
|
|
-
|
|
|
- public Builder mergeFrom(
|
|
|
- com.google.protobuf.CodedInputStream input,
|
|
|
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
- throws java.io.IOException {
|
|
|
- com.google.protobuf.UnknownFieldSet.Builder unknownFields =
|
|
|
- com.google.protobuf.UnknownFieldSet.newBuilder(
|
|
|
- this.getUnknownFields());
|
|
|
- while (true) {
|
|
|
- int tag = input.readTag();
|
|
|
- switch (tag) {
|
|
|
- case 0:
|
|
|
- this.setUnknownFields(unknownFields.build());
|
|
|
- onChanged();
|
|
|
- return this;
|
|
|
- default: {
|
|
|
- if (!parseUnknownField(input, unknownFields,
|
|
|
- extensionRegistry, tag)) {
|
|
|
- this.setUnknownFields(unknownFields.build());
|
|
|
- onChanged();
|
|
|
- return this;
|
|
|
- }
|
|
|
- break;
|
|
|
- }
|
|
|
- case 10: {
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.newBuilder();
|
|
|
- if (hasHeader()) {
|
|
|
- subBuilder.mergeFrom(getHeader());
|
|
|
- }
|
|
|
- input.readMessage(subBuilder, extensionRegistry);
|
|
|
- setHeader(subBuilder.buildPartial());
|
|
|
- break;
|
|
|
- }
|
|
|
- case 16: {
|
|
|
- bitField0_ |= 0x00000002;
|
|
|
- offset_ = input.readUInt64();
|
|
|
- break;
|
|
|
- }
|
|
|
- case 24: {
|
|
|
- bitField0_ |= 0x00000004;
|
|
|
- len_ = input.readUInt64();
|
|
|
- break;
|
|
|
- }
|
|
|
- }
|
|
|
- }
|
|
|
- }
|
|
|
-
|
|
|
- private int bitField0_;
|
|
|
-
|
|
|
- // required .ClientOperationHeaderProto header = 1;
|
|
|
- private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto header_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.getDefaultInstance();
|
|
|
- private com.google.protobuf.SingleFieldBuilder<
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProtoOrBuilder> headerBuilder_;
|
|
|
- public boolean hasHeader() {
|
|
|
- return ((bitField0_ & 0x00000001) == 0x00000001);
|
|
|
- }
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto getHeader() {
|
|
|
- if (headerBuilder_ == null) {
|
|
|
- return header_;
|
|
|
- } else {
|
|
|
- return headerBuilder_.getMessage();
|
|
|
- }
|
|
|
- }
|
|
|
- public Builder setHeader(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto value) {
|
|
|
- if (headerBuilder_ == null) {
|
|
|
- if (value == null) {
|
|
|
- throw new NullPointerException();
|
|
|
- }
|
|
|
- header_ = value;
|
|
|
- onChanged();
|
|
|
- } else {
|
|
|
- headerBuilder_.setMessage(value);
|
|
|
- }
|
|
|
- bitField0_ |= 0x00000001;
|
|
|
- return this;
|
|
|
- }
|
|
|
- public Builder setHeader(
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.Builder builderForValue) {
|
|
|
- if (headerBuilder_ == null) {
|
|
|
- header_ = builderForValue.build();
|
|
|
- onChanged();
|
|
|
- } else {
|
|
|
- headerBuilder_.setMessage(builderForValue.build());
|
|
|
- }
|
|
|
- bitField0_ |= 0x00000001;
|
|
|
- return this;
|
|
|
- }
|
|
|
- public Builder mergeHeader(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto value) {
|
|
|
- if (headerBuilder_ == null) {
|
|
|
- if (((bitField0_ & 0x00000001) == 0x00000001) &&
|
|
|
- header_ != org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.getDefaultInstance()) {
|
|
|
- header_ =
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.newBuilder(header_).mergeFrom(value).buildPartial();
|
|
|
- } else {
|
|
|
- header_ = value;
|
|
|
- }
|
|
|
- onChanged();
|
|
|
- } else {
|
|
|
- headerBuilder_.mergeFrom(value);
|
|
|
- }
|
|
|
- bitField0_ |= 0x00000001;
|
|
|
- return this;
|
|
|
- }
|
|
|
- public Builder clearHeader() {
|
|
|
- if (headerBuilder_ == null) {
|
|
|
- header_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.getDefaultInstance();
|
|
|
- onChanged();
|
|
|
- } else {
|
|
|
- headerBuilder_.clear();
|
|
|
- }
|
|
|
- bitField0_ = (bitField0_ & ~0x00000001);
|
|
|
- return this;
|
|
|
- }
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.Builder getHeaderBuilder() {
|
|
|
- bitField0_ |= 0x00000001;
|
|
|
- onChanged();
|
|
|
- return getHeaderFieldBuilder().getBuilder();
|
|
|
- }
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProtoOrBuilder getHeaderOrBuilder() {
|
|
|
- if (headerBuilder_ != null) {
|
|
|
- return headerBuilder_.getMessageOrBuilder();
|
|
|
- } else {
|
|
|
- return header_;
|
|
|
- }
|
|
|
- }
|
|
|
- private com.google.protobuf.SingleFieldBuilder<
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProtoOrBuilder>
|
|
|
- getHeaderFieldBuilder() {
|
|
|
- if (headerBuilder_ == null) {
|
|
|
- headerBuilder_ = new com.google.protobuf.SingleFieldBuilder<
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProtoOrBuilder>(
|
|
|
- header_,
|
|
|
- getParentForChildren(),
|
|
|
- isClean());
|
|
|
- header_ = null;
|
|
|
- }
|
|
|
- return headerBuilder_;
|
|
|
- }
|
|
|
-
|
|
|
- // required uint64 offset = 2;
|
|
|
- private long offset_ ;
|
|
|
- public boolean hasOffset() {
|
|
|
- return ((bitField0_ & 0x00000002) == 0x00000002);
|
|
|
- }
|
|
|
- public long getOffset() {
|
|
|
- return offset_;
|
|
|
- }
|
|
|
- public Builder setOffset(long value) {
|
|
|
- bitField0_ |= 0x00000002;
|
|
|
- offset_ = value;
|
|
|
- onChanged();
|
|
|
- return this;
|
|
|
- }
|
|
|
- public Builder clearOffset() {
|
|
|
- bitField0_ = (bitField0_ & ~0x00000002);
|
|
|
- offset_ = 0L;
|
|
|
- onChanged();
|
|
|
- return this;
|
|
|
- }
|
|
|
-
|
|
|
- // required uint64 len = 3;
|
|
|
- private long len_ ;
|
|
|
- public boolean hasLen() {
|
|
|
- return ((bitField0_ & 0x00000004) == 0x00000004);
|
|
|
- }
|
|
|
- public long getLen() {
|
|
|
- return len_;
|
|
|
- }
|
|
|
- public Builder setLen(long value) {
|
|
|
- bitField0_ |= 0x00000004;
|
|
|
- len_ = value;
|
|
|
- onChanged();
|
|
|
- return this;
|
|
|
- }
|
|
|
- public Builder clearLen() {
|
|
|
- bitField0_ = (bitField0_ & ~0x00000004);
|
|
|
- len_ = 0L;
|
|
|
- onChanged();
|
|
|
- return this;
|
|
|
- }
|
|
|
-
|
|
|
- // @@protoc_insertion_point(builder_scope:OpReadBlockProto)
|
|
|
- }
|
|
|
-
|
|
|
- static {
|
|
|
- defaultInstance = new OpReadBlockProto(true);
|
|
|
- defaultInstance.initFields();
|
|
|
- }
|
|
|
-
|
|
|
- // @@protoc_insertion_point(class_scope:OpReadBlockProto)
|
|
|
- }
|
|
|
-
|
|
|
- public interface ChecksumProtoOrBuilder
|
|
|
- extends com.google.protobuf.MessageOrBuilder {
|
|
|
-
|
|
|
- // required .ChecksumProto.ChecksumType type = 1;
|
|
|
- boolean hasType();
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.ChecksumType getType();
|
|
|
-
|
|
|
- // required uint32 bytesPerChecksum = 2;
|
|
|
- boolean hasBytesPerChecksum();
|
|
|
- int getBytesPerChecksum();
|
|
|
- }
|
|
|
- public static final class ChecksumProto extends
|
|
|
- com.google.protobuf.GeneratedMessage
|
|
|
- implements ChecksumProtoOrBuilder {
|
|
|
- // Use ChecksumProto.newBuilder() to construct.
|
|
|
- private ChecksumProto(Builder builder) {
|
|
|
- super(builder);
|
|
|
- }
|
|
|
- private ChecksumProto(boolean noInit) {}
|
|
|
-
|
|
|
- private static final ChecksumProto defaultInstance;
|
|
|
- public static ChecksumProto getDefaultInstance() {
|
|
|
- return defaultInstance;
|
|
|
- }
|
|
|
-
|
|
|
- public ChecksumProto getDefaultInstanceForType() {
|
|
|
- return defaultInstance;
|
|
|
- }
|
|
|
-
|
|
|
- public static final com.google.protobuf.Descriptors.Descriptor
|
|
|
- getDescriptor() {
|
|
|
- return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_ChecksumProto_descriptor;
|
|
|
- }
|
|
|
-
|
|
|
- protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
|
|
- internalGetFieldAccessorTable() {
|
|
|
- return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_ChecksumProto_fieldAccessorTable;
|
|
|
- }
|
|
|
-
|
|
|
- public enum ChecksumType
|
|
|
- implements com.google.protobuf.ProtocolMessageEnum {
|
|
|
- NULL(0, 0),
|
|
|
- CRC32(1, 1),
|
|
|
- CRC32C(2, 2),
|
|
|
- ;
|
|
|
-
|
|
|
- public static final int NULL_VALUE = 0;
|
|
|
- public static final int CRC32_VALUE = 1;
|
|
|
- public static final int CRC32C_VALUE = 2;
|
|
|
-
|
|
|
-
|
|
|
- public final int getNumber() { return value; }
|
|
|
-
|
|
|
- public static ChecksumType valueOf(int value) {
|
|
|
- switch (value) {
|
|
|
- case 0: return NULL;
|
|
|
- case 1: return CRC32;
|
|
|
- case 2: return CRC32C;
|
|
|
- default: return null;
|
|
|
- }
|
|
|
- }
|
|
|
-
|
|
|
- public static com.google.protobuf.Internal.EnumLiteMap<ChecksumType>
|
|
|
- internalGetValueMap() {
|
|
|
- return internalValueMap;
|
|
|
- }
|
|
|
- private static com.google.protobuf.Internal.EnumLiteMap<ChecksumType>
|
|
|
- internalValueMap =
|
|
|
- new com.google.protobuf.Internal.EnumLiteMap<ChecksumType>() {
|
|
|
- public ChecksumType findValueByNumber(int number) {
|
|
|
- return ChecksumType.valueOf(number);
|
|
|
- }
|
|
|
- };
|
|
|
-
|
|
|
- public final com.google.protobuf.Descriptors.EnumValueDescriptor
|
|
|
- getValueDescriptor() {
|
|
|
- return getDescriptor().getValues().get(index);
|
|
|
- }
|
|
|
- public final com.google.protobuf.Descriptors.EnumDescriptor
|
|
|
- getDescriptorForType() {
|
|
|
- return getDescriptor();
|
|
|
- }
|
|
|
- public static final com.google.protobuf.Descriptors.EnumDescriptor
|
|
|
- getDescriptor() {
|
|
|
- return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.getDescriptor().getEnumTypes().get(0);
|
|
|
- }
|
|
|
-
|
|
|
- private static final ChecksumType[] VALUES = {
|
|
|
- NULL, CRC32, CRC32C,
|
|
|
- };
|
|
|
-
|
|
|
- public static ChecksumType valueOf(
|
|
|
- com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
|
|
|
- if (desc.getType() != getDescriptor()) {
|
|
|
- throw new java.lang.IllegalArgumentException(
|
|
|
- "EnumValueDescriptor is not for this type.");
|
|
|
- }
|
|
|
- return VALUES[desc.getIndex()];
|
|
|
- }
|
|
|
-
|
|
|
- private final int index;
|
|
|
- private final int value;
|
|
|
-
|
|
|
- private ChecksumType(int index, int value) {
|
|
|
- this.index = index;
|
|
|
- this.value = value;
|
|
|
- }
|
|
|
-
|
|
|
- // @@protoc_insertion_point(enum_scope:ChecksumProto.ChecksumType)
|
|
|
- }
|
|
|
-
|
|
|
- private int bitField0_;
|
|
|
- // required .ChecksumProto.ChecksumType type = 1;
|
|
|
- public static final int TYPE_FIELD_NUMBER = 1;
|
|
|
- private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.ChecksumType type_;
|
|
|
- public boolean hasType() {
|
|
|
- return ((bitField0_ & 0x00000001) == 0x00000001);
|
|
|
- }
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.ChecksumType getType() {
|
|
|
- return type_;
|
|
|
- }
|
|
|
-
|
|
|
- // required uint32 bytesPerChecksum = 2;
|
|
|
- public static final int BYTESPERCHECKSUM_FIELD_NUMBER = 2;
|
|
|
- private int bytesPerChecksum_;
|
|
|
- public boolean hasBytesPerChecksum() {
|
|
|
- return ((bitField0_ & 0x00000002) == 0x00000002);
|
|
|
- }
|
|
|
- public int getBytesPerChecksum() {
|
|
|
- return bytesPerChecksum_;
|
|
|
- }
|
|
|
-
|
|
|
- private void initFields() {
|
|
|
- type_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.ChecksumType.NULL;
|
|
|
- bytesPerChecksum_ = 0;
|
|
|
- }
|
|
|
- private byte memoizedIsInitialized = -1;
|
|
|
- public final boolean isInitialized() {
|
|
|
- byte isInitialized = memoizedIsInitialized;
|
|
|
- if (isInitialized != -1) return isInitialized == 1;
|
|
|
-
|
|
|
- if (!hasType()) {
|
|
|
- memoizedIsInitialized = 0;
|
|
|
- return false;
|
|
|
- }
|
|
|
- if (!hasBytesPerChecksum()) {
|
|
|
- memoizedIsInitialized = 0;
|
|
|
- return false;
|
|
|
- }
|
|
|
- memoizedIsInitialized = 1;
|
|
|
- return true;
|
|
|
- }
|
|
|
-
|
|
|
- public void writeTo(com.google.protobuf.CodedOutputStream output)
|
|
|
- throws java.io.IOException {
|
|
|
- getSerializedSize();
|
|
|
- if (((bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
- output.writeEnum(1, type_.getNumber());
|
|
|
- }
|
|
|
- if (((bitField0_ & 0x00000002) == 0x00000002)) {
|
|
|
- output.writeUInt32(2, bytesPerChecksum_);
|
|
|
- }
|
|
|
- getUnknownFields().writeTo(output);
|
|
|
- }
|
|
|
-
|
|
|
- private int memoizedSerializedSize = -1;
|
|
|
- public int getSerializedSize() {
|
|
|
- int size = memoizedSerializedSize;
|
|
|
- if (size != -1) return size;
|
|
|
-
|
|
|
- size = 0;
|
|
|
- if (((bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
- size += com.google.protobuf.CodedOutputStream
|
|
|
- .computeEnumSize(1, type_.getNumber());
|
|
|
- }
|
|
|
- if (((bitField0_ & 0x00000002) == 0x00000002)) {
|
|
|
- size += com.google.protobuf.CodedOutputStream
|
|
|
- .computeUInt32Size(2, bytesPerChecksum_);
|
|
|
- }
|
|
|
- size += getUnknownFields().getSerializedSize();
|
|
|
- memoizedSerializedSize = size;
|
|
|
- return size;
|
|
|
- }
|
|
|
-
|
|
|
- private static final long serialVersionUID = 0L;
|
|
|
- @java.lang.Override
|
|
|
- protected java.lang.Object writeReplace()
|
|
|
- throws java.io.ObjectStreamException {
|
|
|
- return super.writeReplace();
|
|
|
- }
|
|
|
-
|
|
|
- @java.lang.Override
|
|
|
- public boolean equals(final java.lang.Object obj) {
|
|
|
- if (obj == this) {
|
|
|
- return true;
|
|
|
- }
|
|
|
- if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto)) {
|
|
|
- return super.equals(obj);
|
|
|
- }
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto other = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto) obj;
|
|
|
-
|
|
|
- boolean result = true;
|
|
|
- result = result && (hasType() == other.hasType());
|
|
|
- if (hasType()) {
|
|
|
- result = result &&
|
|
|
- (getType() == other.getType());
|
|
|
- }
|
|
|
- result = result && (hasBytesPerChecksum() == other.hasBytesPerChecksum());
|
|
|
- if (hasBytesPerChecksum()) {
|
|
|
- result = result && (getBytesPerChecksum()
|
|
|
- == other.getBytesPerChecksum());
|
|
|
- }
|
|
|
- result = result &&
|
|
|
- getUnknownFields().equals(other.getUnknownFields());
|
|
|
- return result;
|
|
|
- }
|
|
|
-
|
|
|
- @java.lang.Override
|
|
|
- public int hashCode() {
|
|
|
- int hash = 41;
|
|
|
- hash = (19 * hash) + getDescriptorForType().hashCode();
|
|
|
- if (hasType()) {
|
|
|
- hash = (37 * hash) + TYPE_FIELD_NUMBER;
|
|
|
- hash = (53 * hash) + hashEnum(getType());
|
|
|
- }
|
|
|
- if (hasBytesPerChecksum()) {
|
|
|
- hash = (37 * hash) + BYTESPERCHECKSUM_FIELD_NUMBER;
|
|
|
- hash = (53 * hash) + getBytesPerChecksum();
|
|
|
- }
|
|
|
- hash = (29 * hash) + getUnknownFields().hashCode();
|
|
|
- return hash;
|
|
|
- }
|
|
|
-
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto parseFrom(
|
|
|
- com.google.protobuf.ByteString data)
|
|
|
- throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
- return newBuilder().mergeFrom(data).buildParsed();
|
|
|
- }
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto parseFrom(
|
|
|
- com.google.protobuf.ByteString data,
|
|
|
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
- throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
- return newBuilder().mergeFrom(data, extensionRegistry)
|
|
|
- .buildParsed();
|
|
|
- }
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto parseFrom(byte[] data)
|
|
|
- throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
- return newBuilder().mergeFrom(data).buildParsed();
|
|
|
- }
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto parseFrom(
|
|
|
- byte[] data,
|
|
|
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
- throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
- return newBuilder().mergeFrom(data, extensionRegistry)
|
|
|
- .buildParsed();
|
|
|
- }
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto parseFrom(java.io.InputStream input)
|
|
|
- throws java.io.IOException {
|
|
|
- return newBuilder().mergeFrom(input).buildParsed();
|
|
|
- }
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto parseFrom(
|
|
|
- java.io.InputStream input,
|
|
|
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
- throws java.io.IOException {
|
|
|
- return newBuilder().mergeFrom(input, extensionRegistry)
|
|
|
- .buildParsed();
|
|
|
- }
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto parseDelimitedFrom(java.io.InputStream input)
|
|
|
- throws java.io.IOException {
|
|
|
- Builder builder = newBuilder();
|
|
|
- if (builder.mergeDelimitedFrom(input)) {
|
|
|
- return builder.buildParsed();
|
|
|
- } else {
|
|
|
- return null;
|
|
|
- }
|
|
|
- }
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto parseDelimitedFrom(
|
|
|
- java.io.InputStream input,
|
|
|
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
- throws java.io.IOException {
|
|
|
- Builder builder = newBuilder();
|
|
|
- if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
|
|
|
- return builder.buildParsed();
|
|
|
- } else {
|
|
|
- return null;
|
|
|
- }
|
|
|
- }
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto parseFrom(
|
|
|
- com.google.protobuf.CodedInputStream input)
|
|
|
- throws java.io.IOException {
|
|
|
- return newBuilder().mergeFrom(input).buildParsed();
|
|
|
- }
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto parseFrom(
|
|
|
- com.google.protobuf.CodedInputStream input,
|
|
|
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
- throws java.io.IOException {
|
|
|
- return newBuilder().mergeFrom(input, extensionRegistry)
|
|
|
- .buildParsed();
|
|
|
- }
|
|
|
-
|
|
|
- public static Builder newBuilder() { return Builder.create(); }
|
|
|
- public Builder newBuilderForType() { return newBuilder(); }
|
|
|
- public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto prototype) {
|
|
|
- return newBuilder().mergeFrom(prototype);
|
|
|
- }
|
|
|
- public Builder toBuilder() { return newBuilder(this); }
|
|
|
-
|
|
|
- @java.lang.Override
|
|
|
- protected Builder newBuilderForType(
|
|
|
- com.google.protobuf.GeneratedMessage.BuilderParent parent) {
|
|
|
- Builder builder = new Builder(parent);
|
|
|
- return builder;
|
|
|
- }
|
|
|
- public static final class Builder extends
|
|
|
- com.google.protobuf.GeneratedMessage.Builder<Builder>
|
|
|
- implements org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProtoOrBuilder {
|
|
|
- public static final com.google.protobuf.Descriptors.Descriptor
|
|
|
- getDescriptor() {
|
|
|
- return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_ChecksumProto_descriptor;
|
|
|
- }
|
|
|
-
|
|
|
- protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
|
|
- internalGetFieldAccessorTable() {
|
|
|
- return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_ChecksumProto_fieldAccessorTable;
|
|
|
- }
|
|
|
-
|
|
|
- // Construct using org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.newBuilder()
|
|
|
- private Builder() {
|
|
|
- maybeForceBuilderInitialization();
|
|
|
- }
|
|
|
-
|
|
|
- private Builder(BuilderParent parent) {
|
|
|
- super(parent);
|
|
|
- maybeForceBuilderInitialization();
|
|
|
- }
|
|
|
- private void maybeForceBuilderInitialization() {
|
|
|
- if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
|
|
|
- }
|
|
|
- }
|
|
|
- private static Builder create() {
|
|
|
- return new Builder();
|
|
|
- }
|
|
|
-
|
|
|
- public Builder clear() {
|
|
|
- super.clear();
|
|
|
- type_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.ChecksumType.NULL;
|
|
|
- bitField0_ = (bitField0_ & ~0x00000001);
|
|
|
- bytesPerChecksum_ = 0;
|
|
|
- bitField0_ = (bitField0_ & ~0x00000002);
|
|
|
- return this;
|
|
|
- }
|
|
|
-
|
|
|
- public Builder clone() {
|
|
|
- return create().mergeFrom(buildPartial());
|
|
|
- }
|
|
|
-
|
|
|
- public com.google.protobuf.Descriptors.Descriptor
|
|
|
- getDescriptorForType() {
|
|
|
- return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.getDescriptor();
|
|
|
- }
|
|
|
-
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto getDefaultInstanceForType() {
|
|
|
- return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.getDefaultInstance();
|
|
|
- }
|
|
|
-
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto build() {
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto result = buildPartial();
|
|
|
- if (!result.isInitialized()) {
|
|
|
- throw newUninitializedMessageException(result);
|
|
|
- }
|
|
|
- return result;
|
|
|
- }
|
|
|
-
|
|
|
- private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto buildParsed()
|
|
|
- throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto result = buildPartial();
|
|
|
- if (!result.isInitialized()) {
|
|
|
- throw newUninitializedMessageException(
|
|
|
- result).asInvalidProtocolBufferException();
|
|
|
- }
|
|
|
- return result;
|
|
|
- }
|
|
|
-
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto buildPartial() {
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto result = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto(this);
|
|
|
- int from_bitField0_ = bitField0_;
|
|
|
- int to_bitField0_ = 0;
|
|
|
- if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
- to_bitField0_ |= 0x00000001;
|
|
|
- }
|
|
|
- result.type_ = type_;
|
|
|
- if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
|
|
|
- to_bitField0_ |= 0x00000002;
|
|
|
- }
|
|
|
- result.bytesPerChecksum_ = bytesPerChecksum_;
|
|
|
- result.bitField0_ = to_bitField0_;
|
|
|
- onBuilt();
|
|
|
- return result;
|
|
|
- }
|
|
|
-
|
|
|
- public Builder mergeFrom(com.google.protobuf.Message other) {
|
|
|
- if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto) {
|
|
|
- return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto)other);
|
|
|
- } else {
|
|
|
- super.mergeFrom(other);
|
|
|
- return this;
|
|
|
- }
|
|
|
- }
|
|
|
-
|
|
|
- public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto other) {
|
|
|
- if (other == org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.getDefaultInstance()) return this;
|
|
|
- if (other.hasType()) {
|
|
|
- setType(other.getType());
|
|
|
- }
|
|
|
- if (other.hasBytesPerChecksum()) {
|
|
|
- setBytesPerChecksum(other.getBytesPerChecksum());
|
|
|
- }
|
|
|
- this.mergeUnknownFields(other.getUnknownFields());
|
|
|
- return this;
|
|
|
- }
|
|
|
-
|
|
|
- public final boolean isInitialized() {
|
|
|
- if (!hasType()) {
|
|
|
-
|
|
|
- return false;
|
|
|
- }
|
|
|
- if (!hasBytesPerChecksum()) {
|
|
|
-
|
|
|
- return false;
|
|
|
- }
|
|
|
- return true;
|
|
|
- }
|
|
|
-
|
|
|
- public Builder mergeFrom(
|
|
|
- com.google.protobuf.CodedInputStream input,
|
|
|
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
- throws java.io.IOException {
|
|
|
- com.google.protobuf.UnknownFieldSet.Builder unknownFields =
|
|
|
- com.google.protobuf.UnknownFieldSet.newBuilder(
|
|
|
- this.getUnknownFields());
|
|
|
- while (true) {
|
|
|
- int tag = input.readTag();
|
|
|
- switch (tag) {
|
|
|
- case 0:
|
|
|
- this.setUnknownFields(unknownFields.build());
|
|
|
- onChanged();
|
|
|
- return this;
|
|
|
- default: {
|
|
|
- if (!parseUnknownField(input, unknownFields,
|
|
|
- extensionRegistry, tag)) {
|
|
|
- this.setUnknownFields(unknownFields.build());
|
|
|
- onChanged();
|
|
|
- return this;
|
|
|
- }
|
|
|
- break;
|
|
|
- }
|
|
|
- case 8: {
|
|
|
- int rawValue = input.readEnum();
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.ChecksumType value = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.ChecksumType.valueOf(rawValue);
|
|
|
- if (value == null) {
|
|
|
- unknownFields.mergeVarintField(1, rawValue);
|
|
|
- } else {
|
|
|
- bitField0_ |= 0x00000001;
|
|
|
- type_ = value;
|
|
|
- }
|
|
|
- break;
|
|
|
- }
|
|
|
- case 16: {
|
|
|
- bitField0_ |= 0x00000002;
|
|
|
- bytesPerChecksum_ = input.readUInt32();
|
|
|
- break;
|
|
|
- }
|
|
|
- }
|
|
|
- }
|
|
|
- }
|
|
|
-
|
|
|
- private int bitField0_;
|
|
|
-
|
|
|
- // required .ChecksumProto.ChecksumType type = 1;
|
|
|
- private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.ChecksumType type_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.ChecksumType.NULL;
|
|
|
- public boolean hasType() {
|
|
|
- return ((bitField0_ & 0x00000001) == 0x00000001);
|
|
|
- }
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.ChecksumType getType() {
|
|
|
- return type_;
|
|
|
- }
|
|
|
- public Builder setType(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.ChecksumType value) {
|
|
|
- if (value == null) {
|
|
|
- throw new NullPointerException();
|
|
|
- }
|
|
|
- bitField0_ |= 0x00000001;
|
|
|
- type_ = value;
|
|
|
- onChanged();
|
|
|
- return this;
|
|
|
- }
|
|
|
- public Builder clearType() {
|
|
|
- bitField0_ = (bitField0_ & ~0x00000001);
|
|
|
- type_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.ChecksumType.NULL;
|
|
|
- onChanged();
|
|
|
- return this;
|
|
|
- }
|
|
|
-
|
|
|
- // required uint32 bytesPerChecksum = 2;
|
|
|
- private int bytesPerChecksum_ ;
|
|
|
- public boolean hasBytesPerChecksum() {
|
|
|
- return ((bitField0_ & 0x00000002) == 0x00000002);
|
|
|
- }
|
|
|
- public int getBytesPerChecksum() {
|
|
|
- return bytesPerChecksum_;
|
|
|
- }
|
|
|
- public Builder setBytesPerChecksum(int value) {
|
|
|
- bitField0_ |= 0x00000002;
|
|
|
- bytesPerChecksum_ = value;
|
|
|
- onChanged();
|
|
|
- return this;
|
|
|
- }
|
|
|
- public Builder clearBytesPerChecksum() {
|
|
|
- bitField0_ = (bitField0_ & ~0x00000002);
|
|
|
- bytesPerChecksum_ = 0;
|
|
|
- onChanged();
|
|
|
- return this;
|
|
|
- }
|
|
|
-
|
|
|
- // @@protoc_insertion_point(builder_scope:ChecksumProto)
|
|
|
- }
|
|
|
-
|
|
|
- static {
|
|
|
- defaultInstance = new ChecksumProto(true);
|
|
|
- defaultInstance.initFields();
|
|
|
- }
|
|
|
-
|
|
|
- // @@protoc_insertion_point(class_scope:ChecksumProto)
|
|
|
- }
|
|
|
-
|
|
|
- public interface OpWriteBlockProtoOrBuilder
|
|
|
- extends com.google.protobuf.MessageOrBuilder {
|
|
|
-
|
|
|
- // required .ClientOperationHeaderProto header = 1;
|
|
|
- boolean hasHeader();
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto getHeader();
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProtoOrBuilder getHeaderOrBuilder();
|
|
|
-
|
|
|
- // repeated .DatanodeInfoProto targets = 2;
|
|
|
- java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto>
|
|
|
- getTargetsList();
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getTargets(int index);
|
|
|
- int getTargetsCount();
|
|
|
- java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder>
|
|
|
- getTargetsOrBuilderList();
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder getTargetsOrBuilder(
|
|
|
- int index);
|
|
|
-
|
|
|
- // optional .DatanodeInfoProto source = 3;
|
|
|
- boolean hasSource();
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getSource();
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder getSourceOrBuilder();
|
|
|
-
|
|
|
- // required .OpWriteBlockProto.BlockConstructionStage stage = 4;
|
|
|
- boolean hasStage();
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto.BlockConstructionStage getStage();
|
|
|
-
|
|
|
- // required uint32 pipelineSize = 5;
|
|
|
- boolean hasPipelineSize();
|
|
|
- int getPipelineSize();
|
|
|
-
|
|
|
- // required uint64 minBytesRcvd = 6;
|
|
|
- boolean hasMinBytesRcvd();
|
|
|
- long getMinBytesRcvd();
|
|
|
-
|
|
|
- // required uint64 maxBytesRcvd = 7;
|
|
|
- boolean hasMaxBytesRcvd();
|
|
|
- long getMaxBytesRcvd();
|
|
|
-
|
|
|
- // required uint64 latestGenerationStamp = 8;
|
|
|
- boolean hasLatestGenerationStamp();
|
|
|
- long getLatestGenerationStamp();
|
|
|
-
|
|
|
- // required .ChecksumProto requestedChecksum = 9;
|
|
|
- boolean hasRequestedChecksum();
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto getRequestedChecksum();
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProtoOrBuilder getRequestedChecksumOrBuilder();
|
|
|
- }
|
|
|
- public static final class OpWriteBlockProto extends
|
|
|
- com.google.protobuf.GeneratedMessage
|
|
|
- implements OpWriteBlockProtoOrBuilder {
|
|
|
- // Use OpWriteBlockProto.newBuilder() to construct.
|
|
|
- private OpWriteBlockProto(Builder builder) {
|
|
|
- super(builder);
|
|
|
- }
|
|
|
- private OpWriteBlockProto(boolean noInit) {}
|
|
|
-
|
|
|
- private static final OpWriteBlockProto defaultInstance;
|
|
|
- public static OpWriteBlockProto getDefaultInstance() {
|
|
|
- return defaultInstance;
|
|
|
- }
|
|
|
-
|
|
|
- public OpWriteBlockProto getDefaultInstanceForType() {
|
|
|
- return defaultInstance;
|
|
|
- }
|
|
|
-
|
|
|
- public static final com.google.protobuf.Descriptors.Descriptor
|
|
|
- getDescriptor() {
|
|
|
- return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_OpWriteBlockProto_descriptor;
|
|
|
- }
|
|
|
-
|
|
|
- protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
|
|
- internalGetFieldAccessorTable() {
|
|
|
- return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_OpWriteBlockProto_fieldAccessorTable;
|
|
|
- }
|
|
|
-
|
|
|
- public enum BlockConstructionStage
|
|
|
- implements com.google.protobuf.ProtocolMessageEnum {
|
|
|
- PIPELINE_SETUP_APPEND(0, 0),
|
|
|
- PIPELINE_SETUP_APPEND_RECOVERY(1, 1),
|
|
|
- DATA_STREAMING(2, 2),
|
|
|
- PIPELINE_SETUP_STREAMING_RECOVERY(3, 3),
|
|
|
- PIPELINE_CLOSE(4, 4),
|
|
|
- PIPELINE_CLOSE_RECOVERY(5, 5),
|
|
|
- PIPELINE_SETUP_CREATE(6, 6),
|
|
|
- TRANSFER_RBW(7, 7),
|
|
|
- TRANSFER_FINALIZED(8, 8),
|
|
|
- ;
|
|
|
-
|
|
|
- public static final int PIPELINE_SETUP_APPEND_VALUE = 0;
|
|
|
- public static final int PIPELINE_SETUP_APPEND_RECOVERY_VALUE = 1;
|
|
|
- public static final int DATA_STREAMING_VALUE = 2;
|
|
|
- public static final int PIPELINE_SETUP_STREAMING_RECOVERY_VALUE = 3;
|
|
|
- public static final int PIPELINE_CLOSE_VALUE = 4;
|
|
|
- public static final int PIPELINE_CLOSE_RECOVERY_VALUE = 5;
|
|
|
- public static final int PIPELINE_SETUP_CREATE_VALUE = 6;
|
|
|
- public static final int TRANSFER_RBW_VALUE = 7;
|
|
|
- public static final int TRANSFER_FINALIZED_VALUE = 8;
|
|
|
-
|
|
|
-
|
|
|
- public final int getNumber() { return value; }
|
|
|
-
|
|
|
- public static BlockConstructionStage valueOf(int value) {
|
|
|
- switch (value) {
|
|
|
- case 0: return PIPELINE_SETUP_APPEND;
|
|
|
- case 1: return PIPELINE_SETUP_APPEND_RECOVERY;
|
|
|
- case 2: return DATA_STREAMING;
|
|
|
- case 3: return PIPELINE_SETUP_STREAMING_RECOVERY;
|
|
|
- case 4: return PIPELINE_CLOSE;
|
|
|
- case 5: return PIPELINE_CLOSE_RECOVERY;
|
|
|
- case 6: return PIPELINE_SETUP_CREATE;
|
|
|
- case 7: return TRANSFER_RBW;
|
|
|
- case 8: return TRANSFER_FINALIZED;
|
|
|
- default: return null;
|
|
|
- }
|
|
|
- }
|
|
|
-
|
|
|
- public static com.google.protobuf.Internal.EnumLiteMap<BlockConstructionStage>
|
|
|
- internalGetValueMap() {
|
|
|
- return internalValueMap;
|
|
|
- }
|
|
|
- private static com.google.protobuf.Internal.EnumLiteMap<BlockConstructionStage>
|
|
|
- internalValueMap =
|
|
|
- new com.google.protobuf.Internal.EnumLiteMap<BlockConstructionStage>() {
|
|
|
- public BlockConstructionStage findValueByNumber(int number) {
|
|
|
- return BlockConstructionStage.valueOf(number);
|
|
|
- }
|
|
|
- };
|
|
|
-
|
|
|
- public final com.google.protobuf.Descriptors.EnumValueDescriptor
|
|
|
- getValueDescriptor() {
|
|
|
- return getDescriptor().getValues().get(index);
|
|
|
- }
|
|
|
- public final com.google.protobuf.Descriptors.EnumDescriptor
|
|
|
- getDescriptorForType() {
|
|
|
- return getDescriptor();
|
|
|
- }
|
|
|
- public static final com.google.protobuf.Descriptors.EnumDescriptor
|
|
|
- getDescriptor() {
|
|
|
- return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto.getDescriptor().getEnumTypes().get(0);
|
|
|
- }
|
|
|
-
|
|
|
- private static final BlockConstructionStage[] VALUES = {
|
|
|
- PIPELINE_SETUP_APPEND, PIPELINE_SETUP_APPEND_RECOVERY, DATA_STREAMING, PIPELINE_SETUP_STREAMING_RECOVERY, PIPELINE_CLOSE, PIPELINE_CLOSE_RECOVERY, PIPELINE_SETUP_CREATE, TRANSFER_RBW, TRANSFER_FINALIZED,
|
|
|
- };
|
|
|
-
|
|
|
- public static BlockConstructionStage valueOf(
|
|
|
- com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
|
|
|
- if (desc.getType() != getDescriptor()) {
|
|
|
- throw new java.lang.IllegalArgumentException(
|
|
|
- "EnumValueDescriptor is not for this type.");
|
|
|
- }
|
|
|
- return VALUES[desc.getIndex()];
|
|
|
- }
|
|
|
-
|
|
|
- private final int index;
|
|
|
- private final int value;
|
|
|
-
|
|
|
- private BlockConstructionStage(int index, int value) {
|
|
|
- this.index = index;
|
|
|
- this.value = value;
|
|
|
- }
|
|
|
-
|
|
|
- // @@protoc_insertion_point(enum_scope:OpWriteBlockProto.BlockConstructionStage)
|
|
|
- }
|
|
|
-
|
|
|
- private int bitField0_;
|
|
|
- // required .ClientOperationHeaderProto header = 1;
|
|
|
- public static final int HEADER_FIELD_NUMBER = 1;
|
|
|
- private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto header_;
|
|
|
- public boolean hasHeader() {
|
|
|
- return ((bitField0_ & 0x00000001) == 0x00000001);
|
|
|
- }
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto getHeader() {
|
|
|
- return header_;
|
|
|
- }
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProtoOrBuilder getHeaderOrBuilder() {
|
|
|
- return header_;
|
|
|
- }
|
|
|
-
|
|
|
- // repeated .DatanodeInfoProto targets = 2;
|
|
|
- public static final int TARGETS_FIELD_NUMBER = 2;
|
|
|
- private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto> targets_;
|
|
|
- public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto> getTargetsList() {
|
|
|
- return targets_;
|
|
|
- }
|
|
|
- public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder>
|
|
|
- getTargetsOrBuilderList() {
|
|
|
- return targets_;
|
|
|
- }
|
|
|
- public int getTargetsCount() {
|
|
|
- return targets_.size();
|
|
|
- }
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getTargets(int index) {
|
|
|
- return targets_.get(index);
|
|
|
- }
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder getTargetsOrBuilder(
|
|
|
- int index) {
|
|
|
- return targets_.get(index);
|
|
|
- }
|
|
|
-
|
|
|
- // optional .DatanodeInfoProto source = 3;
|
|
|
- public static final int SOURCE_FIELD_NUMBER = 3;
|
|
|
- private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto source_;
|
|
|
- public boolean hasSource() {
|
|
|
- return ((bitField0_ & 0x00000002) == 0x00000002);
|
|
|
- }
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getSource() {
|
|
|
- return source_;
|
|
|
- }
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder getSourceOrBuilder() {
|
|
|
- return source_;
|
|
|
- }
|
|
|
-
|
|
|
- // required .OpWriteBlockProto.BlockConstructionStage stage = 4;
|
|
|
- public static final int STAGE_FIELD_NUMBER = 4;
|
|
|
- private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto.BlockConstructionStage stage_;
|
|
|
- public boolean hasStage() {
|
|
|
- return ((bitField0_ & 0x00000004) == 0x00000004);
|
|
|
- }
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto.BlockConstructionStage getStage() {
|
|
|
- return stage_;
|
|
|
- }
|
|
|
-
|
|
|
- // required uint32 pipelineSize = 5;
|
|
|
- public static final int PIPELINESIZE_FIELD_NUMBER = 5;
|
|
|
- private int pipelineSize_;
|
|
|
- public boolean hasPipelineSize() {
|
|
|
- return ((bitField0_ & 0x00000008) == 0x00000008);
|
|
|
- }
|
|
|
- public int getPipelineSize() {
|
|
|
- return pipelineSize_;
|
|
|
- }
|
|
|
-
|
|
|
- // required uint64 minBytesRcvd = 6;
|
|
|
- public static final int MINBYTESRCVD_FIELD_NUMBER = 6;
|
|
|
- private long minBytesRcvd_;
|
|
|
- public boolean hasMinBytesRcvd() {
|
|
|
- return ((bitField0_ & 0x00000010) == 0x00000010);
|
|
|
- }
|
|
|
- public long getMinBytesRcvd() {
|
|
|
- return minBytesRcvd_;
|
|
|
- }
|
|
|
-
|
|
|
- // required uint64 maxBytesRcvd = 7;
|
|
|
- public static final int MAXBYTESRCVD_FIELD_NUMBER = 7;
|
|
|
- private long maxBytesRcvd_;
|
|
|
- public boolean hasMaxBytesRcvd() {
|
|
|
- return ((bitField0_ & 0x00000020) == 0x00000020);
|
|
|
- }
|
|
|
- public long getMaxBytesRcvd() {
|
|
|
- return maxBytesRcvd_;
|
|
|
- }
|
|
|
-
|
|
|
- // required uint64 latestGenerationStamp = 8;
|
|
|
- public static final int LATESTGENERATIONSTAMP_FIELD_NUMBER = 8;
|
|
|
- private long latestGenerationStamp_;
|
|
|
- public boolean hasLatestGenerationStamp() {
|
|
|
- return ((bitField0_ & 0x00000040) == 0x00000040);
|
|
|
- }
|
|
|
- public long getLatestGenerationStamp() {
|
|
|
- return latestGenerationStamp_;
|
|
|
- }
|
|
|
-
|
|
|
- // required .ChecksumProto requestedChecksum = 9;
|
|
|
- public static final int REQUESTEDCHECKSUM_FIELD_NUMBER = 9;
|
|
|
- private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto requestedChecksum_;
|
|
|
- public boolean hasRequestedChecksum() {
|
|
|
- return ((bitField0_ & 0x00000080) == 0x00000080);
|
|
|
- }
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto getRequestedChecksum() {
|
|
|
- return requestedChecksum_;
|
|
|
- }
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProtoOrBuilder getRequestedChecksumOrBuilder() {
|
|
|
- return requestedChecksum_;
|
|
|
- }
|
|
|
-
|
|
|
- private void initFields() {
|
|
|
- header_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.getDefaultInstance();
|
|
|
- targets_ = java.util.Collections.emptyList();
|
|
|
- source_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDefaultInstance();
|
|
|
- stage_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto.BlockConstructionStage.PIPELINE_SETUP_APPEND;
|
|
|
- pipelineSize_ = 0;
|
|
|
- minBytesRcvd_ = 0L;
|
|
|
- maxBytesRcvd_ = 0L;
|
|
|
- latestGenerationStamp_ = 0L;
|
|
|
- requestedChecksum_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.getDefaultInstance();
|
|
|
- }
|
|
|
- private byte memoizedIsInitialized = -1;
|
|
|
- public final boolean isInitialized() {
|
|
|
- byte isInitialized = memoizedIsInitialized;
|
|
|
- if (isInitialized != -1) return isInitialized == 1;
|
|
|
-
|
|
|
- if (!hasHeader()) {
|
|
|
- memoizedIsInitialized = 0;
|
|
|
- return false;
|
|
|
- }
|
|
|
- if (!hasStage()) {
|
|
|
- memoizedIsInitialized = 0;
|
|
|
- return false;
|
|
|
- }
|
|
|
- if (!hasPipelineSize()) {
|
|
|
- memoizedIsInitialized = 0;
|
|
|
- return false;
|
|
|
- }
|
|
|
- if (!hasMinBytesRcvd()) {
|
|
|
- memoizedIsInitialized = 0;
|
|
|
- return false;
|
|
|
- }
|
|
|
- if (!hasMaxBytesRcvd()) {
|
|
|
- memoizedIsInitialized = 0;
|
|
|
- return false;
|
|
|
- }
|
|
|
- if (!hasLatestGenerationStamp()) {
|
|
|
- memoizedIsInitialized = 0;
|
|
|
- return false;
|
|
|
- }
|
|
|
- if (!hasRequestedChecksum()) {
|
|
|
- memoizedIsInitialized = 0;
|
|
|
- return false;
|
|
|
- }
|
|
|
- if (!getHeader().isInitialized()) {
|
|
|
- memoizedIsInitialized = 0;
|
|
|
- return false;
|
|
|
- }
|
|
|
- for (int i = 0; i < getTargetsCount(); i++) {
|
|
|
- if (!getTargets(i).isInitialized()) {
|
|
|
- memoizedIsInitialized = 0;
|
|
|
- return false;
|
|
|
- }
|
|
|
- }
|
|
|
- if (hasSource()) {
|
|
|
- if (!getSource().isInitialized()) {
|
|
|
- memoizedIsInitialized = 0;
|
|
|
- return false;
|
|
|
- }
|
|
|
- }
|
|
|
- if (!getRequestedChecksum().isInitialized()) {
|
|
|
- memoizedIsInitialized = 0;
|
|
|
- return false;
|
|
|
- }
|
|
|
- memoizedIsInitialized = 1;
|
|
|
- return true;
|
|
|
- }
|
|
|
-
|
|
|
- public void writeTo(com.google.protobuf.CodedOutputStream output)
|
|
|
- throws java.io.IOException {
|
|
|
- getSerializedSize();
|
|
|
- if (((bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
- output.writeMessage(1, header_);
|
|
|
- }
|
|
|
- for (int i = 0; i < targets_.size(); i++) {
|
|
|
- output.writeMessage(2, targets_.get(i));
- }
- if (((bitField0_ & 0x00000002) == 0x00000002)) {
- output.writeMessage(3, source_);
- }
- if (((bitField0_ & 0x00000004) == 0x00000004)) {
- output.writeEnum(4, stage_.getNumber());
- }
- if (((bitField0_ & 0x00000008) == 0x00000008)) {
- output.writeUInt32(5, pipelineSize_);
- }
- if (((bitField0_ & 0x00000010) == 0x00000010)) {
- output.writeUInt64(6, minBytesRcvd_);
- }
- if (((bitField0_ & 0x00000020) == 0x00000020)) {
- output.writeUInt64(7, maxBytesRcvd_);
- }
- if (((bitField0_ & 0x00000040) == 0x00000040)) {
- output.writeUInt64(8, latestGenerationStamp_);
- }
- if (((bitField0_ & 0x00000080) == 0x00000080)) {
- output.writeMessage(9, requestedChecksum_);
- }
- getUnknownFields().writeTo(output);
- }
-
|
|
|
- private int memoizedSerializedSize = -1;
|
|
|
- public int getSerializedSize() {
|
|
|
- int size = memoizedSerializedSize;
|
|
|
- if (size != -1) return size;
|
|
|
-
|
|
|
- size = 0;
|
|
|
- if (((bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
- size += com.google.protobuf.CodedOutputStream
|
|
|
- .computeMessageSize(1, header_);
|
|
|
- }
|
|
|
- for (int i = 0; i < targets_.size(); i++) {
|
|
|
- size += com.google.protobuf.CodedOutputStream
|
|
|
- .computeMessageSize(2, targets_.get(i));
|
|
|
- }
|
|
|
- if (((bitField0_ & 0x00000002) == 0x00000002)) {
|
|
|
- size += com.google.protobuf.CodedOutputStream
|
|
|
- .computeMessageSize(3, source_);
|
|
|
- }
|
|
|
- if (((bitField0_ & 0x00000004) == 0x00000004)) {
|
|
|
- size += com.google.protobuf.CodedOutputStream
|
|
|
- .computeEnumSize(4, stage_.getNumber());
|
|
|
- }
|
|
|
- if (((bitField0_ & 0x00000008) == 0x00000008)) {
|
|
|
- size += com.google.protobuf.CodedOutputStream
|
|
|
- .computeUInt32Size(5, pipelineSize_);
|
|
|
- }
|
|
|
- if (((bitField0_ & 0x00000010) == 0x00000010)) {
|
|
|
- size += com.google.protobuf.CodedOutputStream
|
|
|
- .computeUInt64Size(6, minBytesRcvd_);
|
|
|
- }
|
|
|
- if (((bitField0_ & 0x00000020) == 0x00000020)) {
|
|
|
- size += com.google.protobuf.CodedOutputStream
|
|
|
- .computeUInt64Size(7, maxBytesRcvd_);
|
|
|
- }
|
|
|
- if (((bitField0_ & 0x00000040) == 0x00000040)) {
|
|
|
- size += com.google.protobuf.CodedOutputStream
|
|
|
- .computeUInt64Size(8, latestGenerationStamp_);
|
|
|
- }
|
|
|
- if (((bitField0_ & 0x00000080) == 0x00000080)) {
|
|
|
- size += com.google.protobuf.CodedOutputStream
|
|
|
- .computeMessageSize(9, requestedChecksum_);
|
|
|
- }
|
|
|
- size += getUnknownFields().getSerializedSize();
|
|
|
- memoizedSerializedSize = size;
|
|
|
- return size;
|
|
|
- }
|
|
|
-
|
|
|
- private static final long serialVersionUID = 0L;
|
|
|
- @java.lang.Override
|
|
|
- protected java.lang.Object writeReplace()
|
|
|
- throws java.io.ObjectStreamException {
|
|
|
- return super.writeReplace();
|
|
|
- }
|
|
|
-
|
|
|
- @java.lang.Override
|
|
|
- public boolean equals(final java.lang.Object obj) {
|
|
|
- if (obj == this) {
|
|
|
- return true;
|
|
|
- }
|
|
|
- if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto)) {
|
|
|
- return super.equals(obj);
|
|
|
- }
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto other = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto) obj;
|
|
|
-
|
|
|
- boolean result = true;
|
|
|
- result = result && (hasHeader() == other.hasHeader());
|
|
|
- if (hasHeader()) {
|
|
|
- result = result && getHeader()
|
|
|
- .equals(other.getHeader());
|
|
|
- }
|
|
|
- result = result && getTargetsList()
|
|
|
- .equals(other.getTargetsList());
|
|
|
- result = result && (hasSource() == other.hasSource());
|
|
|
- if (hasSource()) {
|
|
|
- result = result && getSource()
|
|
|
- .equals(other.getSource());
|
|
|
- }
|
|
|
- result = result && (hasStage() == other.hasStage());
|
|
|
- if (hasStage()) {
|
|
|
- result = result &&
|
|
|
- (getStage() == other.getStage());
|
|
|
- }
|
|
|
- result = result && (hasPipelineSize() == other.hasPipelineSize());
|
|
|
- if (hasPipelineSize()) {
|
|
|
- result = result && (getPipelineSize()
|
|
|
- == other.getPipelineSize());
|
|
|
- }
|
|
|
- result = result && (hasMinBytesRcvd() == other.hasMinBytesRcvd());
|
|
|
- if (hasMinBytesRcvd()) {
|
|
|
- result = result && (getMinBytesRcvd()
|
|
|
- == other.getMinBytesRcvd());
|
|
|
- }
|
|
|
- result = result && (hasMaxBytesRcvd() == other.hasMaxBytesRcvd());
|
|
|
- if (hasMaxBytesRcvd()) {
|
|
|
- result = result && (getMaxBytesRcvd()
|
|
|
- == other.getMaxBytesRcvd());
|
|
|
- }
|
|
|
- result = result && (hasLatestGenerationStamp() == other.hasLatestGenerationStamp());
|
|
|
- if (hasLatestGenerationStamp()) {
|
|
|
- result = result && (getLatestGenerationStamp()
|
|
|
- == other.getLatestGenerationStamp());
|
|
|
- }
|
|
|
- result = result && (hasRequestedChecksum() == other.hasRequestedChecksum());
|
|
|
- if (hasRequestedChecksum()) {
|
|
|
- result = result && getRequestedChecksum()
|
|
|
- .equals(other.getRequestedChecksum());
|
|
|
- }
|
|
|
- result = result &&
|
|
|
- getUnknownFields().equals(other.getUnknownFields());
|
|
|
- return result;
|
|
|
- }
|
|
|
-
|
|
|
- @java.lang.Override
|
|
|
- public int hashCode() {
|
|
|
- int hash = 41;
|
|
|
- hash = (19 * hash) + getDescriptorForType().hashCode();
|
|
|
- if (hasHeader()) {
|
|
|
- hash = (37 * hash) + HEADER_FIELD_NUMBER;
|
|
|
- hash = (53 * hash) + getHeader().hashCode();
|
|
|
- }
|
|
|
- if (getTargetsCount() > 0) {
|
|
|
- hash = (37 * hash) + TARGETS_FIELD_NUMBER;
|
|
|
- hash = (53 * hash) + getTargetsList().hashCode();
|
|
|
- }
|
|
|
- if (hasSource()) {
|
|
|
- hash = (37 * hash) + SOURCE_FIELD_NUMBER;
|
|
|
- hash = (53 * hash) + getSource().hashCode();
|
|
|
- }
|
|
|
- if (hasStage()) {
|
|
|
- hash = (37 * hash) + STAGE_FIELD_NUMBER;
|
|
|
- hash = (53 * hash) + hashEnum(getStage());
|
|
|
- }
|
|
|
- if (hasPipelineSize()) {
|
|
|
- hash = (37 * hash) + PIPELINESIZE_FIELD_NUMBER;
|
|
|
- hash = (53 * hash) + getPipelineSize();
|
|
|
- }
|
|
|
- if (hasMinBytesRcvd()) {
|
|
|
- hash = (37 * hash) + MINBYTESRCVD_FIELD_NUMBER;
|
|
|
- hash = (53 * hash) + hashLong(getMinBytesRcvd());
|
|
|
- }
|
|
|
- if (hasMaxBytesRcvd()) {
|
|
|
- hash = (37 * hash) + MAXBYTESRCVD_FIELD_NUMBER;
|
|
|
- hash = (53 * hash) + hashLong(getMaxBytesRcvd());
|
|
|
- }
|
|
|
- if (hasLatestGenerationStamp()) {
|
|
|
- hash = (37 * hash) + LATESTGENERATIONSTAMP_FIELD_NUMBER;
|
|
|
- hash = (53 * hash) + hashLong(getLatestGenerationStamp());
|
|
|
- }
|
|
|
- if (hasRequestedChecksum()) {
|
|
|
- hash = (37 * hash) + REQUESTEDCHECKSUM_FIELD_NUMBER;
|
|
|
- hash = (53 * hash) + getRequestedChecksum().hashCode();
|
|
|
- }
|
|
|
- hash = (29 * hash) + getUnknownFields().hashCode();
|
|
|
- return hash;
|
|
|
- }
|
|
|
-
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto parseFrom(
|
|
|
- com.google.protobuf.ByteString data)
|
|
|
- throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
- return newBuilder().mergeFrom(data).buildParsed();
|
|
|
- }
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto parseFrom(
|
|
|
- com.google.protobuf.ByteString data,
|
|
|
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
- throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
- return newBuilder().mergeFrom(data, extensionRegistry)
|
|
|
- .buildParsed();
|
|
|
- }
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto parseFrom(byte[] data)
|
|
|
- throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
- return newBuilder().mergeFrom(data).buildParsed();
|
|
|
- }
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto parseFrom(
|
|
|
- byte[] data,
|
|
|
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
- throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
- return newBuilder().mergeFrom(data, extensionRegistry)
|
|
|
- .buildParsed();
|
|
|
- }
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto parseFrom(java.io.InputStream input)
|
|
|
- throws java.io.IOException {
|
|
|
- return newBuilder().mergeFrom(input).buildParsed();
|
|
|
- }
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto parseFrom(
|
|
|
- java.io.InputStream input,
|
|
|
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
- throws java.io.IOException {
|
|
|
- return newBuilder().mergeFrom(input, extensionRegistry)
|
|
|
- .buildParsed();
|
|
|
- }
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto parseDelimitedFrom(java.io.InputStream input)
|
|
|
- throws java.io.IOException {
|
|
|
- Builder builder = newBuilder();
|
|
|
- if (builder.mergeDelimitedFrom(input)) {
|
|
|
- return builder.buildParsed();
|
|
|
- } else {
|
|
|
- return null;
|
|
|
- }
|
|
|
- }
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto parseDelimitedFrom(
|
|
|
- java.io.InputStream input,
|
|
|
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
- throws java.io.IOException {
|
|
|
- Builder builder = newBuilder();
|
|
|
- if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
|
|
|
- return builder.buildParsed();
|
|
|
- } else {
|
|
|
- return null;
|
|
|
- }
|
|
|
- }
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto parseFrom(
|
|
|
- com.google.protobuf.CodedInputStream input)
|
|
|
- throws java.io.IOException {
|
|
|
- return newBuilder().mergeFrom(input).buildParsed();
|
|
|
- }
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto parseFrom(
|
|
|
- com.google.protobuf.CodedInputStream input,
|
|
|
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
- throws java.io.IOException {
|
|
|
- return newBuilder().mergeFrom(input, extensionRegistry)
|
|
|
- .buildParsed();
|
|
|
- }
|
|
|
-
|
|
|
- public static Builder newBuilder() { return Builder.create(); }
|
|
|
- public Builder newBuilderForType() { return newBuilder(); }
|
|
|
- public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto prototype) {
|
|
|
- return newBuilder().mergeFrom(prototype);
|
|
|
- }
|
|
|
- public Builder toBuilder() { return newBuilder(this); }
|
|
|
-
|
|
|
- @java.lang.Override
|
|
|
- protected Builder newBuilderForType(
|
|
|
- com.google.protobuf.GeneratedMessage.BuilderParent parent) {
|
|
|
- Builder builder = new Builder(parent);
|
|
|
- return builder;
|
|
|
- }
|
|
|
- public static final class Builder extends
|
|
|
- com.google.protobuf.GeneratedMessage.Builder<Builder>
|
|
|
- implements org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProtoOrBuilder {
|
|
|
- public static final com.google.protobuf.Descriptors.Descriptor
|
|
|
- getDescriptor() {
|
|
|
- return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_OpWriteBlockProto_descriptor;
|
|
|
- }
|
|
|
-
|
|
|
- protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
|
|
- internalGetFieldAccessorTable() {
|
|
|
- return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_OpWriteBlockProto_fieldAccessorTable;
|
|
|
- }
|
|
|
-
|
|
|
- // Construct using org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto.newBuilder()
|
|
|
- private Builder() {
|
|
|
- maybeForceBuilderInitialization();
|
|
|
- }
|
|
|
-
|
|
|
- private Builder(BuilderParent parent) {
|
|
|
- super(parent);
|
|
|
- maybeForceBuilderInitialization();
|
|
|
- }
|
|
|
- private void maybeForceBuilderInitialization() {
|
|
|
- if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
|
|
|
- getHeaderFieldBuilder();
|
|
|
- getTargetsFieldBuilder();
|
|
|
- getSourceFieldBuilder();
|
|
|
- getRequestedChecksumFieldBuilder();
|
|
|
- }
|
|
|
- }
|
|
|
- private static Builder create() {
|
|
|
- return new Builder();
|
|
|
- }
|
|
|
-
|
|
|
- public Builder clear() {
|
|
|
- super.clear();
|
|
|
- if (headerBuilder_ == null) {
|
|
|
- header_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.getDefaultInstance();
|
|
|
- } else {
|
|
|
- headerBuilder_.clear();
|
|
|
- }
|
|
|
- bitField0_ = (bitField0_ & ~0x00000001);
|
|
|
- if (targetsBuilder_ == null) {
|
|
|
- targets_ = java.util.Collections.emptyList();
|
|
|
- bitField0_ = (bitField0_ & ~0x00000002);
|
|
|
- } else {
|
|
|
- targetsBuilder_.clear();
|
|
|
- }
|
|
|
- if (sourceBuilder_ == null) {
|
|
|
- source_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDefaultInstance();
|
|
|
- } else {
|
|
|
- sourceBuilder_.clear();
|
|
|
- }
|
|
|
- bitField0_ = (bitField0_ & ~0x00000004);
|
|
|
- stage_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto.BlockConstructionStage.PIPELINE_SETUP_APPEND;
|
|
|
- bitField0_ = (bitField0_ & ~0x00000008);
|
|
|
- pipelineSize_ = 0;
|
|
|
- bitField0_ = (bitField0_ & ~0x00000010);
|
|
|
- minBytesRcvd_ = 0L;
|
|
|
- bitField0_ = (bitField0_ & ~0x00000020);
|
|
|
- maxBytesRcvd_ = 0L;
|
|
|
- bitField0_ = (bitField0_ & ~0x00000040);
|
|
|
- latestGenerationStamp_ = 0L;
|
|
|
- bitField0_ = (bitField0_ & ~0x00000080);
|
|
|
- if (requestedChecksumBuilder_ == null) {
|
|
|
- requestedChecksum_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.getDefaultInstance();
|
|
|
- } else {
|
|
|
- requestedChecksumBuilder_.clear();
|
|
|
- }
|
|
|
- bitField0_ = (bitField0_ & ~0x00000100);
|
|
|
- return this;
|
|
|
- }
|
|
|
-
|
|
|
- public Builder clone() {
|
|
|
- return create().mergeFrom(buildPartial());
|
|
|
- }
|
|
|
-
|
|
|
- public com.google.protobuf.Descriptors.Descriptor
|
|
|
- getDescriptorForType() {
|
|
|
- return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto.getDescriptor();
|
|
|
- }
|
|
|
-
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto getDefaultInstanceForType() {
|
|
|
- return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto.getDefaultInstance();
|
|
|
- }
|
|
|
-
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto build() {
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto result = buildPartial();
|
|
|
- if (!result.isInitialized()) {
|
|
|
- throw newUninitializedMessageException(result);
|
|
|
- }
|
|
|
- return result;
|
|
|
- }
|
|
|
-
|
|
|
- private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto buildParsed()
|
|
|
- throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto result = buildPartial();
|
|
|
- if (!result.isInitialized()) {
|
|
|
- throw newUninitializedMessageException(
|
|
|
- result).asInvalidProtocolBufferException();
|
|
|
- }
|
|
|
- return result;
|
|
|
- }
|
|
|
-
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto buildPartial() {
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto result = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto(this);
|
|
|
- int from_bitField0_ = bitField0_;
|
|
|
- int to_bitField0_ = 0;
|
|
|
- if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
- to_bitField0_ |= 0x00000001;
|
|
|
- }
|
|
|
- if (headerBuilder_ == null) {
|
|
|
- result.header_ = header_;
|
|
|
- } else {
|
|
|
- result.header_ = headerBuilder_.build();
|
|
|
- }
|
|
|
- if (targetsBuilder_ == null) {
|
|
|
- if (((bitField0_ & 0x00000002) == 0x00000002)) {
|
|
|
- targets_ = java.util.Collections.unmodifiableList(targets_);
|
|
|
- bitField0_ = (bitField0_ & ~0x00000002);
|
|
|
- }
|
|
|
- result.targets_ = targets_;
|
|
|
- } else {
|
|
|
- result.targets_ = targetsBuilder_.build();
|
|
|
- }
|
|
|
- if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
|
|
|
- to_bitField0_ |= 0x00000002;
|
|
|
- }
|
|
|
- if (sourceBuilder_ == null) {
|
|
|
- result.source_ = source_;
|
|
|
- } else {
|
|
|
- result.source_ = sourceBuilder_.build();
|
|
|
- }
|
|
|
- if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
|
|
|
- to_bitField0_ |= 0x00000004;
|
|
|
- }
|
|
|
- result.stage_ = stage_;
|
|
|
- if (((from_bitField0_ & 0x00000010) == 0x00000010)) {
|
|
|
- to_bitField0_ |= 0x00000008;
|
|
|
- }
|
|
|
- result.pipelineSize_ = pipelineSize_;
|
|
|
- if (((from_bitField0_ & 0x00000020) == 0x00000020)) {
|
|
|
- to_bitField0_ |= 0x00000010;
|
|
|
- }
|
|
|
- result.minBytesRcvd_ = minBytesRcvd_;
|
|
|
- if (((from_bitField0_ & 0x00000040) == 0x00000040)) {
|
|
|
- to_bitField0_ |= 0x00000020;
|
|
|
- }
|
|
|
- result.maxBytesRcvd_ = maxBytesRcvd_;
|
|
|
- if (((from_bitField0_ & 0x00000080) == 0x00000080)) {
|
|
|
- to_bitField0_ |= 0x00000040;
|
|
|
- }
|
|
|
- result.latestGenerationStamp_ = latestGenerationStamp_;
|
|
|
- if (((from_bitField0_ & 0x00000100) == 0x00000100)) {
|
|
|
- to_bitField0_ |= 0x00000080;
|
|
|
- }
|
|
|
- if (requestedChecksumBuilder_ == null) {
|
|
|
- result.requestedChecksum_ = requestedChecksum_;
|
|
|
- } else {
|
|
|
- result.requestedChecksum_ = requestedChecksumBuilder_.build();
|
|
|
- }
|
|
|
- result.bitField0_ = to_bitField0_;
|
|
|
- onBuilt();
|
|
|
- return result;
|
|
|
- }
|
|
|
-
|
|
|
- public Builder mergeFrom(com.google.protobuf.Message other) {
|
|
|
- if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto) {
|
|
|
- return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto)other);
|
|
|
- } else {
|
|
|
- super.mergeFrom(other);
|
|
|
- return this;
|
|
|
- }
|
|
|
- }
|
|
|
-
|
|
|
- public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto other) {
|
|
|
- if (other == org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto.getDefaultInstance()) return this;
|
|
|
- if (other.hasHeader()) {
|
|
|
- mergeHeader(other.getHeader());
|
|
|
- }
|
|
|
- if (targetsBuilder_ == null) {
|
|
|
- if (!other.targets_.isEmpty()) {
|
|
|
- if (targets_.isEmpty()) {
|
|
|
- targets_ = other.targets_;
|
|
|
- bitField0_ = (bitField0_ & ~0x00000002);
|
|
|
- } else {
|
|
|
- ensureTargetsIsMutable();
|
|
|
- targets_.addAll(other.targets_);
|
|
|
- }
|
|
|
- onChanged();
|
|
|
- }
|
|
|
- } else {
|
|
|
- if (!other.targets_.isEmpty()) {
|
|
|
- if (targetsBuilder_.isEmpty()) {
|
|
|
- targetsBuilder_.dispose();
|
|
|
- targetsBuilder_ = null;
|
|
|
- targets_ = other.targets_;
|
|
|
- bitField0_ = (bitField0_ & ~0x00000002);
|
|
|
- targetsBuilder_ =
|
|
|
- com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
|
|
|
- getTargetsFieldBuilder() : null;
|
|
|
- } else {
|
|
|
- targetsBuilder_.addAllMessages(other.targets_);
|
|
|
- }
|
|
|
- }
|
|
|
- }
|
|
|
- if (other.hasSource()) {
|
|
|
- mergeSource(other.getSource());
|
|
|
- }
|
|
|
- if (other.hasStage()) {
|
|
|
- setStage(other.getStage());
|
|
|
- }
|
|
|
- if (other.hasPipelineSize()) {
|
|
|
- setPipelineSize(other.getPipelineSize());
|
|
|
- }
|
|
|
- if (other.hasMinBytesRcvd()) {
|
|
|
- setMinBytesRcvd(other.getMinBytesRcvd());
|
|
|
- }
|
|
|
- if (other.hasMaxBytesRcvd()) {
|
|
|
- setMaxBytesRcvd(other.getMaxBytesRcvd());
|
|
|
- }
|
|
|
- if (other.hasLatestGenerationStamp()) {
|
|
|
- setLatestGenerationStamp(other.getLatestGenerationStamp());
|
|
|
- }
|
|
|
- if (other.hasRequestedChecksum()) {
|
|
|
- mergeRequestedChecksum(other.getRequestedChecksum());
|
|
|
- }
|
|
|
- this.mergeUnknownFields(other.getUnknownFields());
|
|
|
- return this;
|
|
|
- }
|
|
|
-
|
|
|
- public final boolean isInitialized() {
|
|
|
- if (!hasHeader()) {
|
|
|
-
|
|
|
- return false;
|
|
|
- }
|
|
|
- if (!hasStage()) {
|
|
|
-
|
|
|
- return false;
|
|
|
- }
|
|
|
- if (!hasPipelineSize()) {
|
|
|
-
|
|
|
- return false;
|
|
|
- }
|
|
|
- if (!hasMinBytesRcvd()) {
|
|
|
-
|
|
|
- return false;
|
|
|
- }
|
|
|
- if (!hasMaxBytesRcvd()) {
|
|
|
-
|
|
|
- return false;
|
|
|
- }
|
|
|
- if (!hasLatestGenerationStamp()) {
|
|
|
-
|
|
|
- return false;
|
|
|
- }
|
|
|
- if (!hasRequestedChecksum()) {
|
|
|
-
|
|
|
- return false;
|
|
|
- }
|
|
|
- if (!getHeader().isInitialized()) {
|
|
|
-
|
|
|
- return false;
|
|
|
- }
|
|
|
- for (int i = 0; i < getTargetsCount(); i++) {
|
|
|
- if (!getTargets(i).isInitialized()) {
|
|
|
-
|
|
|
- return false;
|
|
|
- }
|
|
|
- }
|
|
|
- if (hasSource()) {
|
|
|
- if (!getSource().isInitialized()) {
|
|
|
-
|
|
|
- return false;
|
|
|
- }
|
|
|
- }
|
|
|
- if (!getRequestedChecksum().isInitialized()) {
|
|
|
-
|
|
|
- return false;
|
|
|
- }
|
|
|
- return true;
|
|
|
- }
|
|
|
-
|
|
|
- public Builder mergeFrom(
|
|
|
- com.google.protobuf.CodedInputStream input,
|
|
|
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
- throws java.io.IOException {
|
|
|
- com.google.protobuf.UnknownFieldSet.Builder unknownFields =
|
|
|
- com.google.protobuf.UnknownFieldSet.newBuilder(
|
|
|
- this.getUnknownFields());
|
|
|
- while (true) {
|
|
|
- int tag = input.readTag();
|
|
|
- switch (tag) {
|
|
|
- case 0:
|
|
|
- this.setUnknownFields(unknownFields.build());
|
|
|
- onChanged();
|
|
|
- return this;
|
|
|
- default: {
|
|
|
- if (!parseUnknownField(input, unknownFields,
|
|
|
- extensionRegistry, tag)) {
|
|
|
- this.setUnknownFields(unknownFields.build());
|
|
|
- onChanged();
|
|
|
- return this;
|
|
|
- }
|
|
|
- break;
|
|
|
- }
|
|
|
- case 10: {
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.newBuilder();
|
|
|
- if (hasHeader()) {
|
|
|
- subBuilder.mergeFrom(getHeader());
|
|
|
- }
|
|
|
- input.readMessage(subBuilder, extensionRegistry);
|
|
|
- setHeader(subBuilder.buildPartial());
|
|
|
- break;
|
|
|
- }
|
|
|
- case 18: {
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.newBuilder();
|
|
|
- input.readMessage(subBuilder, extensionRegistry);
|
|
|
- addTargets(subBuilder.buildPartial());
|
|
|
- break;
|
|
|
- }
|
|
|
- case 26: {
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.newBuilder();
|
|
|
- if (hasSource()) {
|
|
|
- subBuilder.mergeFrom(getSource());
|
|
|
- }
|
|
|
- input.readMessage(subBuilder, extensionRegistry);
|
|
|
- setSource(subBuilder.buildPartial());
|
|
|
- break;
|
|
|
- }
|
|
|
- case 32: {
|
|
|
- int rawValue = input.readEnum();
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto.BlockConstructionStage value = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto.BlockConstructionStage.valueOf(rawValue);
|
|
|
- if (value == null) {
|
|
|
- unknownFields.mergeVarintField(4, rawValue);
|
|
|
- } else {
|
|
|
- bitField0_ |= 0x00000008;
|
|
|
- stage_ = value;
|
|
|
- }
|
|
|
- break;
|
|
|
- }
|
|
|
- case 40: {
|
|
|
- bitField0_ |= 0x00000010;
|
|
|
- pipelineSize_ = input.readUInt32();
|
|
|
- break;
|
|
|
- }
|
|
|
- case 48: {
|
|
|
- bitField0_ |= 0x00000020;
|
|
|
- minBytesRcvd_ = input.readUInt64();
|
|
|
- break;
|
|
|
- }
|
|
|
- case 56: {
|
|
|
- bitField0_ |= 0x00000040;
|
|
|
- maxBytesRcvd_ = input.readUInt64();
|
|
|
- break;
|
|
|
- }
|
|
|
- case 64: {
|
|
|
- bitField0_ |= 0x00000080;
|
|
|
- latestGenerationStamp_ = input.readUInt64();
|
|
|
- break;
|
|
|
- }
|
|
|
- case 74: {
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.newBuilder();
|
|
|
- if (hasRequestedChecksum()) {
|
|
|
- subBuilder.mergeFrom(getRequestedChecksum());
|
|
|
- }
|
|
|
- input.readMessage(subBuilder, extensionRegistry);
|
|
|
- setRequestedChecksum(subBuilder.buildPartial());
|
|
|
- break;
|
|
|
- }
|
|
|
- }
|
|
|
- }
|
|
|
- }
|
|
|
-
|
|
|
- private int bitField0_;
|
|
|
-
|
|
|
- // required .ClientOperationHeaderProto header = 1;
|
|
|
- private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto header_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.getDefaultInstance();
|
|
|
- private com.google.protobuf.SingleFieldBuilder<
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProtoOrBuilder> headerBuilder_;
|
|
|
- public boolean hasHeader() {
|
|
|
- return ((bitField0_ & 0x00000001) == 0x00000001);
|
|
|
- }
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto getHeader() {
|
|
|
- if (headerBuilder_ == null) {
|
|
|
- return header_;
|
|
|
- } else {
|
|
|
- return headerBuilder_.getMessage();
|
|
|
- }
|
|
|
- }
|
|
|
- public Builder setHeader(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto value) {
|
|
|
- if (headerBuilder_ == null) {
|
|
|
- if (value == null) {
|
|
|
- throw new NullPointerException();
|
|
|
- }
|
|
|
- header_ = value;
|
|
|
- onChanged();
|
|
|
- } else {
|
|
|
- headerBuilder_.setMessage(value);
|
|
|
- }
|
|
|
- bitField0_ |= 0x00000001;
|
|
|
- return this;
|
|
|
- }
|
|
|
- public Builder setHeader(
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.Builder builderForValue) {
|
|
|
- if (headerBuilder_ == null) {
|
|
|
- header_ = builderForValue.build();
|
|
|
- onChanged();
|
|
|
- } else {
|
|
|
- headerBuilder_.setMessage(builderForValue.build());
|
|
|
- }
|
|
|
- bitField0_ |= 0x00000001;
|
|
|
- return this;
|
|
|
- }
|
|
|
- public Builder mergeHeader(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto value) {
|
|
|
- if (headerBuilder_ == null) {
|
|
|
- if (((bitField0_ & 0x00000001) == 0x00000001) &&
|
|
|
- header_ != org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.getDefaultInstance()) {
|
|
|
- header_ =
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.newBuilder(header_).mergeFrom(value).buildPartial();
|
|
|
- } else {
|
|
|
- header_ = value;
|
|
|
- }
|
|
|
- onChanged();
|
|
|
- } else {
|
|
|
- headerBuilder_.mergeFrom(value);
|
|
|
- }
|
|
|
- bitField0_ |= 0x00000001;
|
|
|
- return this;
|
|
|
- }
|
|
|
- public Builder clearHeader() {
|
|
|
- if (headerBuilder_ == null) {
|
|
|
- header_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.getDefaultInstance();
|
|
|
- onChanged();
|
|
|
- } else {
|
|
|
- headerBuilder_.clear();
|
|
|
- }
|
|
|
- bitField0_ = (bitField0_ & ~0x00000001);
|
|
|
- return this;
|
|
|
- }
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.Builder getHeaderBuilder() {
|
|
|
- bitField0_ |= 0x00000001;
|
|
|
- onChanged();
|
|
|
- return getHeaderFieldBuilder().getBuilder();
|
|
|
- }
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProtoOrBuilder getHeaderOrBuilder() {
|
|
|
- if (headerBuilder_ != null) {
|
|
|
- return headerBuilder_.getMessageOrBuilder();
|
|
|
- } else {
|
|
|
- return header_;
|
|
|
- }
|
|
|
- }
|
|
|
- private com.google.protobuf.SingleFieldBuilder<
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProtoOrBuilder>
|
|
|
- getHeaderFieldBuilder() {
|
|
|
- if (headerBuilder_ == null) {
|
|
|
- headerBuilder_ = new com.google.protobuf.SingleFieldBuilder<
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProtoOrBuilder>(
|
|
|
- header_,
|
|
|
- getParentForChildren(),
|
|
|
- isClean());
|
|
|
- header_ = null;
|
|
|
- }
|
|
|
- return headerBuilder_;
|
|
|
- }
|
|
|
-
|
|
|
- // repeated .DatanodeInfoProto targets = 2;
|
|
|
- private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto> targets_ =
|
|
|
- java.util.Collections.emptyList();
|
|
|
- private void ensureTargetsIsMutable() {
|
|
|
- if (!((bitField0_ & 0x00000002) == 0x00000002)) {
|
|
|
- targets_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto>(targets_);
|
|
|
- bitField0_ |= 0x00000002;
|
|
|
- }
|
|
|
- }
|
|
|
-
|
|
|
- private com.google.protobuf.RepeatedFieldBuilder<
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder> targetsBuilder_;
|
|
|
-
|
|
|
- public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto> getTargetsList() {
|
|
|
- if (targetsBuilder_ == null) {
|
|
|
- return java.util.Collections.unmodifiableList(targets_);
|
|
|
- } else {
|
|
|
- return targetsBuilder_.getMessageList();
|
|
|
- }
|
|
|
- }
|
|
|
- public int getTargetsCount() {
|
|
|
- if (targetsBuilder_ == null) {
|
|
|
- return targets_.size();
|
|
|
- } else {
|
|
|
- return targetsBuilder_.getCount();
|
|
|
- }
|
|
|
- }
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getTargets(int index) {
|
|
|
- if (targetsBuilder_ == null) {
|
|
|
- return targets_.get(index);
|
|
|
- } else {
|
|
|
- return targetsBuilder_.getMessage(index);
|
|
|
- }
|
|
|
- }
|
|
|
- public Builder setTargets(
|
|
|
- int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto value) {
|
|
|
- if (targetsBuilder_ == null) {
|
|
|
- if (value == null) {
|
|
|
- throw new NullPointerException();
|
|
|
- }
|
|
|
- ensureTargetsIsMutable();
|
|
|
- targets_.set(index, value);
|
|
|
- onChanged();
|
|
|
- } else {
|
|
|
- targetsBuilder_.setMessage(index, value);
|
|
|
- }
|
|
|
- return this;
|
|
|
- }
|
|
|
- public Builder setTargets(
|
|
|
- int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder builderForValue) {
|
|
|
- if (targetsBuilder_ == null) {
|
|
|
- ensureTargetsIsMutable();
|
|
|
- targets_.set(index, builderForValue.build());
|
|
|
- onChanged();
|
|
|
- } else {
|
|
|
- targetsBuilder_.setMessage(index, builderForValue.build());
|
|
|
- }
|
|
|
- return this;
|
|
|
- }
|
|
|
- public Builder addTargets(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto value) {
|
|
|
- if (targetsBuilder_ == null) {
|
|
|
- if (value == null) {
|
|
|
- throw new NullPointerException();
|
|
|
- }
|
|
|
- ensureTargetsIsMutable();
|
|
|
- targets_.add(value);
|
|
|
- onChanged();
|
|
|
- } else {
|
|
|
- targetsBuilder_.addMessage(value);
|
|
|
- }
|
|
|
- return this;
|
|
|
- }
|
|
|
- public Builder addTargets(
|
|
|
- int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto value) {
|
|
|
- if (targetsBuilder_ == null) {
|
|
|
- if (value == null) {
|
|
|
- throw new NullPointerException();
|
|
|
- }
|
|
|
- ensureTargetsIsMutable();
|
|
|
- targets_.add(index, value);
|
|
|
- onChanged();
|
|
|
- } else {
|
|
|
- targetsBuilder_.addMessage(index, value);
|
|
|
- }
|
|
|
- return this;
|
|
|
- }
|
|
|
- public Builder addTargets(
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder builderForValue) {
|
|
|
- if (targetsBuilder_ == null) {
|
|
|
- ensureTargetsIsMutable();
|
|
|
- targets_.add(builderForValue.build());
|
|
|
- onChanged();
|
|
|
- } else {
|
|
|
- targetsBuilder_.addMessage(builderForValue.build());
|
|
|
- }
|
|
|
- return this;
|
|
|
- }
|
|
|
- public Builder addTargets(
|
|
|
- int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder builderForValue) {
|
|
|
- if (targetsBuilder_ == null) {
|
|
|
- ensureTargetsIsMutable();
|
|
|
- targets_.add(index, builderForValue.build());
|
|
|
- onChanged();
|
|
|
- } else {
|
|
|
- targetsBuilder_.addMessage(index, builderForValue.build());
|
|
|
- }
|
|
|
- return this;
|
|
|
- }
|
|
|
- public Builder addAllTargets(
|
|
|
- java.lang.Iterable<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto> values) {
|
|
|
- if (targetsBuilder_ == null) {
|
|
|
- ensureTargetsIsMutable();
|
|
|
- super.addAll(values, targets_);
|
|
|
- onChanged();
|
|
|
- } else {
|
|
|
- targetsBuilder_.addAllMessages(values);
|
|
|
- }
|
|
|
- return this;
|
|
|
- }
|
|
|
- public Builder clearTargets() {
|
|
|
- if (targetsBuilder_ == null) {
|
|
|
- targets_ = java.util.Collections.emptyList();
|
|
|
- bitField0_ = (bitField0_ & ~0x00000002);
|
|
|
- onChanged();
|
|
|
- } else {
|
|
|
- targetsBuilder_.clear();
|
|
|
- }
|
|
|
- return this;
|
|
|
- }
|
|
|
- public Builder removeTargets(int index) {
|
|
|
- if (targetsBuilder_ == null) {
|
|
|
- ensureTargetsIsMutable();
|
|
|
- targets_.remove(index);
|
|
|
- onChanged();
|
|
|
- } else {
|
|
|
- targetsBuilder_.remove(index);
|
|
|
- }
|
|
|
- return this;
|
|
|
- }
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder getTargetsBuilder(
|
|
|
- int index) {
|
|
|
- return getTargetsFieldBuilder().getBuilder(index);
|
|
|
- }
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder getTargetsOrBuilder(
|
|
|
- int index) {
|
|
|
- if (targetsBuilder_ == null) {
|
|
|
- return targets_.get(index); } else {
|
|
|
- return targetsBuilder_.getMessageOrBuilder(index);
|
|
|
- }
|
|
|
- }
|
|
|
- public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder>
|
|
|
- getTargetsOrBuilderList() {
|
|
|
- if (targetsBuilder_ != null) {
|
|
|
- return targetsBuilder_.getMessageOrBuilderList();
|
|
|
- } else {
|
|
|
- return java.util.Collections.unmodifiableList(targets_);
|
|
|
- }
|
|
|
- }
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder addTargetsBuilder() {
|
|
|
- return getTargetsFieldBuilder().addBuilder(
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDefaultInstance());
|
|
|
- }
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder addTargetsBuilder(
|
|
|
- int index) {
|
|
|
- return getTargetsFieldBuilder().addBuilder(
|
|
|
- index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDefaultInstance());
|
|
|
- }
|
|
|
- public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder>
|
|
|
- getTargetsBuilderList() {
|
|
|
- return getTargetsFieldBuilder().getBuilderList();
|
|
|
- }
|
|
|
- private com.google.protobuf.RepeatedFieldBuilder<
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder>
|
|
|
- getTargetsFieldBuilder() {
|
|
|
- if (targetsBuilder_ == null) {
|
|
|
- targetsBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder>(
|
|
|
- targets_,
|
|
|
- ((bitField0_ & 0x00000002) == 0x00000002),
|
|
|
- getParentForChildren(),
|
|
|
- isClean());
|
|
|
- targets_ = null;
|
|
|
- }
|
|
|
- return targetsBuilder_;
|
|
|
- }
|
|
|
-
|
|
|
- // optional .DatanodeInfoProto source = 3;
|
|
|
- private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto source_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDefaultInstance();
|
|
|
- private com.google.protobuf.SingleFieldBuilder<
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder> sourceBuilder_;
|
|
|
- public boolean hasSource() {
|
|
|
- return ((bitField0_ & 0x00000004) == 0x00000004);
|
|
|
- }
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getSource() {
|
|
|
- if (sourceBuilder_ == null) {
|
|
|
- return source_;
|
|
|
- } else {
|
|
|
- return sourceBuilder_.getMessage();
|
|
|
- }
|
|
|
- }
|
|
|
- public Builder setSource(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto value) {
|
|
|
- if (sourceBuilder_ == null) {
|
|
|
- if (value == null) {
|
|
|
- throw new NullPointerException();
|
|
|
- }
|
|
|
- source_ = value;
|
|
|
- onChanged();
|
|
|
- } else {
|
|
|
- sourceBuilder_.setMessage(value);
|
|
|
- }
|
|
|
- bitField0_ |= 0x00000004;
|
|
|
- return this;
|
|
|
- }
|
|
|
- public Builder setSource(
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder builderForValue) {
|
|
|
- if (sourceBuilder_ == null) {
|
|
|
- source_ = builderForValue.build();
|
|
|
- onChanged();
|
|
|
- } else {
|
|
|
- sourceBuilder_.setMessage(builderForValue.build());
|
|
|
- }
|
|
|
- bitField0_ |= 0x00000004;
|
|
|
- return this;
|
|
|
- }
|
|
|
- public Builder mergeSource(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto value) {
|
|
|
- if (sourceBuilder_ == null) {
|
|
|
- if (((bitField0_ & 0x00000004) == 0x00000004) &&
|
|
|
- source_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDefaultInstance()) {
|
|
|
- source_ =
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.newBuilder(source_).mergeFrom(value).buildPartial();
|
|
|
- } else {
|
|
|
- source_ = value;
|
|
|
- }
|
|
|
- onChanged();
|
|
|
- } else {
|
|
|
- sourceBuilder_.mergeFrom(value);
|
|
|
- }
|
|
|
- bitField0_ |= 0x00000004;
|
|
|
- return this;
|
|
|
- }
|
|
|
- public Builder clearSource() {
|
|
|
- if (sourceBuilder_ == null) {
|
|
|
- source_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDefaultInstance();
|
|
|
- onChanged();
|
|
|
- } else {
|
|
|
- sourceBuilder_.clear();
|
|
|
- }
|
|
|
- bitField0_ = (bitField0_ & ~0x00000004);
|
|
|
- return this;
|
|
|
- }
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder getSourceBuilder() {
|
|
|
- bitField0_ |= 0x00000004;
|
|
|
- onChanged();
|
|
|
- return getSourceFieldBuilder().getBuilder();
|
|
|
- }
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder getSourceOrBuilder() {
|
|
|
- if (sourceBuilder_ != null) {
|
|
|
- return sourceBuilder_.getMessageOrBuilder();
|
|
|
- } else {
|
|
|
- return source_;
|
|
|
- }
|
|
|
- }
|
|
|
- private com.google.protobuf.SingleFieldBuilder<
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder>
|
|
|
- getSourceFieldBuilder() {
|
|
|
- if (sourceBuilder_ == null) {
|
|
|
- sourceBuilder_ = new com.google.protobuf.SingleFieldBuilder<
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder>(
|
|
|
- source_,
|
|
|
- getParentForChildren(),
|
|
|
- isClean());
|
|
|
- source_ = null;
|
|
|
- }
|
|
|
- return sourceBuilder_;
|
|
|
- }
|
|
|
-
|
|
|
- // required .OpWriteBlockProto.BlockConstructionStage stage = 4;
|
|
|
- private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto.BlockConstructionStage stage_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto.BlockConstructionStage.PIPELINE_SETUP_APPEND;
|
|
|
- public boolean hasStage() {
|
|
|
- return ((bitField0_ & 0x00000008) == 0x00000008);
|
|
|
- }
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto.BlockConstructionStage getStage() {
|
|
|
- return stage_;
|
|
|
- }
|
|
|
- public Builder setStage(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto.BlockConstructionStage value) {
|
|
|
- if (value == null) {
|
|
|
- throw new NullPointerException();
|
|
|
- }
|
|
|
- bitField0_ |= 0x00000008;
|
|
|
- stage_ = value;
|
|
|
- onChanged();
|
|
|
- return this;
|
|
|
- }
|
|
|
- public Builder clearStage() {
|
|
|
- bitField0_ = (bitField0_ & ~0x00000008);
|
|
|
- stage_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto.BlockConstructionStage.PIPELINE_SETUP_APPEND;
|
|
|
- onChanged();
|
|
|
- return this;
|
|
|
- }
|
|
|
-
|
|
|
- // required uint32 pipelineSize = 5;
|
|
|
- private int pipelineSize_ ;
|
|
|
- public boolean hasPipelineSize() {
|
|
|
- return ((bitField0_ & 0x00000010) == 0x00000010);
|
|
|
- }
|
|
|
- public int getPipelineSize() {
|
|
|
- return pipelineSize_;
|
|
|
- }
|
|
|
- public Builder setPipelineSize(int value) {
|
|
|
- bitField0_ |= 0x00000010;
|
|
|
- pipelineSize_ = value;
|
|
|
- onChanged();
|
|
|
- return this;
|
|
|
- }
|
|
|
- public Builder clearPipelineSize() {
|
|
|
- bitField0_ = (bitField0_ & ~0x00000010);
|
|
|
- pipelineSize_ = 0;
|
|
|
- onChanged();
|
|
|
- return this;
|
|
|
- }
|
|
|
-
|
|
|
- // required uint64 minBytesRcvd = 6;
|
|
|
- private long minBytesRcvd_ ;
|
|
|
- public boolean hasMinBytesRcvd() {
|
|
|
- return ((bitField0_ & 0x00000020) == 0x00000020);
|
|
|
- }
|
|
|
- public long getMinBytesRcvd() {
|
|
|
- return minBytesRcvd_;
|
|
|
- }
|
|
|
- public Builder setMinBytesRcvd(long value) {
|
|
|
- bitField0_ |= 0x00000020;
|
|
|
- minBytesRcvd_ = value;
|
|
|
- onChanged();
|
|
|
- return this;
|
|
|
- }
|
|
|
- public Builder clearMinBytesRcvd() {
|
|
|
- bitField0_ = (bitField0_ & ~0x00000020);
|
|
|
- minBytesRcvd_ = 0L;
|
|
|
- onChanged();
|
|
|
- return this;
|
|
|
- }
|
|
|
-
|
|
|
- // required uint64 maxBytesRcvd = 7;
|
|
|
- private long maxBytesRcvd_ ;
|
|
|
- public boolean hasMaxBytesRcvd() {
|
|
|
- return ((bitField0_ & 0x00000040) == 0x00000040);
|
|
|
- }
|
|
|
- public long getMaxBytesRcvd() {
|
|
|
- return maxBytesRcvd_;
|
|
|
- }
|
|
|
- public Builder setMaxBytesRcvd(long value) {
|
|
|
- bitField0_ |= 0x00000040;
|
|
|
- maxBytesRcvd_ = value;
|
|
|
- onChanged();
|
|
|
- return this;
|
|
|
- }
|
|
|
- public Builder clearMaxBytesRcvd() {
|
|
|
- bitField0_ = (bitField0_ & ~0x00000040);
|
|
|
- maxBytesRcvd_ = 0L;
|
|
|
- onChanged();
|
|
|
- return this;
|
|
|
- }
|
|
|
-
|
|
|
- // required uint64 latestGenerationStamp = 8;
|
|
|
- private long latestGenerationStamp_ ;
|
|
|
- public boolean hasLatestGenerationStamp() {
|
|
|
- return ((bitField0_ & 0x00000080) == 0x00000080);
|
|
|
- }
|
|
|
- public long getLatestGenerationStamp() {
|
|
|
- return latestGenerationStamp_;
|
|
|
- }
|
|
|
- public Builder setLatestGenerationStamp(long value) {
|
|
|
- bitField0_ |= 0x00000080;
|
|
|
- latestGenerationStamp_ = value;
|
|
|
- onChanged();
|
|
|
- return this;
|
|
|
- }
|
|
|
- public Builder clearLatestGenerationStamp() {
|
|
|
- bitField0_ = (bitField0_ & ~0x00000080);
|
|
|
- latestGenerationStamp_ = 0L;
|
|
|
- onChanged();
|
|
|
- return this;
|
|
|
- }
|
|
|
-
|
|
|
- // required .ChecksumProto requestedChecksum = 9;
|
|
|
- private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto requestedChecksum_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.getDefaultInstance();
|
|
|
- private com.google.protobuf.SingleFieldBuilder<
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProtoOrBuilder> requestedChecksumBuilder_;
|
|
|
- public boolean hasRequestedChecksum() {
|
|
|
- return ((bitField0_ & 0x00000100) == 0x00000100);
|
|
|
- }
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto getRequestedChecksum() {
|
|
|
- if (requestedChecksumBuilder_ == null) {
|
|
|
- return requestedChecksum_;
|
|
|
- } else {
|
|
|
- return requestedChecksumBuilder_.getMessage();
|
|
|
- }
|
|
|
- }
|
|
|
- public Builder setRequestedChecksum(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto value) {
|
|
|
- if (requestedChecksumBuilder_ == null) {
|
|
|
- if (value == null) {
|
|
|
- throw new NullPointerException();
|
|
|
- }
|
|
|
- requestedChecksum_ = value;
|
|
|
- onChanged();
|
|
|
- } else {
|
|
|
- requestedChecksumBuilder_.setMessage(value);
|
|
|
- }
|
|
|
- bitField0_ |= 0x00000100;
|
|
|
- return this;
|
|
|
- }
|
|
|
- public Builder setRequestedChecksum(
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.Builder builderForValue) {
|
|
|
- if (requestedChecksumBuilder_ == null) {
|
|
|
- requestedChecksum_ = builderForValue.build();
|
|
|
- onChanged();
|
|
|
- } else {
|
|
|
- requestedChecksumBuilder_.setMessage(builderForValue.build());
|
|
|
- }
|
|
|
- bitField0_ |= 0x00000100;
|
|
|
- return this;
|
|
|
- }
|
|
|
- public Builder mergeRequestedChecksum(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto value) {
|
|
|
- if (requestedChecksumBuilder_ == null) {
|
|
|
- if (((bitField0_ & 0x00000100) == 0x00000100) &&
|
|
|
- requestedChecksum_ != org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.getDefaultInstance()) {
|
|
|
- requestedChecksum_ =
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.newBuilder(requestedChecksum_).mergeFrom(value).buildPartial();
|
|
|
- } else {
|
|
|
- requestedChecksum_ = value;
|
|
|
- }
|
|
|
- onChanged();
|
|
|
- } else {
|
|
|
- requestedChecksumBuilder_.mergeFrom(value);
|
|
|
- }
|
|
|
- bitField0_ |= 0x00000100;
|
|
|
- return this;
|
|
|
- }
|
|
|
- public Builder clearRequestedChecksum() {
|
|
|
- if (requestedChecksumBuilder_ == null) {
|
|
|
- requestedChecksum_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.getDefaultInstance();
|
|
|
- onChanged();
|
|
|
- } else {
|
|
|
- requestedChecksumBuilder_.clear();
|
|
|
- }
|
|
|
- bitField0_ = (bitField0_ & ~0x00000100);
|
|
|
- return this;
|
|
|
- }
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.Builder getRequestedChecksumBuilder() {
|
|
|
- bitField0_ |= 0x00000100;
|
|
|
- onChanged();
|
|
|
- return getRequestedChecksumFieldBuilder().getBuilder();
|
|
|
- }
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProtoOrBuilder getRequestedChecksumOrBuilder() {
|
|
|
- if (requestedChecksumBuilder_ != null) {
|
|
|
- return requestedChecksumBuilder_.getMessageOrBuilder();
|
|
|
- } else {
|
|
|
- return requestedChecksum_;
|
|
|
- }
|
|
|
- }
|
|
|
- private com.google.protobuf.SingleFieldBuilder<
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProtoOrBuilder>
|
|
|
- getRequestedChecksumFieldBuilder() {
|
|
|
- if (requestedChecksumBuilder_ == null) {
|
|
|
- requestedChecksumBuilder_ = new com.google.protobuf.SingleFieldBuilder<
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProtoOrBuilder>(
|
|
|
- requestedChecksum_,
|
|
|
- getParentForChildren(),
|
|
|
- isClean());
|
|
|
- requestedChecksum_ = null;
|
|
|
- }
|
|
|
- return requestedChecksumBuilder_;
|
|
|
- }
|
|
|
-
|
|
|
- // @@protoc_insertion_point(builder_scope:OpWriteBlockProto)
|
|
|
- }
|
|
|
-
|
|
|
- static {
|
|
|
- defaultInstance = new OpWriteBlockProto(true);
|
|
|
- defaultInstance.initFields();
|
|
|
- }
|
|
|
-
|
|
|
- // @@protoc_insertion_point(class_scope:OpWriteBlockProto)
|
|
|
- }
|
|
|
-
|
|
|
- public interface OpTransferBlockProtoOrBuilder
|
|
|
- extends com.google.protobuf.MessageOrBuilder {
|
|
|
-
|
|
|
- // required .ClientOperationHeaderProto header = 1;
|
|
|
- boolean hasHeader();
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto getHeader();
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProtoOrBuilder getHeaderOrBuilder();
|
|
|
-
|
|
|
- // repeated .DatanodeInfoProto targets = 2;
|
|
|
- java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto>
|
|
|
- getTargetsList();
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getTargets(int index);
|
|
|
- int getTargetsCount();
|
|
|
- java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder>
|
|
|
- getTargetsOrBuilderList();
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder getTargetsOrBuilder(
|
|
|
- int index);
|
|
|
- }
|
|
|
- public static final class OpTransferBlockProto extends
|
|
|
- com.google.protobuf.GeneratedMessage
|
|
|
- implements OpTransferBlockProtoOrBuilder {
|
|
|
- // Use OpTransferBlockProto.newBuilder() to construct.
|
|
|
- private OpTransferBlockProto(Builder builder) {
|
|
|
- super(builder);
|
|
|
- }
|
|
|
- private OpTransferBlockProto(boolean noInit) {}
|
|
|
-
|
|
|
- private static final OpTransferBlockProto defaultInstance;
|
|
|
- public static OpTransferBlockProto getDefaultInstance() {
|
|
|
- return defaultInstance;
|
|
|
- }
|
|
|
-
|
|
|
- public OpTransferBlockProto getDefaultInstanceForType() {
|
|
|
- return defaultInstance;
|
|
|
- }
|
|
|
-
|
|
|
- public static final com.google.protobuf.Descriptors.Descriptor
|
|
|
- getDescriptor() {
|
|
|
- return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_OpTransferBlockProto_descriptor;
|
|
|
- }
|
|
|
-
|
|
|
- protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
|
|
- internalGetFieldAccessorTable() {
|
|
|
- return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_OpTransferBlockProto_fieldAccessorTable;
|
|
|
- }
|
|
|
-
|
|
|
- private int bitField0_;
|
|
|
- // required .ClientOperationHeaderProto header = 1;
|
|
|
- public static final int HEADER_FIELD_NUMBER = 1;
|
|
|
- private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto header_;
|
|
|
- public boolean hasHeader() {
|
|
|
- return ((bitField0_ & 0x00000001) == 0x00000001);
|
|
|
- }
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto getHeader() {
|
|
|
- return header_;
|
|
|
- }
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProtoOrBuilder getHeaderOrBuilder() {
|
|
|
- return header_;
|
|
|
- }
|
|
|
-
|
|
|
- // repeated .DatanodeInfoProto targets = 2;
|
|
|
- public static final int TARGETS_FIELD_NUMBER = 2;
|
|
|
- private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto> targets_;
|
|
|
- public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto> getTargetsList() {
|
|
|
- return targets_;
|
|
|
- }
|
|
|
- public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder>
|
|
|
- getTargetsOrBuilderList() {
|
|
|
- return targets_;
|
|
|
- }
|
|
|
- public int getTargetsCount() {
|
|
|
- return targets_.size();
|
|
|
- }
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getTargets(int index) {
|
|
|
- return targets_.get(index);
|
|
|
- }
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder getTargetsOrBuilder(
|
|
|
- int index) {
|
|
|
- return targets_.get(index);
|
|
|
- }
|
|
|
-
|
|
|
- private void initFields() {
|
|
|
- header_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.getDefaultInstance();
|
|
|
- targets_ = java.util.Collections.emptyList();
|
|
|
- }
|
|
|
- private byte memoizedIsInitialized = -1;
|
|
|
- public final boolean isInitialized() {
|
|
|
- byte isInitialized = memoizedIsInitialized;
|
|
|
- if (isInitialized != -1) return isInitialized == 1;
|
|
|
-
|
|
|
- if (!hasHeader()) {
|
|
|
- memoizedIsInitialized = 0;
|
|
|
- return false;
|
|
|
- }
|
|
|
- if (!getHeader().isInitialized()) {
|
|
|
- memoizedIsInitialized = 0;
|
|
|
- return false;
|
|
|
- }
|
|
|
- for (int i = 0; i < getTargetsCount(); i++) {
|
|
|
- if (!getTargets(i).isInitialized()) {
|
|
|
- memoizedIsInitialized = 0;
|
|
|
- return false;
|
|
|
- }
|
|
|
- }
|
|
|
- memoizedIsInitialized = 1;
|
|
|
- return true;
|
|
|
- }
|
|
|
-
|
|
|
- public void writeTo(com.google.protobuf.CodedOutputStream output)
|
|
|
- throws java.io.IOException {
|
|
|
- getSerializedSize();
|
|
|
- if (((bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
- output.writeMessage(1, header_);
|
|
|
- }
|
|
|
- for (int i = 0; i < targets_.size(); i++) {
|
|
|
- output.writeMessage(2, targets_.get(i));
|
|
|
- }
|
|
|
- getUnknownFields().writeTo(output);
|
|
|
- }
|
|
|
-
|
|
|
- private int memoizedSerializedSize = -1;
|
|
|
- public int getSerializedSize() {
|
|
|
- int size = memoizedSerializedSize;
|
|
|
- if (size != -1) return size;
|
|
|
-
|
|
|
- size = 0;
|
|
|
- if (((bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
- size += com.google.protobuf.CodedOutputStream
|
|
|
- .computeMessageSize(1, header_);
|
|
|
- }
|
|
|
- for (int i = 0; i < targets_.size(); i++) {
|
|
|
- size += com.google.protobuf.CodedOutputStream
|
|
|
- .computeMessageSize(2, targets_.get(i));
|
|
|
- }
|
|
|
- size += getUnknownFields().getSerializedSize();
|
|
|
- memoizedSerializedSize = size;
|
|
|
- return size;
|
|
|
- }
|
|
|
-
|
|
|
- private static final long serialVersionUID = 0L;
|
|
|
- @java.lang.Override
|
|
|
- protected java.lang.Object writeReplace()
|
|
|
- throws java.io.ObjectStreamException {
|
|
|
- return super.writeReplace();
|
|
|
- }
|
|
|
-
|
|
|
- @java.lang.Override
|
|
|
- public boolean equals(final java.lang.Object obj) {
|
|
|
- if (obj == this) {
|
|
|
- return true;
|
|
|
- }
|
|
|
- if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto)) {
|
|
|
- return super.equals(obj);
|
|
|
- }
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto other = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto) obj;
|
|
|
-
|
|
|
- boolean result = true;
|
|
|
- result = result && (hasHeader() == other.hasHeader());
|
|
|
- if (hasHeader()) {
|
|
|
- result = result && getHeader()
|
|
|
- .equals(other.getHeader());
|
|
|
- }
|
|
|
- result = result && getTargetsList()
|
|
|
- .equals(other.getTargetsList());
|
|
|
- result = result &&
|
|
|
- getUnknownFields().equals(other.getUnknownFields());
|
|
|
- return result;
|
|
|
- }
|
|
|
-
|
|
|
- @java.lang.Override
|
|
|
- public int hashCode() {
|
|
|
- int hash = 41;
|
|
|
- hash = (19 * hash) + getDescriptorForType().hashCode();
|
|
|
- if (hasHeader()) {
|
|
|
- hash = (37 * hash) + HEADER_FIELD_NUMBER;
|
|
|
- hash = (53 * hash) + getHeader().hashCode();
|
|
|
- }
|
|
|
- if (getTargetsCount() > 0) {
|
|
|
- hash = (37 * hash) + TARGETS_FIELD_NUMBER;
|
|
|
- hash = (53 * hash) + getTargetsList().hashCode();
|
|
|
- }
|
|
|
- hash = (29 * hash) + getUnknownFields().hashCode();
|
|
|
- return hash;
|
|
|
- }
|
|
|
-
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto parseFrom(
- com.google.protobuf.ByteString data)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return newBuilder().mergeFrom(data).buildParsed();
- }
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto parseFrom(
- com.google.protobuf.ByteString data,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return newBuilder().mergeFrom(data, extensionRegistry)
- .buildParsed();
- }
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto parseFrom(byte[] data)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return newBuilder().mergeFrom(data).buildParsed();
- }
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto parseFrom(
- byte[] data,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return newBuilder().mergeFrom(data, extensionRegistry)
- .buildParsed();
- }
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto parseFrom(java.io.InputStream input)
- throws java.io.IOException {
- return newBuilder().mergeFrom(input).buildParsed();
- }
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto parseFrom(
- java.io.InputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws java.io.IOException {
- return newBuilder().mergeFrom(input, extensionRegistry)
- .buildParsed();
- }
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto parseDelimitedFrom(java.io.InputStream input)
- throws java.io.IOException {
- Builder builder = newBuilder();
- if (builder.mergeDelimitedFrom(input)) {
- return builder.buildParsed();
- } else {
- return null;
- }
- }
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto parseDelimitedFrom(
- java.io.InputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws java.io.IOException {
- Builder builder = newBuilder();
- if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
- return builder.buildParsed();
- } else {
- return null;
- }
- }
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto parseFrom(
- com.google.protobuf.CodedInputStream input)
- throws java.io.IOException {
- return newBuilder().mergeFrom(input).buildParsed();
- }
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto parseFrom(
- com.google.protobuf.CodedInputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws java.io.IOException {
- return newBuilder().mergeFrom(input, extensionRegistry)
- .buildParsed();
- }
-
- public static Builder newBuilder() { return Builder.create(); }
- public Builder newBuilderForType() { return newBuilder(); }
- public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto prototype) {
- return newBuilder().mergeFrom(prototype);
- }
- public Builder toBuilder() { return newBuilder(this); }
-
- @java.lang.Override
- protected Builder newBuilderForType(
- com.google.protobuf.GeneratedMessage.BuilderParent parent) {
- Builder builder = new Builder(parent);
- return builder;
- }
- public static final class Builder extends
|
|
|
- com.google.protobuf.GeneratedMessage.Builder<Builder>
|
|
|
- implements org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProtoOrBuilder {
|
|
|
- public static final com.google.protobuf.Descriptors.Descriptor
|
|
|
- getDescriptor() {
|
|
|
- return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_OpTransferBlockProto_descriptor;
|
|
|
- }
|
|
|
-
|
|
|
- protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
|
|
- internalGetFieldAccessorTable() {
|
|
|
- return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_OpTransferBlockProto_fieldAccessorTable;
|
|
|
- }
|
|
|
-
|
|
|
- // Construct using org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto.newBuilder()
|
|
|
- private Builder() {
|
|
|
- maybeForceBuilderInitialization();
|
|
|
- }
|
|
|
-
|
|
|
- private Builder(BuilderParent parent) {
|
|
|
- super(parent);
|
|
|
- maybeForceBuilderInitialization();
|
|
|
- }
|
|
|
- private void maybeForceBuilderInitialization() {
|
|
|
- if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
|
|
|
- getHeaderFieldBuilder();
|
|
|
- getTargetsFieldBuilder();
|
|
|
- }
|
|
|
- }
|
|
|
- private static Builder create() {
|
|
|
- return new Builder();
|
|
|
- }
|
|
|
-
|
|
|
- public Builder clear() {
|
|
|
- super.clear();
|
|
|
- if (headerBuilder_ == null) {
|
|
|
- header_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.getDefaultInstance();
|
|
|
- } else {
|
|
|
- headerBuilder_.clear();
|
|
|
- }
|
|
|
- bitField0_ = (bitField0_ & ~0x00000001);
|
|
|
- if (targetsBuilder_ == null) {
|
|
|
- targets_ = java.util.Collections.emptyList();
|
|
|
- bitField0_ = (bitField0_ & ~0x00000002);
|
|
|
- } else {
|
|
|
- targetsBuilder_.clear();
|
|
|
- }
|
|
|
- return this;
|
|
|
- }
|
|
|
-
|
|
|
- public Builder clone() {
|
|
|
- return create().mergeFrom(buildPartial());
|
|
|
- }
|
|
|
-
|
|
|
- public com.google.protobuf.Descriptors.Descriptor
|
|
|
- getDescriptorForType() {
|
|
|
- return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto.getDescriptor();
|
|
|
- }
|
|
|
-
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto getDefaultInstanceForType() {
|
|
|
- return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto.getDefaultInstance();
|
|
|
- }
|
|
|
-
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto build() {
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto result = buildPartial();
|
|
|
- if (!result.isInitialized()) {
|
|
|
- throw newUninitializedMessageException(result);
|
|
|
- }
|
|
|
- return result;
|
|
|
- }
|
|
|
-
|
|
|
- private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto buildParsed()
|
|
|
- throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto result = buildPartial();
|
|
|
- if (!result.isInitialized()) {
|
|
|
- throw newUninitializedMessageException(
|
|
|
- result).asInvalidProtocolBufferException();
|
|
|
- }
|
|
|
- return result;
|
|
|
- }
|
|
|
-
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto buildPartial() {
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto result = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto(this);
|
|
|
- int from_bitField0_ = bitField0_;
|
|
|
- int to_bitField0_ = 0;
|
|
|
- if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
- to_bitField0_ |= 0x00000001;
|
|
|
- }
|
|
|
- if (headerBuilder_ == null) {
|
|
|
- result.header_ = header_;
|
|
|
- } else {
|
|
|
- result.header_ = headerBuilder_.build();
|
|
|
- }
|
|
|
- if (targetsBuilder_ == null) {
|
|
|
- if (((bitField0_ & 0x00000002) == 0x00000002)) {
|
|
|
- targets_ = java.util.Collections.unmodifiableList(targets_);
|
|
|
- bitField0_ = (bitField0_ & ~0x00000002);
|
|
|
- }
|
|
|
- result.targets_ = targets_;
|
|
|
- } else {
|
|
|
- result.targets_ = targetsBuilder_.build();
|
|
|
- }
|
|
|
- result.bitField0_ = to_bitField0_;
|
|
|
- onBuilt();
|
|
|
- return result;
|
|
|
- }
|
|
|
-
|
|
|
- public Builder mergeFrom(com.google.protobuf.Message other) {
|
|
|
- if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto) {
|
|
|
- return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto)other);
|
|
|
- } else {
|
|
|
- super.mergeFrom(other);
|
|
|
- return this;
|
|
|
- }
|
|
|
- }
|
|
|
-
|
|
|
- public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto other) {
|
|
|
- if (other == org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto.getDefaultInstance()) return this;
|
|
|
- if (other.hasHeader()) {
|
|
|
- mergeHeader(other.getHeader());
|
|
|
- }
|
|
|
- if (targetsBuilder_ == null) {
|
|
|
- if (!other.targets_.isEmpty()) {
|
|
|
- if (targets_.isEmpty()) {
|
|
|
- targets_ = other.targets_;
|
|
|
- bitField0_ = (bitField0_ & ~0x00000002);
|
|
|
- } else {
|
|
|
- ensureTargetsIsMutable();
|
|
|
- targets_.addAll(other.targets_);
|
|
|
- }
|
|
|
- onChanged();
|
|
|
- }
|
|
|
- } else {
|
|
|
- if (!other.targets_.isEmpty()) {
|
|
|
- if (targetsBuilder_.isEmpty()) {
|
|
|
- targetsBuilder_.dispose();
|
|
|
- targetsBuilder_ = null;
|
|
|
- targets_ = other.targets_;
|
|
|
- bitField0_ = (bitField0_ & ~0x00000002);
|
|
|
- targetsBuilder_ =
|
|
|
- com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
|
|
|
- getTargetsFieldBuilder() : null;
|
|
|
- } else {
|
|
|
- targetsBuilder_.addAllMessages(other.targets_);
|
|
|
- }
|
|
|
- }
|
|
|
- }
|
|
|
- this.mergeUnknownFields(other.getUnknownFields());
|
|
|
- return this;
|
|
|
- }
|
|
|
-
|
|
|
- public final boolean isInitialized() {
|
|
|
- if (!hasHeader()) {
|
|
|
-
|
|
|
- return false;
|
|
|
- }
|
|
|
- if (!getHeader().isInitialized()) {
|
|
|
-
|
|
|
- return false;
|
|
|
- }
|
|
|
- for (int i = 0; i < getTargetsCount(); i++) {
|
|
|
- if (!getTargets(i).isInitialized()) {
|
|
|
-
|
|
|
- return false;
|
|
|
- }
|
|
|
- }
|
|
|
- return true;
|
|
|
- }
|
|
|
-
|
|
|
- public Builder mergeFrom(
|
|
|
- com.google.protobuf.CodedInputStream input,
|
|
|
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
- throws java.io.IOException {
|
|
|
- com.google.protobuf.UnknownFieldSet.Builder unknownFields =
|
|
|
- com.google.protobuf.UnknownFieldSet.newBuilder(
|
|
|
- this.getUnknownFields());
|
|
|
- while (true) {
|
|
|
- int tag = input.readTag();
|
|
|
- switch (tag) {
|
|
|
- case 0:
|
|
|
- this.setUnknownFields(unknownFields.build());
|
|
|
- onChanged();
|
|
|
- return this;
|
|
|
- default: {
|
|
|
- if (!parseUnknownField(input, unknownFields,
|
|
|
- extensionRegistry, tag)) {
|
|
|
- this.setUnknownFields(unknownFields.build());
|
|
|
- onChanged();
|
|
|
- return this;
|
|
|
- }
|
|
|
- break;
|
|
|
- }
|
|
|
- case 10: {
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.newBuilder();
|
|
|
- if (hasHeader()) {
|
|
|
- subBuilder.mergeFrom(getHeader());
|
|
|
- }
|
|
|
- input.readMessage(subBuilder, extensionRegistry);
|
|
|
- setHeader(subBuilder.buildPartial());
|
|
|
- break;
|
|
|
- }
|
|
|
- case 18: {
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.newBuilder();
|
|
|
- input.readMessage(subBuilder, extensionRegistry);
|
|
|
- addTargets(subBuilder.buildPartial());
|
|
|
- break;
|
|
|
- }
|
|
|
- }
|
|
|
- }
|
|
|
- }
|
|
|
-
|
|
|
- private int bitField0_;
|
|
|
-
|
|
|
- // required .ClientOperationHeaderProto header = 1;
|
|
|
- private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto header_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.getDefaultInstance();
|
|
|
- private com.google.protobuf.SingleFieldBuilder<
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProtoOrBuilder> headerBuilder_;
|
|
|
- public boolean hasHeader() {
|
|
|
- return ((bitField0_ & 0x00000001) == 0x00000001);
|
|
|
- }
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto getHeader() {
|
|
|
- if (headerBuilder_ == null) {
|
|
|
- return header_;
|
|
|
- } else {
|
|
|
- return headerBuilder_.getMessage();
|
|
|
- }
|
|
|
- }
|
|
|
- public Builder setHeader(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto value) {
|
|
|
- if (headerBuilder_ == null) {
|
|
|
- if (value == null) {
|
|
|
- throw new NullPointerException();
|
|
|
- }
|
|
|
- header_ = value;
|
|
|
- onChanged();
|
|
|
- } else {
|
|
|
- headerBuilder_.setMessage(value);
|
|
|
- }
|
|
|
- bitField0_ |= 0x00000001;
|
|
|
- return this;
|
|
|
- }
|
|
|
- public Builder setHeader(
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.Builder builderForValue) {
|
|
|
- if (headerBuilder_ == null) {
|
|
|
- header_ = builderForValue.build();
|
|
|
- onChanged();
|
|
|
- } else {
|
|
|
- headerBuilder_.setMessage(builderForValue.build());
|
|
|
- }
|
|
|
- bitField0_ |= 0x00000001;
|
|
|
- return this;
|
|
|
- }
|
|
|
- public Builder mergeHeader(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto value) {
|
|
|
- if (headerBuilder_ == null) {
|
|
|
- if (((bitField0_ & 0x00000001) == 0x00000001) &&
|
|
|
- header_ != org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.getDefaultInstance()) {
|
|
|
- header_ =
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.newBuilder(header_).mergeFrom(value).buildPartial();
|
|
|
- } else {
|
|
|
- header_ = value;
|
|
|
- }
|
|
|
- onChanged();
|
|
|
- } else {
|
|
|
- headerBuilder_.mergeFrom(value);
|
|
|
- }
|
|
|
- bitField0_ |= 0x00000001;
|
|
|
- return this;
|
|
|
- }
|
|
|
- public Builder clearHeader() {
|
|
|
- if (headerBuilder_ == null) {
|
|
|
- header_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.getDefaultInstance();
|
|
|
- onChanged();
|
|
|
- } else {
|
|
|
- headerBuilder_.clear();
|
|
|
- }
|
|
|
- bitField0_ = (bitField0_ & ~0x00000001);
|
|
|
- return this;
|
|
|
- }
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.Builder getHeaderBuilder() {
|
|
|
- bitField0_ |= 0x00000001;
|
|
|
- onChanged();
|
|
|
- return getHeaderFieldBuilder().getBuilder();
|
|
|
- }
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProtoOrBuilder getHeaderOrBuilder() {
|
|
|
- if (headerBuilder_ != null) {
|
|
|
- return headerBuilder_.getMessageOrBuilder();
|
|
|
- } else {
|
|
|
- return header_;
|
|
|
- }
|
|
|
- }
|
|
|
- private com.google.protobuf.SingleFieldBuilder<
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProtoOrBuilder>
|
|
|
- getHeaderFieldBuilder() {
|
|
|
- if (headerBuilder_ == null) {
|
|
|
- headerBuilder_ = new com.google.protobuf.SingleFieldBuilder<
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProtoOrBuilder>(
|
|
|
- header_,
|
|
|
- getParentForChildren(),
|
|
|
- isClean());
|
|
|
- header_ = null;
|
|
|
- }
|
|
|
- return headerBuilder_;
|
|
|
- }
|
|
|
-
|
|
|
- // repeated .DatanodeInfoProto targets = 2;
|
|
|
- private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto> targets_ =
|
|
|
- java.util.Collections.emptyList();
|
|
|
- private void ensureTargetsIsMutable() {
|
|
|
- if (!((bitField0_ & 0x00000002) == 0x00000002)) {
|
|
|
- targets_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto>(targets_);
|
|
|
- bitField0_ |= 0x00000002;
|
|
|
- }
|
|
|
- }
|
|
|
-
|
|
|
- private com.google.protobuf.RepeatedFieldBuilder<
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder> targetsBuilder_;
|
|
|
-
|
|
|
- public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto> getTargetsList() {
|
|
|
- if (targetsBuilder_ == null) {
|
|
|
- return java.util.Collections.unmodifiableList(targets_);
|
|
|
- } else {
|
|
|
- return targetsBuilder_.getMessageList();
|
|
|
- }
|
|
|
- }
|
|
|
- public int getTargetsCount() {
|
|
|
- if (targetsBuilder_ == null) {
|
|
|
- return targets_.size();
|
|
|
- } else {
|
|
|
- return targetsBuilder_.getCount();
|
|
|
- }
|
|
|
- }
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getTargets(int index) {
|
|
|
- if (targetsBuilder_ == null) {
|
|
|
- return targets_.get(index);
|
|
|
- } else {
|
|
|
- return targetsBuilder_.getMessage(index);
|
|
|
- }
|
|
|
- }
|
|
|
- public Builder setTargets(
|
|
|
- int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto value) {
|
|
|
- if (targetsBuilder_ == null) {
|
|
|
- if (value == null) {
|
|
|
- throw new NullPointerException();
|
|
|
- }
|
|
|
- ensureTargetsIsMutable();
|
|
|
- targets_.set(index, value);
|
|
|
- onChanged();
|
|
|
- } else {
|
|
|
- targetsBuilder_.setMessage(index, value);
|
|
|
- }
|
|
|
- return this;
|
|
|
- }
|
|
|
- public Builder setTargets(
|
|
|
- int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder builderForValue) {
|
|
|
- if (targetsBuilder_ == null) {
|
|
|
- ensureTargetsIsMutable();
|
|
|
- targets_.set(index, builderForValue.build());
|
|
|
- onChanged();
|
|
|
- } else {
|
|
|
- targetsBuilder_.setMessage(index, builderForValue.build());
|
|
|
- }
|
|
|
- return this;
|
|
|
- }
|
|
|
- public Builder addTargets(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto value) {
|
|
|
- if (targetsBuilder_ == null) {
|
|
|
- if (value == null) {
|
|
|
- throw new NullPointerException();
|
|
|
- }
|
|
|
- ensureTargetsIsMutable();
|
|
|
- targets_.add(value);
|
|
|
- onChanged();
|
|
|
- } else {
|
|
|
- targetsBuilder_.addMessage(value);
|
|
|
- }
|
|
|
- return this;
|
|
|
- }
|
|
|
- public Builder addTargets(
|
|
|
- int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto value) {
|
|
|
- if (targetsBuilder_ == null) {
|
|
|
- if (value == null) {
|
|
|
- throw new NullPointerException();
|
|
|
- }
|
|
|
- ensureTargetsIsMutable();
|
|
|
- targets_.add(index, value);
|
|
|
- onChanged();
|
|
|
- } else {
|
|
|
- targetsBuilder_.addMessage(index, value);
|
|
|
- }
|
|
|
- return this;
|
|
|
- }
|
|
|
- public Builder addTargets(
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder builderForValue) {
|
|
|
- if (targetsBuilder_ == null) {
|
|
|
- ensureTargetsIsMutable();
|
|
|
- targets_.add(builderForValue.build());
|
|
|
- onChanged();
|
|
|
- } else {
|
|
|
- targetsBuilder_.addMessage(builderForValue.build());
|
|
|
- }
|
|
|
- return this;
|
|
|
- }
|
|
|
- public Builder addTargets(
|
|
|
- int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder builderForValue) {
|
|
|
- if (targetsBuilder_ == null) {
|
|
|
- ensureTargetsIsMutable();
|
|
|
- targets_.add(index, builderForValue.build());
|
|
|
- onChanged();
|
|
|
- } else {
|
|
|
- targetsBuilder_.addMessage(index, builderForValue.build());
|
|
|
- }
|
|
|
- return this;
|
|
|
- }
|
|
|
- public Builder addAllTargets(
|
|
|
- java.lang.Iterable<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto> values) {
|
|
|
- if (targetsBuilder_ == null) {
|
|
|
- ensureTargetsIsMutable();
|
|
|
- super.addAll(values, targets_);
|
|
|
- onChanged();
|
|
|
- } else {
|
|
|
- targetsBuilder_.addAllMessages(values);
|
|
|
- }
|
|
|
- return this;
|
|
|
- }
|
|
|
- public Builder clearTargets() {
|
|
|
- if (targetsBuilder_ == null) {
|
|
|
- targets_ = java.util.Collections.emptyList();
|
|
|
- bitField0_ = (bitField0_ & ~0x00000002);
|
|
|
- onChanged();
|
|
|
- } else {
|
|
|
- targetsBuilder_.clear();
|
|
|
- }
|
|
|
- return this;
|
|
|
- }
|
|
|
- public Builder removeTargets(int index) {
|
|
|
- if (targetsBuilder_ == null) {
|
|
|
- ensureTargetsIsMutable();
|
|
|
- targets_.remove(index);
|
|
|
- onChanged();
|
|
|
- } else {
|
|
|
- targetsBuilder_.remove(index);
|
|
|
- }
|
|
|
- return this;
|
|
|
- }
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder getTargetsBuilder(
|
|
|
- int index) {
|
|
|
- return getTargetsFieldBuilder().getBuilder(index);
|
|
|
- }
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder getTargetsOrBuilder(
|
|
|
- int index) {
|
|
|
- if (targetsBuilder_ == null) {
|
|
|
- return targets_.get(index); } else {
|
|
|
- return targetsBuilder_.getMessageOrBuilder(index);
|
|
|
- }
|
|
|
- }
|
|
|
- public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder>
|
|
|
- getTargetsOrBuilderList() {
|
|
|
- if (targetsBuilder_ != null) {
|
|
|
- return targetsBuilder_.getMessageOrBuilderList();
|
|
|
- } else {
|
|
|
- return java.util.Collections.unmodifiableList(targets_);
|
|
|
- }
|
|
|
- }
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder addTargetsBuilder() {
|
|
|
- return getTargetsFieldBuilder().addBuilder(
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDefaultInstance());
|
|
|
- }
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder addTargetsBuilder(
|
|
|
- int index) {
|
|
|
- return getTargetsFieldBuilder().addBuilder(
|
|
|
- index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDefaultInstance());
|
|
|
- }
|
|
|
- public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder>
|
|
|
- getTargetsBuilderList() {
|
|
|
- return getTargetsFieldBuilder().getBuilderList();
|
|
|
- }
|
|
|
- private com.google.protobuf.RepeatedFieldBuilder<
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder>
|
|
|
- getTargetsFieldBuilder() {
|
|
|
- if (targetsBuilder_ == null) {
|
|
|
- targetsBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder>(
|
|
|
- targets_,
|
|
|
- ((bitField0_ & 0x00000002) == 0x00000002),
|
|
|
- getParentForChildren(),
|
|
|
- isClean());
|
|
|
- targets_ = null;
|
|
|
- }
|
|
|
- return targetsBuilder_;
|
|
|
- }
|
|
|
-
|
|
|
- // @@protoc_insertion_point(builder_scope:OpTransferBlockProto)
|
|
|
- }
|
|
|
-
|
|
|
- static {
|
|
|
- defaultInstance = new OpTransferBlockProto(true);
|
|
|
- defaultInstance.initFields();
|
|
|
- }
|
|
|
-
|
|
|
- // @@protoc_insertion_point(class_scope:OpTransferBlockProto)
|
|
|
- }
|
|
|
-
|
|
|
- public interface OpReplaceBlockProtoOrBuilder
|
|
|
- extends com.google.protobuf.MessageOrBuilder {
|
|
|
-
|
|
|
- // required .BaseHeaderProto header = 1;
|
|
|
- boolean hasHeader();
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto getHeader();
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder getHeaderOrBuilder();
|
|
|
-
|
|
|
- // required string delHint = 2;
|
|
|
- boolean hasDelHint();
|
|
|
- String getDelHint();
|
|
|
-
|
|
|
- // required .DatanodeInfoProto source = 3;
|
|
|
- boolean hasSource();
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getSource();
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder getSourceOrBuilder();
|
|
|
- }
|
|
|
- public static final class OpReplaceBlockProto extends
|
|
|
- com.google.protobuf.GeneratedMessage
|
|
|
- implements OpReplaceBlockProtoOrBuilder {
|
|
|
- // Use OpReplaceBlockProto.newBuilder() to construct.
|
|
|
- private OpReplaceBlockProto(Builder builder) {
|
|
|
- super(builder);
|
|
|
- }
|
|
|
- private OpReplaceBlockProto(boolean noInit) {}
|
|
|
-
|
|
|
- private static final OpReplaceBlockProto defaultInstance;
|
|
|
- public static OpReplaceBlockProto getDefaultInstance() {
|
|
|
- return defaultInstance;
|
|
|
- }
|
|
|
-
|
|
|
- public OpReplaceBlockProto getDefaultInstanceForType() {
|
|
|
- return defaultInstance;
|
|
|
- }
|
|
|
-
|
|
|
- public static final com.google.protobuf.Descriptors.Descriptor
|
|
|
- getDescriptor() {
|
|
|
- return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_OpReplaceBlockProto_descriptor;
|
|
|
- }
|
|
|
-
|
|
|
- protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
|
|
- internalGetFieldAccessorTable() {
|
|
|
- return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_OpReplaceBlockProto_fieldAccessorTable;
|
|
|
- }
|
|
|
-
|
|
|
- private int bitField0_;
|
|
|
- // required .BaseHeaderProto header = 1;
|
|
|
- public static final int HEADER_FIELD_NUMBER = 1;
|
|
|
- private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto header_;
|
|
|
- public boolean hasHeader() {
|
|
|
- return ((bitField0_ & 0x00000001) == 0x00000001);
|
|
|
- }
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto getHeader() {
|
|
|
- return header_;
|
|
|
- }
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder getHeaderOrBuilder() {
|
|
|
- return header_;
|
|
|
- }
|
|
|
-
|
|
|
- // required string delHint = 2;
|
|
|
- public static final int DELHINT_FIELD_NUMBER = 2;
|
|
|
- private java.lang.Object delHint_;
|
|
|
- public boolean hasDelHint() {
|
|
|
- return ((bitField0_ & 0x00000002) == 0x00000002);
|
|
|
- }
|
|
|
- public String getDelHint() {
|
|
|
- java.lang.Object ref = delHint_;
|
|
|
- if (ref instanceof String) {
|
|
|
- return (String) ref;
|
|
|
- } else {
|
|
|
- com.google.protobuf.ByteString bs =
|
|
|
- (com.google.protobuf.ByteString) ref;
|
|
|
- String s = bs.toStringUtf8();
|
|
|
- if (com.google.protobuf.Internal.isValidUtf8(bs)) {
|
|
|
- delHint_ = s;
|
|
|
- }
|
|
|
- return s;
|
|
|
- }
|
|
|
- }
|
|
|
- private com.google.protobuf.ByteString getDelHintBytes() {
|
|
|
- java.lang.Object ref = delHint_;
|
|
|
- if (ref instanceof String) {
|
|
|
- com.google.protobuf.ByteString b =
|
|
|
- com.google.protobuf.ByteString.copyFromUtf8((String) ref);
|
|
|
- delHint_ = b;
|
|
|
- return b;
|
|
|
- } else {
|
|
|
- return (com.google.protobuf.ByteString) ref;
|
|
|
- }
|
|
|
- }
|
|
|
-
|
|
|
- // required .DatanodeInfoProto source = 3;
|
|
|
- public static final int SOURCE_FIELD_NUMBER = 3;
|
|
|
- private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto source_;
|
|
|
- public boolean hasSource() {
|
|
|
- return ((bitField0_ & 0x00000004) == 0x00000004);
|
|
|
- }
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getSource() {
|
|
|
- return source_;
|
|
|
- }
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder getSourceOrBuilder() {
|
|
|
- return source_;
|
|
|
- }
|
|
|
-
|
|
|
- private void initFields() {
|
|
|
- header_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance();
|
|
|
- delHint_ = "";
|
|
|
- source_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDefaultInstance();
|
|
|
- }
|
|
|
- private byte memoizedIsInitialized = -1;
|
|
|
- public final boolean isInitialized() {
|
|
|
- byte isInitialized = memoizedIsInitialized;
|
|
|
- if (isInitialized != -1) return isInitialized == 1;
|
|
|
-
|
|
|
- if (!hasHeader()) {
|
|
|
- memoizedIsInitialized = 0;
|
|
|
- return false;
|
|
|
- }
|
|
|
- if (!hasDelHint()) {
|
|
|
- memoizedIsInitialized = 0;
|
|
|
- return false;
|
|
|
- }
|
|
|
- if (!hasSource()) {
|
|
|
- memoizedIsInitialized = 0;
|
|
|
- return false;
|
|
|
- }
|
|
|
- if (!getHeader().isInitialized()) {
|
|
|
- memoizedIsInitialized = 0;
|
|
|
- return false;
|
|
|
- }
|
|
|
- if (!getSource().isInitialized()) {
|
|
|
- memoizedIsInitialized = 0;
|
|
|
- return false;
|
|
|
- }
|
|
|
- memoizedIsInitialized = 1;
|
|
|
- return true;
|
|
|
- }
|
|
|
-
|
|
|
- public void writeTo(com.google.protobuf.CodedOutputStream output)
|
|
|
- throws java.io.IOException {
|
|
|
- getSerializedSize();
|
|
|
- if (((bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
- output.writeMessage(1, header_);
|
|
|
- }
|
|
|
- if (((bitField0_ & 0x00000002) == 0x00000002)) {
|
|
|
- output.writeBytes(2, getDelHintBytes());
|
|
|
- }
|
|
|
- if (((bitField0_ & 0x00000004) == 0x00000004)) {
|
|
|
- output.writeMessage(3, source_);
|
|
|
- }
|
|
|
- getUnknownFields().writeTo(output);
|
|
|
- }
|
|
|
-
|
|
|
- private int memoizedSerializedSize = -1;
|
|
|
- public int getSerializedSize() {
|
|
|
- int size = memoizedSerializedSize;
|
|
|
- if (size != -1) return size;
|
|
|
-
|
|
|
- size = 0;
|
|
|
- if (((bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
- size += com.google.protobuf.CodedOutputStream
|
|
|
- .computeMessageSize(1, header_);
|
|
|
- }
|
|
|
- if (((bitField0_ & 0x00000002) == 0x00000002)) {
|
|
|
- size += com.google.protobuf.CodedOutputStream
|
|
|
- .computeBytesSize(2, getDelHintBytes());
|
|
|
- }
|
|
|
- if (((bitField0_ & 0x00000004) == 0x00000004)) {
|
|
|
- size += com.google.protobuf.CodedOutputStream
|
|
|
- .computeMessageSize(3, source_);
|
|
|
- }
|
|
|
- size += getUnknownFields().getSerializedSize();
|
|
|
- memoizedSerializedSize = size;
|
|
|
- return size;
|
|
|
- }
|
|
|
-
|
|
|
- private static final long serialVersionUID = 0L;
|
|
|
- @java.lang.Override
|
|
|
- protected java.lang.Object writeReplace()
|
|
|
- throws java.io.ObjectStreamException {
|
|
|
- return super.writeReplace();
|
|
|
- }
|
|
|
-
|
|
|
- @java.lang.Override
|
|
|
- public boolean equals(final java.lang.Object obj) {
|
|
|
- if (obj == this) {
|
|
|
- return true;
|
|
|
- }
|
|
|
- if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto)) {
|
|
|
- return super.equals(obj);
|
|
|
- }
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto other = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto) obj;
|
|
|
-
|
|
|
- boolean result = true;
|
|
|
- result = result && (hasHeader() == other.hasHeader());
|
|
|
- if (hasHeader()) {
|
|
|
- result = result && getHeader()
|
|
|
- .equals(other.getHeader());
|
|
|
- }
|
|
|
- result = result && (hasDelHint() == other.hasDelHint());
|
|
|
- if (hasDelHint()) {
|
|
|
- result = result && getDelHint()
|
|
|
- .equals(other.getDelHint());
|
|
|
- }
|
|
|
- result = result && (hasSource() == other.hasSource());
|
|
|
- if (hasSource()) {
|
|
|
- result = result && getSource()
|
|
|
- .equals(other.getSource());
|
|
|
- }
|
|
|
- result = result &&
|
|
|
- getUnknownFields().equals(other.getUnknownFields());
|
|
|
- return result;
|
|
|
- }
|
|
|
-
|
|
|
- @java.lang.Override
|
|
|
- public int hashCode() {
|
|
|
- int hash = 41;
|
|
|
- hash = (19 * hash) + getDescriptorForType().hashCode();
|
|
|
- if (hasHeader()) {
|
|
|
- hash = (37 * hash) + HEADER_FIELD_NUMBER;
|
|
|
- hash = (53 * hash) + getHeader().hashCode();
|
|
|
- }
|
|
|
- if (hasDelHint()) {
|
|
|
- hash = (37 * hash) + DELHINT_FIELD_NUMBER;
|
|
|
- hash = (53 * hash) + getDelHint().hashCode();
|
|
|
- }
|
|
|
- if (hasSource()) {
|
|
|
- hash = (37 * hash) + SOURCE_FIELD_NUMBER;
|
|
|
- hash = (53 * hash) + getSource().hashCode();
|
|
|
- }
|
|
|
- hash = (29 * hash) + getUnknownFields().hashCode();
|
|
|
- return hash;
|
|
|
- }
|
|
|
-
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto parseFrom(
|
|
|
- com.google.protobuf.ByteString data)
|
|
|
- throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
- return newBuilder().mergeFrom(data).buildParsed();
|
|
|
- }
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto parseFrom(
|
|
|
- com.google.protobuf.ByteString data,
|
|
|
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
- throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
- return newBuilder().mergeFrom(data, extensionRegistry)
|
|
|
- .buildParsed();
|
|
|
- }
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto parseFrom(byte[] data)
|
|
|
- throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
- return newBuilder().mergeFrom(data).buildParsed();
|
|
|
- }
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto parseFrom(
|
|
|
- byte[] data,
|
|
|
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
- throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
- return newBuilder().mergeFrom(data, extensionRegistry)
|
|
|
- .buildParsed();
|
|
|
- }
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto parseFrom(java.io.InputStream input)
|
|
|
- throws java.io.IOException {
|
|
|
- return newBuilder().mergeFrom(input).buildParsed();
|
|
|
- }
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto parseFrom(
|
|
|
- java.io.InputStream input,
|
|
|
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
- throws java.io.IOException {
|
|
|
- return newBuilder().mergeFrom(input, extensionRegistry)
|
|
|
- .buildParsed();
|
|
|
- }
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto parseDelimitedFrom(java.io.InputStream input)
|
|
|
- throws java.io.IOException {
|
|
|
- Builder builder = newBuilder();
|
|
|
- if (builder.mergeDelimitedFrom(input)) {
|
|
|
- return builder.buildParsed();
|
|
|
- } else {
|
|
|
- return null;
|
|
|
- }
|
|
|
- }
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto parseDelimitedFrom(
|
|
|
- java.io.InputStream input,
|
|
|
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
- throws java.io.IOException {
|
|
|
- Builder builder = newBuilder();
|
|
|
- if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
|
|
|
- return builder.buildParsed();
|
|
|
- } else {
|
|
|
- return null;
|
|
|
- }
|
|
|
- }
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto parseFrom(
|
|
|
- com.google.protobuf.CodedInputStream input)
|
|
|
- throws java.io.IOException {
|
|
|
- return newBuilder().mergeFrom(input).buildParsed();
|
|
|
- }
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto parseFrom(
|
|
|
- com.google.protobuf.CodedInputStream input,
|
|
|
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
- throws java.io.IOException {
|
|
|
- return newBuilder().mergeFrom(input, extensionRegistry)
|
|
|
- .buildParsed();
|
|
|
- }
|
|
|
-
|
|
|
- public static Builder newBuilder() { return Builder.create(); }
|
|
|
- public Builder newBuilderForType() { return newBuilder(); }
|
|
|
- public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto prototype) {
|
|
|
- return newBuilder().mergeFrom(prototype);
|
|
|
- }
|
|
|
- public Builder toBuilder() { return newBuilder(this); }
|
|
|
-
|
|
|
- @java.lang.Override
|
|
|
- protected Builder newBuilderForType(
|
|
|
- com.google.protobuf.GeneratedMessage.BuilderParent parent) {
|
|
|
- Builder builder = new Builder(parent);
|
|
|
- return builder;
|
|
|
- }
|
|
|
- public static final class Builder extends
|
|
|
- com.google.protobuf.GeneratedMessage.Builder<Builder>
|
|
|
- implements org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProtoOrBuilder {
|
|
|
- public static final com.google.protobuf.Descriptors.Descriptor
|
|
|
- getDescriptor() {
|
|
|
- return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_OpReplaceBlockProto_descriptor;
|
|
|
- }
|
|
|
-
|
|
|
- protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
|
|
- internalGetFieldAccessorTable() {
|
|
|
- return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_OpReplaceBlockProto_fieldAccessorTable;
|
|
|
- }
|
|
|
-
|
|
|
- // Construct using org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto.newBuilder()
|
|
|
- private Builder() {
|
|
|
- maybeForceBuilderInitialization();
|
|
|
- }
|
|
|
-
|
|
|
- private Builder(BuilderParent parent) {
|
|
|
- super(parent);
|
|
|
- maybeForceBuilderInitialization();
|
|
|
- }
|
|
|
- private void maybeForceBuilderInitialization() {
|
|
|
- if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
|
|
|
- getHeaderFieldBuilder();
|
|
|
- getSourceFieldBuilder();
|
|
|
- }
|
|
|
- }
|
|
|
- private static Builder create() {
|
|
|
- return new Builder();
|
|
|
- }
|
|
|
-
|
|
|
- public Builder clear() {
|
|
|
- super.clear();
|
|
|
- if (headerBuilder_ == null) {
|
|
|
- header_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance();
|
|
|
- } else {
|
|
|
- headerBuilder_.clear();
|
|
|
- }
|
|
|
- bitField0_ = (bitField0_ & ~0x00000001);
|
|
|
- delHint_ = "";
|
|
|
- bitField0_ = (bitField0_ & ~0x00000002);
|
|
|
- if (sourceBuilder_ == null) {
|
|
|
- source_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDefaultInstance();
|
|
|
- } else {
|
|
|
- sourceBuilder_.clear();
|
|
|
- }
|
|
|
- bitField0_ = (bitField0_ & ~0x00000004);
|
|
|
- return this;
|
|
|
- }
|
|
|
-
|
|
|
- public Builder clone() {
|
|
|
- return create().mergeFrom(buildPartial());
|
|
|
- }
|
|
|
-
|
|
|
- public com.google.protobuf.Descriptors.Descriptor
|
|
|
- getDescriptorForType() {
|
|
|
- return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto.getDescriptor();
|
|
|
- }
|
|
|
-
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto getDefaultInstanceForType() {
|
|
|
- return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto.getDefaultInstance();
|
|
|
- }
|
|
|
-
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto build() {
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto result = buildPartial();
|
|
|
- if (!result.isInitialized()) {
|
|
|
- throw newUninitializedMessageException(result);
|
|
|
- }
|
|
|
- return result;
|
|
|
- }
|
|
|
-
|
|
|
- private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto buildParsed()
|
|
|
- throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto result = buildPartial();
|
|
|
- if (!result.isInitialized()) {
|
|
|
- throw newUninitializedMessageException(
|
|
|
- result).asInvalidProtocolBufferException();
|
|
|
- }
|
|
|
- return result;
|
|
|
- }
|
|
|
-
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto buildPartial() {
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto result = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto(this);
|
|
|
- int from_bitField0_ = bitField0_;
|
|
|
- int to_bitField0_ = 0;
|
|
|
- if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
- to_bitField0_ |= 0x00000001;
|
|
|
- }
|
|
|
- if (headerBuilder_ == null) {
|
|
|
- result.header_ = header_;
|
|
|
- } else {
|
|
|
- result.header_ = headerBuilder_.build();
|
|
|
- }
|
|
|
- if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
|
|
|
- to_bitField0_ |= 0x00000002;
|
|
|
- }
|
|
|
- result.delHint_ = delHint_;
|
|
|
- if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
|
|
|
- to_bitField0_ |= 0x00000004;
|
|
|
- }
|
|
|
- if (sourceBuilder_ == null) {
|
|
|
- result.source_ = source_;
|
|
|
- } else {
|
|
|
- result.source_ = sourceBuilder_.build();
|
|
|
- }
|
|
|
- result.bitField0_ = to_bitField0_;
|
|
|
- onBuilt();
|
|
|
- return result;
|
|
|
- }
|
|
|
-
|
|
|
- public Builder mergeFrom(com.google.protobuf.Message other) {
|
|
|
- if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto) {
|
|
|
- return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto)other);
|
|
|
- } else {
|
|
|
- super.mergeFrom(other);
|
|
|
- return this;
|
|
|
- }
|
|
|
- }
|
|
|
-
|
|
|
- public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto other) {
|
|
|
- if (other == org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto.getDefaultInstance()) return this;
|
|
|
- if (other.hasHeader()) {
|
|
|
- mergeHeader(other.getHeader());
|
|
|
- }
|
|
|
- if (other.hasDelHint()) {
|
|
|
- setDelHint(other.getDelHint());
|
|
|
- }
|
|
|
- if (other.hasSource()) {
|
|
|
- mergeSource(other.getSource());
|
|
|
- }
|
|
|
- this.mergeUnknownFields(other.getUnknownFields());
|
|
|
- return this;
|
|
|
- }
|
|
|
-
|
|
|
- public final boolean isInitialized() {
|
|
|
- if (!hasHeader()) {
|
|
|
-
|
|
|
- return false;
|
|
|
- }
|
|
|
- if (!hasDelHint()) {
|
|
|
-
|
|
|
- return false;
|
|
|
- }
|
|
|
- if (!hasSource()) {
|
|
|
-
|
|
|
- return false;
|
|
|
- }
|
|
|
- if (!getHeader().isInitialized()) {
|
|
|
-
|
|
|
- return false;
|
|
|
- }
|
|
|
- if (!getSource().isInitialized()) {
|
|
|
-
|
|
|
- return false;
|
|
|
- }
|
|
|
- return true;
|
|
|
- }
|
|
|
-
|
|
|
- public Builder mergeFrom(
|
|
|
- com.google.protobuf.CodedInputStream input,
|
|
|
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
- throws java.io.IOException {
|
|
|
- com.google.protobuf.UnknownFieldSet.Builder unknownFields =
|
|
|
- com.google.protobuf.UnknownFieldSet.newBuilder(
|
|
|
- this.getUnknownFields());
|
|
|
- while (true) {
|
|
|
- int tag = input.readTag();
|
|
|
- switch (tag) {
|
|
|
- case 0:
|
|
|
- this.setUnknownFields(unknownFields.build());
|
|
|
- onChanged();
|
|
|
- return this;
|
|
|
- default: {
|
|
|
- if (!parseUnknownField(input, unknownFields,
|
|
|
- extensionRegistry, tag)) {
|
|
|
- this.setUnknownFields(unknownFields.build());
|
|
|
- onChanged();
|
|
|
- return this;
|
|
|
- }
|
|
|
- break;
|
|
|
- }
|
|
|
- case 10: {
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.newBuilder();
|
|
|
- if (hasHeader()) {
|
|
|
- subBuilder.mergeFrom(getHeader());
|
|
|
- }
|
|
|
- input.readMessage(subBuilder, extensionRegistry);
|
|
|
- setHeader(subBuilder.buildPartial());
|
|
|
- break;
|
|
|
- }
|
|
|
- case 18: {
|
|
|
- bitField0_ |= 0x00000002;
|
|
|
- delHint_ = input.readBytes();
|
|
|
- break;
|
|
|
- }
|
|
|
- case 26: {
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.newBuilder();
|
|
|
- if (hasSource()) {
|
|
|
- subBuilder.mergeFrom(getSource());
|
|
|
- }
|
|
|
- input.readMessage(subBuilder, extensionRegistry);
|
|
|
- setSource(subBuilder.buildPartial());
|
|
|
- break;
|
|
|
- }
|
|
|
- }
|
|
|
- }
|
|
|
- }
|
|
|
-
|
|
|
- private int bitField0_;
|
|
|
-
|
|
|
- // required .BaseHeaderProto header = 1;
|
|
|
- private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto header_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance();
|
|
|
- private com.google.protobuf.SingleFieldBuilder<
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder> headerBuilder_;
|
|
|
- public boolean hasHeader() {
|
|
|
- return ((bitField0_ & 0x00000001) == 0x00000001);
|
|
|
- }
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto getHeader() {
|
|
|
- if (headerBuilder_ == null) {
|
|
|
- return header_;
|
|
|
- } else {
|
|
|
- return headerBuilder_.getMessage();
|
|
|
- }
|
|
|
- }
|
|
|
- public Builder setHeader(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto value) {
|
|
|
- if (headerBuilder_ == null) {
|
|
|
- if (value == null) {
|
|
|
- throw new NullPointerException();
|
|
|
- }
|
|
|
- header_ = value;
|
|
|
- onChanged();
|
|
|
- } else {
|
|
|
- headerBuilder_.setMessage(value);
|
|
|
- }
|
|
|
- bitField0_ |= 0x00000001;
|
|
|
- return this;
|
|
|
- }
|
|
|
- public Builder setHeader(
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder builderForValue) {
|
|
|
- if (headerBuilder_ == null) {
|
|
|
- header_ = builderForValue.build();
|
|
|
- onChanged();
|
|
|
- } else {
|
|
|
- headerBuilder_.setMessage(builderForValue.build());
|
|
|
- }
|
|
|
- bitField0_ |= 0x00000001;
|
|
|
- return this;
|
|
|
- }
|
|
|
- public Builder mergeHeader(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto value) {
|
|
|
- if (headerBuilder_ == null) {
|
|
|
- if (((bitField0_ & 0x00000001) == 0x00000001) &&
|
|
|
- header_ != org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance()) {
|
|
|
- header_ =
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.newBuilder(header_).mergeFrom(value).buildPartial();
|
|
|
- } else {
|
|
|
- header_ = value;
|
|
|
- }
|
|
|
- onChanged();
|
|
|
- } else {
|
|
|
- headerBuilder_.mergeFrom(value);
|
|
|
- }
|
|
|
- bitField0_ |= 0x00000001;
|
|
|
- return this;
|
|
|
- }
|
|
|
- public Builder clearHeader() {
|
|
|
- if (headerBuilder_ == null) {
|
|
|
- header_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance();
|
|
|
- onChanged();
|
|
|
- } else {
|
|
|
- headerBuilder_.clear();
|
|
|
- }
|
|
|
- bitField0_ = (bitField0_ & ~0x00000001);
|
|
|
- return this;
|
|
|
- }
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder getHeaderBuilder() {
|
|
|
- bitField0_ |= 0x00000001;
|
|
|
- onChanged();
|
|
|
- return getHeaderFieldBuilder().getBuilder();
|
|
|
- }
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder getHeaderOrBuilder() {
|
|
|
- if (headerBuilder_ != null) {
|
|
|
- return headerBuilder_.getMessageOrBuilder();
|
|
|
- } else {
|
|
|
- return header_;
|
|
|
- }
|
|
|
- }
|
|
|
- private com.google.protobuf.SingleFieldBuilder<
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder>
|
|
|
- getHeaderFieldBuilder() {
|
|
|
- if (headerBuilder_ == null) {
|
|
|
- headerBuilder_ = new com.google.protobuf.SingleFieldBuilder<
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder>(
|
|
|
- header_,
|
|
|
- getParentForChildren(),
|
|
|
- isClean());
|
|
|
- header_ = null;
|
|
|
- }
|
|
|
- return headerBuilder_;
|
|
|
- }
|
|
|
-
|
|
|
- // required string delHint = 2;
|
|
|
- private java.lang.Object delHint_ = "";
|
|
|
- public boolean hasDelHint() {
|
|
|
- return ((bitField0_ & 0x00000002) == 0x00000002);
|
|
|
- }
|
|
|
- public String getDelHint() {
|
|
|
- java.lang.Object ref = delHint_;
|
|
|
- if (!(ref instanceof String)) {
|
|
|
- String s = ((com.google.protobuf.ByteString) ref).toStringUtf8();
|
|
|
- delHint_ = s;
|
|
|
- return s;
|
|
|
- } else {
|
|
|
- return (String) ref;
|
|
|
- }
|
|
|
- }
|
|
|
- public Builder setDelHint(String value) {
|
|
|
- if (value == null) {
|
|
|
- throw new NullPointerException();
|
|
|
- }
|
|
|
- bitField0_ |= 0x00000002;
|
|
|
- delHint_ = value;
|
|
|
- onChanged();
|
|
|
- return this;
|
|
|
- }
|
|
|
- public Builder clearDelHint() {
|
|
|
- bitField0_ = (bitField0_ & ~0x00000002);
|
|
|
- delHint_ = getDefaultInstance().getDelHint();
|
|
|
- onChanged();
|
|
|
- return this;
|
|
|
- }
|
|
|
- void setDelHint(com.google.protobuf.ByteString value) {
|
|
|
- bitField0_ |= 0x00000002;
|
|
|
- delHint_ = value;
|
|
|
- onChanged();
|
|
|
- }
|
|
|
-
|
|
|
- // required .DatanodeInfoProto source = 3;
|
|
|
- private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto source_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDefaultInstance();
|
|
|
- private com.google.protobuf.SingleFieldBuilder<
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder> sourceBuilder_;
|
|
|
- public boolean hasSource() {
|
|
|
- return ((bitField0_ & 0x00000004) == 0x00000004);
|
|
|
- }
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getSource() {
|
|
|
- if (sourceBuilder_ == null) {
|
|
|
- return source_;
|
|
|
- } else {
|
|
|
- return sourceBuilder_.getMessage();
|
|
|
- }
|
|
|
- }
|
|
|
- public Builder setSource(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto value) {
|
|
|
- if (sourceBuilder_ == null) {
|
|
|
- if (value == null) {
|
|
|
- throw new NullPointerException();
|
|
|
- }
|
|
|
- source_ = value;
|
|
|
- onChanged();
|
|
|
- } else {
|
|
|
- sourceBuilder_.setMessage(value);
|
|
|
- }
|
|
|
- bitField0_ |= 0x00000004;
|
|
|
- return this;
|
|
|
- }
|
|
|
- public Builder setSource(
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder builderForValue) {
|
|
|
- if (sourceBuilder_ == null) {
|
|
|
- source_ = builderForValue.build();
|
|
|
- onChanged();
|
|
|
- } else {
|
|
|
- sourceBuilder_.setMessage(builderForValue.build());
|
|
|
- }
|
|
|
- bitField0_ |= 0x00000004;
|
|
|
- return this;
|
|
|
- }
|
|
|
- public Builder mergeSource(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto value) {
|
|
|
- if (sourceBuilder_ == null) {
|
|
|
- if (((bitField0_ & 0x00000004) == 0x00000004) &&
|
|
|
- source_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDefaultInstance()) {
|
|
|
- source_ =
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.newBuilder(source_).mergeFrom(value).buildPartial();
|
|
|
- } else {
|
|
|
- source_ = value;
|
|
|
- }
|
|
|
- onChanged();
|
|
|
- } else {
|
|
|
- sourceBuilder_.mergeFrom(value);
|
|
|
- }
|
|
|
- bitField0_ |= 0x00000004;
|
|
|
- return this;
|
|
|
- }
|
|
|
- public Builder clearSource() {
|
|
|
- if (sourceBuilder_ == null) {
|
|
|
- source_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDefaultInstance();
|
|
|
- onChanged();
|
|
|
- } else {
|
|
|
- sourceBuilder_.clear();
|
|
|
- }
|
|
|
- bitField0_ = (bitField0_ & ~0x00000004);
|
|
|
- return this;
|
|
|
- }
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder getSourceBuilder() {
|
|
|
- bitField0_ |= 0x00000004;
|
|
|
- onChanged();
|
|
|
- return getSourceFieldBuilder().getBuilder();
|
|
|
- }
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder getSourceOrBuilder() {
|
|
|
- if (sourceBuilder_ != null) {
|
|
|
- return sourceBuilder_.getMessageOrBuilder();
|
|
|
- } else {
|
|
|
- return source_;
|
|
|
- }
|
|
|
- }
|
|
|
- private com.google.protobuf.SingleFieldBuilder<
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder>
|
|
|
- getSourceFieldBuilder() {
|
|
|
- if (sourceBuilder_ == null) {
|
|
|
- sourceBuilder_ = new com.google.protobuf.SingleFieldBuilder<
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder>(
|
|
|
- source_,
|
|
|
- getParentForChildren(),
|
|
|
- isClean());
|
|
|
- source_ = null;
|
|
|
- }
|
|
|
- return sourceBuilder_;
|
|
|
- }
|
|
|
-
|
|
|
- // @@protoc_insertion_point(builder_scope:OpReplaceBlockProto)
|
|
|
- }
|
|
|
-
|
|
|
- static {
|
|
|
- defaultInstance = new OpReplaceBlockProto(true);
|
|
|
- defaultInstance.initFields();
|
|
|
- }
|
|
|
-
|
|
|
- // @@protoc_insertion_point(class_scope:OpReplaceBlockProto)
|
|
|
- }
|
|
|
-
|
|
|
- public interface OpCopyBlockProtoOrBuilder
|
|
|
- extends com.google.protobuf.MessageOrBuilder {
|
|
|
-
|
|
|
- // required .BaseHeaderProto header = 1;
|
|
|
- boolean hasHeader();
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto getHeader();
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder getHeaderOrBuilder();
|
|
|
- }
|
|
|
- public static final class OpCopyBlockProto extends
|
|
|
- com.google.protobuf.GeneratedMessage
|
|
|
- implements OpCopyBlockProtoOrBuilder {
|
|
|
- // Use OpCopyBlockProto.newBuilder() to construct.
|
|
|
- private OpCopyBlockProto(Builder builder) {
|
|
|
- super(builder);
|
|
|
- }
|
|
|
- private OpCopyBlockProto(boolean noInit) {}
|
|
|
-
|
|
|
- private static final OpCopyBlockProto defaultInstance;
|
|
|
- public static OpCopyBlockProto getDefaultInstance() {
|
|
|
- return defaultInstance;
|
|
|
- }
|
|
|
-
|
|
|
- public OpCopyBlockProto getDefaultInstanceForType() {
|
|
|
- return defaultInstance;
|
|
|
- }
|
|
|
-
|
|
|
- public static final com.google.protobuf.Descriptors.Descriptor
|
|
|
- getDescriptor() {
|
|
|
- return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_OpCopyBlockProto_descriptor;
|
|
|
- }
|
|
|
-
|
|
|
- protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
|
|
- internalGetFieldAccessorTable() {
|
|
|
- return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_OpCopyBlockProto_fieldAccessorTable;
|
|
|
- }
|
|
|
-
|
|
|
- private int bitField0_;
|
|
|
- // required .BaseHeaderProto header = 1;
|
|
|
- public static final int HEADER_FIELD_NUMBER = 1;
|
|
|
- private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto header_;
|
|
|
- public boolean hasHeader() {
|
|
|
- return ((bitField0_ & 0x00000001) == 0x00000001);
|
|
|
- }
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto getHeader() {
|
|
|
- return header_;
|
|
|
- }
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder getHeaderOrBuilder() {
|
|
|
- return header_;
|
|
|
- }
|
|
|
-
|
|
|
- private void initFields() {
|
|
|
- header_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance();
|
|
|
- }
|
|
|
- private byte memoizedIsInitialized = -1;
|
|
|
- public final boolean isInitialized() {
|
|
|
- byte isInitialized = memoizedIsInitialized;
|
|
|
- if (isInitialized != -1) return isInitialized == 1;
|
|
|
-
|
|
|
- if (!hasHeader()) {
|
|
|
- memoizedIsInitialized = 0;
|
|
|
- return false;
|
|
|
- }
|
|
|
- if (!getHeader().isInitialized()) {
|
|
|
- memoizedIsInitialized = 0;
|
|
|
- return false;
|
|
|
- }
|
|
|
- memoizedIsInitialized = 1;
|
|
|
- return true;
|
|
|
- }
|
|
|
-
|
|
|
- public void writeTo(com.google.protobuf.CodedOutputStream output)
|
|
|
- throws java.io.IOException {
|
|
|
- getSerializedSize();
|
|
|
- if (((bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
- output.writeMessage(1, header_);
|
|
|
- }
|
|
|
- getUnknownFields().writeTo(output);
|
|
|
- }
|
|
|
-
|
|
|
- private int memoizedSerializedSize = -1;
|
|
|
- public int getSerializedSize() {
|
|
|
- int size = memoizedSerializedSize;
|
|
|
- if (size != -1) return size;
|
|
|
-
|
|
|
- size = 0;
|
|
|
- if (((bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
- size += com.google.protobuf.CodedOutputStream
|
|
|
- .computeMessageSize(1, header_);
|
|
|
- }
|
|
|
- size += getUnknownFields().getSerializedSize();
|
|
|
- memoizedSerializedSize = size;
|
|
|
- return size;
|
|
|
- }
|
|
|
-
|
|
|
- private static final long serialVersionUID = 0L;
|
|
|
- @java.lang.Override
|
|
|
- protected java.lang.Object writeReplace()
|
|
|
- throws java.io.ObjectStreamException {
|
|
|
- return super.writeReplace();
|
|
|
- }
|
|
|
-
|
|
|
- @java.lang.Override
|
|
|
- public boolean equals(final java.lang.Object obj) {
|
|
|
- if (obj == this) {
|
|
|
- return true;
|
|
|
- }
|
|
|
- if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto)) {
|
|
|
- return super.equals(obj);
|
|
|
- }
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto other = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto) obj;
|
|
|
-
|
|
|
- boolean result = true;
|
|
|
- result = result && (hasHeader() == other.hasHeader());
|
|
|
- if (hasHeader()) {
|
|
|
- result = result && getHeader()
|
|
|
- .equals(other.getHeader());
|
|
|
- }
|
|
|
- result = result &&
|
|
|
- getUnknownFields().equals(other.getUnknownFields());
|
|
|
- return result;
|
|
|
- }
|
|
|
-
|
|
|
- @java.lang.Override
|
|
|
- public int hashCode() {
|
|
|
- int hash = 41;
|
|
|
- hash = (19 * hash) + getDescriptorForType().hashCode();
|
|
|
- if (hasHeader()) {
|
|
|
- hash = (37 * hash) + HEADER_FIELD_NUMBER;
|
|
|
- hash = (53 * hash) + getHeader().hashCode();
|
|
|
- }
|
|
|
- hash = (29 * hash) + getUnknownFields().hashCode();
|
|
|
- return hash;
|
|
|
- }
|
|
|
-
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto parseFrom(
|
|
|
- com.google.protobuf.ByteString data)
|
|
|
- throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
- return newBuilder().mergeFrom(data).buildParsed();
|
|
|
- }
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto parseFrom(
|
|
|
- com.google.protobuf.ByteString data,
|
|
|
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
- throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
- return newBuilder().mergeFrom(data, extensionRegistry)
|
|
|
- .buildParsed();
|
|
|
- }
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto parseFrom(byte[] data)
|
|
|
- throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
- return newBuilder().mergeFrom(data).buildParsed();
|
|
|
- }
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto parseFrom(
|
|
|
- byte[] data,
|
|
|
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
- throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
- return newBuilder().mergeFrom(data, extensionRegistry)
|
|
|
- .buildParsed();
|
|
|
- }
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto parseFrom(java.io.InputStream input)
|
|
|
- throws java.io.IOException {
|
|
|
- return newBuilder().mergeFrom(input).buildParsed();
|
|
|
- }
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto parseFrom(
|
|
|
- java.io.InputStream input,
|
|
|
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
- throws java.io.IOException {
|
|
|
- return newBuilder().mergeFrom(input, extensionRegistry)
|
|
|
- .buildParsed();
|
|
|
- }
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto parseDelimitedFrom(java.io.InputStream input)
|
|
|
- throws java.io.IOException {
|
|
|
- Builder builder = newBuilder();
|
|
|
- if (builder.mergeDelimitedFrom(input)) {
|
|
|
- return builder.buildParsed();
|
|
|
- } else {
|
|
|
- return null;
|
|
|
- }
|
|
|
- }
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto parseDelimitedFrom(
|
|
|
- java.io.InputStream input,
|
|
|
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
- throws java.io.IOException {
|
|
|
- Builder builder = newBuilder();
|
|
|
- if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
|
|
|
- return builder.buildParsed();
|
|
|
- } else {
|
|
|
- return null;
|
|
|
- }
|
|
|
- }
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto parseFrom(
|
|
|
- com.google.protobuf.CodedInputStream input)
|
|
|
- throws java.io.IOException {
|
|
|
- return newBuilder().mergeFrom(input).buildParsed();
|
|
|
- }
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto parseFrom(
|
|
|
- com.google.protobuf.CodedInputStream input,
|
|
|
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
- throws java.io.IOException {
|
|
|
- return newBuilder().mergeFrom(input, extensionRegistry)
|
|
|
- .buildParsed();
|
|
|
- }
|
|
|
-
|
|
|
- public static Builder newBuilder() { return Builder.create(); }
|
|
|
- public Builder newBuilderForType() { return newBuilder(); }
|
|
|
- public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto prototype) {
|
|
|
- return newBuilder().mergeFrom(prototype);
|
|
|
- }
|
|
|
- public Builder toBuilder() { return newBuilder(this); }
|
|
|
-
|
|
|
- @java.lang.Override
|
|
|
- protected Builder newBuilderForType(
|
|
|
- com.google.protobuf.GeneratedMessage.BuilderParent parent) {
|
|
|
- Builder builder = new Builder(parent);
|
|
|
- return builder;
|
|
|
- }
|
|
|
- public static final class Builder extends
|
|
|
- com.google.protobuf.GeneratedMessage.Builder<Builder>
|
|
|
- implements org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProtoOrBuilder {
|
|
|
- public static final com.google.protobuf.Descriptors.Descriptor
|
|
|
- getDescriptor() {
|
|
|
- return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_OpCopyBlockProto_descriptor;
|
|
|
- }
|
|
|
-
|
|
|
- protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
|
|
- internalGetFieldAccessorTable() {
|
|
|
- return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_OpCopyBlockProto_fieldAccessorTable;
|
|
|
- }
|
|
|
-
|
|
|
- // Construct using org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto.newBuilder()
|
|
|
- private Builder() {
|
|
|
- maybeForceBuilderInitialization();
|
|
|
- }
|
|
|
-
|
|
|
- private Builder(BuilderParent parent) {
|
|
|
- super(parent);
|
|
|
- maybeForceBuilderInitialization();
|
|
|
- }
|
|
|
- private void maybeForceBuilderInitialization() {
|
|
|
- if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
|
|
|
- getHeaderFieldBuilder();
|
|
|
- }
|
|
|
- }
|
|
|
- private static Builder create() {
|
|
|
- return new Builder();
|
|
|
- }
|
|
|
-
|
|
|
- public Builder clear() {
|
|
|
- super.clear();
|
|
|
- if (headerBuilder_ == null) {
|
|
|
- header_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance();
|
|
|
- } else {
|
|
|
- headerBuilder_.clear();
|
|
|
- }
|
|
|
- bitField0_ = (bitField0_ & ~0x00000001);
|
|
|
- return this;
|
|
|
- }
|
|
|
-
|
|
|
- public Builder clone() {
|
|
|
- return create().mergeFrom(buildPartial());
|
|
|
- }
|
|
|
-
|
|
|
- public com.google.protobuf.Descriptors.Descriptor
|
|
|
- getDescriptorForType() {
|
|
|
- return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto.getDescriptor();
|
|
|
- }
|
|
|
-
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto getDefaultInstanceForType() {
|
|
|
- return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto.getDefaultInstance();
|
|
|
- }
|
|
|
-
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto build() {
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto result = buildPartial();
|
|
|
- if (!result.isInitialized()) {
|
|
|
- throw newUninitializedMessageException(result);
|
|
|
- }
|
|
|
- return result;
|
|
|
- }
|
|
|
-
|
|
|
- private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto buildParsed()
|
|
|
- throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto result = buildPartial();
|
|
|
- if (!result.isInitialized()) {
|
|
|
- throw newUninitializedMessageException(
|
|
|
- result).asInvalidProtocolBufferException();
|
|
|
- }
|
|
|
- return result;
|
|
|
- }
|
|
|
-
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto buildPartial() {
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto result = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto(this);
|
|
|
- int from_bitField0_ = bitField0_;
|
|
|
- int to_bitField0_ = 0;
|
|
|
- if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
- to_bitField0_ |= 0x00000001;
|
|
|
- }
|
|
|
- if (headerBuilder_ == null) {
|
|
|
- result.header_ = header_;
|
|
|
- } else {
|
|
|
- result.header_ = headerBuilder_.build();
|
|
|
- }
|
|
|
- result.bitField0_ = to_bitField0_;
|
|
|
- onBuilt();
|
|
|
- return result;
|
|
|
- }
|
|
|
-
|
|
|
- public Builder mergeFrom(com.google.protobuf.Message other) {
|
|
|
- if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto) {
|
|
|
- return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto)other);
|
|
|
- } else {
|
|
|
- super.mergeFrom(other);
|
|
|
- return this;
|
|
|
- }
|
|
|
- }
|
|
|
-
|
|
|
- public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto other) {
|
|
|
- if (other == org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto.getDefaultInstance()) return this;
|
|
|
- if (other.hasHeader()) {
|
|
|
- mergeHeader(other.getHeader());
|
|
|
- }
|
|
|
- this.mergeUnknownFields(other.getUnknownFields());
|
|
|
- return this;
|
|
|
- }
|
|
|
-
|
|
|
- public final boolean isInitialized() {
|
|
|
- if (!hasHeader()) {
|
|
|
-
|
|
|
- return false;
|
|
|
- }
|
|
|
- if (!getHeader().isInitialized()) {
|
|
|
-
|
|
|
- return false;
|
|
|
- }
|
|
|
- return true;
|
|
|
- }
|
|
|
-
|
|
|
- public Builder mergeFrom(
|
|
|
- com.google.protobuf.CodedInputStream input,
|
|
|
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
- throws java.io.IOException {
|
|
|
- com.google.protobuf.UnknownFieldSet.Builder unknownFields =
|
|
|
- com.google.protobuf.UnknownFieldSet.newBuilder(
|
|
|
- this.getUnknownFields());
|
|
|
- while (true) {
|
|
|
- int tag = input.readTag();
|
|
|
- switch (tag) {
|
|
|
- case 0:
|
|
|
- this.setUnknownFields(unknownFields.build());
|
|
|
- onChanged();
|
|
|
- return this;
|
|
|
- default: {
|
|
|
- if (!parseUnknownField(input, unknownFields,
|
|
|
- extensionRegistry, tag)) {
|
|
|
- this.setUnknownFields(unknownFields.build());
|
|
|
- onChanged();
|
|
|
- return this;
|
|
|
- }
|
|
|
- break;
|
|
|
- }
|
|
|
- case 10: {
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.newBuilder();
|
|
|
- if (hasHeader()) {
|
|
|
- subBuilder.mergeFrom(getHeader());
|
|
|
- }
|
|
|
- input.readMessage(subBuilder, extensionRegistry);
|
|
|
- setHeader(subBuilder.buildPartial());
|
|
|
- break;
|
|
|
- }
|
|
|
- }
|
|
|
- }
|
|
|
- }
|
|
|
-
|
|
|
- private int bitField0_;
|
|
|
-
|
|
|
- // required .BaseHeaderProto header = 1;
|
|
|
- private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto header_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance();
|
|
|
- private com.google.protobuf.SingleFieldBuilder<
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder> headerBuilder_;
|
|
|
- public boolean hasHeader() {
|
|
|
- return ((bitField0_ & 0x00000001) == 0x00000001);
|
|
|
- }
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto getHeader() {
|
|
|
- if (headerBuilder_ == null) {
|
|
|
- return header_;
|
|
|
- } else {
|
|
|
- return headerBuilder_.getMessage();
|
|
|
- }
|
|
|
- }
|
|
|
- public Builder setHeader(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto value) {
|
|
|
- if (headerBuilder_ == null) {
|
|
|
- if (value == null) {
|
|
|
- throw new NullPointerException();
|
|
|
- }
|
|
|
- header_ = value;
|
|
|
- onChanged();
|
|
|
- } else {
|
|
|
- headerBuilder_.setMessage(value);
|
|
|
- }
|
|
|
- bitField0_ |= 0x00000001;
|
|
|
- return this;
|
|
|
- }
|
|
|
- public Builder setHeader(
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder builderForValue) {
|
|
|
- if (headerBuilder_ == null) {
|
|
|
- header_ = builderForValue.build();
|
|
|
- onChanged();
|
|
|
- } else {
|
|
|
- headerBuilder_.setMessage(builderForValue.build());
|
|
|
- }
|
|
|
- bitField0_ |= 0x00000001;
|
|
|
- return this;
|
|
|
- }
|
|
|
- public Builder mergeHeader(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto value) {
|
|
|
- if (headerBuilder_ == null) {
|
|
|
- if (((bitField0_ & 0x00000001) == 0x00000001) &&
|
|
|
- header_ != org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance()) {
|
|
|
- header_ =
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.newBuilder(header_).mergeFrom(value).buildPartial();
|
|
|
- } else {
|
|
|
- header_ = value;
|
|
|
- }
|
|
|
- onChanged();
|
|
|
- } else {
|
|
|
- headerBuilder_.mergeFrom(value);
|
|
|
- }
|
|
|
- bitField0_ |= 0x00000001;
|
|
|
- return this;
|
|
|
- }
|
|
|
- public Builder clearHeader() {
|
|
|
- if (headerBuilder_ == null) {
|
|
|
- header_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance();
|
|
|
- onChanged();
|
|
|
- } else {
|
|
|
- headerBuilder_.clear();
|
|
|
- }
|
|
|
- bitField0_ = (bitField0_ & ~0x00000001);
|
|
|
- return this;
|
|
|
- }
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder getHeaderBuilder() {
|
|
|
- bitField0_ |= 0x00000001;
|
|
|
- onChanged();
|
|
|
- return getHeaderFieldBuilder().getBuilder();
|
|
|
- }
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder getHeaderOrBuilder() {
|
|
|
- if (headerBuilder_ != null) {
|
|
|
- return headerBuilder_.getMessageOrBuilder();
|
|
|
- } else {
|
|
|
- return header_;
|
|
|
- }
|
|
|
- }
|
|
|
- private com.google.protobuf.SingleFieldBuilder<
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder>
|
|
|
- getHeaderFieldBuilder() {
|
|
|
- if (headerBuilder_ == null) {
|
|
|
- headerBuilder_ = new com.google.protobuf.SingleFieldBuilder<
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder>(
|
|
|
- header_,
|
|
|
- getParentForChildren(),
|
|
|
- isClean());
|
|
|
- header_ = null;
|
|
|
- }
|
|
|
- return headerBuilder_;
|
|
|
- }
|
|
|
-
|
|
|
- // @@protoc_insertion_point(builder_scope:OpCopyBlockProto)
|
|
|
- }
|
|
|
-
|
|
|
- static {
|
|
|
- defaultInstance = new OpCopyBlockProto(true);
|
|
|
- defaultInstance.initFields();
|
|
|
- }
|
|
|
-
|
|
|
- // @@protoc_insertion_point(class_scope:OpCopyBlockProto)
|
|
|
- }
|
|
|
-
|
|
|
- public interface OpBlockChecksumProtoOrBuilder
|
|
|
- extends com.google.protobuf.MessageOrBuilder {
|
|
|
-
|
|
|
- // required .BaseHeaderProto header = 1;
|
|
|
- boolean hasHeader();
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto getHeader();
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder getHeaderOrBuilder();
|
|
|
- }
|
|
|
- public static final class OpBlockChecksumProto extends
|
|
|
- com.google.protobuf.GeneratedMessage
|
|
|
- implements OpBlockChecksumProtoOrBuilder {
|
|
|
- // Use OpBlockChecksumProto.newBuilder() to construct.
|
|
|
- private OpBlockChecksumProto(Builder builder) {
|
|
|
- super(builder);
|
|
|
- }
|
|
|
- private OpBlockChecksumProto(boolean noInit) {}
|
|
|
-
|
|
|
- private static final OpBlockChecksumProto defaultInstance;
|
|
|
- public static OpBlockChecksumProto getDefaultInstance() {
|
|
|
- return defaultInstance;
|
|
|
- }
|
|
|
-
|
|
|
- public OpBlockChecksumProto getDefaultInstanceForType() {
|
|
|
- return defaultInstance;
|
|
|
- }
|
|
|
-
|
|
|
- public static final com.google.protobuf.Descriptors.Descriptor
|
|
|
- getDescriptor() {
|
|
|
- return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_OpBlockChecksumProto_descriptor;
|
|
|
- }
|
|
|
-
|
|
|
- protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
|
|
- internalGetFieldAccessorTable() {
|
|
|
- return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_OpBlockChecksumProto_fieldAccessorTable;
|
|
|
- }
|
|
|
-
|
|
|
- private int bitField0_;
|
|
|
- // required .BaseHeaderProto header = 1;
|
|
|
- public static final int HEADER_FIELD_NUMBER = 1;
|
|
|
- private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto header_;
|
|
|
- public boolean hasHeader() {
|
|
|
- return ((bitField0_ & 0x00000001) == 0x00000001);
|
|
|
- }
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto getHeader() {
|
|
|
- return header_;
|
|
|
- }
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder getHeaderOrBuilder() {
|
|
|
- return header_;
|
|
|
- }
|
|
|
-
|
|
|
- private void initFields() {
|
|
|
- header_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance();
|
|
|
- }
|
|
|
- private byte memoizedIsInitialized = -1;
|
|
|
- public final boolean isInitialized() {
|
|
|
- byte isInitialized = memoizedIsInitialized;
|
|
|
- if (isInitialized != -1) return isInitialized == 1;
|
|
|
-
|
|
|
- if (!hasHeader()) {
|
|
|
- memoizedIsInitialized = 0;
|
|
|
- return false;
|
|
|
- }
|
|
|
- if (!getHeader().isInitialized()) {
|
|
|
- memoizedIsInitialized = 0;
|
|
|
- return false;
|
|
|
- }
|
|
|
- memoizedIsInitialized = 1;
|
|
|
- return true;
|
|
|
- }
|
|
|
-
|
|
|
- public void writeTo(com.google.protobuf.CodedOutputStream output)
|
|
|
- throws java.io.IOException {
|
|
|
- getSerializedSize();
|
|
|
- if (((bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
- output.writeMessage(1, header_);
|
|
|
- }
|
|
|
- getUnknownFields().writeTo(output);
|
|
|
- }
|
|
|
-
|
|
|
- private int memoizedSerializedSize = -1;
|
|
|
- public int getSerializedSize() {
|
|
|
- int size = memoizedSerializedSize;
|
|
|
- if (size != -1) return size;
|
|
|
-
|
|
|
- size = 0;
|
|
|
- if (((bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
- size += com.google.protobuf.CodedOutputStream
|
|
|
- .computeMessageSize(1, header_);
|
|
|
- }
|
|
|
- size += getUnknownFields().getSerializedSize();
|
|
|
- memoizedSerializedSize = size;
|
|
|
- return size;
|
|
|
- }
|
|
|
-
|
|
|
- private static final long serialVersionUID = 0L;
|
|
|
- @java.lang.Override
|
|
|
- protected java.lang.Object writeReplace()
|
|
|
- throws java.io.ObjectStreamException {
|
|
|
- return super.writeReplace();
|
|
|
- }
|
|
|
-
|
|
|
- @java.lang.Override
|
|
|
- public boolean equals(final java.lang.Object obj) {
|
|
|
- if (obj == this) {
|
|
|
- return true;
|
|
|
- }
|
|
|
- if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto)) {
|
|
|
- return super.equals(obj);
|
|
|
- }
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto other = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto) obj;
|
|
|
-
|
|
|
- boolean result = true;
|
|
|
- result = result && (hasHeader() == other.hasHeader());
|
|
|
- if (hasHeader()) {
|
|
|
- result = result && getHeader()
|
|
|
- .equals(other.getHeader());
|
|
|
- }
|
|
|
- result = result &&
|
|
|
- getUnknownFields().equals(other.getUnknownFields());
|
|
|
- return result;
|
|
|
- }
|
|
|
-
|
|
|
- @java.lang.Override
|
|
|
- public int hashCode() {
|
|
|
- int hash = 41;
|
|
|
- hash = (19 * hash) + getDescriptorForType().hashCode();
|
|
|
- if (hasHeader()) {
|
|
|
- hash = (37 * hash) + HEADER_FIELD_NUMBER;
|
|
|
- hash = (53 * hash) + getHeader().hashCode();
|
|
|
- }
|
|
|
- hash = (29 * hash) + getUnknownFields().hashCode();
|
|
|
- return hash;
|
|
|
- }
|
|
|
-
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto parseFrom(
|
|
|
- com.google.protobuf.ByteString data)
|
|
|
- throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
- return newBuilder().mergeFrom(data).buildParsed();
|
|
|
- }
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto parseFrom(
|
|
|
- com.google.protobuf.ByteString data,
|
|
|
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
- throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
- return newBuilder().mergeFrom(data, extensionRegistry)
|
|
|
- .buildParsed();
|
|
|
- }
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto parseFrom(byte[] data)
|
|
|
- throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
- return newBuilder().mergeFrom(data).buildParsed();
|
|
|
- }
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto parseFrom(
|
|
|
- byte[] data,
|
|
|
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
- throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
- return newBuilder().mergeFrom(data, extensionRegistry)
|
|
|
- .buildParsed();
|
|
|
- }
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto parseFrom(java.io.InputStream input)
|
|
|
- throws java.io.IOException {
|
|
|
- return newBuilder().mergeFrom(input).buildParsed();
|
|
|
- }
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto parseFrom(
|
|
|
- java.io.InputStream input,
|
|
|
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
- throws java.io.IOException {
|
|
|
- return newBuilder().mergeFrom(input, extensionRegistry)
|
|
|
- .buildParsed();
|
|
|
- }
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto parseDelimitedFrom(java.io.InputStream input)
|
|
|
- throws java.io.IOException {
|
|
|
- Builder builder = newBuilder();
|
|
|
- if (builder.mergeDelimitedFrom(input)) {
|
|
|
- return builder.buildParsed();
|
|
|
- } else {
|
|
|
- return null;
|
|
|
- }
|
|
|
- }
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto parseDelimitedFrom(
|
|
|
- java.io.InputStream input,
|
|
|
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
- throws java.io.IOException {
|
|
|
- Builder builder = newBuilder();
|
|
|
- if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
|
|
|
- return builder.buildParsed();
|
|
|
- } else {
|
|
|
- return null;
|
|
|
- }
|
|
|
- }
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto parseFrom(
|
|
|
- com.google.protobuf.CodedInputStream input)
|
|
|
- throws java.io.IOException {
|
|
|
- return newBuilder().mergeFrom(input).buildParsed();
|
|
|
- }
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto parseFrom(
|
|
|
- com.google.protobuf.CodedInputStream input,
|
|
|
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
- throws java.io.IOException {
|
|
|
- return newBuilder().mergeFrom(input, extensionRegistry)
|
|
|
- .buildParsed();
|
|
|
- }
|
|
|
-
|
|
|
- public static Builder newBuilder() { return Builder.create(); }
|
|
|
- public Builder newBuilderForType() { return newBuilder(); }
|
|
|
- public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto prototype) {
|
|
|
- return newBuilder().mergeFrom(prototype);
|
|
|
- }
|
|
|
- public Builder toBuilder() { return newBuilder(this); }
|
|
|
-
|
|
|
- @java.lang.Override
|
|
|
- protected Builder newBuilderForType(
|
|
|
- com.google.protobuf.GeneratedMessage.BuilderParent parent) {
|
|
|
- Builder builder = new Builder(parent);
|
|
|
- return builder;
|
|
|
- }
|
|
|
- public static final class Builder extends
|
|
|
- com.google.protobuf.GeneratedMessage.Builder<Builder>
|
|
|
- implements org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProtoOrBuilder {
|
|
|
- public static final com.google.protobuf.Descriptors.Descriptor
|
|
|
- getDescriptor() {
|
|
|
- return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_OpBlockChecksumProto_descriptor;
|
|
|
- }
|
|
|
-
|
|
|
- protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
|
|
- internalGetFieldAccessorTable() {
|
|
|
- return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_OpBlockChecksumProto_fieldAccessorTable;
|
|
|
- }
|
|
|
-
|
|
|
- // Construct using org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto.newBuilder()
|
|
|
- private Builder() {
|
|
|
- maybeForceBuilderInitialization();
|
|
|
- }
|
|
|
-
|
|
|
- private Builder(BuilderParent parent) {
|
|
|
- super(parent);
|
|
|
- maybeForceBuilderInitialization();
|
|
|
- }
|
|
|
- private void maybeForceBuilderInitialization() {
|
|
|
- if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
|
|
|
- getHeaderFieldBuilder();
|
|
|
- }
|
|
|
- }
|
|
|
- private static Builder create() {
|
|
|
- return new Builder();
|
|
|
- }
|
|
|
-
|
|
|
- public Builder clear() {
|
|
|
- super.clear();
|
|
|
- if (headerBuilder_ == null) {
|
|
|
- header_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance();
|
|
|
- } else {
|
|
|
- headerBuilder_.clear();
|
|
|
- }
|
|
|
- bitField0_ = (bitField0_ & ~0x00000001);
|
|
|
- return this;
|
|
|
- }
|
|
|
-
|
|
|
- public Builder clone() {
|
|
|
- return create().mergeFrom(buildPartial());
|
|
|
- }
|
|
|
-
|
|
|
- public com.google.protobuf.Descriptors.Descriptor
|
|
|
- getDescriptorForType() {
|
|
|
- return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto.getDescriptor();
|
|
|
- }
|
|
|
-
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto getDefaultInstanceForType() {
|
|
|
- return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto.getDefaultInstance();
|
|
|
- }
|
|
|
-
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto build() {
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto result = buildPartial();
|
|
|
- if (!result.isInitialized()) {
|
|
|
- throw newUninitializedMessageException(result);
|
|
|
- }
|
|
|
- return result;
|
|
|
- }
|
|
|
-
|
|
|
- private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto buildParsed()
|
|
|
- throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto result = buildPartial();
|
|
|
- if (!result.isInitialized()) {
|
|
|
- throw newUninitializedMessageException(
|
|
|
- result).asInvalidProtocolBufferException();
|
|
|
- }
|
|
|
- return result;
|
|
|
- }
|
|
|
-
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto buildPartial() {
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto result = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto(this);
|
|
|
- int from_bitField0_ = bitField0_;
|
|
|
- int to_bitField0_ = 0;
|
|
|
- if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
- to_bitField0_ |= 0x00000001;
|
|
|
- }
|
|
|
- if (headerBuilder_ == null) {
|
|
|
- result.header_ = header_;
|
|
|
- } else {
|
|
|
- result.header_ = headerBuilder_.build();
|
|
|
- }
|
|
|
- result.bitField0_ = to_bitField0_;
|
|
|
- onBuilt();
|
|
|
- return result;
|
|
|
- }
|
|
|
-
|
|
|
- public Builder mergeFrom(com.google.protobuf.Message other) {
|
|
|
- if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto) {
|
|
|
- return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto)other);
|
|
|
- } else {
|
|
|
- super.mergeFrom(other);
|
|
|
- return this;
|
|
|
- }
|
|
|
- }
|
|
|
-
|
|
|
- public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto other) {
|
|
|
- if (other == org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto.getDefaultInstance()) return this;
|
|
|
- if (other.hasHeader()) {
|
|
|
- mergeHeader(other.getHeader());
|
|
|
- }
|
|
|
- this.mergeUnknownFields(other.getUnknownFields());
|
|
|
- return this;
|
|
|
- }
|
|
|
-
|
|
|
- public final boolean isInitialized() {
|
|
|
- if (!hasHeader()) {
|
|
|
-
|
|
|
- return false;
|
|
|
- }
|
|
|
- if (!getHeader().isInitialized()) {
|
|
|
-
|
|
|
- return false;
|
|
|
- }
|
|
|
- return true;
|
|
|
- }
|
|
|
-
|
|
|
- public Builder mergeFrom(
|
|
|
- com.google.protobuf.CodedInputStream input,
|
|
|
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
- throws java.io.IOException {
|
|
|
- com.google.protobuf.UnknownFieldSet.Builder unknownFields =
|
|
|
- com.google.protobuf.UnknownFieldSet.newBuilder(
|
|
|
- this.getUnknownFields());
|
|
|
- while (true) {
|
|
|
- int tag = input.readTag();
|
|
|
- switch (tag) {
|
|
|
- case 0:
|
|
|
- this.setUnknownFields(unknownFields.build());
|
|
|
- onChanged();
|
|
|
- return this;
|
|
|
- default: {
|
|
|
- if (!parseUnknownField(input, unknownFields,
|
|
|
- extensionRegistry, tag)) {
|
|
|
- this.setUnknownFields(unknownFields.build());
|
|
|
- onChanged();
|
|
|
- return this;
|
|
|
- }
|
|
|
- break;
|
|
|
- }
|
|
|
- case 10: {
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.newBuilder();
|
|
|
- if (hasHeader()) {
|
|
|
- subBuilder.mergeFrom(getHeader());
|
|
|
- }
|
|
|
- input.readMessage(subBuilder, extensionRegistry);
|
|
|
- setHeader(subBuilder.buildPartial());
|
|
|
- break;
|
|
|
- }
|
|
|
- }
|
|
|
- }
|
|
|
- }
|
|
|
-
|
|
|
- private int bitField0_;
|
|
|
-
|
|
|
- // required .BaseHeaderProto header = 1;
|
|
|
- private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto header_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance();
|
|
|
- private com.google.protobuf.SingleFieldBuilder<
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder> headerBuilder_;
|
|
|
- public boolean hasHeader() {
|
|
|
- return ((bitField0_ & 0x00000001) == 0x00000001);
|
|
|
- }
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto getHeader() {
|
|
|
- if (headerBuilder_ == null) {
|
|
|
- return header_;
|
|
|
- } else {
|
|
|
- return headerBuilder_.getMessage();
|
|
|
- }
|
|
|
- }
|
|
|
- public Builder setHeader(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto value) {
|
|
|
- if (headerBuilder_ == null) {
|
|
|
- if (value == null) {
|
|
|
- throw new NullPointerException();
|
|
|
- }
|
|
|
- header_ = value;
|
|
|
- onChanged();
|
|
|
- } else {
|
|
|
- headerBuilder_.setMessage(value);
|
|
|
- }
|
|
|
- bitField0_ |= 0x00000001;
|
|
|
- return this;
|
|
|
- }
|
|
|
- public Builder setHeader(
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder builderForValue) {
|
|
|
- if (headerBuilder_ == null) {
|
|
|
- header_ = builderForValue.build();
|
|
|
- onChanged();
|
|
|
- } else {
|
|
|
- headerBuilder_.setMessage(builderForValue.build());
|
|
|
- }
|
|
|
- bitField0_ |= 0x00000001;
|
|
|
- return this;
|
|
|
- }
|
|
|
- public Builder mergeHeader(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto value) {
|
|
|
- if (headerBuilder_ == null) {
|
|
|
- if (((bitField0_ & 0x00000001) == 0x00000001) &&
|
|
|
- header_ != org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance()) {
|
|
|
- header_ =
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.newBuilder(header_).mergeFrom(value).buildPartial();
|
|
|
- } else {
|
|
|
- header_ = value;
|
|
|
- }
|
|
|
- onChanged();
|
|
|
- } else {
|
|
|
- headerBuilder_.mergeFrom(value);
|
|
|
- }
|
|
|
- bitField0_ |= 0x00000001;
|
|
|
- return this;
|
|
|
- }
|
|
|
- public Builder clearHeader() {
|
|
|
- if (headerBuilder_ == null) {
|
|
|
- header_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance();
|
|
|
- onChanged();
|
|
|
- } else {
|
|
|
- headerBuilder_.clear();
|
|
|
- }
|
|
|
- bitField0_ = (bitField0_ & ~0x00000001);
|
|
|
- return this;
|
|
|
- }
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder getHeaderBuilder() {
|
|
|
- bitField0_ |= 0x00000001;
|
|
|
- onChanged();
|
|
|
- return getHeaderFieldBuilder().getBuilder();
|
|
|
- }
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder getHeaderOrBuilder() {
|
|
|
- if (headerBuilder_ != null) {
|
|
|
- return headerBuilder_.getMessageOrBuilder();
|
|
|
- } else {
|
|
|
- return header_;
|
|
|
- }
|
|
|
- }
|
|
|
- private com.google.protobuf.SingleFieldBuilder<
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder>
|
|
|
- getHeaderFieldBuilder() {
|
|
|
- if (headerBuilder_ == null) {
|
|
|
- headerBuilder_ = new com.google.protobuf.SingleFieldBuilder<
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder>(
|
|
|
- header_,
|
|
|
- getParentForChildren(),
|
|
|
- isClean());
|
|
|
- header_ = null;
|
|
|
- }
|
|
|
- return headerBuilder_;
|
|
|
- }
|
|
|
-
|
|
|
- // @@protoc_insertion_point(builder_scope:OpBlockChecksumProto)
|
|
|
- }
|
|
|
-
|
|
|
- static {
|
|
|
- defaultInstance = new OpBlockChecksumProto(true);
|
|
|
- defaultInstance.initFields();
|
|
|
- }
|
|
|
-
|
|
|
- // @@protoc_insertion_point(class_scope:OpBlockChecksumProto)
|
|
|
- }
|
|
|
-
|
|
|
- public interface PacketHeaderProtoOrBuilder
|
|
|
- extends com.google.protobuf.MessageOrBuilder {
|
|
|
-
|
|
|
- // required sfixed64 offsetInBlock = 1;
|
|
|
- boolean hasOffsetInBlock();
|
|
|
- long getOffsetInBlock();
|
|
|
-
|
|
|
- // required sfixed64 seqno = 2;
|
|
|
- boolean hasSeqno();
|
|
|
- long getSeqno();
|
|
|
-
|
|
|
- // required bool lastPacketInBlock = 3;
|
|
|
- boolean hasLastPacketInBlock();
|
|
|
- boolean getLastPacketInBlock();
|
|
|
-
|
|
|
- // required sfixed32 dataLen = 4;
|
|
|
- boolean hasDataLen();
|
|
|
- int getDataLen();
|
|
|
- }
|
|
|
- public static final class PacketHeaderProto extends
|
|
|
- com.google.protobuf.GeneratedMessage
|
|
|
- implements PacketHeaderProtoOrBuilder {
|
|
|
- // Use PacketHeaderProto.newBuilder() to construct.
|
|
|
- private PacketHeaderProto(Builder builder) {
|
|
|
- super(builder);
|
|
|
- }
|
|
|
- private PacketHeaderProto(boolean noInit) {}
|
|
|
-
|
|
|
- private static final PacketHeaderProto defaultInstance;
|
|
|
- public static PacketHeaderProto getDefaultInstance() {
|
|
|
- return defaultInstance;
|
|
|
- }
|
|
|
-
|
|
|
- public PacketHeaderProto getDefaultInstanceForType() {
|
|
|
- return defaultInstance;
|
|
|
- }
|
|
|
-
|
|
|
- public static final com.google.protobuf.Descriptors.Descriptor
|
|
|
- getDescriptor() {
|
|
|
- return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_PacketHeaderProto_descriptor;
|
|
|
- }
|
|
|
-
|
|
|
- protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
|
|
- internalGetFieldAccessorTable() {
|
|
|
- return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_PacketHeaderProto_fieldAccessorTable;
|
|
|
- }
|
|
|
-
|
|
|
- private int bitField0_;
|
|
|
- // required sfixed64 offsetInBlock = 1;
|
|
|
- public static final int OFFSETINBLOCK_FIELD_NUMBER = 1;
|
|
|
- private long offsetInBlock_;
|
|
|
- public boolean hasOffsetInBlock() {
|
|
|
- return ((bitField0_ & 0x00000001) == 0x00000001);
|
|
|
- }
|
|
|
- public long getOffsetInBlock() {
|
|
|
- return offsetInBlock_;
|
|
|
- }
|
|
|
-
|
|
|
- // required sfixed64 seqno = 2;
|
|
|
- public static final int SEQNO_FIELD_NUMBER = 2;
|
|
|
- private long seqno_;
|
|
|
- public boolean hasSeqno() {
|
|
|
- return ((bitField0_ & 0x00000002) == 0x00000002);
|
|
|
- }
|
|
|
- public long getSeqno() {
|
|
|
- return seqno_;
|
|
|
- }
|
|
|
-
|
|
|
- // required bool lastPacketInBlock = 3;
|
|
|
- public static final int LASTPACKETINBLOCK_FIELD_NUMBER = 3;
|
|
|
- private boolean lastPacketInBlock_;
|
|
|
- public boolean hasLastPacketInBlock() {
|
|
|
- return ((bitField0_ & 0x00000004) == 0x00000004);
|
|
|
- }
|
|
|
- public boolean getLastPacketInBlock() {
|
|
|
- return lastPacketInBlock_;
|
|
|
- }
|
|
|
-
|
|
|
- // required sfixed32 dataLen = 4;
|
|
|
- public static final int DATALEN_FIELD_NUMBER = 4;
|
|
|
- private int dataLen_;
|
|
|
- public boolean hasDataLen() {
|
|
|
- return ((bitField0_ & 0x00000008) == 0x00000008);
|
|
|
- }
|
|
|
- public int getDataLen() {
|
|
|
- return dataLen_;
|
|
|
- }
|
|
|
-
|
|
|
- private void initFields() {
|
|
|
- offsetInBlock_ = 0L;
|
|
|
- seqno_ = 0L;
|
|
|
- lastPacketInBlock_ = false;
|
|
|
- dataLen_ = 0;
|
|
|
- }
|
|
|
- private byte memoizedIsInitialized = -1;
|
|
|
- public final boolean isInitialized() {
|
|
|
- byte isInitialized = memoizedIsInitialized;
|
|
|
- if (isInitialized != -1) return isInitialized == 1;
|
|
|
-
|
|
|
- if (!hasOffsetInBlock()) {
|
|
|
- memoizedIsInitialized = 0;
|
|
|
- return false;
|
|
|
- }
|
|
|
- if (!hasSeqno()) {
|
|
|
- memoizedIsInitialized = 0;
|
|
|
- return false;
|
|
|
- }
|
|
|
- if (!hasLastPacketInBlock()) {
|
|
|
- memoizedIsInitialized = 0;
|
|
|
- return false;
|
|
|
- }
|
|
|
- if (!hasDataLen()) {
|
|
|
- memoizedIsInitialized = 0;
|
|
|
- return false;
|
|
|
- }
|
|
|
- memoizedIsInitialized = 1;
|
|
|
- return true;
|
|
|
- }
|
|
|
-
|
|
|
- public void writeTo(com.google.protobuf.CodedOutputStream output)
|
|
|
- throws java.io.IOException {
|
|
|
- getSerializedSize();
|
|
|
- if (((bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
- output.writeSFixed64(1, offsetInBlock_);
|
|
|
- }
|
|
|
- if (((bitField0_ & 0x00000002) == 0x00000002)) {
|
|
|
- output.writeSFixed64(2, seqno_);
|
|
|
- }
|
|
|
- if (((bitField0_ & 0x00000004) == 0x00000004)) {
|
|
|
- output.writeBool(3, lastPacketInBlock_);
|
|
|
- }
|
|
|
- if (((bitField0_ & 0x00000008) == 0x00000008)) {
|
|
|
- output.writeSFixed32(4, dataLen_);
|
|
|
- }
|
|
|
- getUnknownFields().writeTo(output);
|
|
|
- }
|
|
|
-
|
|
|
- private int memoizedSerializedSize = -1;
|
|
|
- public int getSerializedSize() {
|
|
|
- int size = memoizedSerializedSize;
|
|
|
- if (size != -1) return size;
|
|
|
-
|
|
|
- size = 0;
|
|
|
- if (((bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
- size += com.google.protobuf.CodedOutputStream
|
|
|
- .computeSFixed64Size(1, offsetInBlock_);
|
|
|
- }
|
|
|
- if (((bitField0_ & 0x00000002) == 0x00000002)) {
|
|
|
- size += com.google.protobuf.CodedOutputStream
|
|
|
- .computeSFixed64Size(2, seqno_);
|
|
|
- }
|
|
|
- if (((bitField0_ & 0x00000004) == 0x00000004)) {
|
|
|
- size += com.google.protobuf.CodedOutputStream
|
|
|
- .computeBoolSize(3, lastPacketInBlock_);
|
|
|
- }
|
|
|
- if (((bitField0_ & 0x00000008) == 0x00000008)) {
|
|
|
- size += com.google.protobuf.CodedOutputStream
|
|
|
- .computeSFixed32Size(4, dataLen_);
|
|
|
- }
|
|
|
- size += getUnknownFields().getSerializedSize();
|
|
|
- memoizedSerializedSize = size;
|
|
|
- return size;
|
|
|
- }
|
|
|
-
|
|
|
- private static final long serialVersionUID = 0L;
|
|
|
- @java.lang.Override
|
|
|
- protected java.lang.Object writeReplace()
|
|
|
- throws java.io.ObjectStreamException {
|
|
|
- return super.writeReplace();
|
|
|
- }
|
|
|
-
|
|
|
- @java.lang.Override
|
|
|
- public boolean equals(final java.lang.Object obj) {
|
|
|
- if (obj == this) {
|
|
|
- return true;
|
|
|
- }
|
|
|
- if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto)) {
|
|
|
- return super.equals(obj);
|
|
|
- }
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto other = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto) obj;
|
|
|
-
|
|
|
- boolean result = true;
|
|
|
- result = result && (hasOffsetInBlock() == other.hasOffsetInBlock());
|
|
|
- if (hasOffsetInBlock()) {
|
|
|
- result = result && (getOffsetInBlock()
|
|
|
- == other.getOffsetInBlock());
|
|
|
- }
|
|
|
- result = result && (hasSeqno() == other.hasSeqno());
|
|
|
- if (hasSeqno()) {
|
|
|
- result = result && (getSeqno()
|
|
|
- == other.getSeqno());
|
|
|
- }
|
|
|
- result = result && (hasLastPacketInBlock() == other.hasLastPacketInBlock());
|
|
|
- if (hasLastPacketInBlock()) {
|
|
|
- result = result && (getLastPacketInBlock()
|
|
|
- == other.getLastPacketInBlock());
|
|
|
- }
|
|
|
- result = result && (hasDataLen() == other.hasDataLen());
|
|
|
- if (hasDataLen()) {
|
|
|
- result = result && (getDataLen()
|
|
|
- == other.getDataLen());
|
|
|
- }
|
|
|
- result = result &&
|
|
|
- getUnknownFields().equals(other.getUnknownFields());
|
|
|
- return result;
|
|
|
- }
|
|
|
-
|
|
|
- @java.lang.Override
|
|
|
- public int hashCode() {
|
|
|
- int hash = 41;
|
|
|
- hash = (19 * hash) + getDescriptorForType().hashCode();
|
|
|
- if (hasOffsetInBlock()) {
|
|
|
- hash = (37 * hash) + OFFSETINBLOCK_FIELD_NUMBER;
|
|
|
- hash = (53 * hash) + hashLong(getOffsetInBlock());
|
|
|
- }
|
|
|
- if (hasSeqno()) {
|
|
|
- hash = (37 * hash) + SEQNO_FIELD_NUMBER;
|
|
|
- hash = (53 * hash) + hashLong(getSeqno());
|
|
|
- }
|
|
|
- if (hasLastPacketInBlock()) {
|
|
|
- hash = (37 * hash) + LASTPACKETINBLOCK_FIELD_NUMBER;
|
|
|
- hash = (53 * hash) + hashBoolean(getLastPacketInBlock());
|
|
|
- }
|
|
|
- if (hasDataLen()) {
|
|
|
- hash = (37 * hash) + DATALEN_FIELD_NUMBER;
|
|
|
- hash = (53 * hash) + getDataLen();
|
|
|
- }
|
|
|
- hash = (29 * hash) + getUnknownFields().hashCode();
|
|
|
- return hash;
|
|
|
- }
|
|
|
-
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto parseFrom(
|
|
|
- com.google.protobuf.ByteString data)
|
|
|
- throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
- return newBuilder().mergeFrom(data).buildParsed();
|
|
|
- }
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto parseFrom(
|
|
|
- com.google.protobuf.ByteString data,
|
|
|
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
- throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
- return newBuilder().mergeFrom(data, extensionRegistry)
|
|
|
- .buildParsed();
|
|
|
- }
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto parseFrom(byte[] data)
|
|
|
- throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
- return newBuilder().mergeFrom(data).buildParsed();
|
|
|
- }
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto parseFrom(
|
|
|
- byte[] data,
|
|
|
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
- throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
- return newBuilder().mergeFrom(data, extensionRegistry)
|
|
|
- .buildParsed();
|
|
|
- }
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto parseFrom(java.io.InputStream input)
|
|
|
- throws java.io.IOException {
|
|
|
- return newBuilder().mergeFrom(input).buildParsed();
|
|
|
- }
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto parseFrom(
|
|
|
- java.io.InputStream input,
|
|
|
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
- throws java.io.IOException {
|
|
|
- return newBuilder().mergeFrom(input, extensionRegistry)
|
|
|
- .buildParsed();
|
|
|
- }
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto parseDelimitedFrom(java.io.InputStream input)
|
|
|
- throws java.io.IOException {
|
|
|
- Builder builder = newBuilder();
|
|
|
- if (builder.mergeDelimitedFrom(input)) {
|
|
|
- return builder.buildParsed();
|
|
|
- } else {
|
|
|
- return null;
|
|
|
- }
|
|
|
- }
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto parseDelimitedFrom(
|
|
|
- java.io.InputStream input,
|
|
|
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
- throws java.io.IOException {
|
|
|
- Builder builder = newBuilder();
|
|
|
- if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
|
|
|
- return builder.buildParsed();
|
|
|
- } else {
|
|
|
- return null;
|
|
|
- }
|
|
|
- }
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto parseFrom(
|
|
|
- com.google.protobuf.CodedInputStream input)
|
|
|
- throws java.io.IOException {
|
|
|
- return newBuilder().mergeFrom(input).buildParsed();
|
|
|
- }
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto parseFrom(
|
|
|
- com.google.protobuf.CodedInputStream input,
|
|
|
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
- throws java.io.IOException {
|
|
|
- return newBuilder().mergeFrom(input, extensionRegistry)
|
|
|
- .buildParsed();
|
|
|
- }
|
|
|
-
|
|
|
- public static Builder newBuilder() { return Builder.create(); }
|
|
|
- public Builder newBuilderForType() { return newBuilder(); }
|
|
|
- public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto prototype) {
|
|
|
- return newBuilder().mergeFrom(prototype);
|
|
|
- }
|
|
|
- public Builder toBuilder() { return newBuilder(this); }
|
|
|
-
|
|
|
- @java.lang.Override
|
|
|
- protected Builder newBuilderForType(
|
|
|
- com.google.protobuf.GeneratedMessage.BuilderParent parent) {
|
|
|
- Builder builder = new Builder(parent);
|
|
|
- return builder;
|
|
|
- }
|
|
|
- public static final class Builder extends
|
|
|
- com.google.protobuf.GeneratedMessage.Builder<Builder>
|
|
|
- implements org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProtoOrBuilder {
|
|
|
- public static final com.google.protobuf.Descriptors.Descriptor
|
|
|
- getDescriptor() {
|
|
|
- return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_PacketHeaderProto_descriptor;
|
|
|
- }
|
|
|
-
|
|
|
- protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
|
|
- internalGetFieldAccessorTable() {
|
|
|
- return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_PacketHeaderProto_fieldAccessorTable;
|
|
|
- }
|
|
|
-
|
|
|
- // Construct using org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto.newBuilder()
|
|
|
- private Builder() {
|
|
|
- maybeForceBuilderInitialization();
|
|
|
- }
|
|
|
-
|
|
|
- private Builder(BuilderParent parent) {
|
|
|
- super(parent);
|
|
|
- maybeForceBuilderInitialization();
|
|
|
- }
|
|
|
- private void maybeForceBuilderInitialization() {
|
|
|
- if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
|
|
|
- }
|
|
|
- }
|
|
|
- private static Builder create() {
|
|
|
- return new Builder();
|
|
|
- }
|
|
|
-
|
|
|
- public Builder clear() {
|
|
|
- super.clear();
|
|
|
- offsetInBlock_ = 0L;
|
|
|
- bitField0_ = (bitField0_ & ~0x00000001);
|
|
|
- seqno_ = 0L;
|
|
|
- bitField0_ = (bitField0_ & ~0x00000002);
|
|
|
- lastPacketInBlock_ = false;
|
|
|
- bitField0_ = (bitField0_ & ~0x00000004);
|
|
|
- dataLen_ = 0;
|
|
|
- bitField0_ = (bitField0_ & ~0x00000008);
|
|
|
- return this;
|
|
|
- }
|
|
|
-
|
|
|
- public Builder clone() {
|
|
|
- return create().mergeFrom(buildPartial());
|
|
|
- }
|
|
|
-
|
|
|
- public com.google.protobuf.Descriptors.Descriptor
|
|
|
- getDescriptorForType() {
|
|
|
- return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto.getDescriptor();
|
|
|
- }
|
|
|
-
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto getDefaultInstanceForType() {
|
|
|
- return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto.getDefaultInstance();
|
|
|
- }
|
|
|
-
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto build() {
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto result = buildPartial();
|
|
|
- if (!result.isInitialized()) {
|
|
|
- throw newUninitializedMessageException(result);
|
|
|
- }
|
|
|
- return result;
|
|
|
- }
|
|
|
-
|
|
|
- private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto buildParsed()
|
|
|
- throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto result = buildPartial();
|
|
|
- if (!result.isInitialized()) {
|
|
|
- throw newUninitializedMessageException(
|
|
|
- result).asInvalidProtocolBufferException();
|
|
|
- }
|
|
|
- return result;
|
|
|
- }
|
|
|
-
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto buildPartial() {
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto result = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto(this);
|
|
|
- int from_bitField0_ = bitField0_;
|
|
|
- int to_bitField0_ = 0;
|
|
|
- if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
- to_bitField0_ |= 0x00000001;
|
|
|
- }
|
|
|
- result.offsetInBlock_ = offsetInBlock_;
|
|
|
- if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
|
|
|
- to_bitField0_ |= 0x00000002;
|
|
|
- }
|
|
|
- result.seqno_ = seqno_;
|
|
|
- if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
|
|
|
- to_bitField0_ |= 0x00000004;
|
|
|
- }
|
|
|
- result.lastPacketInBlock_ = lastPacketInBlock_;
|
|
|
- if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
|
|
|
- to_bitField0_ |= 0x00000008;
|
|
|
- }
|
|
|
- result.dataLen_ = dataLen_;
|
|
|
- result.bitField0_ = to_bitField0_;
|
|
|
- onBuilt();
|
|
|
- return result;
|
|
|
- }
|
|
|
-
|
|
|
- public Builder mergeFrom(com.google.protobuf.Message other) {
|
|
|
- if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto) {
|
|
|
- return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto)other);
|
|
|
- } else {
|
|
|
- super.mergeFrom(other);
|
|
|
- return this;
|
|
|
- }
|
|
|
- }
|
|
|
-
|
|
|
- public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto other) {
|
|
|
- if (other == org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto.getDefaultInstance()) return this;
|
|
|
- if (other.hasOffsetInBlock()) {
|
|
|
- setOffsetInBlock(other.getOffsetInBlock());
|
|
|
- }
|
|
|
- if (other.hasSeqno()) {
|
|
|
- setSeqno(other.getSeqno());
|
|
|
- }
|
|
|
- if (other.hasLastPacketInBlock()) {
|
|
|
- setLastPacketInBlock(other.getLastPacketInBlock());
|
|
|
- }
|
|
|
- if (other.hasDataLen()) {
|
|
|
- setDataLen(other.getDataLen());
|
|
|
- }
|
|
|
- this.mergeUnknownFields(other.getUnknownFields());
|
|
|
- return this;
|
|
|
- }
|
|
|
-
|
|
|
- public final boolean isInitialized() {
|
|
|
- if (!hasOffsetInBlock()) {
|
|
|
-
|
|
|
- return false;
|
|
|
- }
|
|
|
- if (!hasSeqno()) {
|
|
|
-
|
|
|
- return false;
|
|
|
- }
|
|
|
- if (!hasLastPacketInBlock()) {
|
|
|
-
|
|
|
- return false;
|
|
|
- }
|
|
|
- if (!hasDataLen()) {
|
|
|
-
|
|
|
- return false;
|
|
|
- }
|
|
|
- return true;
|
|
|
- }
|
|
|
-
|
|
|
- public Builder mergeFrom(
|
|
|
- com.google.protobuf.CodedInputStream input,
|
|
|
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
- throws java.io.IOException {
|
|
|
- com.google.protobuf.UnknownFieldSet.Builder unknownFields =
|
|
|
- com.google.protobuf.UnknownFieldSet.newBuilder(
|
|
|
- this.getUnknownFields());
|
|
|
- while (true) {
|
|
|
- int tag = input.readTag();
|
|
|
- switch (tag) {
|
|
|
- case 0:
|
|
|
- this.setUnknownFields(unknownFields.build());
|
|
|
- onChanged();
|
|
|
- return this;
|
|
|
- default: {
|
|
|
- if (!parseUnknownField(input, unknownFields,
|
|
|
- extensionRegistry, tag)) {
|
|
|
- this.setUnknownFields(unknownFields.build());
|
|
|
- onChanged();
|
|
|
- return this;
|
|
|
- }
|
|
|
- break;
|
|
|
- }
|
|
|
- case 9: {
|
|
|
- bitField0_ |= 0x00000001;
|
|
|
- offsetInBlock_ = input.readSFixed64();
|
|
|
- break;
|
|
|
- }
|
|
|
- case 17: {
|
|
|
- bitField0_ |= 0x00000002;
|
|
|
- seqno_ = input.readSFixed64();
|
|
|
- break;
|
|
|
- }
|
|
|
- case 24: {
|
|
|
- bitField0_ |= 0x00000004;
|
|
|
- lastPacketInBlock_ = input.readBool();
|
|
|
- break;
|
|
|
- }
|
|
|
- case 37: {
|
|
|
- bitField0_ |= 0x00000008;
|
|
|
- dataLen_ = input.readSFixed32();
|
|
|
- break;
|
|
|
- }
|
|
|
- }
|
|
|
- }
|
|
|
- }
|
|
|
-
|
|
|
- private int bitField0_;
|
|
|
-
|
|
|
- // required sfixed64 offsetInBlock = 1;
|
|
|
- private long offsetInBlock_ ;
|
|
|
- public boolean hasOffsetInBlock() {
|
|
|
- return ((bitField0_ & 0x00000001) == 0x00000001);
|
|
|
- }
|
|
|
- public long getOffsetInBlock() {
|
|
|
- return offsetInBlock_;
|
|
|
- }
|
|
|
- public Builder setOffsetInBlock(long value) {
|
|
|
- bitField0_ |= 0x00000001;
|
|
|
- offsetInBlock_ = value;
|
|
|
- onChanged();
|
|
|
- return this;
|
|
|
- }
|
|
|
- public Builder clearOffsetInBlock() {
|
|
|
- bitField0_ = (bitField0_ & ~0x00000001);
|
|
|
- offsetInBlock_ = 0L;
|
|
|
- onChanged();
|
|
|
- return this;
|
|
|
- }
|
|
|
-
|
|
|
- // required sfixed64 seqno = 2;
|
|
|
- private long seqno_ ;
|
|
|
- public boolean hasSeqno() {
|
|
|
- return ((bitField0_ & 0x00000002) == 0x00000002);
|
|
|
- }
|
|
|
- public long getSeqno() {
|
|
|
- return seqno_;
|
|
|
- }
|
|
|
- public Builder setSeqno(long value) {
|
|
|
- bitField0_ |= 0x00000002;
|
|
|
- seqno_ = value;
|
|
|
- onChanged();
|
|
|
- return this;
|
|
|
- }
|
|
|
- public Builder clearSeqno() {
|
|
|
- bitField0_ = (bitField0_ & ~0x00000002);
|
|
|
- seqno_ = 0L;
|
|
|
- onChanged();
|
|
|
- return this;
|
|
|
- }
|
|
|
-
|
|
|
- // required bool lastPacketInBlock = 3;
|
|
|
- private boolean lastPacketInBlock_ ;
|
|
|
- public boolean hasLastPacketInBlock() {
|
|
|
- return ((bitField0_ & 0x00000004) == 0x00000004);
|
|
|
- }
|
|
|
- public boolean getLastPacketInBlock() {
|
|
|
- return lastPacketInBlock_;
|
|
|
- }
|
|
|
- public Builder setLastPacketInBlock(boolean value) {
- bitField0_ |= 0x00000004;
- lastPacketInBlock_ = value;
- onChanged();
- return this;
- }
- public Builder clearLastPacketInBlock() {
- bitField0_ = (bitField0_ & ~0x00000004);
- lastPacketInBlock_ = false;
- onChanged();
- return this;
- }
-
- // required sfixed32 dataLen = 4;
- private int dataLen_ ;
- public boolean hasDataLen() {
- return ((bitField0_ & 0x00000008) == 0x00000008);
- }
- public int getDataLen() {
- return dataLen_;
- }
- public Builder setDataLen(int value) {
- bitField0_ |= 0x00000008;
- dataLen_ = value;
- onChanged();
- return this;
- }
- public Builder clearDataLen() {
- bitField0_ = (bitField0_ & ~0x00000008);
- dataLen_ = 0;
- onChanged();
- return this;
- }
-
- // @@protoc_insertion_point(builder_scope:PacketHeaderProto)
- }
-
- static {
- defaultInstance = new PacketHeaderProto(true);
- defaultInstance.initFields();
- }
-
- // @@protoc_insertion_point(class_scope:PacketHeaderProto)
- }
-
- public interface PipelineAckProtoOrBuilder
- extends com.google.protobuf.MessageOrBuilder {
-
- // required sint64 seqno = 1;
- boolean hasSeqno();
- long getSeqno();
-
|
|
|
- // repeated .Status status = 2;
|
|
|
- java.util.List<org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status> getStatusList();
|
|
|
- int getStatusCount();
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status getStatus(int index);
|
|
|
- }
|
|
|
- public static final class PipelineAckProto extends
|
|
|
- com.google.protobuf.GeneratedMessage
|
|
|
- implements PipelineAckProtoOrBuilder {
|
|
|
- // Use PipelineAckProto.newBuilder() to construct.
|
|
|
- private PipelineAckProto(Builder builder) {
|
|
|
- super(builder);
|
|
|
- }
|
|
|
- private PipelineAckProto(boolean noInit) {}
|
|
|
-
|
|
|
- private static final PipelineAckProto defaultInstance;
|
|
|
- public static PipelineAckProto getDefaultInstance() {
|
|
|
- return defaultInstance;
|
|
|
- }
|
|
|
-
|
|
|
- public PipelineAckProto getDefaultInstanceForType() {
|
|
|
- return defaultInstance;
|
|
|
- }
|
|
|
-
|
|
|
- public static final com.google.protobuf.Descriptors.Descriptor
|
|
|
- getDescriptor() {
|
|
|
- return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_PipelineAckProto_descriptor;
|
|
|
- }
|
|
|
-
|
|
|
- protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
|
|
- internalGetFieldAccessorTable() {
|
|
|
- return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_PipelineAckProto_fieldAccessorTable;
|
|
|
- }
|
|
|
-
|
|
|
- private int bitField0_;
|
|
|
- // required sint64 seqno = 1;
|
|
|
- public static final int SEQNO_FIELD_NUMBER = 1;
|
|
|
- private long seqno_;
|
|
|
- public boolean hasSeqno() {
|
|
|
- return ((bitField0_ & 0x00000001) == 0x00000001);
|
|
|
- }
|
|
|
- public long getSeqno() {
|
|
|
- return seqno_;
|
|
|
- }
|
|
|
-
|
|
|
- // repeated .Status status = 2;
|
|
|
- public static final int STATUS_FIELD_NUMBER = 2;
|
|
|
- private java.util.List<org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status> status_;
|
|
|
- public java.util.List<org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status> getStatusList() {
|
|
|
- return status_;
|
|
|
- }
|
|
|
- public int getStatusCount() {
|
|
|
- return status_.size();
|
|
|
- }
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status getStatus(int index) {
|
|
|
- return status_.get(index);
|
|
|
- }
|
|
|
-
|
|
|
- private void initFields() {
|
|
|
- seqno_ = 0L;
|
|
|
- status_ = java.util.Collections.emptyList();
|
|
|
- }
|
|
|
- private byte memoizedIsInitialized = -1;
|
|
|
- public final boolean isInitialized() {
|
|
|
- byte isInitialized = memoizedIsInitialized;
|
|
|
- if (isInitialized != -1) return isInitialized == 1;
|
|
|
-
|
|
|
- if (!hasSeqno()) {
|
|
|
- memoizedIsInitialized = 0;
|
|
|
- return false;
|
|
|
- }
|
|
|
- memoizedIsInitialized = 1;
|
|
|
- return true;
|
|
|
- }
|
|
|
-
|
|
|
- public void writeTo(com.google.protobuf.CodedOutputStream output)
|
|
|
- throws java.io.IOException {
|
|
|
- getSerializedSize();
|
|
|
- if (((bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
- output.writeSInt64(1, seqno_);
|
|
|
- }
|
|
|
- for (int i = 0; i < status_.size(); i++) {
|
|
|
- output.writeEnum(2, status_.get(i).getNumber());
|
|
|
- }
|
|
|
- getUnknownFields().writeTo(output);
|
|
|
- }
|
|
|
-
|
|
|
- private int memoizedSerializedSize = -1;
|
|
|
- public int getSerializedSize() {
|
|
|
- int size = memoizedSerializedSize;
|
|
|
- if (size != -1) return size;
|
|
|
-
|
|
|
- size = 0;
|
|
|
- if (((bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
- size += com.google.protobuf.CodedOutputStream
|
|
|
- .computeSInt64Size(1, seqno_);
|
|
|
- }
|
|
|
- {
|
|
|
- int dataSize = 0;
|
|
|
- for (int i = 0; i < status_.size(); i++) {
|
|
|
- dataSize += com.google.protobuf.CodedOutputStream
|
|
|
- .computeEnumSizeNoTag(status_.get(i).getNumber());
|
|
|
- }
|
|
|
- size += dataSize;
|
|
|
- size += 1 * status_.size();
|
|
|
- }
|
|
|
- size += getUnknownFields().getSerializedSize();
|
|
|
- memoizedSerializedSize = size;
|
|
|
- return size;
|
|
|
- }
|
|
|
-
|
|
|
- private static final long serialVersionUID = 0L;
|
|
|
- @java.lang.Override
|
|
|
- protected java.lang.Object writeReplace()
|
|
|
- throws java.io.ObjectStreamException {
|
|
|
- return super.writeReplace();
|
|
|
- }
|
|
|
-
|
|
|
- @java.lang.Override
|
|
|
- public boolean equals(final java.lang.Object obj) {
|
|
|
- if (obj == this) {
|
|
|
- return true;
|
|
|
- }
|
|
|
- if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto)) {
|
|
|
- return super.equals(obj);
|
|
|
- }
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto other = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto) obj;
|
|
|
-
|
|
|
- boolean result = true;
|
|
|
- result = result && (hasSeqno() == other.hasSeqno());
|
|
|
- if (hasSeqno()) {
|
|
|
- result = result && (getSeqno()
|
|
|
- == other.getSeqno());
|
|
|
- }
|
|
|
- result = result && getStatusList()
|
|
|
- .equals(other.getStatusList());
|
|
|
- result = result &&
|
|
|
- getUnknownFields().equals(other.getUnknownFields());
|
|
|
- return result;
|
|
|
- }
|
|
|
-
|
|
|
- @java.lang.Override
|
|
|
- public int hashCode() {
|
|
|
- int hash = 41;
|
|
|
- hash = (19 * hash) + getDescriptorForType().hashCode();
|
|
|
- if (hasSeqno()) {
|
|
|
- hash = (37 * hash) + SEQNO_FIELD_NUMBER;
|
|
|
- hash = (53 * hash) + hashLong(getSeqno());
|
|
|
- }
|
|
|
- if (getStatusCount() > 0) {
|
|
|
- hash = (37 * hash) + STATUS_FIELD_NUMBER;
|
|
|
- hash = (53 * hash) + hashEnumList(getStatusList());
|
|
|
- }
|
|
|
- hash = (29 * hash) + getUnknownFields().hashCode();
|
|
|
- return hash;
|
|
|
- }
|
|
|
-
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto parseFrom(
|
|
|
- com.google.protobuf.ByteString data)
|
|
|
- throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
- return newBuilder().mergeFrom(data).buildParsed();
|
|
|
- }
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto parseFrom(
|
|
|
- com.google.protobuf.ByteString data,
|
|
|
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
- throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
- return newBuilder().mergeFrom(data, extensionRegistry)
|
|
|
- .buildParsed();
|
|
|
- }
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto parseFrom(byte[] data)
|
|
|
- throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
- return newBuilder().mergeFrom(data).buildParsed();
|
|
|
- }
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto parseFrom(
|
|
|
- byte[] data,
|
|
|
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
- throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
- return newBuilder().mergeFrom(data, extensionRegistry)
|
|
|
- .buildParsed();
|
|
|
- }
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto parseFrom(java.io.InputStream input)
|
|
|
- throws java.io.IOException {
|
|
|
- return newBuilder().mergeFrom(input).buildParsed();
|
|
|
- }
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto parseFrom(
|
|
|
- java.io.InputStream input,
|
|
|
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
- throws java.io.IOException {
|
|
|
- return newBuilder().mergeFrom(input, extensionRegistry)
|
|
|
- .buildParsed();
|
|
|
- }
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto parseDelimitedFrom(java.io.InputStream input)
|
|
|
- throws java.io.IOException {
|
|
|
- Builder builder = newBuilder();
|
|
|
- if (builder.mergeDelimitedFrom(input)) {
|
|
|
- return builder.buildParsed();
|
|
|
- } else {
|
|
|
- return null;
|
|
|
- }
|
|
|
- }
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto parseDelimitedFrom(
|
|
|
- java.io.InputStream input,
|
|
|
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
- throws java.io.IOException {
|
|
|
- Builder builder = newBuilder();
|
|
|
- if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
|
|
|
- return builder.buildParsed();
|
|
|
- } else {
|
|
|
- return null;
|
|
|
- }
|
|
|
- }
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto parseFrom(
|
|
|
- com.google.protobuf.CodedInputStream input)
|
|
|
- throws java.io.IOException {
|
|
|
- return newBuilder().mergeFrom(input).buildParsed();
|
|
|
- }
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto parseFrom(
|
|
|
- com.google.protobuf.CodedInputStream input,
|
|
|
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
- throws java.io.IOException {
|
|
|
- return newBuilder().mergeFrom(input, extensionRegistry)
|
|
|
- .buildParsed();
|
|
|
- }
|
|
|
-
|
|
|
- public static Builder newBuilder() { return Builder.create(); }
|
|
|
- public Builder newBuilderForType() { return newBuilder(); }
|
|
|
- public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto prototype) {
|
|
|
- return newBuilder().mergeFrom(prototype);
|
|
|
- }
|
|
|
- public Builder toBuilder() { return newBuilder(this); }
|
|
|
-
|
|
|
- @java.lang.Override
|
|
|
- protected Builder newBuilderForType(
|
|
|
- com.google.protobuf.GeneratedMessage.BuilderParent parent) {
|
|
|
- Builder builder = new Builder(parent);
|
|
|
- return builder;
|
|
|
- }
|
|
|
- public static final class Builder extends
|
|
|
- com.google.protobuf.GeneratedMessage.Builder<Builder>
|
|
|
- implements org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProtoOrBuilder {
|
|
|
- public static final com.google.protobuf.Descriptors.Descriptor
|
|
|
- getDescriptor() {
|
|
|
- return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_PipelineAckProto_descriptor;
|
|
|
- }
|
|
|
-
|
|
|
- protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
|
|
- internalGetFieldAccessorTable() {
|
|
|
- return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_PipelineAckProto_fieldAccessorTable;
|
|
|
- }
|
|
|
-
|
|
|
- // Construct using org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto.newBuilder()
|
|
|
- private Builder() {
|
|
|
- maybeForceBuilderInitialization();
|
|
|
- }
|
|
|
-
|
|
|
- private Builder(BuilderParent parent) {
|
|
|
- super(parent);
|
|
|
- maybeForceBuilderInitialization();
|
|
|
- }
|
|
|
- private void maybeForceBuilderInitialization() {
|
|
|
- if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
|
|
|
- }
|
|
|
- }
|
|
|
- private static Builder create() {
|
|
|
- return new Builder();
|
|
|
- }
|
|
|
-
|
|
|
- public Builder clear() {
|
|
|
- super.clear();
|
|
|
- seqno_ = 0L;
|
|
|
- bitField0_ = (bitField0_ & ~0x00000001);
|
|
|
- status_ = java.util.Collections.emptyList();
|
|
|
- bitField0_ = (bitField0_ & ~0x00000002);
|
|
|
- return this;
|
|
|
- }
|
|
|
-
|
|
|
- public Builder clone() {
|
|
|
- return create().mergeFrom(buildPartial());
|
|
|
- }
|
|
|
-
|
|
|
- public com.google.protobuf.Descriptors.Descriptor
|
|
|
- getDescriptorForType() {
|
|
|
- return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto.getDescriptor();
|
|
|
- }
|
|
|
-
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto getDefaultInstanceForType() {
|
|
|
- return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto.getDefaultInstance();
|
|
|
- }
|
|
|
-
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto build() {
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto result = buildPartial();
|
|
|
- if (!result.isInitialized()) {
|
|
|
- throw newUninitializedMessageException(result);
|
|
|
- }
|
|
|
- return result;
|
|
|
- }
|
|
|
-
|
|
|
- private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto buildParsed()
|
|
|
- throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto result = buildPartial();
|
|
|
- if (!result.isInitialized()) {
|
|
|
- throw newUninitializedMessageException(
|
|
|
- result).asInvalidProtocolBufferException();
|
|
|
- }
|
|
|
- return result;
|
|
|
- }
|
|
|
-
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto buildPartial() {
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto result = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto(this);
|
|
|
- int from_bitField0_ = bitField0_;
|
|
|
- int to_bitField0_ = 0;
|
|
|
- if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
- to_bitField0_ |= 0x00000001;
|
|
|
- }
|
|
|
- result.seqno_ = seqno_;
|
|
|
- if (((bitField0_ & 0x00000002) == 0x00000002)) {
|
|
|
- status_ = java.util.Collections.unmodifiableList(status_);
|
|
|
- bitField0_ = (bitField0_ & ~0x00000002);
|
|
|
- }
|
|
|
- result.status_ = status_;
|
|
|
- result.bitField0_ = to_bitField0_;
|
|
|
- onBuilt();
|
|
|
- return result;
|
|
|
- }
|
|
|
-
|
|
|
- public Builder mergeFrom(com.google.protobuf.Message other) {
|
|
|
- if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto) {
|
|
|
- return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto)other);
|
|
|
- } else {
|
|
|
- super.mergeFrom(other);
|
|
|
- return this;
|
|
|
- }
|
|
|
- }
|
|
|
-
|
|
|
- public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto other) {
|
|
|
- if (other == org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto.getDefaultInstance()) return this;
|
|
|
- if (other.hasSeqno()) {
|
|
|
- setSeqno(other.getSeqno());
|
|
|
- }
|
|
|
- if (!other.status_.isEmpty()) {
|
|
|
- if (status_.isEmpty()) {
|
|
|
- status_ = other.status_;
|
|
|
- bitField0_ = (bitField0_ & ~0x00000002);
|
|
|
- } else {
|
|
|
- ensureStatusIsMutable();
|
|
|
- status_.addAll(other.status_);
|
|
|
- }
|
|
|
- onChanged();
|
|
|
- }
|
|
|
- this.mergeUnknownFields(other.getUnknownFields());
|
|
|
- return this;
|
|
|
- }
|
|
|
-
|
|
|
- public final boolean isInitialized() {
|
|
|
- if (!hasSeqno()) {
|
|
|
-
|
|
|
- return false;
|
|
|
- }
|
|
|
- return true;
|
|
|
- }
|
|
|
-
|
|
|
- public Builder mergeFrom(
|
|
|
- com.google.protobuf.CodedInputStream input,
|
|
|
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
- throws java.io.IOException {
|
|
|
- com.google.protobuf.UnknownFieldSet.Builder unknownFields =
|
|
|
- com.google.protobuf.UnknownFieldSet.newBuilder(
|
|
|
- this.getUnknownFields());
|
|
|
- while (true) {
|
|
|
- int tag = input.readTag();
|
|
|
- switch (tag) {
|
|
|
- case 0:
|
|
|
- this.setUnknownFields(unknownFields.build());
|
|
|
- onChanged();
|
|
|
- return this;
|
|
|
- default: {
|
|
|
- if (!parseUnknownField(input, unknownFields,
|
|
|
- extensionRegistry, tag)) {
|
|
|
- this.setUnknownFields(unknownFields.build());
|
|
|
- onChanged();
|
|
|
- return this;
|
|
|
- }
|
|
|
- break;
|
|
|
- }
|
|
|
- case 8: {
|
|
|
- bitField0_ |= 0x00000001;
|
|
|
- seqno_ = input.readSInt64();
|
|
|
- break;
|
|
|
- }
|
|
|
- case 16: {
|
|
|
- int rawValue = input.readEnum();
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status value = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.valueOf(rawValue);
|
|
|
- if (value == null) {
|
|
|
- unknownFields.mergeVarintField(2, rawValue);
|
|
|
- } else {
|
|
|
- addStatus(value);
|
|
|
- }
|
|
|
- break;
|
|
|
- }
|
|
|
- case 18: {
|
|
|
- int length = input.readRawVarint32();
|
|
|
- int oldLimit = input.pushLimit(length);
|
|
|
- while(input.getBytesUntilLimit() > 0) {
|
|
|
- int rawValue = input.readEnum();
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status value = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.valueOf(rawValue);
|
|
|
- if (value == null) {
|
|
|
- unknownFields.mergeVarintField(2, rawValue);
|
|
|
- } else {
|
|
|
- addStatus(value);
|
|
|
- }
|
|
|
- }
|
|
|
- input.popLimit(oldLimit);
|
|
|
- break;
|
|
|
- }
|
|
|
- }
|
|
|
- }
|
|
|
- }
|
|
|
-
|
|
|
- private int bitField0_;
|
|
|
-
|
|
|
- // required sint64 seqno = 1;
|
|
|
- private long seqno_ ;
|
|
|
- public boolean hasSeqno() {
|
|
|
- return ((bitField0_ & 0x00000001) == 0x00000001);
|
|
|
- }
|
|
|
- public long getSeqno() {
|
|
|
- return seqno_;
|
|
|
- }
|
|
|
- public Builder setSeqno(long value) {
|
|
|
- bitField0_ |= 0x00000001;
|
|
|
- seqno_ = value;
|
|
|
- onChanged();
|
|
|
- return this;
|
|
|
- }
|
|
|
- public Builder clearSeqno() {
|
|
|
- bitField0_ = (bitField0_ & ~0x00000001);
|
|
|
- seqno_ = 0L;
|
|
|
- onChanged();
|
|
|
- return this;
|
|
|
- }
|
|
|
-
|
|
|
- // repeated .Status status = 2;
|
|
|
- private java.util.List<org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status> status_ =
|
|
|
- java.util.Collections.emptyList();
|
|
|
- private void ensureStatusIsMutable() {
|
|
|
- if (!((bitField0_ & 0x00000002) == 0x00000002)) {
|
|
|
- status_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status>(status_);
|
|
|
- bitField0_ |= 0x00000002;
|
|
|
- }
|
|
|
- }
|
|
|
- public java.util.List<org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status> getStatusList() {
|
|
|
- return java.util.Collections.unmodifiableList(status_);
|
|
|
- }
|
|
|
- public int getStatusCount() {
|
|
|
- return status_.size();
|
|
|
- }
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status getStatus(int index) {
|
|
|
- return status_.get(index);
|
|
|
- }
|
|
|
- public Builder setStatus(
|
|
|
- int index, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status value) {
|
|
|
- if (value == null) {
|
|
|
- throw new NullPointerException();
|
|
|
- }
|
|
|
- ensureStatusIsMutable();
|
|
|
- status_.set(index, value);
|
|
|
- onChanged();
|
|
|
- return this;
|
|
|
- }
|
|
|
- public Builder addStatus(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status value) {
|
|
|
- if (value == null) {
|
|
|
- throw new NullPointerException();
|
|
|
- }
|
|
|
- ensureStatusIsMutable();
|
|
|
- status_.add(value);
|
|
|
- onChanged();
|
|
|
- return this;
|
|
|
- }
|
|
|
- public Builder addAllStatus(
|
|
|
- java.lang.Iterable<? extends org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status> values) {
|
|
|
- ensureStatusIsMutable();
|
|
|
- super.addAll(values, status_);
|
|
|
- onChanged();
|
|
|
- return this;
|
|
|
- }
|
|
|
- public Builder clearStatus() {
|
|
|
- status_ = java.util.Collections.emptyList();
|
|
|
- bitField0_ = (bitField0_ & ~0x00000002);
|
|
|
- onChanged();
|
|
|
- return this;
|
|
|
- }
|
|
|
-
|
|
|
- // @@protoc_insertion_point(builder_scope:PipelineAckProto)
|
|
|
- }
|
|
|
-
|
|
|
- static {
|
|
|
- defaultInstance = new PipelineAckProto(true);
|
|
|
- defaultInstance.initFields();
|
|
|
- }
|
|
|
-
|
|
|
- // @@protoc_insertion_point(class_scope:PipelineAckProto)
|
|
|
- }
|
|
|
-
|
|
|
- public interface ReadOpChecksumInfoProtoOrBuilder
|
|
|
- extends com.google.protobuf.MessageOrBuilder {
|
|
|
-
|
|
|
- // required .ChecksumProto checksum = 1;
|
|
|
- boolean hasChecksum();
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto getChecksum();
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProtoOrBuilder getChecksumOrBuilder();
|
|
|
-
|
|
|
- // required uint64 chunkOffset = 2;
|
|
|
- boolean hasChunkOffset();
|
|
|
- long getChunkOffset();
|
|
|
- }
|
|
|
- public static final class ReadOpChecksumInfoProto extends
|
|
|
- com.google.protobuf.GeneratedMessage
|
|
|
- implements ReadOpChecksumInfoProtoOrBuilder {
|
|
|
- // Use ReadOpChecksumInfoProto.newBuilder() to construct.
|
|
|
- private ReadOpChecksumInfoProto(Builder builder) {
|
|
|
- super(builder);
|
|
|
- }
|
|
|
- private ReadOpChecksumInfoProto(boolean noInit) {}
|
|
|
-
|
|
|
- private static final ReadOpChecksumInfoProto defaultInstance;
|
|
|
- public static ReadOpChecksumInfoProto getDefaultInstance() {
|
|
|
- return defaultInstance;
|
|
|
- }
|
|
|
-
|
|
|
- public ReadOpChecksumInfoProto getDefaultInstanceForType() {
|
|
|
- return defaultInstance;
|
|
|
- }
|
|
|
-
|
|
|
- public static final com.google.protobuf.Descriptors.Descriptor
|
|
|
- getDescriptor() {
|
|
|
- return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_ReadOpChecksumInfoProto_descriptor;
|
|
|
- }
|
|
|
-
|
|
|
- protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
|
|
- internalGetFieldAccessorTable() {
|
|
|
- return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_ReadOpChecksumInfoProto_fieldAccessorTable;
|
|
|
- }
|
|
|
-
|
|
|
- private int bitField0_;
|
|
|
- // required .ChecksumProto checksum = 1;
|
|
|
- public static final int CHECKSUM_FIELD_NUMBER = 1;
|
|
|
- private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto checksum_;
|
|
|
- public boolean hasChecksum() {
|
|
|
- return ((bitField0_ & 0x00000001) == 0x00000001);
|
|
|
- }
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto getChecksum() {
|
|
|
- return checksum_;
|
|
|
- }
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProtoOrBuilder getChecksumOrBuilder() {
|
|
|
- return checksum_;
|
|
|
- }
|
|
|
-
|
|
|
- // required uint64 chunkOffset = 2;
|
|
|
- public static final int CHUNKOFFSET_FIELD_NUMBER = 2;
|
|
|
- private long chunkOffset_;
|
|
|
- public boolean hasChunkOffset() {
|
|
|
- return ((bitField0_ & 0x00000002) == 0x00000002);
|
|
|
- }
|
|
|
- public long getChunkOffset() {
|
|
|
- return chunkOffset_;
|
|
|
- }
|
|
|
-
|
|
|
- private void initFields() {
|
|
|
- checksum_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.getDefaultInstance();
|
|
|
- chunkOffset_ = 0L;
|
|
|
- }
|
|
|
- private byte memoizedIsInitialized = -1;
|
|
|
- public final boolean isInitialized() {
|
|
|
- byte isInitialized = memoizedIsInitialized;
|
|
|
- if (isInitialized != -1) return isInitialized == 1;
|
|
|
-
|
|
|
- if (!hasChecksum()) {
|
|
|
- memoizedIsInitialized = 0;
|
|
|
- return false;
|
|
|
- }
|
|
|
- if (!hasChunkOffset()) {
|
|
|
- memoizedIsInitialized = 0;
|
|
|
- return false;
|
|
|
- }
|
|
|
- if (!getChecksum().isInitialized()) {
|
|
|
- memoizedIsInitialized = 0;
|
|
|
- return false;
|
|
|
- }
|
|
|
- memoizedIsInitialized = 1;
|
|
|
- return true;
|
|
|
- }
|
|
|
-
|
|
|
- public void writeTo(com.google.protobuf.CodedOutputStream output)
|
|
|
- throws java.io.IOException {
|
|
|
- getSerializedSize();
|
|
|
- if (((bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
- output.writeMessage(1, checksum_);
|
|
|
- }
|
|
|
- if (((bitField0_ & 0x00000002) == 0x00000002)) {
|
|
|
- output.writeUInt64(2, chunkOffset_);
|
|
|
- }
|
|
|
- getUnknownFields().writeTo(output);
|
|
|
- }
|
|
|
-
|
|
|
- private int memoizedSerializedSize = -1;
|
|
|
- public int getSerializedSize() {
|
|
|
- int size = memoizedSerializedSize;
|
|
|
- if (size != -1) return size;
|
|
|
-
|
|
|
- size = 0;
|
|
|
- if (((bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
- size += com.google.protobuf.CodedOutputStream
|
|
|
- .computeMessageSize(1, checksum_);
|
|
|
- }
|
|
|
- if (((bitField0_ & 0x00000002) == 0x00000002)) {
|
|
|
- size += com.google.protobuf.CodedOutputStream
|
|
|
- .computeUInt64Size(2, chunkOffset_);
|
|
|
- }
|
|
|
- size += getUnknownFields().getSerializedSize();
|
|
|
- memoizedSerializedSize = size;
|
|
|
- return size;
|
|
|
- }
|
|
|
-
|
|
|
- private static final long serialVersionUID = 0L;
|
|
|
- @java.lang.Override
|
|
|
- protected java.lang.Object writeReplace()
|
|
|
- throws java.io.ObjectStreamException {
|
|
|
- return super.writeReplace();
|
|
|
- }
|
|
|
-
|
|
|
- @java.lang.Override
|
|
|
- public boolean equals(final java.lang.Object obj) {
|
|
|
- if (obj == this) {
|
|
|
- return true;
|
|
|
- }
|
|
|
- if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto)) {
|
|
|
- return super.equals(obj);
|
|
|
- }
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto other = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto) obj;
|
|
|
-
|
|
|
- boolean result = true;
|
|
|
- result = result && (hasChecksum() == other.hasChecksum());
|
|
|
- if (hasChecksum()) {
|
|
|
- result = result && getChecksum()
|
|
|
- .equals(other.getChecksum());
|
|
|
- }
|
|
|
- result = result && (hasChunkOffset() == other.hasChunkOffset());
|
|
|
- if (hasChunkOffset()) {
|
|
|
- result = result && (getChunkOffset()
|
|
|
- == other.getChunkOffset());
|
|
|
- }
|
|
|
- result = result &&
|
|
|
- getUnknownFields().equals(other.getUnknownFields());
|
|
|
- return result;
|
|
|
- }
|
|
|
-
|
|
|
- @java.lang.Override
|
|
|
- public int hashCode() {
|
|
|
- int hash = 41;
|
|
|
- hash = (19 * hash) + getDescriptorForType().hashCode();
|
|
|
- if (hasChecksum()) {
|
|
|
- hash = (37 * hash) + CHECKSUM_FIELD_NUMBER;
|
|
|
- hash = (53 * hash) + getChecksum().hashCode();
|
|
|
- }
|
|
|
- if (hasChunkOffset()) {
|
|
|
- hash = (37 * hash) + CHUNKOFFSET_FIELD_NUMBER;
|
|
|
- hash = (53 * hash) + hashLong(getChunkOffset());
|
|
|
- }
|
|
|
- hash = (29 * hash) + getUnknownFields().hashCode();
|
|
|
- return hash;
|
|
|
- }
|
|
|
-
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto parseFrom(
|
|
|
- com.google.protobuf.ByteString data)
|
|
|
- throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
- return newBuilder().mergeFrom(data).buildParsed();
|
|
|
- }
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto parseFrom(
|
|
|
- com.google.protobuf.ByteString data,
|
|
|
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
- throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
- return newBuilder().mergeFrom(data, extensionRegistry)
|
|
|
- .buildParsed();
|
|
|
- }
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto parseFrom(byte[] data)
|
|
|
- throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
- return newBuilder().mergeFrom(data).buildParsed();
|
|
|
- }
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto parseFrom(
|
|
|
- byte[] data,
|
|
|
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
- throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
- return newBuilder().mergeFrom(data, extensionRegistry)
|
|
|
- .buildParsed();
|
|
|
- }
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto parseFrom(java.io.InputStream input)
|
|
|
- throws java.io.IOException {
|
|
|
- return newBuilder().mergeFrom(input).buildParsed();
|
|
|
- }
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto parseFrom(
|
|
|
- java.io.InputStream input,
|
|
|
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
- throws java.io.IOException {
|
|
|
- return newBuilder().mergeFrom(input, extensionRegistry)
|
|
|
- .buildParsed();
|
|
|
- }
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto parseDelimitedFrom(java.io.InputStream input)
|
|
|
- throws java.io.IOException {
|
|
|
- Builder builder = newBuilder();
|
|
|
- if (builder.mergeDelimitedFrom(input)) {
|
|
|
- return builder.buildParsed();
|
|
|
- } else {
|
|
|
- return null;
|
|
|
- }
|
|
|
- }
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto parseDelimitedFrom(
|
|
|
- java.io.InputStream input,
|
|
|
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
- throws java.io.IOException {
|
|
|
- Builder builder = newBuilder();
|
|
|
- if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
|
|
|
- return builder.buildParsed();
|
|
|
- } else {
|
|
|
- return null;
|
|
|
- }
|
|
|
- }
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto parseFrom(
|
|
|
- com.google.protobuf.CodedInputStream input)
|
|
|
- throws java.io.IOException {
|
|
|
- return newBuilder().mergeFrom(input).buildParsed();
|
|
|
- }
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto parseFrom(
|
|
|
- com.google.protobuf.CodedInputStream input,
|
|
|
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
- throws java.io.IOException {
|
|
|
- return newBuilder().mergeFrom(input, extensionRegistry)
|
|
|
- .buildParsed();
|
|
|
- }
|
|
|
-
|
|
|
- public static Builder newBuilder() { return Builder.create(); }
|
|
|
- public Builder newBuilderForType() { return newBuilder(); }
|
|
|
- public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto prototype) {
|
|
|
- return newBuilder().mergeFrom(prototype);
|
|
|
- }
|
|
|
- public Builder toBuilder() { return newBuilder(this); }
|
|
|
-
|
|
|
- @java.lang.Override
|
|
|
- protected Builder newBuilderForType(
|
|
|
- com.google.protobuf.GeneratedMessage.BuilderParent parent) {
|
|
|
- Builder builder = new Builder(parent);
|
|
|
- return builder;
|
|
|
- }
|
|
|
- public static final class Builder extends
|
|
|
- com.google.protobuf.GeneratedMessage.Builder<Builder>
|
|
|
- implements org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProtoOrBuilder {
|
|
|
- public static final com.google.protobuf.Descriptors.Descriptor
|
|
|
- getDescriptor() {
|
|
|
- return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_ReadOpChecksumInfoProto_descriptor;
|
|
|
- }
|
|
|
-
|
|
|
- protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
|
|
- internalGetFieldAccessorTable() {
|
|
|
- return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_ReadOpChecksumInfoProto_fieldAccessorTable;
|
|
|
- }
|
|
|
-
|
|
|
- // Construct using org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto.newBuilder()
|
|
|
- private Builder() {
|
|
|
- maybeForceBuilderInitialization();
|
|
|
- }
|
|
|
-
|
|
|
- private Builder(BuilderParent parent) {
|
|
|
- super(parent);
|
|
|
- maybeForceBuilderInitialization();
|
|
|
- }
|
|
|
- private void maybeForceBuilderInitialization() {
|
|
|
- if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
|
|
|
- getChecksumFieldBuilder();
|
|
|
- }
|
|
|
- }
|
|
|
- private static Builder create() {
|
|
|
- return new Builder();
|
|
|
- }
|
|
|
-
|
|
|
- public Builder clear() {
|
|
|
- super.clear();
|
|
|
- if (checksumBuilder_ == null) {
|
|
|
- checksum_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.getDefaultInstance();
|
|
|
- } else {
|
|
|
- checksumBuilder_.clear();
|
|
|
- }
|
|
|
- bitField0_ = (bitField0_ & ~0x00000001);
|
|
|
- chunkOffset_ = 0L;
|
|
|
- bitField0_ = (bitField0_ & ~0x00000002);
|
|
|
- return this;
|
|
|
- }
|
|
|
-
|
|
|
- public Builder clone() {
|
|
|
- return create().mergeFrom(buildPartial());
|
|
|
- }
|
|
|
-
|
|
|
- public com.google.protobuf.Descriptors.Descriptor
|
|
|
- getDescriptorForType() {
|
|
|
- return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto.getDescriptor();
|
|
|
- }
|
|
|
-
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto getDefaultInstanceForType() {
|
|
|
- return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto.getDefaultInstance();
|
|
|
- }
|
|
|
-
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto build() {
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto result = buildPartial();
|
|
|
- if (!result.isInitialized()) {
|
|
|
- throw newUninitializedMessageException(result);
|
|
|
- }
|
|
|
- return result;
|
|
|
- }
|
|
|
-
|
|
|
- private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto buildParsed()
|
|
|
- throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto result = buildPartial();
|
|
|
- if (!result.isInitialized()) {
|
|
|
- throw newUninitializedMessageException(
|
|
|
- result).asInvalidProtocolBufferException();
|
|
|
- }
|
|
|
- return result;
|
|
|
- }
|
|
|
-
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto buildPartial() {
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto result = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto(this);
|
|
|
- int from_bitField0_ = bitField0_;
|
|
|
- int to_bitField0_ = 0;
|
|
|
- if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
- to_bitField0_ |= 0x00000001;
|
|
|
- }
|
|
|
- if (checksumBuilder_ == null) {
|
|
|
- result.checksum_ = checksum_;
|
|
|
- } else {
|
|
|
- result.checksum_ = checksumBuilder_.build();
|
|
|
- }
|
|
|
- if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
|
|
|
- to_bitField0_ |= 0x00000002;
|
|
|
- }
|
|
|
- result.chunkOffset_ = chunkOffset_;
|
|
|
- result.bitField0_ = to_bitField0_;
|
|
|
- onBuilt();
|
|
|
- return result;
|
|
|
- }
|
|
|
-
|
|
|
- public Builder mergeFrom(com.google.protobuf.Message other) {
|
|
|
- if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto) {
|
|
|
- return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto)other);
|
|
|
- } else {
|
|
|
- super.mergeFrom(other);
|
|
|
- return this;
|
|
|
- }
|
|
|
- }
|
|
|
-
|
|
|
- public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto other) {
|
|
|
- if (other == org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto.getDefaultInstance()) return this;
|
|
|
- if (other.hasChecksum()) {
|
|
|
- mergeChecksum(other.getChecksum());
|
|
|
- }
|
|
|
- if (other.hasChunkOffset()) {
|
|
|
- setChunkOffset(other.getChunkOffset());
|
|
|
- }
|
|
|
- this.mergeUnknownFields(other.getUnknownFields());
|
|
|
- return this;
|
|
|
- }
|
|
|
-
|
|
|
- public final boolean isInitialized() {
|
|
|
- if (!hasChecksum()) {
|
|
|
-
|
|
|
- return false;
|
|
|
- }
|
|
|
- if (!hasChunkOffset()) {
|
|
|
-
|
|
|
- return false;
|
|
|
- }
|
|
|
- if (!getChecksum().isInitialized()) {
|
|
|
-
|
|
|
- return false;
|
|
|
- }
|
|
|
- return true;
|
|
|
- }
|
|
|
-
|
|
|
- public Builder mergeFrom(
|
|
|
- com.google.protobuf.CodedInputStream input,
|
|
|
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
- throws java.io.IOException {
|
|
|
- com.google.protobuf.UnknownFieldSet.Builder unknownFields =
|
|
|
- com.google.protobuf.UnknownFieldSet.newBuilder(
|
|
|
- this.getUnknownFields());
|
|
|
- while (true) {
|
|
|
- int tag = input.readTag();
|
|
|
- switch (tag) {
|
|
|
- case 0:
|
|
|
- this.setUnknownFields(unknownFields.build());
|
|
|
- onChanged();
|
|
|
- return this;
|
|
|
- default: {
|
|
|
- if (!parseUnknownField(input, unknownFields,
|
|
|
- extensionRegistry, tag)) {
|
|
|
- this.setUnknownFields(unknownFields.build());
|
|
|
- onChanged();
|
|
|
- return this;
|
|
|
- }
|
|
|
- break;
|
|
|
- }
|
|
|
- case 10: {
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.newBuilder();
|
|
|
- if (hasChecksum()) {
|
|
|
- subBuilder.mergeFrom(getChecksum());
|
|
|
- }
|
|
|
- input.readMessage(subBuilder, extensionRegistry);
|
|
|
- setChecksum(subBuilder.buildPartial());
|
|
|
- break;
|
|
|
- }
|
|
|
- case 16: {
|
|
|
- bitField0_ |= 0x00000002;
|
|
|
- chunkOffset_ = input.readUInt64();
|
|
|
- break;
|
|
|
- }
|
|
|
- }
|
|
|
- }
|
|
|
- }
|
|
|
-
|
|
|
- private int bitField0_;
|
|
|
-
|
|
|
- // required .ChecksumProto checksum = 1;
|
|
|
- private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto checksum_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.getDefaultInstance();
|
|
|
- private com.google.protobuf.SingleFieldBuilder<
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProtoOrBuilder> checksumBuilder_;
|
|
|
- public boolean hasChecksum() {
|
|
|
- return ((bitField0_ & 0x00000001) == 0x00000001);
|
|
|
- }
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto getChecksum() {
|
|
|
- if (checksumBuilder_ == null) {
|
|
|
- return checksum_;
|
|
|
- } else {
|
|
|
- return checksumBuilder_.getMessage();
|
|
|
- }
|
|
|
- }
|
|
|
- public Builder setChecksum(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto value) {
|
|
|
- if (checksumBuilder_ == null) {
|
|
|
- if (value == null) {
|
|
|
- throw new NullPointerException();
|
|
|
- }
|
|
|
- checksum_ = value;
|
|
|
- onChanged();
|
|
|
- } else {
|
|
|
- checksumBuilder_.setMessage(value);
|
|
|
- }
|
|
|
- bitField0_ |= 0x00000001;
|
|
|
- return this;
|
|
|
- }
|
|
|
- public Builder setChecksum(
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.Builder builderForValue) {
|
|
|
- if (checksumBuilder_ == null) {
|
|
|
- checksum_ = builderForValue.build();
|
|
|
- onChanged();
|
|
|
- } else {
|
|
|
- checksumBuilder_.setMessage(builderForValue.build());
|
|
|
- }
|
|
|
- bitField0_ |= 0x00000001;
|
|
|
- return this;
|
|
|
- }
|
|
|
- public Builder mergeChecksum(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto value) {
|
|
|
- if (checksumBuilder_ == null) {
|
|
|
- if (((bitField0_ & 0x00000001) == 0x00000001) &&
|
|
|
- checksum_ != org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.getDefaultInstance()) {
|
|
|
- checksum_ =
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.newBuilder(checksum_).mergeFrom(value).buildPartial();
|
|
|
- } else {
|
|
|
- checksum_ = value;
|
|
|
- }
|
|
|
- onChanged();
|
|
|
- } else {
|
|
|
- checksumBuilder_.mergeFrom(value);
|
|
|
- }
|
|
|
- bitField0_ |= 0x00000001;
|
|
|
- return this;
|
|
|
- }
|
|
|
- public Builder clearChecksum() {
|
|
|
- if (checksumBuilder_ == null) {
|
|
|
- checksum_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.getDefaultInstance();
|
|
|
- onChanged();
|
|
|
- } else {
|
|
|
- checksumBuilder_.clear();
|
|
|
- }
|
|
|
- bitField0_ = (bitField0_ & ~0x00000001);
|
|
|
- return this;
|
|
|
- }
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.Builder getChecksumBuilder() {
|
|
|
- bitField0_ |= 0x00000001;
|
|
|
- onChanged();
|
|
|
- return getChecksumFieldBuilder().getBuilder();
|
|
|
- }
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProtoOrBuilder getChecksumOrBuilder() {
|
|
|
- if (checksumBuilder_ != null) {
|
|
|
- return checksumBuilder_.getMessageOrBuilder();
|
|
|
- } else {
|
|
|
- return checksum_;
|
|
|
- }
|
|
|
- }
|
|
|
- private com.google.protobuf.SingleFieldBuilder<
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProtoOrBuilder>
|
|
|
- getChecksumFieldBuilder() {
|
|
|
- if (checksumBuilder_ == null) {
|
|
|
- checksumBuilder_ = new com.google.protobuf.SingleFieldBuilder<
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProtoOrBuilder>(
|
|
|
- checksum_,
|
|
|
- getParentForChildren(),
|
|
|
- isClean());
|
|
|
- checksum_ = null;
|
|
|
- }
|
|
|
- return checksumBuilder_;
|
|
|
- }
|
|
|
-
|
|
|
- // required uint64 chunkOffset = 2;
|
|
|
- private long chunkOffset_ ;
|
|
|
- public boolean hasChunkOffset() {
|
|
|
- return ((bitField0_ & 0x00000002) == 0x00000002);
|
|
|
- }
|
|
|
- public long getChunkOffset() {
|
|
|
- return chunkOffset_;
|
|
|
- }
|
|
|
- public Builder setChunkOffset(long value) {
|
|
|
- bitField0_ |= 0x00000002;
|
|
|
- chunkOffset_ = value;
|
|
|
- onChanged();
|
|
|
- return this;
|
|
|
- }
|
|
|
- public Builder clearChunkOffset() {
|
|
|
- bitField0_ = (bitField0_ & ~0x00000002);
|
|
|
- chunkOffset_ = 0L;
|
|
|
- onChanged();
|
|
|
- return this;
|
|
|
- }
|
|
|
-
|
|
|
- // @@protoc_insertion_point(builder_scope:ReadOpChecksumInfoProto)
|
|
|
- }
|
|
|
-
|
|
|
- static {
|
|
|
- defaultInstance = new ReadOpChecksumInfoProto(true);
|
|
|
- defaultInstance.initFields();
|
|
|
- }
|
|
|
-
|
|
|
- // @@protoc_insertion_point(class_scope:ReadOpChecksumInfoProto)
|
|
|
- }
|
|
|
-
|
|
|
- public interface BlockOpResponseProtoOrBuilder
|
|
|
- extends com.google.protobuf.MessageOrBuilder {
|
|
|
-
|
|
|
- // required .Status status = 1;
|
|
|
- boolean hasStatus();
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status getStatus();
|
|
|
-
|
|
|
- // optional string firstBadLink = 2;
|
|
|
- boolean hasFirstBadLink();
|
|
|
- String getFirstBadLink();
|
|
|
-
|
|
|
- // optional .OpBlockChecksumResponseProto checksumResponse = 3;
|
|
|
- boolean hasChecksumResponse();
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto getChecksumResponse();
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProtoOrBuilder getChecksumResponseOrBuilder();
|
|
|
-
|
|
|
- // optional .ReadOpChecksumInfoProto readOpChecksumInfo = 4;
|
|
|
- boolean hasReadOpChecksumInfo();
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto getReadOpChecksumInfo();
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProtoOrBuilder getReadOpChecksumInfoOrBuilder();
|
|
|
-
|
|
|
- // optional string message = 5;
|
|
|
- boolean hasMessage();
|
|
|
- String getMessage();
|
|
|
- }
|
|
|
- public static final class BlockOpResponseProto extends
|
|
|
- com.google.protobuf.GeneratedMessage
|
|
|
- implements BlockOpResponseProtoOrBuilder {
|
|
|
- // Use BlockOpResponseProto.newBuilder() to construct.
|
|
|
- private BlockOpResponseProto(Builder builder) {
|
|
|
- super(builder);
|
|
|
- }
|
|
|
- private BlockOpResponseProto(boolean noInit) {}
|
|
|
-
|
|
|
- private static final BlockOpResponseProto defaultInstance;
|
|
|
- public static BlockOpResponseProto getDefaultInstance() {
|
|
|
- return defaultInstance;
|
|
|
- }
|
|
|
-
|
|
|
- public BlockOpResponseProto getDefaultInstanceForType() {
|
|
|
- return defaultInstance;
|
|
|
- }
|
|
|
-
|
|
|
- public static final com.google.protobuf.Descriptors.Descriptor
|
|
|
- getDescriptor() {
|
|
|
- return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_BlockOpResponseProto_descriptor;
|
|
|
- }
|
|
|
-
|
|
|
- protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
|
|
- internalGetFieldAccessorTable() {
|
|
|
- return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_BlockOpResponseProto_fieldAccessorTable;
|
|
|
- }
|
|
|
-
|
|
|
- private int bitField0_;
|
|
|
- // required .Status status = 1;
|
|
|
- public static final int STATUS_FIELD_NUMBER = 1;
|
|
|
- private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status status_;
|
|
|
- public boolean hasStatus() {
|
|
|
- return ((bitField0_ & 0x00000001) == 0x00000001);
|
|
|
- }
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status getStatus() {
|
|
|
- return status_;
|
|
|
- }
|
|
|
-
|
|
|
- // optional string firstBadLink = 2;
|
|
|
- public static final int FIRSTBADLINK_FIELD_NUMBER = 2;
|
|
|
- private java.lang.Object firstBadLink_;
|
|
|
- public boolean hasFirstBadLink() {
|
|
|
- return ((bitField0_ & 0x00000002) == 0x00000002);
|
|
|
- }
|
|
|
- public String getFirstBadLink() {
|
|
|
- java.lang.Object ref = firstBadLink_;
|
|
|
- if (ref instanceof String) {
|
|
|
- return (String) ref;
|
|
|
- } else {
|
|
|
- com.google.protobuf.ByteString bs =
|
|
|
- (com.google.protobuf.ByteString) ref;
|
|
|
- String s = bs.toStringUtf8();
|
|
|
- if (com.google.protobuf.Internal.isValidUtf8(bs)) {
|
|
|
- firstBadLink_ = s;
|
|
|
- }
|
|
|
- return s;
|
|
|
- }
|
|
|
- }
|
|
|
- private com.google.protobuf.ByteString getFirstBadLinkBytes() {
|
|
|
- java.lang.Object ref = firstBadLink_;
|
|
|
- if (ref instanceof String) {
|
|
|
- com.google.protobuf.ByteString b =
|
|
|
- com.google.protobuf.ByteString.copyFromUtf8((String) ref);
|
|
|
- firstBadLink_ = b;
|
|
|
- return b;
|
|
|
- } else {
|
|
|
- return (com.google.protobuf.ByteString) ref;
|
|
|
- }
|
|
|
- }
|
|
|
-
|
|
|
- // optional .OpBlockChecksumResponseProto checksumResponse = 3;
|
|
|
- public static final int CHECKSUMRESPONSE_FIELD_NUMBER = 3;
|
|
|
- private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto checksumResponse_;
|
|
|
- public boolean hasChecksumResponse() {
|
|
|
- return ((bitField0_ & 0x00000004) == 0x00000004);
|
|
|
- }
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto getChecksumResponse() {
|
|
|
- return checksumResponse_;
|
|
|
- }
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProtoOrBuilder getChecksumResponseOrBuilder() {
|
|
|
- return checksumResponse_;
|
|
|
- }
|
|
|
-
|
|
|
- // optional .ReadOpChecksumInfoProto readOpChecksumInfo = 4;
|
|
|
- public static final int READOPCHECKSUMINFO_FIELD_NUMBER = 4;
|
|
|
- private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto readOpChecksumInfo_;
|
|
|
- public boolean hasReadOpChecksumInfo() {
|
|
|
- return ((bitField0_ & 0x00000008) == 0x00000008);
|
|
|
- }
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto getReadOpChecksumInfo() {
|
|
|
- return readOpChecksumInfo_;
|
|
|
- }
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProtoOrBuilder getReadOpChecksumInfoOrBuilder() {
|
|
|
- return readOpChecksumInfo_;
|
|
|
- }
|
|
|
-
|
|
|
- // optional string message = 5;
|
|
|
- public static final int MESSAGE_FIELD_NUMBER = 5;
|
|
|
- private java.lang.Object message_;
|
|
|
- public boolean hasMessage() {
|
|
|
- return ((bitField0_ & 0x00000010) == 0x00000010);
|
|
|
- }
|
|
|
- public String getMessage() {
|
|
|
- java.lang.Object ref = message_;
|
|
|
- if (ref instanceof String) {
|
|
|
- return (String) ref;
|
|
|
- } else {
|
|
|
- com.google.protobuf.ByteString bs =
|
|
|
- (com.google.protobuf.ByteString) ref;
|
|
|
- String s = bs.toStringUtf8();
|
|
|
- if (com.google.protobuf.Internal.isValidUtf8(bs)) {
|
|
|
- message_ = s;
|
|
|
- }
|
|
|
- return s;
|
|
|
- }
|
|
|
- }
|
|
|
- private com.google.protobuf.ByteString getMessageBytes() {
|
|
|
- java.lang.Object ref = message_;
|
|
|
- if (ref instanceof String) {
|
|
|
- com.google.protobuf.ByteString b =
|
|
|
- com.google.protobuf.ByteString.copyFromUtf8((String) ref);
|
|
|
- message_ = b;
|
|
|
- return b;
|
|
|
- } else {
|
|
|
- return (com.google.protobuf.ByteString) ref;
|
|
|
- }
|
|
|
- }
|
|
|
-
|
|
|
- private void initFields() {
|
|
|
- status_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.SUCCESS;
|
|
|
- firstBadLink_ = "";
|
|
|
- checksumResponse_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto.getDefaultInstance();
|
|
|
- readOpChecksumInfo_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto.getDefaultInstance();
|
|
|
- message_ = "";
|
|
|
- }
|
|
|
- private byte memoizedIsInitialized = -1;
|
|
|
- public final boolean isInitialized() {
|
|
|
- byte isInitialized = memoizedIsInitialized;
|
|
|
- if (isInitialized != -1) return isInitialized == 1;
|
|
|
-
|
|
|
- if (!hasStatus()) {
|
|
|
- memoizedIsInitialized = 0;
|
|
|
- return false;
|
|
|
- }
|
|
|
- if (hasChecksumResponse()) {
|
|
|
- if (!getChecksumResponse().isInitialized()) {
|
|
|
- memoizedIsInitialized = 0;
|
|
|
- return false;
|
|
|
- }
|
|
|
- }
|
|
|
- if (hasReadOpChecksumInfo()) {
|
|
|
- if (!getReadOpChecksumInfo().isInitialized()) {
|
|
|
- memoizedIsInitialized = 0;
|
|
|
- return false;
|
|
|
- }
|
|
|
- }
|
|
|
- memoizedIsInitialized = 1;
|
|
|
- return true;
|
|
|
- }
|
|
|
-
|
|
|
- public void writeTo(com.google.protobuf.CodedOutputStream output)
|
|
|
- throws java.io.IOException {
|
|
|
- getSerializedSize();
|
|
|
- if (((bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
- output.writeEnum(1, status_.getNumber());
|
|
|
- }
|
|
|
- if (((bitField0_ & 0x00000002) == 0x00000002)) {
|
|
|
- output.writeBytes(2, getFirstBadLinkBytes());
|
|
|
- }
|
|
|
- if (((bitField0_ & 0x00000004) == 0x00000004)) {
|
|
|
- output.writeMessage(3, checksumResponse_);
|
|
|
- }
|
|
|
- if (((bitField0_ & 0x00000008) == 0x00000008)) {
|
|
|
- output.writeMessage(4, readOpChecksumInfo_);
|
|
|
- }
|
|
|
- if (((bitField0_ & 0x00000010) == 0x00000010)) {
|
|
|
- output.writeBytes(5, getMessageBytes());
|
|
|
- }
|
|
|
- getUnknownFields().writeTo(output);
|
|
|
- }
|
|
|
-
|
|
|
- private int memoizedSerializedSize = -1;
|
|
|
- public int getSerializedSize() {
|
|
|
- int size = memoizedSerializedSize;
|
|
|
- if (size != -1) return size;
|
|
|
-
|
|
|
- size = 0;
|
|
|
- if (((bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
- size += com.google.protobuf.CodedOutputStream
|
|
|
- .computeEnumSize(1, status_.getNumber());
|
|
|
- }
|
|
|
- if (((bitField0_ & 0x00000002) == 0x00000002)) {
|
|
|
- size += com.google.protobuf.CodedOutputStream
|
|
|
- .computeBytesSize(2, getFirstBadLinkBytes());
|
|
|
- }
|
|
|
- if (((bitField0_ & 0x00000004) == 0x00000004)) {
|
|
|
- size += com.google.protobuf.CodedOutputStream
|
|
|
- .computeMessageSize(3, checksumResponse_);
|
|
|
- }
|
|
|
- if (((bitField0_ & 0x00000008) == 0x00000008)) {
|
|
|
- size += com.google.protobuf.CodedOutputStream
|
|
|
- .computeMessageSize(4, readOpChecksumInfo_);
|
|
|
- }
|
|
|
- if (((bitField0_ & 0x00000010) == 0x00000010)) {
|
|
|
- size += com.google.protobuf.CodedOutputStream
|
|
|
- .computeBytesSize(5, getMessageBytes());
|
|
|
- }
|
|
|
- size += getUnknownFields().getSerializedSize();
|
|
|
- memoizedSerializedSize = size;
|
|
|
- return size;
|
|
|
- }
|
|
|
-
|
|
|
- private static final long serialVersionUID = 0L;
|
|
|
- @java.lang.Override
|
|
|
- protected java.lang.Object writeReplace()
|
|
|
- throws java.io.ObjectStreamException {
|
|
|
- return super.writeReplace();
|
|
|
- }
|
|
|
-
|
|
|
- @java.lang.Override
|
|
|
- public boolean equals(final java.lang.Object obj) {
|
|
|
- if (obj == this) {
|
|
|
- return true;
|
|
|
- }
|
|
|
- if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto)) {
|
|
|
- return super.equals(obj);
|
|
|
- }
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto) obj;
|
|
|
-
|
|
|
- boolean result = true;
|
|
|
- result = result && (hasStatus() == other.hasStatus());
|
|
|
- if (hasStatus()) {
|
|
|
- result = result &&
|
|
|
- (getStatus() == other.getStatus());
|
|
|
- }
|
|
|
- result = result && (hasFirstBadLink() == other.hasFirstBadLink());
|
|
|
- if (hasFirstBadLink()) {
|
|
|
- result = result && getFirstBadLink()
|
|
|
- .equals(other.getFirstBadLink());
|
|
|
- }
|
|
|
- result = result && (hasChecksumResponse() == other.hasChecksumResponse());
|
|
|
- if (hasChecksumResponse()) {
|
|
|
- result = result && getChecksumResponse()
|
|
|
- .equals(other.getChecksumResponse());
|
|
|
- }
|
|
|
- result = result && (hasReadOpChecksumInfo() == other.hasReadOpChecksumInfo());
|
|
|
- if (hasReadOpChecksumInfo()) {
|
|
|
- result = result && getReadOpChecksumInfo()
|
|
|
- .equals(other.getReadOpChecksumInfo());
|
|
|
- }
|
|
|
- result = result && (hasMessage() == other.hasMessage());
|
|
|
- if (hasMessage()) {
|
|
|
- result = result && getMessage()
|
|
|
- .equals(other.getMessage());
|
|
|
- }
|
|
|
- result = result &&
|
|
|
- getUnknownFields().equals(other.getUnknownFields());
|
|
|
- return result;
|
|
|
- }
|
|
|
-
|
|
|
- @java.lang.Override
|
|
|
- public int hashCode() {
|
|
|
- int hash = 41;
|
|
|
- hash = (19 * hash) + getDescriptorForType().hashCode();
|
|
|
- if (hasStatus()) {
|
|
|
- hash = (37 * hash) + STATUS_FIELD_NUMBER;
|
|
|
- hash = (53 * hash) + hashEnum(getStatus());
|
|
|
- }
|
|
|
- if (hasFirstBadLink()) {
|
|
|
- hash = (37 * hash) + FIRSTBADLINK_FIELD_NUMBER;
|
|
|
- hash = (53 * hash) + getFirstBadLink().hashCode();
|
|
|
- }
|
|
|
- if (hasChecksumResponse()) {
|
|
|
- hash = (37 * hash) + CHECKSUMRESPONSE_FIELD_NUMBER;
|
|
|
- hash = (53 * hash) + getChecksumResponse().hashCode();
|
|
|
- }
|
|
|
- if (hasReadOpChecksumInfo()) {
|
|
|
- hash = (37 * hash) + READOPCHECKSUMINFO_FIELD_NUMBER;
|
|
|
- hash = (53 * hash) + getReadOpChecksumInfo().hashCode();
|
|
|
- }
|
|
|
- if (hasMessage()) {
|
|
|
- hash = (37 * hash) + MESSAGE_FIELD_NUMBER;
|
|
|
- hash = (53 * hash) + getMessage().hashCode();
|
|
|
- }
|
|
|
- hash = (29 * hash) + getUnknownFields().hashCode();
|
|
|
- return hash;
|
|
|
- }
|
|
|
-
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto parseFrom(
|
|
|
- com.google.protobuf.ByteString data)
|
|
|
- throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
- return newBuilder().mergeFrom(data).buildParsed();
|
|
|
- }
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto parseFrom(
|
|
|
- com.google.protobuf.ByteString data,
|
|
|
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
- throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
- return newBuilder().mergeFrom(data, extensionRegistry)
|
|
|
- .buildParsed();
|
|
|
- }
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto parseFrom(byte[] data)
|
|
|
- throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
- return newBuilder().mergeFrom(data).buildParsed();
|
|
|
- }
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto parseFrom(
|
|
|
- byte[] data,
|
|
|
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
- throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
- return newBuilder().mergeFrom(data, extensionRegistry)
|
|
|
- .buildParsed();
|
|
|
- }
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto parseFrom(java.io.InputStream input)
|
|
|
- throws java.io.IOException {
|
|
|
- return newBuilder().mergeFrom(input).buildParsed();
|
|
|
- }
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto parseFrom(
|
|
|
- java.io.InputStream input,
|
|
|
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
- throws java.io.IOException {
|
|
|
- return newBuilder().mergeFrom(input, extensionRegistry)
|
|
|
- .buildParsed();
|
|
|
- }
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto parseDelimitedFrom(java.io.InputStream input)
|
|
|
- throws java.io.IOException {
|
|
|
- Builder builder = newBuilder();
|
|
|
- if (builder.mergeDelimitedFrom(input)) {
|
|
|
- return builder.buildParsed();
|
|
|
- } else {
|
|
|
- return null;
|
|
|
- }
|
|
|
- }
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto parseDelimitedFrom(
|
|
|
- java.io.InputStream input,
|
|
|
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
- throws java.io.IOException {
|
|
|
- Builder builder = newBuilder();
|
|
|
- if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
|
|
|
- return builder.buildParsed();
|
|
|
- } else {
|
|
|
- return null;
|
|
|
- }
|
|
|
- }
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto parseFrom(
|
|
|
- com.google.protobuf.CodedInputStream input)
|
|
|
- throws java.io.IOException {
|
|
|
- return newBuilder().mergeFrom(input).buildParsed();
|
|
|
- }
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto parseFrom(
|
|
|
- com.google.protobuf.CodedInputStream input,
|
|
|
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
- throws java.io.IOException {
|
|
|
- return newBuilder().mergeFrom(input, extensionRegistry)
|
|
|
- .buildParsed();
|
|
|
- }
|
|
|
-
|
|
|
- public static Builder newBuilder() { return Builder.create(); }
|
|
|
- public Builder newBuilderForType() { return newBuilder(); }
|
|
|
- public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto prototype) {
|
|
|
- return newBuilder().mergeFrom(prototype);
|
|
|
- }
|
|
|
- public Builder toBuilder() { return newBuilder(this); }
|
|
|
-
|
|
|
- @java.lang.Override
|
|
|
- protected Builder newBuilderForType(
|
|
|
- com.google.protobuf.GeneratedMessage.BuilderParent parent) {
|
|
|
- Builder builder = new Builder(parent);
|
|
|
- return builder;
|
|
|
- }
|
|
|
- public static final class Builder extends
|
|
|
- com.google.protobuf.GeneratedMessage.Builder<Builder>
|
|
|
- implements org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProtoOrBuilder {
|
|
|
- public static final com.google.protobuf.Descriptors.Descriptor
|
|
|
- getDescriptor() {
|
|
|
- return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_BlockOpResponseProto_descriptor;
|
|
|
- }
|
|
|
-
|
|
|
- protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
|
|
- internalGetFieldAccessorTable() {
|
|
|
- return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_BlockOpResponseProto_fieldAccessorTable;
|
|
|
- }
|
|
|
-
|
|
|
- // Construct using org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto.newBuilder()
|
|
|
- private Builder() {
|
|
|
- maybeForceBuilderInitialization();
|
|
|
- }
|
|
|
-
|
|
|
- private Builder(BuilderParent parent) {
|
|
|
- super(parent);
|
|
|
- maybeForceBuilderInitialization();
|
|
|
- }
|
|
|
- private void maybeForceBuilderInitialization() {
|
|
|
- if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
|
|
|
- getChecksumResponseFieldBuilder();
|
|
|
- getReadOpChecksumInfoFieldBuilder();
|
|
|
- }
|
|
|
- }
|
|
|
- private static Builder create() {
|
|
|
- return new Builder();
|
|
|
- }
|
|
|
-
|
|
|
- public Builder clear() {
|
|
|
- super.clear();
|
|
|
- status_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.SUCCESS;
|
|
|
- bitField0_ = (bitField0_ & ~0x00000001);
|
|
|
- firstBadLink_ = "";
|
|
|
- bitField0_ = (bitField0_ & ~0x00000002);
|
|
|
- if (checksumResponseBuilder_ == null) {
|
|
|
- checksumResponse_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto.getDefaultInstance();
|
|
|
- } else {
|
|
|
- checksumResponseBuilder_.clear();
|
|
|
- }
|
|
|
- bitField0_ = (bitField0_ & ~0x00000004);
|
|
|
- if (readOpChecksumInfoBuilder_ == null) {
|
|
|
- readOpChecksumInfo_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto.getDefaultInstance();
|
|
|
- } else {
|
|
|
- readOpChecksumInfoBuilder_.clear();
|
|
|
- }
|
|
|
- bitField0_ = (bitField0_ & ~0x00000008);
|
|
|
- message_ = "";
|
|
|
- bitField0_ = (bitField0_ & ~0x00000010);
|
|
|
- return this;
|
|
|
- }
|
|
|
-
|
|
|
- public Builder clone() {
|
|
|
- return create().mergeFrom(buildPartial());
|
|
|
- }
|
|
|
-
|
|
|
- public com.google.protobuf.Descriptors.Descriptor
|
|
|
- getDescriptorForType() {
|
|
|
- return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto.getDescriptor();
|
|
|
- }
|
|
|
-
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto getDefaultInstanceForType() {
|
|
|
- return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto.getDefaultInstance();
|
|
|
- }
|
|
|
-
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto build() {
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto result = buildPartial();
|
|
|
- if (!result.isInitialized()) {
|
|
|
- throw newUninitializedMessageException(result);
|
|
|
- }
|
|
|
- return result;
|
|
|
- }
|
|
|
-
|
|
|
- private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto buildParsed()
|
|
|
- throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto result = buildPartial();
|
|
|
- if (!result.isInitialized()) {
|
|
|
- throw newUninitializedMessageException(
|
|
|
- result).asInvalidProtocolBufferException();
|
|
|
- }
|
|
|
- return result;
|
|
|
- }
|
|
|
-
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto buildPartial() {
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto(this);
|
|
|
- int from_bitField0_ = bitField0_;
|
|
|
- int to_bitField0_ = 0;
|
|
|
- if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
- to_bitField0_ |= 0x00000001;
|
|
|
- }
|
|
|
- result.status_ = status_;
|
|
|
- if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
|
|
|
- to_bitField0_ |= 0x00000002;
|
|
|
- }
|
|
|
- result.firstBadLink_ = firstBadLink_;
|
|
|
- if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
|
|
|
- to_bitField0_ |= 0x00000004;
|
|
|
- }
|
|
|
- if (checksumResponseBuilder_ == null) {
|
|
|
- result.checksumResponse_ = checksumResponse_;
|
|
|
- } else {
|
|
|
- result.checksumResponse_ = checksumResponseBuilder_.build();
|
|
|
- }
|
|
|
- if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
|
|
|
- to_bitField0_ |= 0x00000008;
|
|
|
- }
|
|
|
- if (readOpChecksumInfoBuilder_ == null) {
|
|
|
- result.readOpChecksumInfo_ = readOpChecksumInfo_;
|
|
|
- } else {
|
|
|
- result.readOpChecksumInfo_ = readOpChecksumInfoBuilder_.build();
|
|
|
- }
|
|
|
- if (((from_bitField0_ & 0x00000010) == 0x00000010)) {
|
|
|
- to_bitField0_ |= 0x00000010;
|
|
|
- }
|
|
|
- result.message_ = message_;
|
|
|
- result.bitField0_ = to_bitField0_;
|
|
|
- onBuilt();
|
|
|
- return result;
|
|
|
- }
|
|
|
-
|
|
|
- public Builder mergeFrom(com.google.protobuf.Message other) {
|
|
|
- if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto) {
|
|
|
- return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto)other);
|
|
|
- } else {
|
|
|
- super.mergeFrom(other);
|
|
|
- return this;
|
|
|
- }
|
|
|
- }
|
|
|
-
|
|
|
- public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto other) {
|
|
|
- if (other == org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto.getDefaultInstance()) return this;
|
|
|
- if (other.hasStatus()) {
|
|
|
- setStatus(other.getStatus());
|
|
|
- }
|
|
|
- if (other.hasFirstBadLink()) {
|
|
|
- setFirstBadLink(other.getFirstBadLink());
|
|
|
- }
|
|
|
- if (other.hasChecksumResponse()) {
|
|
|
- mergeChecksumResponse(other.getChecksumResponse());
|
|
|
- }
|
|
|
- if (other.hasReadOpChecksumInfo()) {
|
|
|
- mergeReadOpChecksumInfo(other.getReadOpChecksumInfo());
|
|
|
- }
|
|
|
- if (other.hasMessage()) {
|
|
|
- setMessage(other.getMessage());
|
|
|
- }
|
|
|
- this.mergeUnknownFields(other.getUnknownFields());
|
|
|
- return this;
|
|
|
- }
|
|
|
-
|
|
|
- public final boolean isInitialized() {
|
|
|
- if (!hasStatus()) {
|
|
|
-
|
|
|
- return false;
|
|
|
- }
|
|
|
- if (hasChecksumResponse()) {
|
|
|
- if (!getChecksumResponse().isInitialized()) {
|
|
|
-
|
|
|
- return false;
|
|
|
- }
|
|
|
- }
|
|
|
- if (hasReadOpChecksumInfo()) {
|
|
|
- if (!getReadOpChecksumInfo().isInitialized()) {
|
|
|
-
|
|
|
- return false;
|
|
|
- }
|
|
|
- }
|
|
|
- return true;
|
|
|
- }
|
|
|
-
|
|
|
- public Builder mergeFrom(
|
|
|
- com.google.protobuf.CodedInputStream input,
|
|
|
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
- throws java.io.IOException {
|
|
|
- com.google.protobuf.UnknownFieldSet.Builder unknownFields =
|
|
|
- com.google.protobuf.UnknownFieldSet.newBuilder(
|
|
|
- this.getUnknownFields());
|
|
|
- while (true) {
|
|
|
- int tag = input.readTag();
|
|
|
- switch (tag) {
|
|
|
- case 0:
|
|
|
- this.setUnknownFields(unknownFields.build());
|
|
|
- onChanged();
|
|
|
- return this;
|
|
|
- default: {
|
|
|
- if (!parseUnknownField(input, unknownFields,
|
|
|
- extensionRegistry, tag)) {
|
|
|
- this.setUnknownFields(unknownFields.build());
|
|
|
- onChanged();
|
|
|
- return this;
|
|
|
- }
|
|
|
- break;
|
|
|
- }
|
|
|
- case 8: {
|
|
|
- int rawValue = input.readEnum();
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status value = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.valueOf(rawValue);
|
|
|
- if (value == null) {
|
|
|
- unknownFields.mergeVarintField(1, rawValue);
|
|
|
- } else {
|
|
|
- bitField0_ |= 0x00000001;
|
|
|
- status_ = value;
|
|
|
- }
|
|
|
- break;
|
|
|
- }
|
|
|
- case 18: {
|
|
|
- bitField0_ |= 0x00000002;
|
|
|
- firstBadLink_ = input.readBytes();
|
|
|
- break;
|
|
|
- }
|
|
|
- case 26: {
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto.newBuilder();
|
|
|
- if (hasChecksumResponse()) {
|
|
|
- subBuilder.mergeFrom(getChecksumResponse());
|
|
|
- }
|
|
|
- input.readMessage(subBuilder, extensionRegistry);
|
|
|
- setChecksumResponse(subBuilder.buildPartial());
|
|
|
- break;
|
|
|
- }
|
|
|
- case 34: {
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto.newBuilder();
|
|
|
- if (hasReadOpChecksumInfo()) {
|
|
|
- subBuilder.mergeFrom(getReadOpChecksumInfo());
|
|
|
- }
|
|
|
- input.readMessage(subBuilder, extensionRegistry);
|
|
|
- setReadOpChecksumInfo(subBuilder.buildPartial());
|
|
|
- break;
|
|
|
- }
|
|
|
- case 42: {
|
|
|
- bitField0_ |= 0x00000010;
|
|
|
- message_ = input.readBytes();
|
|
|
- break;
|
|
|
- }
|
|
|
- }
|
|
|
- }
|
|
|
- }
|
|
|
-
|
|
|
- private int bitField0_;
|
|
|
-
|
|
|
- // required .Status status = 1;
|
|
|
- private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status status_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.SUCCESS;
|
|
|
- public boolean hasStatus() {
|
|
|
- return ((bitField0_ & 0x00000001) == 0x00000001);
|
|
|
- }
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status getStatus() {
|
|
|
- return status_;
|
|
|
- }
|
|
|
- public Builder setStatus(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status value) {
|
|
|
- if (value == null) {
|
|
|
- throw new NullPointerException();
|
|
|
- }
|
|
|
- bitField0_ |= 0x00000001;
|
|
|
- status_ = value;
|
|
|
- onChanged();
|
|
|
- return this;
|
|
|
- }
|
|
|
- public Builder clearStatus() {
|
|
|
- bitField0_ = (bitField0_ & ~0x00000001);
|
|
|
- status_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.SUCCESS;
|
|
|
- onChanged();
|
|
|
- return this;
|
|
|
- }
|
|
|
-
|
|
|
- // optional string firstBadLink = 2;
|
|
|
- private java.lang.Object firstBadLink_ = "";
|
|
|
- public boolean hasFirstBadLink() {
|
|
|
- return ((bitField0_ & 0x00000002) == 0x00000002);
|
|
|
- }
|
|
|
- public String getFirstBadLink() {
|
|
|
- java.lang.Object ref = firstBadLink_;
|
|
|
- if (!(ref instanceof String)) {
|
|
|
- String s = ((com.google.protobuf.ByteString) ref).toStringUtf8();
|
|
|
- firstBadLink_ = s;
|
|
|
- return s;
|
|
|
- } else {
|
|
|
- return (String) ref;
|
|
|
- }
|
|
|
- }
|
|
|
- public Builder setFirstBadLink(String value) {
|
|
|
- if (value == null) {
|
|
|
- throw new NullPointerException();
|
|
|
- }
|
|
|
- bitField0_ |= 0x00000002;
|
|
|
- firstBadLink_ = value;
|
|
|
- onChanged();
|
|
|
- return this;
|
|
|
- }
|
|
|
- public Builder clearFirstBadLink() {
|
|
|
- bitField0_ = (bitField0_ & ~0x00000002);
|
|
|
- firstBadLink_ = getDefaultInstance().getFirstBadLink();
|
|
|
- onChanged();
|
|
|
- return this;
|
|
|
- }
|
|
|
- void setFirstBadLink(com.google.protobuf.ByteString value) {
|
|
|
- bitField0_ |= 0x00000002;
|
|
|
- firstBadLink_ = value;
|
|
|
- onChanged();
|
|
|
- }
|
|
|
-
|
|
|
- // optional .OpBlockChecksumResponseProto checksumResponse = 3;
|
|
|
- private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto checksumResponse_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto.getDefaultInstance();
|
|
|
- private com.google.protobuf.SingleFieldBuilder<
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProtoOrBuilder> checksumResponseBuilder_;
|
|
|
- public boolean hasChecksumResponse() {
|
|
|
- return ((bitField0_ & 0x00000004) == 0x00000004);
|
|
|
- }
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto getChecksumResponse() {
|
|
|
- if (checksumResponseBuilder_ == null) {
|
|
|
- return checksumResponse_;
|
|
|
- } else {
|
|
|
- return checksumResponseBuilder_.getMessage();
|
|
|
- }
|
|
|
- }
|
|
|
- public Builder setChecksumResponse(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto value) {
|
|
|
- if (checksumResponseBuilder_ == null) {
|
|
|
- if (value == null) {
|
|
|
- throw new NullPointerException();
|
|
|
- }
|
|
|
- checksumResponse_ = value;
|
|
|
- onChanged();
|
|
|
- } else {
|
|
|
- checksumResponseBuilder_.setMessage(value);
|
|
|
- }
|
|
|
- bitField0_ |= 0x00000004;
|
|
|
- return this;
|
|
|
- }
|
|
|
- public Builder setChecksumResponse(
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto.Builder builderForValue) {
|
|
|
- if (checksumResponseBuilder_ == null) {
|
|
|
- checksumResponse_ = builderForValue.build();
|
|
|
- onChanged();
|
|
|
- } else {
|
|
|
- checksumResponseBuilder_.setMessage(builderForValue.build());
|
|
|
- }
|
|
|
- bitField0_ |= 0x00000004;
|
|
|
- return this;
|
|
|
- }
|
|
|
- public Builder mergeChecksumResponse(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto value) {
|
|
|
- if (checksumResponseBuilder_ == null) {
|
|
|
- if (((bitField0_ & 0x00000004) == 0x00000004) &&
|
|
|
- checksumResponse_ != org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto.getDefaultInstance()) {
|
|
|
- checksumResponse_ =
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto.newBuilder(checksumResponse_).mergeFrom(value).buildPartial();
|
|
|
- } else {
|
|
|
- checksumResponse_ = value;
|
|
|
- }
|
|
|
- onChanged();
|
|
|
- } else {
|
|
|
- checksumResponseBuilder_.mergeFrom(value);
|
|
|
- }
|
|
|
- bitField0_ |= 0x00000004;
|
|
|
- return this;
|
|
|
- }
|
|
|
- public Builder clearChecksumResponse() {
|
|
|
- if (checksumResponseBuilder_ == null) {
|
|
|
- checksumResponse_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto.getDefaultInstance();
|
|
|
- onChanged();
|
|
|
- } else {
|
|
|
- checksumResponseBuilder_.clear();
|
|
|
- }
|
|
|
- bitField0_ = (bitField0_ & ~0x00000004);
|
|
|
- return this;
|
|
|
- }
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto.Builder getChecksumResponseBuilder() {
|
|
|
- bitField0_ |= 0x00000004;
|
|
|
- onChanged();
|
|
|
- return getChecksumResponseFieldBuilder().getBuilder();
|
|
|
- }
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProtoOrBuilder getChecksumResponseOrBuilder() {
|
|
|
- if (checksumResponseBuilder_ != null) {
|
|
|
- return checksumResponseBuilder_.getMessageOrBuilder();
|
|
|
- } else {
|
|
|
- return checksumResponse_;
|
|
|
- }
|
|
|
- }
|
|
|
- private com.google.protobuf.SingleFieldBuilder<
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProtoOrBuilder>
|
|
|
- getChecksumResponseFieldBuilder() {
|
|
|
- if (checksumResponseBuilder_ == null) {
|
|
|
- checksumResponseBuilder_ = new com.google.protobuf.SingleFieldBuilder<
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProtoOrBuilder>(
|
|
|
- checksumResponse_,
|
|
|
- getParentForChildren(),
|
|
|
- isClean());
|
|
|
- checksumResponse_ = null;
|
|
|
- }
|
|
|
- return checksumResponseBuilder_;
|
|
|
- }
|
|
|
-
|
|
|
- // optional .ReadOpChecksumInfoProto readOpChecksumInfo = 4;
|
|
|
- private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto readOpChecksumInfo_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto.getDefaultInstance();
|
|
|
- private com.google.protobuf.SingleFieldBuilder<
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProtoOrBuilder> readOpChecksumInfoBuilder_;
|
|
|
- public boolean hasReadOpChecksumInfo() {
|
|
|
- return ((bitField0_ & 0x00000008) == 0x00000008);
|
|
|
- }
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto getReadOpChecksumInfo() {
|
|
|
- if (readOpChecksumInfoBuilder_ == null) {
|
|
|
- return readOpChecksumInfo_;
|
|
|
- } else {
|
|
|
- return readOpChecksumInfoBuilder_.getMessage();
|
|
|
- }
|
|
|
- }
|
|
|
- public Builder setReadOpChecksumInfo(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto value) {
|
|
|
- if (readOpChecksumInfoBuilder_ == null) {
|
|
|
- if (value == null) {
|
|
|
- throw new NullPointerException();
|
|
|
- }
|
|
|
- readOpChecksumInfo_ = value;
|
|
|
- onChanged();
|
|
|
- } else {
|
|
|
- readOpChecksumInfoBuilder_.setMessage(value);
|
|
|
- }
|
|
|
- bitField0_ |= 0x00000008;
|
|
|
- return this;
|
|
|
- }
|
|
|
- public Builder setReadOpChecksumInfo(
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto.Builder builderForValue) {
|
|
|
- if (readOpChecksumInfoBuilder_ == null) {
|
|
|
- readOpChecksumInfo_ = builderForValue.build();
|
|
|
- onChanged();
|
|
|
- } else {
|
|
|
- readOpChecksumInfoBuilder_.setMessage(builderForValue.build());
|
|
|
- }
|
|
|
- bitField0_ |= 0x00000008;
|
|
|
- return this;
|
|
|
- }
|
|
|
- public Builder mergeReadOpChecksumInfo(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto value) {
|
|
|
- if (readOpChecksumInfoBuilder_ == null) {
|
|
|
- if (((bitField0_ & 0x00000008) == 0x00000008) &&
|
|
|
- readOpChecksumInfo_ != org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto.getDefaultInstance()) {
|
|
|
- readOpChecksumInfo_ =
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto.newBuilder(readOpChecksumInfo_).mergeFrom(value).buildPartial();
|
|
|
- } else {
|
|
|
- readOpChecksumInfo_ = value;
|
|
|
- }
|
|
|
- onChanged();
|
|
|
- } else {
|
|
|
- readOpChecksumInfoBuilder_.mergeFrom(value);
|
|
|
- }
|
|
|
- bitField0_ |= 0x00000008;
|
|
|
- return this;
|
|
|
- }
|
|
|
- public Builder clearReadOpChecksumInfo() {
|
|
|
- if (readOpChecksumInfoBuilder_ == null) {
|
|
|
- readOpChecksumInfo_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto.getDefaultInstance();
|
|
|
- onChanged();
|
|
|
- } else {
|
|
|
- readOpChecksumInfoBuilder_.clear();
|
|
|
- }
|
|
|
- bitField0_ = (bitField0_ & ~0x00000008);
|
|
|
- return this;
|
|
|
- }
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto.Builder getReadOpChecksumInfoBuilder() {
|
|
|
- bitField0_ |= 0x00000008;
|
|
|
- onChanged();
|
|
|
- return getReadOpChecksumInfoFieldBuilder().getBuilder();
|
|
|
- }
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProtoOrBuilder getReadOpChecksumInfoOrBuilder() {
|
|
|
- if (readOpChecksumInfoBuilder_ != null) {
|
|
|
- return readOpChecksumInfoBuilder_.getMessageOrBuilder();
|
|
|
- } else {
|
|
|
- return readOpChecksumInfo_;
|
|
|
- }
|
|
|
- }
|
|
|
- private com.google.protobuf.SingleFieldBuilder<
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProtoOrBuilder>
|
|
|
- getReadOpChecksumInfoFieldBuilder() {
|
|
|
- if (readOpChecksumInfoBuilder_ == null) {
|
|
|
- readOpChecksumInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder<
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProtoOrBuilder>(
|
|
|
- readOpChecksumInfo_,
|
|
|
- getParentForChildren(),
|
|
|
- isClean());
|
|
|
- readOpChecksumInfo_ = null;
|
|
|
- }
|
|
|
- return readOpChecksumInfoBuilder_;
|
|
|
- }
|
|
|
-
|
|
|
- // optional string message = 5;
|
|
|
- private java.lang.Object message_ = "";
|
|
|
- public boolean hasMessage() {
|
|
|
- return ((bitField0_ & 0x00000010) == 0x00000010);
|
|
|
- }
|
|
|
- public String getMessage() {
|
|
|
- java.lang.Object ref = message_;
|
|
|
- if (!(ref instanceof String)) {
|
|
|
- String s = ((com.google.protobuf.ByteString) ref).toStringUtf8();
|
|
|
- message_ = s;
|
|
|
- return s;
|
|
|
- } else {
|
|
|
- return (String) ref;
|
|
|
- }
|
|
|
- }
|
|
|
- public Builder setMessage(String value) {
|
|
|
- if (value == null) {
|
|
|
- throw new NullPointerException();
|
|
|
- }
|
|
|
- bitField0_ |= 0x00000010;
|
|
|
- message_ = value;
|
|
|
- onChanged();
|
|
|
- return this;
|
|
|
- }
|
|
|
- public Builder clearMessage() {
|
|
|
- bitField0_ = (bitField0_ & ~0x00000010);
|
|
|
- message_ = getDefaultInstance().getMessage();
|
|
|
- onChanged();
|
|
|
- return this;
|
|
|
- }
|
|
|
- void setMessage(com.google.protobuf.ByteString value) {
|
|
|
- bitField0_ |= 0x00000010;
|
|
|
- message_ = value;
|
|
|
- onChanged();
|
|
|
- }
|
|
|
-
|
|
|
- // @@protoc_insertion_point(builder_scope:BlockOpResponseProto)
|
|
|
- }
|
|
|
-
|
|
|
- static {
|
|
|
- defaultInstance = new BlockOpResponseProto(true);
|
|
|
- defaultInstance.initFields();
|
|
|
- }
|
|
|
-
|
|
|
- // @@protoc_insertion_point(class_scope:BlockOpResponseProto)
|
|
|
- }
|
|
|
-
|
|
|
- public interface ClientReadStatusProtoOrBuilder
|
|
|
- extends com.google.protobuf.MessageOrBuilder {
|
|
|
-
|
|
|
- // required .Status status = 1;
|
|
|
- boolean hasStatus();
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status getStatus();
|
|
|
- }
|
|
|
- public static final class ClientReadStatusProto extends
|
|
|
- com.google.protobuf.GeneratedMessage
|
|
|
- implements ClientReadStatusProtoOrBuilder {
|
|
|
- // Use ClientReadStatusProto.newBuilder() to construct.
|
|
|
- private ClientReadStatusProto(Builder builder) {
|
|
|
- super(builder);
|
|
|
- }
|
|
|
- private ClientReadStatusProto(boolean noInit) {}
|
|
|
-
|
|
|
- private static final ClientReadStatusProto defaultInstance;
|
|
|
- public static ClientReadStatusProto getDefaultInstance() {
|
|
|
- return defaultInstance;
|
|
|
- }
|
|
|
-
|
|
|
- public ClientReadStatusProto getDefaultInstanceForType() {
|
|
|
- return defaultInstance;
|
|
|
- }
|
|
|
-
|
|
|
- public static final com.google.protobuf.Descriptors.Descriptor
|
|
|
- getDescriptor() {
|
|
|
- return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_ClientReadStatusProto_descriptor;
|
|
|
- }
|
|
|
-
|
|
|
- protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
|
|
- internalGetFieldAccessorTable() {
|
|
|
- return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_ClientReadStatusProto_fieldAccessorTable;
|
|
|
- }
|
|
|
-
|
|
|
- private int bitField0_;
|
|
|
- // required .Status status = 1;
|
|
|
- public static final int STATUS_FIELD_NUMBER = 1;
|
|
|
- private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status status_;
|
|
|
- public boolean hasStatus() {
|
|
|
- return ((bitField0_ & 0x00000001) == 0x00000001);
|
|
|
- }
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status getStatus() {
|
|
|
- return status_;
|
|
|
- }
|
|
|
-
|
|
|
- private void initFields() {
|
|
|
- status_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.SUCCESS;
|
|
|
- }
|
|
|
- private byte memoizedIsInitialized = -1;
|
|
|
- public final boolean isInitialized() {
|
|
|
- byte isInitialized = memoizedIsInitialized;
|
|
|
- if (isInitialized != -1) return isInitialized == 1;
|
|
|
-
|
|
|
- if (!hasStatus()) {
|
|
|
- memoizedIsInitialized = 0;
|
|
|
- return false;
|
|
|
- }
|
|
|
- memoizedIsInitialized = 1;
|
|
|
- return true;
|
|
|
- }
|
|
|
-
|
|
|
- public void writeTo(com.google.protobuf.CodedOutputStream output)
|
|
|
- throws java.io.IOException {
|
|
|
- getSerializedSize();
|
|
|
- if (((bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
- output.writeEnum(1, status_.getNumber());
|
|
|
- }
|
|
|
- getUnknownFields().writeTo(output);
|
|
|
- }
|
|
|
-
|
|
|
- private int memoizedSerializedSize = -1;
|
|
|
- public int getSerializedSize() {
|
|
|
- int size = memoizedSerializedSize;
|
|
|
- if (size != -1) return size;
|
|
|
-
|
|
|
- size = 0;
|
|
|
- if (((bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
- size += com.google.protobuf.CodedOutputStream
|
|
|
- .computeEnumSize(1, status_.getNumber());
|
|
|
- }
|
|
|
- size += getUnknownFields().getSerializedSize();
|
|
|
- memoizedSerializedSize = size;
|
|
|
- return size;
|
|
|
- }
|
|
|
-
|
|
|
- private static final long serialVersionUID = 0L;
|
|
|
- @java.lang.Override
|
|
|
- protected java.lang.Object writeReplace()
|
|
|
- throws java.io.ObjectStreamException {
|
|
|
- return super.writeReplace();
|
|
|
- }
|
|
|
-
|
|
|
- @java.lang.Override
|
|
|
- public boolean equals(final java.lang.Object obj) {
|
|
|
- if (obj == this) {
|
|
|
- return true;
|
|
|
- }
|
|
|
- if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto)) {
|
|
|
- return super.equals(obj);
|
|
|
- }
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto other = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto) obj;
|
|
|
-
|
|
|
- boolean result = true;
|
|
|
- result = result && (hasStatus() == other.hasStatus());
|
|
|
- if (hasStatus()) {
|
|
|
- result = result &&
|
|
|
- (getStatus() == other.getStatus());
|
|
|
- }
|
|
|
- result = result &&
|
|
|
- getUnknownFields().equals(other.getUnknownFields());
|
|
|
- return result;
|
|
|
- }
|
|
|
-
|
|
|
- @java.lang.Override
|
|
|
- public int hashCode() {
|
|
|
- int hash = 41;
|
|
|
- hash = (19 * hash) + getDescriptorForType().hashCode();
|
|
|
- if (hasStatus()) {
|
|
|
- hash = (37 * hash) + STATUS_FIELD_NUMBER;
|
|
|
- hash = (53 * hash) + hashEnum(getStatus());
|
|
|
- }
|
|
|
- hash = (29 * hash) + getUnknownFields().hashCode();
|
|
|
- return hash;
|
|
|
- }
|
|
|
-
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto parseFrom(
|
|
|
- com.google.protobuf.ByteString data)
|
|
|
- throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
- return newBuilder().mergeFrom(data).buildParsed();
|
|
|
- }
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto parseFrom(
|
|
|
- com.google.protobuf.ByteString data,
|
|
|
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
- throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
- return newBuilder().mergeFrom(data, extensionRegistry)
|
|
|
- .buildParsed();
|
|
|
- }
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto parseFrom(byte[] data)
|
|
|
- throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
- return newBuilder().mergeFrom(data).buildParsed();
|
|
|
- }
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto parseFrom(
|
|
|
- byte[] data,
|
|
|
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
- throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
- return newBuilder().mergeFrom(data, extensionRegistry)
|
|
|
- .buildParsed();
|
|
|
- }
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto parseFrom(java.io.InputStream input)
|
|
|
- throws java.io.IOException {
|
|
|
- return newBuilder().mergeFrom(input).buildParsed();
|
|
|
- }
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto parseFrom(
|
|
|
- java.io.InputStream input,
|
|
|
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
- throws java.io.IOException {
|
|
|
- return newBuilder().mergeFrom(input, extensionRegistry)
|
|
|
- .buildParsed();
|
|
|
- }
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto parseDelimitedFrom(java.io.InputStream input)
|
|
|
- throws java.io.IOException {
|
|
|
- Builder builder = newBuilder();
|
|
|
- if (builder.mergeDelimitedFrom(input)) {
|
|
|
- return builder.buildParsed();
|
|
|
- } else {
|
|
|
- return null;
|
|
|
- }
|
|
|
- }
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto parseDelimitedFrom(
|
|
|
- java.io.InputStream input,
|
|
|
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
- throws java.io.IOException {
|
|
|
- Builder builder = newBuilder();
|
|
|
- if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
|
|
|
- return builder.buildParsed();
|
|
|
- } else {
|
|
|
- return null;
|
|
|
- }
|
|
|
- }
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto parseFrom(
|
|
|
- com.google.protobuf.CodedInputStream input)
|
|
|
- throws java.io.IOException {
|
|
|
- return newBuilder().mergeFrom(input).buildParsed();
|
|
|
- }
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto parseFrom(
|
|
|
- com.google.protobuf.CodedInputStream input,
|
|
|
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
- throws java.io.IOException {
|
|
|
- return newBuilder().mergeFrom(input, extensionRegistry)
|
|
|
- .buildParsed();
|
|
|
- }
|
|
|
-
|
|
|
- public static Builder newBuilder() { return Builder.create(); }
|
|
|
- public Builder newBuilderForType() { return newBuilder(); }
|
|
|
- public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto prototype) {
|
|
|
- return newBuilder().mergeFrom(prototype);
|
|
|
- }
|
|
|
- public Builder toBuilder() { return newBuilder(this); }
|
|
|
-
|
|
|
- @java.lang.Override
|
|
|
- protected Builder newBuilderForType(
|
|
|
- com.google.protobuf.GeneratedMessage.BuilderParent parent) {
|
|
|
- Builder builder = new Builder(parent);
|
|
|
- return builder;
|
|
|
- }
|
|
|
- public static final class Builder extends
|
|
|
- com.google.protobuf.GeneratedMessage.Builder<Builder>
|
|
|
- implements org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProtoOrBuilder {
|
|
|
- public static final com.google.protobuf.Descriptors.Descriptor
|
|
|
- getDescriptor() {
|
|
|
- return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_ClientReadStatusProto_descriptor;
|
|
|
- }
|
|
|
-
|
|
|
- protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
|
|
- internalGetFieldAccessorTable() {
|
|
|
- return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_ClientReadStatusProto_fieldAccessorTable;
|
|
|
- }
|
|
|
-
|
|
|
- // Construct using org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto.newBuilder()
|
|
|
- private Builder() {
|
|
|
- maybeForceBuilderInitialization();
|
|
|
- }
|
|
|
-
|
|
|
- private Builder(BuilderParent parent) {
|
|
|
- super(parent);
|
|
|
- maybeForceBuilderInitialization();
|
|
|
- }
|
|
|
- private void maybeForceBuilderInitialization() {
|
|
|
- if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
|
|
|
- }
|
|
|
- }
|
|
|
- private static Builder create() {
|
|
|
- return new Builder();
|
|
|
- }
|
|
|
-
|
|
|
- public Builder clear() {
|
|
|
- super.clear();
|
|
|
- status_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.SUCCESS;
|
|
|
- bitField0_ = (bitField0_ & ~0x00000001);
|
|
|
- return this;
|
|
|
- }
|
|
|
-
|
|
|
- public Builder clone() {
|
|
|
- return create().mergeFrom(buildPartial());
|
|
|
- }
|
|
|
-
|
|
|
- public com.google.protobuf.Descriptors.Descriptor
|
|
|
- getDescriptorForType() {
|
|
|
- return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto.getDescriptor();
|
|
|
- }
|
|
|
-
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto getDefaultInstanceForType() {
|
|
|
- return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto.getDefaultInstance();
|
|
|
- }
|
|
|
-
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto build() {
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto result = buildPartial();
|
|
|
- if (!result.isInitialized()) {
|
|
|
- throw newUninitializedMessageException(result);
|
|
|
- }
|
|
|
- return result;
|
|
|
- }
|
|
|
-
|
|
|
- private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto buildParsed()
|
|
|
- throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto result = buildPartial();
|
|
|
- if (!result.isInitialized()) {
|
|
|
- throw newUninitializedMessageException(
|
|
|
- result).asInvalidProtocolBufferException();
|
|
|
- }
|
|
|
- return result;
|
|
|
- }
|
|
|
-
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto buildPartial() {
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto result = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto(this);
|
|
|
- int from_bitField0_ = bitField0_;
|
|
|
- int to_bitField0_ = 0;
|
|
|
- if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
- to_bitField0_ |= 0x00000001;
|
|
|
- }
|
|
|
- result.status_ = status_;
|
|
|
- result.bitField0_ = to_bitField0_;
|
|
|
- onBuilt();
|
|
|
- return result;
|
|
|
- }
|
|
|
-
|
|
|
- public Builder mergeFrom(com.google.protobuf.Message other) {
|
|
|
- if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto) {
|
|
|
- return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto)other);
|
|
|
- } else {
|
|
|
- super.mergeFrom(other);
|
|
|
- return this;
|
|
|
- }
|
|
|
- }
|
|
|
-
|
|
|
- public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto other) {
|
|
|
- if (other == org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto.getDefaultInstance()) return this;
|
|
|
- if (other.hasStatus()) {
|
|
|
- setStatus(other.getStatus());
|
|
|
- }
|
|
|
- this.mergeUnknownFields(other.getUnknownFields());
|
|
|
- return this;
|
|
|
- }
|
|
|
-
|
|
|
- public final boolean isInitialized() {
|
|
|
- if (!hasStatus()) {
|
|
|
-
|
|
|
- return false;
|
|
|
- }
|
|
|
- return true;
|
|
|
- }
|
|
|
-
|
|
|
- public Builder mergeFrom(
|
|
|
- com.google.protobuf.CodedInputStream input,
|
|
|
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
- throws java.io.IOException {
|
|
|
- com.google.protobuf.UnknownFieldSet.Builder unknownFields =
|
|
|
- com.google.protobuf.UnknownFieldSet.newBuilder(
|
|
|
- this.getUnknownFields());
|
|
|
- while (true) {
|
|
|
- int tag = input.readTag();
|
|
|
- switch (tag) {
|
|
|
- case 0:
|
|
|
- this.setUnknownFields(unknownFields.build());
|
|
|
- onChanged();
|
|
|
- return this;
|
|
|
- default: {
|
|
|
- if (!parseUnknownField(input, unknownFields,
|
|
|
- extensionRegistry, tag)) {
|
|
|
- this.setUnknownFields(unknownFields.build());
|
|
|
- onChanged();
|
|
|
- return this;
|
|
|
- }
|
|
|
- break;
|
|
|
- }
|
|
|
- case 8: {
|
|
|
- int rawValue = input.readEnum();
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status value = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.valueOf(rawValue);
|
|
|
- if (value == null) {
|
|
|
- unknownFields.mergeVarintField(1, rawValue);
|
|
|
- } else {
|
|
|
- bitField0_ |= 0x00000001;
|
|
|
- status_ = value;
|
|
|
- }
|
|
|
- break;
|
|
|
- }
|
|
|
- }
|
|
|
- }
|
|
|
- }
|
|
|
-
|
|
|
- private int bitField0_;
|
|
|
-
|
|
|
- // required .Status status = 1;
|
|
|
- private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status status_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.SUCCESS;
|
|
|
- public boolean hasStatus() {
|
|
|
- return ((bitField0_ & 0x00000001) == 0x00000001);
|
|
|
- }
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status getStatus() {
|
|
|
- return status_;
|
|
|
- }
|
|
|
- public Builder setStatus(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status value) {
|
|
|
- if (value == null) {
|
|
|
- throw new NullPointerException();
|
|
|
- }
|
|
|
- bitField0_ |= 0x00000001;
|
|
|
- status_ = value;
|
|
|
- onChanged();
|
|
|
- return this;
|
|
|
- }
|
|
|
- public Builder clearStatus() {
|
|
|
- bitField0_ = (bitField0_ & ~0x00000001);
|
|
|
- status_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.SUCCESS;
|
|
|
- onChanged();
|
|
|
- return this;
|
|
|
- }
|
|
|
-
|
|
|
- // @@protoc_insertion_point(builder_scope:ClientReadStatusProto)
|
|
|
- }
|
|
|
-
|
|
|
- static {
|
|
|
- defaultInstance = new ClientReadStatusProto(true);
|
|
|
- defaultInstance.initFields();
|
|
|
- }
|
|
|
-
|
|
|
- // @@protoc_insertion_point(class_scope:ClientReadStatusProto)
|
|
|
- }
|
|
|
-
|
|
|
- public interface DNTransferAckProtoOrBuilder
|
|
|
- extends com.google.protobuf.MessageOrBuilder {
|
|
|
-
|
|
|
- // required .Status status = 1;
|
|
|
- boolean hasStatus();
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status getStatus();
|
|
|
- }
|
|
|
- public static final class DNTransferAckProto extends
|
|
|
- com.google.protobuf.GeneratedMessage
|
|
|
- implements DNTransferAckProtoOrBuilder {
|
|
|
- // Use DNTransferAckProto.newBuilder() to construct.
|
|
|
- private DNTransferAckProto(Builder builder) {
|
|
|
- super(builder);
|
|
|
- }
|
|
|
- private DNTransferAckProto(boolean noInit) {}
|
|
|
-
|
|
|
- private static final DNTransferAckProto defaultInstance;
|
|
|
- public static DNTransferAckProto getDefaultInstance() {
|
|
|
- return defaultInstance;
|
|
|
- }
|
|
|
-
|
|
|
- public DNTransferAckProto getDefaultInstanceForType() {
|
|
|
- return defaultInstance;
|
|
|
- }
|
|
|
-
|
|
|
- public static final com.google.protobuf.Descriptors.Descriptor
|
|
|
- getDescriptor() {
|
|
|
- return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_DNTransferAckProto_descriptor;
|
|
|
- }
|
|
|
-
|
|
|
- protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
|
|
- internalGetFieldAccessorTable() {
|
|
|
- return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_DNTransferAckProto_fieldAccessorTable;
|
|
|
- }
|
|
|
-
|
|
|
- private int bitField0_;
|
|
|
- // required .Status status = 1;
|
|
|
- public static final int STATUS_FIELD_NUMBER = 1;
|
|
|
- private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status status_;
|
|
|
- public boolean hasStatus() {
|
|
|
- return ((bitField0_ & 0x00000001) == 0x00000001);
|
|
|
- }
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status getStatus() {
|
|
|
- return status_;
|
|
|
- }
|
|
|
-
|
|
|
- private void initFields() {
|
|
|
- status_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.SUCCESS;
|
|
|
- }
|
|
|
- private byte memoizedIsInitialized = -1;
|
|
|
- public final boolean isInitialized() {
|
|
|
- byte isInitialized = memoizedIsInitialized;
|
|
|
- if (isInitialized != -1) return isInitialized == 1;
|
|
|
-
|
|
|
- if (!hasStatus()) {
|
|
|
- memoizedIsInitialized = 0;
|
|
|
- return false;
|
|
|
- }
|
|
|
- memoizedIsInitialized = 1;
|
|
|
- return true;
|
|
|
- }
|
|
|
-
|
|
|
- public void writeTo(com.google.protobuf.CodedOutputStream output)
|
|
|
- throws java.io.IOException {
|
|
|
- getSerializedSize();
|
|
|
- if (((bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
- output.writeEnum(1, status_.getNumber());
|
|
|
- }
|
|
|
- getUnknownFields().writeTo(output);
|
|
|
- }
|
|
|
-
|
|
|
- private int memoizedSerializedSize = -1;
|
|
|
- public int getSerializedSize() {
|
|
|
- int size = memoizedSerializedSize;
|
|
|
- if (size != -1) return size;
|
|
|
-
|
|
|
- size = 0;
|
|
|
- if (((bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
- size += com.google.protobuf.CodedOutputStream
|
|
|
- .computeEnumSize(1, status_.getNumber());
|
|
|
- }
|
|
|
- size += getUnknownFields().getSerializedSize();
|
|
|
- memoizedSerializedSize = size;
|
|
|
- return size;
|
|
|
- }
|
|
|
-
|
|
|
- private static final long serialVersionUID = 0L;
|
|
|
- @java.lang.Override
|
|
|
- protected java.lang.Object writeReplace()
|
|
|
- throws java.io.ObjectStreamException {
|
|
|
- return super.writeReplace();
|
|
|
- }
|
|
|
-
|
|
|
- @java.lang.Override
|
|
|
- public boolean equals(final java.lang.Object obj) {
|
|
|
- if (obj == this) {
|
|
|
- return true;
|
|
|
- }
|
|
|
- if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto)) {
|
|
|
- return super.equals(obj);
|
|
|
- }
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto other = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto) obj;
|
|
|
-
|
|
|
- boolean result = true;
|
|
|
- result = result && (hasStatus() == other.hasStatus());
|
|
|
- if (hasStatus()) {
|
|
|
- result = result &&
|
|
|
- (getStatus() == other.getStatus());
|
|
|
- }
|
|
|
- result = result &&
|
|
|
- getUnknownFields().equals(other.getUnknownFields());
|
|
|
- return result;
|
|
|
- }
|
|
|
-
|
|
|
- @java.lang.Override
|
|
|
- public int hashCode() {
|
|
|
- int hash = 41;
|
|
|
- hash = (19 * hash) + getDescriptorForType().hashCode();
|
|
|
- if (hasStatus()) {
|
|
|
- hash = (37 * hash) + STATUS_FIELD_NUMBER;
|
|
|
- hash = (53 * hash) + hashEnum(getStatus());
|
|
|
- }
|
|
|
- hash = (29 * hash) + getUnknownFields().hashCode();
|
|
|
- return hash;
|
|
|
- }
|
|
|
-
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto parseFrom(
|
|
|
- com.google.protobuf.ByteString data)
|
|
|
- throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
- return newBuilder().mergeFrom(data).buildParsed();
|
|
|
- }
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto parseFrom(
|
|
|
- com.google.protobuf.ByteString data,
|
|
|
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
- throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
- return newBuilder().mergeFrom(data, extensionRegistry)
|
|
|
- .buildParsed();
|
|
|
- }
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto parseFrom(byte[] data)
|
|
|
- throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
- return newBuilder().mergeFrom(data).buildParsed();
|
|
|
- }
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto parseFrom(
|
|
|
- byte[] data,
|
|
|
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
- throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
- return newBuilder().mergeFrom(data, extensionRegistry)
|
|
|
- .buildParsed();
|
|
|
- }
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto parseFrom(java.io.InputStream input)
|
|
|
- throws java.io.IOException {
|
|
|
- return newBuilder().mergeFrom(input).buildParsed();
|
|
|
- }
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto parseFrom(
|
|
|
- java.io.InputStream input,
|
|
|
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
- throws java.io.IOException {
|
|
|
- return newBuilder().mergeFrom(input, extensionRegistry)
|
|
|
- .buildParsed();
|
|
|
- }
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto parseDelimitedFrom(java.io.InputStream input)
|
|
|
- throws java.io.IOException {
|
|
|
- Builder builder = newBuilder();
|
|
|
- if (builder.mergeDelimitedFrom(input)) {
|
|
|
- return builder.buildParsed();
|
|
|
- } else {
|
|
|
- return null;
|
|
|
- }
|
|
|
- }
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto parseDelimitedFrom(
|
|
|
- java.io.InputStream input,
|
|
|
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
- throws java.io.IOException {
|
|
|
- Builder builder = newBuilder();
|
|
|
- if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
|
|
|
- return builder.buildParsed();
|
|
|
- } else {
|
|
|
- return null;
|
|
|
- }
|
|
|
- }
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto parseFrom(
|
|
|
- com.google.protobuf.CodedInputStream input)
|
|
|
- throws java.io.IOException {
|
|
|
- return newBuilder().mergeFrom(input).buildParsed();
|
|
|
- }
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto parseFrom(
|
|
|
- com.google.protobuf.CodedInputStream input,
|
|
|
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
- throws java.io.IOException {
|
|
|
- return newBuilder().mergeFrom(input, extensionRegistry)
|
|
|
- .buildParsed();
|
|
|
- }
|
|
|
-
|
|
|
- public static Builder newBuilder() { return Builder.create(); }
|
|
|
- public Builder newBuilderForType() { return newBuilder(); }
|
|
|
- public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto prototype) {
|
|
|
- return newBuilder().mergeFrom(prototype);
|
|
|
- }
|
|
|
- public Builder toBuilder() { return newBuilder(this); }
|
|
|
-
|
|
|
- @java.lang.Override
|
|
|
- protected Builder newBuilderForType(
|
|
|
- com.google.protobuf.GeneratedMessage.BuilderParent parent) {
|
|
|
- Builder builder = new Builder(parent);
|
|
|
- return builder;
|
|
|
- }
|
|
|
- public static final class Builder extends
|
|
|
- com.google.protobuf.GeneratedMessage.Builder<Builder>
|
|
|
- implements org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProtoOrBuilder {
|
|
|
- public static final com.google.protobuf.Descriptors.Descriptor
|
|
|
- getDescriptor() {
|
|
|
- return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_DNTransferAckProto_descriptor;
|
|
|
- }
|
|
|
-
|
|
|
- protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
|
|
- internalGetFieldAccessorTable() {
|
|
|
- return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_DNTransferAckProto_fieldAccessorTable;
|
|
|
- }
|
|
|
-
|
|
|
- // Construct using org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto.newBuilder()
|
|
|
- private Builder() {
|
|
|
- maybeForceBuilderInitialization();
|
|
|
- }
|
|
|
-
|
|
|
- private Builder(BuilderParent parent) {
|
|
|
- super(parent);
|
|
|
- maybeForceBuilderInitialization();
|
|
|
- }
|
|
|
- private void maybeForceBuilderInitialization() {
|
|
|
- if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
|
|
|
- }
|
|
|
- }
|
|
|
- private static Builder create() {
|
|
|
- return new Builder();
|
|
|
- }
|
|
|
-
|
|
|
- public Builder clear() {
|
|
|
- super.clear();
|
|
|
- status_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.SUCCESS;
|
|
|
- bitField0_ = (bitField0_ & ~0x00000001);
|
|
|
- return this;
|
|
|
- }
|
|
|
-
|
|
|
- public Builder clone() {
|
|
|
- return create().mergeFrom(buildPartial());
|
|
|
- }
|
|
|
-
|
|
|
- public com.google.protobuf.Descriptors.Descriptor
|
|
|
- getDescriptorForType() {
|
|
|
- return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto.getDescriptor();
|
|
|
- }
|
|
|
-
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto getDefaultInstanceForType() {
|
|
|
- return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto.getDefaultInstance();
|
|
|
- }
|
|
|
-
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto build() {
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto result = buildPartial();
|
|
|
- if (!result.isInitialized()) {
|
|
|
- throw newUninitializedMessageException(result);
|
|
|
- }
|
|
|
- return result;
|
|
|
- }
|
|
|
-
|
|
|
- private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto buildParsed()
|
|
|
- throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto result = buildPartial();
|
|
|
- if (!result.isInitialized()) {
|
|
|
- throw newUninitializedMessageException(
|
|
|
- result).asInvalidProtocolBufferException();
|
|
|
- }
|
|
|
- return result;
|
|
|
- }
|
|
|
-
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto buildPartial() {
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto result = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto(this);
|
|
|
- int from_bitField0_ = bitField0_;
|
|
|
- int to_bitField0_ = 0;
|
|
|
- if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
- to_bitField0_ |= 0x00000001;
|
|
|
- }
|
|
|
- result.status_ = status_;
|
|
|
- result.bitField0_ = to_bitField0_;
|
|
|
- onBuilt();
|
|
|
- return result;
|
|
|
- }
|
|
|
-
|
|
|
- public Builder mergeFrom(com.google.protobuf.Message other) {
|
|
|
- if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto) {
|
|
|
- return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto)other);
|
|
|
- } else {
|
|
|
- super.mergeFrom(other);
|
|
|
- return this;
|
|
|
- }
|
|
|
- }
|
|
|
-
|
|
|
- public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto other) {
|
|
|
- if (other == org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto.getDefaultInstance()) return this;
|
|
|
- if (other.hasStatus()) {
|
|
|
- setStatus(other.getStatus());
|
|
|
- }
|
|
|
- this.mergeUnknownFields(other.getUnknownFields());
|
|
|
- return this;
|
|
|
- }
|
|
|
-
|
|
|
- public final boolean isInitialized() {
|
|
|
- if (!hasStatus()) {
|
|
|
-
|
|
|
- return false;
|
|
|
- }
|
|
|
- return true;
|
|
|
- }
|
|
|
-
|
|
|
- public Builder mergeFrom(
|
|
|
- com.google.protobuf.CodedInputStream input,
|
|
|
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
- throws java.io.IOException {
|
|
|
- com.google.protobuf.UnknownFieldSet.Builder unknownFields =
|
|
|
- com.google.protobuf.UnknownFieldSet.newBuilder(
|
|
|
- this.getUnknownFields());
|
|
|
- while (true) {
|
|
|
- int tag = input.readTag();
|
|
|
- switch (tag) {
|
|
|
- case 0:
|
|
|
- this.setUnknownFields(unknownFields.build());
|
|
|
- onChanged();
|
|
|
- return this;
|
|
|
- default: {
|
|
|
- if (!parseUnknownField(input, unknownFields,
|
|
|
- extensionRegistry, tag)) {
|
|
|
- this.setUnknownFields(unknownFields.build());
|
|
|
- onChanged();
|
|
|
- return this;
|
|
|
- }
|
|
|
- break;
|
|
|
- }
|
|
|
- case 8: {
|
|
|
- int rawValue = input.readEnum();
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status value = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.valueOf(rawValue);
|
|
|
- if (value == null) {
|
|
|
- unknownFields.mergeVarintField(1, rawValue);
|
|
|
- } else {
|
|
|
- bitField0_ |= 0x00000001;
|
|
|
- status_ = value;
|
|
|
- }
|
|
|
- break;
|
|
|
- }
|
|
|
- }
|
|
|
- }
|
|
|
- }
|
|
|
-
|
|
|
- private int bitField0_;
|
|
|
-
|
|
|
- // required .Status status = 1;
|
|
|
- private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status status_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.SUCCESS;
|
|
|
- public boolean hasStatus() {
|
|
|
- return ((bitField0_ & 0x00000001) == 0x00000001);
|
|
|
- }
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status getStatus() {
|
|
|
- return status_;
|
|
|
- }
|
|
|
- public Builder setStatus(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status value) {
|
|
|
- if (value == null) {
|
|
|
- throw new NullPointerException();
|
|
|
- }
|
|
|
- bitField0_ |= 0x00000001;
|
|
|
- status_ = value;
|
|
|
- onChanged();
|
|
|
- return this;
|
|
|
- }
|
|
|
- public Builder clearStatus() {
|
|
|
- bitField0_ = (bitField0_ & ~0x00000001);
|
|
|
- status_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.SUCCESS;
|
|
|
- onChanged();
|
|
|
- return this;
|
|
|
- }
|
|
|
-
|
|
|
- // @@protoc_insertion_point(builder_scope:DNTransferAckProto)
|
|
|
- }
|
|
|
-
|
|
|
- static {
|
|
|
- defaultInstance = new DNTransferAckProto(true);
|
|
|
- defaultInstance.initFields();
|
|
|
- }
|
|
|
-
|
|
|
- // @@protoc_insertion_point(class_scope:DNTransferAckProto)
|
|
|
- }
|
|
|
-
|
|
|
- public interface OpBlockChecksumResponseProtoOrBuilder
|
|
|
- extends com.google.protobuf.MessageOrBuilder {
|
|
|
-
|
|
|
- // required uint32 bytesPerCrc = 1;
|
|
|
- boolean hasBytesPerCrc();
|
|
|
- int getBytesPerCrc();
|
|
|
-
|
|
|
- // required uint64 crcPerBlock = 2;
|
|
|
- boolean hasCrcPerBlock();
|
|
|
- long getCrcPerBlock();
|
|
|
-
|
|
|
- // required bytes md5 = 3;
|
|
|
- boolean hasMd5();
|
|
|
- com.google.protobuf.ByteString getMd5();
|
|
|
- }
|
|
|
- public static final class OpBlockChecksumResponseProto extends
|
|
|
- com.google.protobuf.GeneratedMessage
|
|
|
- implements OpBlockChecksumResponseProtoOrBuilder {
|
|
|
- // Use OpBlockChecksumResponseProto.newBuilder() to construct.
|
|
|
- private OpBlockChecksumResponseProto(Builder builder) {
|
|
|
- super(builder);
|
|
|
- }
|
|
|
- private OpBlockChecksumResponseProto(boolean noInit) {}
|
|
|
-
|
|
|
- private static final OpBlockChecksumResponseProto defaultInstance;
|
|
|
- public static OpBlockChecksumResponseProto getDefaultInstance() {
|
|
|
- return defaultInstance;
|
|
|
- }
|
|
|
-
|
|
|
- public OpBlockChecksumResponseProto getDefaultInstanceForType() {
|
|
|
- return defaultInstance;
|
|
|
- }
|
|
|
-
|
|
|
- public static final com.google.protobuf.Descriptors.Descriptor
|
|
|
- getDescriptor() {
|
|
|
- return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_OpBlockChecksumResponseProto_descriptor;
|
|
|
- }
|
|
|
-
|
|
|
- protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
|
|
- internalGetFieldAccessorTable() {
|
|
|
- return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_OpBlockChecksumResponseProto_fieldAccessorTable;
|
|
|
- }
|
|
|
-
|
|
|
- private int bitField0_;
|
|
|
- // required uint32 bytesPerCrc = 1;
|
|
|
- public static final int BYTESPERCRC_FIELD_NUMBER = 1;
|
|
|
- private int bytesPerCrc_;
|
|
|
- public boolean hasBytesPerCrc() {
|
|
|
- return ((bitField0_ & 0x00000001) == 0x00000001);
|
|
|
- }
|
|
|
- public int getBytesPerCrc() {
|
|
|
- return bytesPerCrc_;
|
|
|
- }
|
|
|
-
|
|
|
- // required uint64 crcPerBlock = 2;
|
|
|
- public static final int CRCPERBLOCK_FIELD_NUMBER = 2;
|
|
|
- private long crcPerBlock_;
|
|
|
- public boolean hasCrcPerBlock() {
|
|
|
- return ((bitField0_ & 0x00000002) == 0x00000002);
|
|
|
- }
|
|
|
- public long getCrcPerBlock() {
|
|
|
- return crcPerBlock_;
|
|
|
- }
|
|
|
-
|
|
|
- // required bytes md5 = 3;
|
|
|
- public static final int MD5_FIELD_NUMBER = 3;
|
|
|
- private com.google.protobuf.ByteString md5_;
|
|
|
- public boolean hasMd5() {
|
|
|
- return ((bitField0_ & 0x00000004) == 0x00000004);
|
|
|
- }
|
|
|
- public com.google.protobuf.ByteString getMd5() {
|
|
|
- return md5_;
|
|
|
- }
|
|
|
-
|
|
|
- private void initFields() {
|
|
|
- bytesPerCrc_ = 0;
|
|
|
- crcPerBlock_ = 0L;
|
|
|
- md5_ = com.google.protobuf.ByteString.EMPTY;
|
|
|
- }
|
|
|
- private byte memoizedIsInitialized = -1;
|
|
|
- public final boolean isInitialized() {
|
|
|
- byte isInitialized = memoizedIsInitialized;
|
|
|
- if (isInitialized != -1) return isInitialized == 1;
|
|
|
-
|
|
|
- if (!hasBytesPerCrc()) {
|
|
|
- memoizedIsInitialized = 0;
|
|
|
- return false;
|
|
|
- }
|
|
|
- if (!hasCrcPerBlock()) {
|
|
|
- memoizedIsInitialized = 0;
|
|
|
- return false;
|
|
|
- }
|
|
|
- if (!hasMd5()) {
|
|
|
- memoizedIsInitialized = 0;
|
|
|
- return false;
|
|
|
- }
|
|
|
- memoizedIsInitialized = 1;
|
|
|
- return true;
|
|
|
- }
|
|
|
-
|
|
|
- public void writeTo(com.google.protobuf.CodedOutputStream output)
|
|
|
- throws java.io.IOException {
|
|
|
- getSerializedSize();
|
|
|
- if (((bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
- output.writeUInt32(1, bytesPerCrc_);
|
|
|
- }
|
|
|
- if (((bitField0_ & 0x00000002) == 0x00000002)) {
|
|
|
- output.writeUInt64(2, crcPerBlock_);
|
|
|
- }
|
|
|
- if (((bitField0_ & 0x00000004) == 0x00000004)) {
|
|
|
- output.writeBytes(3, md5_);
|
|
|
- }
|
|
|
- getUnknownFields().writeTo(output);
|
|
|
- }
|
|
|
-
|
|
|
- private int memoizedSerializedSize = -1;
|
|
|
- public int getSerializedSize() {
|
|
|
- int size = memoizedSerializedSize;
|
|
|
- if (size != -1) return size;
|
|
|
-
|
|
|
- size = 0;
|
|
|
- if (((bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
- size += com.google.protobuf.CodedOutputStream
|
|
|
- .computeUInt32Size(1, bytesPerCrc_);
|
|
|
- }
|
|
|
- if (((bitField0_ & 0x00000002) == 0x00000002)) {
|
|
|
- size += com.google.protobuf.CodedOutputStream
|
|
|
- .computeUInt64Size(2, crcPerBlock_);
|
|
|
- }
|
|
|
- if (((bitField0_ & 0x00000004) == 0x00000004)) {
|
|
|
- size += com.google.protobuf.CodedOutputStream
|
|
|
- .computeBytesSize(3, md5_);
|
|
|
- }
|
|
|
- size += getUnknownFields().getSerializedSize();
|
|
|
- memoizedSerializedSize = size;
|
|
|
- return size;
|
|
|
- }
|
|
|
-
|
|
|
- private static final long serialVersionUID = 0L;
|
|
|
- @java.lang.Override
|
|
|
- protected java.lang.Object writeReplace()
|
|
|
- throws java.io.ObjectStreamException {
|
|
|
- return super.writeReplace();
|
|
|
- }
|
|
|
-
|
|
|
- @java.lang.Override
|
|
|
- public boolean equals(final java.lang.Object obj) {
|
|
|
- if (obj == this) {
|
|
|
- return true;
|
|
|
- }
|
|
|
- if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto)) {
|
|
|
- return super.equals(obj);
|
|
|
- }
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto) obj;
|
|
|
-
|
|
|
- boolean result = true;
|
|
|
- result = result && (hasBytesPerCrc() == other.hasBytesPerCrc());
|
|
|
- if (hasBytesPerCrc()) {
|
|
|
- result = result && (getBytesPerCrc()
|
|
|
- == other.getBytesPerCrc());
|
|
|
- }
|
|
|
- result = result && (hasCrcPerBlock() == other.hasCrcPerBlock());
|
|
|
- if (hasCrcPerBlock()) {
|
|
|
- result = result && (getCrcPerBlock()
|
|
|
- == other.getCrcPerBlock());
|
|
|
- }
|
|
|
- result = result && (hasMd5() == other.hasMd5());
|
|
|
- if (hasMd5()) {
|
|
|
- result = result && getMd5()
|
|
|
- .equals(other.getMd5());
|
|
|
- }
|
|
|
- result = result &&
|
|
|
- getUnknownFields().equals(other.getUnknownFields());
|
|
|
- return result;
|
|
|
- }
|
|
|
-
|
|
|
- @java.lang.Override
|
|
|
- public int hashCode() {
|
|
|
- int hash = 41;
|
|
|
- hash = (19 * hash) + getDescriptorForType().hashCode();
|
|
|
- if (hasBytesPerCrc()) {
|
|
|
- hash = (37 * hash) + BYTESPERCRC_FIELD_NUMBER;
|
|
|
- hash = (53 * hash) + getBytesPerCrc();
|
|
|
- }
|
|
|
- if (hasCrcPerBlock()) {
|
|
|
- hash = (37 * hash) + CRCPERBLOCK_FIELD_NUMBER;
|
|
|
- hash = (53 * hash) + hashLong(getCrcPerBlock());
|
|
|
- }
|
|
|
- if (hasMd5()) {
|
|
|
- hash = (37 * hash) + MD5_FIELD_NUMBER;
|
|
|
- hash = (53 * hash) + getMd5().hashCode();
|
|
|
- }
|
|
|
- hash = (29 * hash) + getUnknownFields().hashCode();
|
|
|
- return hash;
|
|
|
- }
|
|
|
-
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto parseFrom(
|
|
|
- com.google.protobuf.ByteString data)
|
|
|
- throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
- return newBuilder().mergeFrom(data).buildParsed();
|
|
|
- }
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto parseFrom(
|
|
|
- com.google.protobuf.ByteString data,
|
|
|
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
- throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
- return newBuilder().mergeFrom(data, extensionRegistry)
|
|
|
- .buildParsed();
|
|
|
- }
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto parseFrom(byte[] data)
|
|
|
- throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
- return newBuilder().mergeFrom(data).buildParsed();
|
|
|
- }
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto parseFrom(
|
|
|
- byte[] data,
|
|
|
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
- throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
- return newBuilder().mergeFrom(data, extensionRegistry)
|
|
|
- .buildParsed();
|
|
|
- }
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto parseFrom(java.io.InputStream input)
|
|
|
- throws java.io.IOException {
|
|
|
- return newBuilder().mergeFrom(input).buildParsed();
|
|
|
- }
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto parseFrom(
|
|
|
- java.io.InputStream input,
|
|
|
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
- throws java.io.IOException {
|
|
|
- return newBuilder().mergeFrom(input, extensionRegistry)
|
|
|
- .buildParsed();
|
|
|
- }
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto parseDelimitedFrom(java.io.InputStream input)
|
|
|
- throws java.io.IOException {
|
|
|
- Builder builder = newBuilder();
|
|
|
- if (builder.mergeDelimitedFrom(input)) {
|
|
|
- return builder.buildParsed();
|
|
|
- } else {
|
|
|
- return null;
|
|
|
- }
|
|
|
- }
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto parseDelimitedFrom(
|
|
|
- java.io.InputStream input,
|
|
|
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
- throws java.io.IOException {
|
|
|
- Builder builder = newBuilder();
|
|
|
- if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
|
|
|
- return builder.buildParsed();
|
|
|
- } else {
|
|
|
- return null;
|
|
|
- }
|
|
|
- }
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto parseFrom(
|
|
|
- com.google.protobuf.CodedInputStream input)
|
|
|
- throws java.io.IOException {
|
|
|
- return newBuilder().mergeFrom(input).buildParsed();
|
|
|
- }
|
|
|
- public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto parseFrom(
|
|
|
- com.google.protobuf.CodedInputStream input,
|
|
|
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
- throws java.io.IOException {
|
|
|
- return newBuilder().mergeFrom(input, extensionRegistry)
|
|
|
- .buildParsed();
|
|
|
- }
|
|
|
-
|
|
|
- public static Builder newBuilder() { return Builder.create(); }
|
|
|
- public Builder newBuilderForType() { return newBuilder(); }
|
|
|
- public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto prototype) {
|
|
|
- return newBuilder().mergeFrom(prototype);
|
|
|
- }
|
|
|
- public Builder toBuilder() { return newBuilder(this); }
|
|
|
-
|
|
|
- @java.lang.Override
|
|
|
- protected Builder newBuilderForType(
|
|
|
- com.google.protobuf.GeneratedMessage.BuilderParent parent) {
|
|
|
- Builder builder = new Builder(parent);
|
|
|
- return builder;
|
|
|
- }
|
|
|
- public static final class Builder extends
|
|
|
- com.google.protobuf.GeneratedMessage.Builder<Builder>
|
|
|
- implements org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProtoOrBuilder {
|
|
|
- public static final com.google.protobuf.Descriptors.Descriptor
|
|
|
- getDescriptor() {
|
|
|
- return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_OpBlockChecksumResponseProto_descriptor;
|
|
|
- }
|
|
|
-
|
|
|
- protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
|
|
- internalGetFieldAccessorTable() {
|
|
|
- return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_OpBlockChecksumResponseProto_fieldAccessorTable;
|
|
|
- }
|
|
|
-
|
|
|
- // Construct using org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto.newBuilder()
|
|
|
- private Builder() {
|
|
|
- maybeForceBuilderInitialization();
|
|
|
- }
|
|
|
-
|
|
|
- private Builder(BuilderParent parent) {
|
|
|
- super(parent);
|
|
|
- maybeForceBuilderInitialization();
|
|
|
- }
|
|
|
- private void maybeForceBuilderInitialization() {
|
|
|
- if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
|
|
|
- }
|
|
|
- }
|
|
|
- private static Builder create() {
|
|
|
- return new Builder();
|
|
|
- }
|
|
|
-
|
|
|
- public Builder clear() {
|
|
|
- super.clear();
|
|
|
- bytesPerCrc_ = 0;
|
|
|
- bitField0_ = (bitField0_ & ~0x00000001);
|
|
|
- crcPerBlock_ = 0L;
|
|
|
- bitField0_ = (bitField0_ & ~0x00000002);
|
|
|
- md5_ = com.google.protobuf.ByteString.EMPTY;
|
|
|
- bitField0_ = (bitField0_ & ~0x00000004);
|
|
|
- return this;
|
|
|
- }
|
|
|
-
|
|
|
- public Builder clone() {
|
|
|
- return create().mergeFrom(buildPartial());
|
|
|
- }
|
|
|
-
|
|
|
- public com.google.protobuf.Descriptors.Descriptor
|
|
|
- getDescriptorForType() {
|
|
|
- return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto.getDescriptor();
|
|
|
- }
|
|
|
-
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto getDefaultInstanceForType() {
|
|
|
- return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto.getDefaultInstance();
|
|
|
- }
|
|
|
-
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto build() {
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto result = buildPartial();
|
|
|
- if (!result.isInitialized()) {
|
|
|
- throw newUninitializedMessageException(result);
|
|
|
- }
|
|
|
- return result;
|
|
|
- }
|
|
|
-
|
|
|
- private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto buildParsed()
|
|
|
- throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto result = buildPartial();
|
|
|
- if (!result.isInitialized()) {
|
|
|
- throw newUninitializedMessageException(
|
|
|
- result).asInvalidProtocolBufferException();
|
|
|
- }
|
|
|
- return result;
|
|
|
- }
|
|
|
-
|
|
|
- public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto buildPartial() {
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto(this);
|
|
|
- int from_bitField0_ = bitField0_;
|
|
|
- int to_bitField0_ = 0;
|
|
|
- if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
- to_bitField0_ |= 0x00000001;
|
|
|
- }
|
|
|
- result.bytesPerCrc_ = bytesPerCrc_;
|
|
|
- if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
|
|
|
- to_bitField0_ |= 0x00000002;
|
|
|
- }
|
|
|
- result.crcPerBlock_ = crcPerBlock_;
|
|
|
- if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
|
|
|
- to_bitField0_ |= 0x00000004;
|
|
|
- }
|
|
|
- result.md5_ = md5_;
|
|
|
- result.bitField0_ = to_bitField0_;
|
|
|
- onBuilt();
|
|
|
- return result;
|
|
|
- }
|
|
|
-
|
|
|
- public Builder mergeFrom(com.google.protobuf.Message other) {
|
|
|
- if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto) {
|
|
|
- return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto)other);
|
|
|
- } else {
|
|
|
- super.mergeFrom(other);
|
|
|
- return this;
|
|
|
- }
|
|
|
- }
|
|
|
-
|
|
|
- public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto other) {
|
|
|
- if (other == org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto.getDefaultInstance()) return this;
|
|
|
- if (other.hasBytesPerCrc()) {
|
|
|
- setBytesPerCrc(other.getBytesPerCrc());
|
|
|
- }
|
|
|
- if (other.hasCrcPerBlock()) {
|
|
|
- setCrcPerBlock(other.getCrcPerBlock());
|
|
|
- }
|
|
|
- if (other.hasMd5()) {
|
|
|
- setMd5(other.getMd5());
|
|
|
- }
|
|
|
- this.mergeUnknownFields(other.getUnknownFields());
|
|
|
- return this;
|
|
|
- }
|
|
|
-
|
|
|
- public final boolean isInitialized() {
|
|
|
- if (!hasBytesPerCrc()) {
|
|
|
-
|
|
|
- return false;
|
|
|
- }
|
|
|
- if (!hasCrcPerBlock()) {
|
|
|
-
|
|
|
- return false;
|
|
|
- }
|
|
|
- if (!hasMd5()) {
|
|
|
-
|
|
|
- return false;
|
|
|
- }
|
|
|
- return true;
|
|
|
- }
|
|
|
-
|
|
|
- public Builder mergeFrom(
|
|
|
- com.google.protobuf.CodedInputStream input,
|
|
|
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
- throws java.io.IOException {
|
|
|
- com.google.protobuf.UnknownFieldSet.Builder unknownFields =
|
|
|
- com.google.protobuf.UnknownFieldSet.newBuilder(
|
|
|
- this.getUnknownFields());
|
|
|
- while (true) {
|
|
|
- int tag = input.readTag();
|
|
|
- switch (tag) {
|
|
|
- case 0:
|
|
|
- this.setUnknownFields(unknownFields.build());
|
|
|
- onChanged();
|
|
|
- return this;
|
|
|
- default: {
|
|
|
- if (!parseUnknownField(input, unknownFields,
|
|
|
- extensionRegistry, tag)) {
|
|
|
- this.setUnknownFields(unknownFields.build());
|
|
|
- onChanged();
|
|
|
- return this;
|
|
|
- }
|
|
|
- break;
|
|
|
- }
|
|
|
- case 8: {
|
|
|
- bitField0_ |= 0x00000001;
|
|
|
- bytesPerCrc_ = input.readUInt32();
|
|
|
- break;
|
|
|
- }
|
|
|
- case 16: {
|
|
|
- bitField0_ |= 0x00000002;
|
|
|
- crcPerBlock_ = input.readUInt64();
|
|
|
- break;
|
|
|
- }
|
|
|
- case 26: {
|
|
|
- bitField0_ |= 0x00000004;
|
|
|
- md5_ = input.readBytes();
|
|
|
- break;
|
|
|
- }
|
|
|
- }
|
|
|
- }
|
|
|
- }
|
|
|
-
|
|
|
- private int bitField0_;
|
|
|
-
|
|
|
- // required uint32 bytesPerCrc = 1;
|
|
|
- private int bytesPerCrc_ ;
|
|
|
- public boolean hasBytesPerCrc() {
|
|
|
- return ((bitField0_ & 0x00000001) == 0x00000001);
|
|
|
- }
|
|
|
- public int getBytesPerCrc() {
|
|
|
- return bytesPerCrc_;
|
|
|
- }
|
|
|
- public Builder setBytesPerCrc(int value) {
|
|
|
- bitField0_ |= 0x00000001;
|
|
|
- bytesPerCrc_ = value;
|
|
|
- onChanged();
|
|
|
- return this;
|
|
|
- }
|
|
|
- public Builder clearBytesPerCrc() {
|
|
|
- bitField0_ = (bitField0_ & ~0x00000001);
|
|
|
- bytesPerCrc_ = 0;
|
|
|
- onChanged();
|
|
|
- return this;
|
|
|
- }
|
|
|
-
|
|
|
- // required uint64 crcPerBlock = 2;
|
|
|
- private long crcPerBlock_ ;
|
|
|
- public boolean hasCrcPerBlock() {
|
|
|
- return ((bitField0_ & 0x00000002) == 0x00000002);
|
|
|
- }
|
|
|
- public long getCrcPerBlock() {
|
|
|
- return crcPerBlock_;
|
|
|
- }
|
|
|
- public Builder setCrcPerBlock(long value) {
|
|
|
- bitField0_ |= 0x00000002;
|
|
|
- crcPerBlock_ = value;
|
|
|
- onChanged();
|
|
|
- return this;
|
|
|
- }
|
|
|
- public Builder clearCrcPerBlock() {
|
|
|
- bitField0_ = (bitField0_ & ~0x00000002);
|
|
|
- crcPerBlock_ = 0L;
|
|
|
- onChanged();
|
|
|
- return this;
|
|
|
- }
|
|
|
-
|
|
|
- // required bytes md5 = 3;
|
|
|
- private com.google.protobuf.ByteString md5_ = com.google.protobuf.ByteString.EMPTY;
|
|
|
- public boolean hasMd5() {
|
|
|
- return ((bitField0_ & 0x00000004) == 0x00000004);
|
|
|
- }
|
|
|
- public com.google.protobuf.ByteString getMd5() {
|
|
|
- return md5_;
|
|
|
- }
|
|
|
- public Builder setMd5(com.google.protobuf.ByteString value) {
|
|
|
- if (value == null) {
|
|
|
- throw new NullPointerException();
|
|
|
- }
|
|
|
- bitField0_ |= 0x00000004;
|
|
|
- md5_ = value;
|
|
|
- onChanged();
|
|
|
- return this;
|
|
|
- }
|
|
|
- public Builder clearMd5() {
|
|
|
- bitField0_ = (bitField0_ & ~0x00000004);
|
|
|
- md5_ = getDefaultInstance().getMd5();
|
|
|
- onChanged();
|
|
|
- return this;
|
|
|
- }
|
|
|
-
|
|
|
- // @@protoc_insertion_point(builder_scope:OpBlockChecksumResponseProto)
|
|
|
- }
|
|
|
-
|
|
|
- static {
|
|
|
- defaultInstance = new OpBlockChecksumResponseProto(true);
|
|
|
- defaultInstance.initFields();
|
|
|
- }
|
|
|
-
|
|
|
- // @@protoc_insertion_point(class_scope:OpBlockChecksumResponseProto)
|
|
|
- }
-
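- /* Editor's note: illustrative usage sketch only, not part of the generated file.
-  * All three fields of OpBlockChecksumResponseProto are required, so build()
-  * throws an UninitializedMessageException unless each is set; the values and
-  * the md5Digest byte[] below are hypothetical:
-  *
-  *   OpBlockChecksumResponseProto resp = OpBlockChecksumResponseProto.newBuilder()
-  *       .setBytesPerCrc(512)
-  *       .setCrcPerBlock(blockLen / 512)
-  *       .setMd5(com.google.protobuf.ByteString.copyFrom(md5Digest))
-  *       .build();
-  */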
- private static com.google.protobuf.Descriptors.Descriptor
|
|
|
- internal_static_BaseHeaderProto_descriptor;
|
|
|
- private static
|
|
|
- com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
|
|
- internal_static_BaseHeaderProto_fieldAccessorTable;
|
|
|
- private static com.google.protobuf.Descriptors.Descriptor
|
|
|
- internal_static_ClientOperationHeaderProto_descriptor;
|
|
|
- private static
|
|
|
- com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
|
|
- internal_static_ClientOperationHeaderProto_fieldAccessorTable;
|
|
|
- private static com.google.protobuf.Descriptors.Descriptor
|
|
|
- internal_static_OpReadBlockProto_descriptor;
|
|
|
- private static
|
|
|
- com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
|
|
- internal_static_OpReadBlockProto_fieldAccessorTable;
|
|
|
- private static com.google.protobuf.Descriptors.Descriptor
|
|
|
- internal_static_ChecksumProto_descriptor;
|
|
|
- private static
|
|
|
- com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
|
|
- internal_static_ChecksumProto_fieldAccessorTable;
|
|
|
- private static com.google.protobuf.Descriptors.Descriptor
|
|
|
- internal_static_OpWriteBlockProto_descriptor;
|
|
|
- private static
|
|
|
- com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
|
|
- internal_static_OpWriteBlockProto_fieldAccessorTable;
|
|
|
- private static com.google.protobuf.Descriptors.Descriptor
|
|
|
- internal_static_OpTransferBlockProto_descriptor;
|
|
|
- private static
|
|
|
- com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
|
|
- internal_static_OpTransferBlockProto_fieldAccessorTable;
|
|
|
- private static com.google.protobuf.Descriptors.Descriptor
|
|
|
- internal_static_OpReplaceBlockProto_descriptor;
|
|
|
- private static
|
|
|
- com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
|
|
- internal_static_OpReplaceBlockProto_fieldAccessorTable;
|
|
|
- private static com.google.protobuf.Descriptors.Descriptor
|
|
|
- internal_static_OpCopyBlockProto_descriptor;
|
|
|
- private static
|
|
|
- com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
|
|
- internal_static_OpCopyBlockProto_fieldAccessorTable;
|
|
|
- private static com.google.protobuf.Descriptors.Descriptor
|
|
|
- internal_static_OpBlockChecksumProto_descriptor;
|
|
|
- private static
|
|
|
- com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
|
|
- internal_static_OpBlockChecksumProto_fieldAccessorTable;
|
|
|
- private static com.google.protobuf.Descriptors.Descriptor
|
|
|
- internal_static_PacketHeaderProto_descriptor;
|
|
|
- private static
|
|
|
- com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
|
|
- internal_static_PacketHeaderProto_fieldAccessorTable;
|
|
|
- private static com.google.protobuf.Descriptors.Descriptor
|
|
|
- internal_static_PipelineAckProto_descriptor;
|
|
|
- private static
|
|
|
- com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
|
|
- internal_static_PipelineAckProto_fieldAccessorTable;
|
|
|
- private static com.google.protobuf.Descriptors.Descriptor
|
|
|
- internal_static_ReadOpChecksumInfoProto_descriptor;
|
|
|
- private static
|
|
|
- com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
|
|
- internal_static_ReadOpChecksumInfoProto_fieldAccessorTable;
|
|
|
- private static com.google.protobuf.Descriptors.Descriptor
|
|
|
- internal_static_BlockOpResponseProto_descriptor;
|
|
|
- private static
|
|
|
- com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
|
|
- internal_static_BlockOpResponseProto_fieldAccessorTable;
|
|
|
- private static com.google.protobuf.Descriptors.Descriptor
|
|
|
- internal_static_ClientReadStatusProto_descriptor;
|
|
|
- private static
|
|
|
- com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
|
|
- internal_static_ClientReadStatusProto_fieldAccessorTable;
|
|
|
- private static com.google.protobuf.Descriptors.Descriptor
|
|
|
- internal_static_DNTransferAckProto_descriptor;
|
|
|
- private static
|
|
|
- com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
|
|
- internal_static_DNTransferAckProto_fieldAccessorTable;
|
|
|
- private static com.google.protobuf.Descriptors.Descriptor
|
|
|
- internal_static_OpBlockChecksumResponseProto_descriptor;
|
|
|
- private static
|
|
|
- com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
|
|
- internal_static_OpBlockChecksumResponseProto_fieldAccessorTable;
|
|
|
-
|
|
|
- public static com.google.protobuf.Descriptors.FileDescriptor
|
|
|
- getDescriptor() {
|
|
|
- return descriptor;
|
|
|
- }
|
|
|
- private static com.google.protobuf.Descriptors.FileDescriptor
|
|
|
- descriptor;
|
|
|
- static {
|
|
|
- java.lang.String[] descriptorData = {
|
|
|
- "\n\022datatransfer.proto\032\nhdfs.proto\"`\n\017Base" +
|
|
|
- "HeaderProto\022\"\n\005block\030\001 \002(\0132\023.ExtendedBlo" +
|
|
|
- "ckProto\022)\n\005token\030\002 \001(\0132\032.BlockTokenIdent" +
|
|
|
- "ifierProto\"V\n\032ClientOperationHeaderProto" +
|
|
|
- "\022$\n\nbaseHeader\030\001 \002(\0132\020.BaseHeaderProto\022\022" +
|
|
|
- "\n\nclientName\030\002 \002(\t\"\\\n\020OpReadBlockProto\022+" +
|
|
|
- "\n\006header\030\001 \002(\0132\033.ClientOperationHeaderPr" +
|
|
|
- "oto\022\016\n\006offset\030\002 \002(\004\022\013\n\003len\030\003 \002(\004\"\205\001\n\rChe" +
|
|
|
- "cksumProto\022)\n\004type\030\001 \002(\0162\033.ChecksumProto" +
|
|
|
- ".ChecksumType\022\030\n\020bytesPerChecksum\030\002 \002(\r\"",
|
|
|
- "/\n\014ChecksumType\022\010\n\004NULL\020\000\022\t\n\005CRC32\020\001\022\n\n\006" +
|
|
|
- "CRC32C\020\002\"\332\004\n\021OpWriteBlockProto\022+\n\006header" +
|
|
|
- "\030\001 \002(\0132\033.ClientOperationHeaderProto\022#\n\007t" +
|
|
|
- "argets\030\002 \003(\0132\022.DatanodeInfoProto\022\"\n\006sour" +
|
|
|
- "ce\030\003 \001(\0132\022.DatanodeInfoProto\0228\n\005stage\030\004 " +
|
|
|
- "\002(\0162).OpWriteBlockProto.BlockConstructio" +
|
|
|
- "nStage\022\024\n\014pipelineSize\030\005 \002(\r\022\024\n\014minBytes" +
|
|
|
- "Rcvd\030\006 \002(\004\022\024\n\014maxBytesRcvd\030\007 \002(\004\022\035\n\025late" +
|
|
|
- "stGenerationStamp\030\010 \002(\004\022)\n\021requestedChec" +
|
|
|
- "ksum\030\t \002(\0132\016.ChecksumProto\"\210\002\n\026BlockCons",
|
|
|
- "tructionStage\022\031\n\025PIPELINE_SETUP_APPEND\020\000" +
|
|
|
- "\022\"\n\036PIPELINE_SETUP_APPEND_RECOVERY\020\001\022\022\n\016" +
|
|
|
- "DATA_STREAMING\020\002\022%\n!PIPELINE_SETUP_STREA" +
|
|
|
- "MING_RECOVERY\020\003\022\022\n\016PIPELINE_CLOSE\020\004\022\033\n\027P" +
|
|
|
- "IPELINE_CLOSE_RECOVERY\020\005\022\031\n\025PIPELINE_SET" +
|
|
|
- "UP_CREATE\020\006\022\020\n\014TRANSFER_RBW\020\007\022\026\n\022TRANSFE" +
|
|
|
- "R_FINALIZED\020\010\"h\n\024OpTransferBlockProto\022+\n" +
|
|
|
- "\006header\030\001 \002(\0132\033.ClientOperationHeaderPro" +
|
|
|
- "to\022#\n\007targets\030\002 \003(\0132\022.DatanodeInfoProto\"" +
|
|
|
- "l\n\023OpReplaceBlockProto\022 \n\006header\030\001 \002(\0132\020",
|
|
|
- ".BaseHeaderProto\022\017\n\007delHint\030\002 \002(\t\022\"\n\006sou" +
|
|
|
- "rce\030\003 \002(\0132\022.DatanodeInfoProto\"4\n\020OpCopyB" +
|
|
|
- "lockProto\022 \n\006header\030\001 \002(\0132\020.BaseHeaderPr" +
|
|
|
- "oto\"8\n\024OpBlockChecksumProto\022 \n\006header\030\001 " +
|
|
|
- "\002(\0132\020.BaseHeaderProto\"e\n\021PacketHeaderPro" +
|
|
|
- "to\022\025\n\roffsetInBlock\030\001 \002(\020\022\r\n\005seqno\030\002 \002(\020" +
|
|
|
- "\022\031\n\021lastPacketInBlock\030\003 \002(\010\022\017\n\007dataLen\030\004" +
|
|
|
- " \002(\017\":\n\020PipelineAckProto\022\r\n\005seqno\030\001 \002(\022\022" +
|
|
|
- "\027\n\006status\030\002 \003(\0162\007.Status\"P\n\027ReadOpChecks" +
|
|
|
- "umInfoProto\022 \n\010checksum\030\001 \002(\0132\016.Checksum",
|
|
|
- "Proto\022\023\n\013chunkOffset\030\002 \002(\004\"\305\001\n\024BlockOpRe" +
|
|
|
- "sponseProto\022\027\n\006status\030\001 \002(\0162\007.Status\022\024\n\014" +
|
|
|
- "firstBadLink\030\002 \001(\t\0227\n\020checksumResponse\030\003" +
|
|
|
- " \001(\0132\035.OpBlockChecksumResponseProto\0224\n\022r" +
|
|
|
- "eadOpChecksumInfo\030\004 \001(\0132\030.ReadOpChecksum" +
|
|
|
- "InfoProto\022\017\n\007message\030\005 \001(\t\"0\n\025ClientRead" +
|
|
|
- "StatusProto\022\027\n\006status\030\001 \002(\0162\007.Status\"-\n\022" +
|
|
|
- "DNTransferAckProto\022\027\n\006status\030\001 \002(\0162\007.Sta" +
|
|
|
- "tus\"U\n\034OpBlockChecksumResponseProto\022\023\n\013b" +
|
|
|
- "ytesPerCrc\030\001 \002(\r\022\023\n\013crcPerBlock\030\002 \002(\004\022\013\n",
|
|
|
- "\003md5\030\003 \002(\014*\202\001\n\006Status\022\013\n\007SUCCESS\020\000\022\t\n\005ER" +
|
|
|
- "ROR\020\001\022\022\n\016ERROR_CHECKSUM\020\002\022\021\n\rERROR_INVAL" +
|
|
|
- "ID\020\003\022\020\n\014ERROR_EXISTS\020\004\022\026\n\022ERROR_ACCESS_T" +
|
|
|
- "OKEN\020\005\022\017\n\013CHECKSUM_OK\020\006B>\n%org.apache.ha" +
|
|
|
- "doop.hdfs.protocol.protoB\022DataTransferPr" +
|
|
|
- "otos\240\001\001"
|
|
|
- };
|
|
|
- com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
|
|
|
- new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
|
|
|
- public com.google.protobuf.ExtensionRegistry assignDescriptors(
|
|
|
- com.google.protobuf.Descriptors.FileDescriptor root) {
|
|
|
- descriptor = root;
|
|
|
- internal_static_BaseHeaderProto_descriptor =
|
|
|
- getDescriptor().getMessageTypes().get(0);
|
|
|
- internal_static_BaseHeaderProto_fieldAccessorTable = new
|
|
|
- com.google.protobuf.GeneratedMessage.FieldAccessorTable(
|
|
|
- internal_static_BaseHeaderProto_descriptor,
|
|
|
- new java.lang.String[] { "Block", "Token", },
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.class,
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder.class);
|
|
|
- internal_static_ClientOperationHeaderProto_descriptor =
|
|
|
- getDescriptor().getMessageTypes().get(1);
|
|
|
- internal_static_ClientOperationHeaderProto_fieldAccessorTable = new
|
|
|
- com.google.protobuf.GeneratedMessage.FieldAccessorTable(
|
|
|
- internal_static_ClientOperationHeaderProto_descriptor,
|
|
|
- new java.lang.String[] { "BaseHeader", "ClientName", },
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.class,
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.Builder.class);
|
|
|
- internal_static_OpReadBlockProto_descriptor =
|
|
|
- getDescriptor().getMessageTypes().get(2);
|
|
|
- internal_static_OpReadBlockProto_fieldAccessorTable = new
|
|
|
- com.google.protobuf.GeneratedMessage.FieldAccessorTable(
|
|
|
- internal_static_OpReadBlockProto_descriptor,
|
|
|
- new java.lang.String[] { "Header", "Offset", "Len", },
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto.class,
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto.Builder.class);
|
|
|
- internal_static_ChecksumProto_descriptor =
|
|
|
- getDescriptor().getMessageTypes().get(3);
|
|
|
- internal_static_ChecksumProto_fieldAccessorTable = new
|
|
|
- com.google.protobuf.GeneratedMessage.FieldAccessorTable(
|
|
|
- internal_static_ChecksumProto_descriptor,
|
|
|
- new java.lang.String[] { "Type", "BytesPerChecksum", },
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.class,
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.Builder.class);
|
|
|
- internal_static_OpWriteBlockProto_descriptor =
|
|
|
- getDescriptor().getMessageTypes().get(4);
|
|
|
- internal_static_OpWriteBlockProto_fieldAccessorTable = new
|
|
|
- com.google.protobuf.GeneratedMessage.FieldAccessorTable(
|
|
|
- internal_static_OpWriteBlockProto_descriptor,
|
|
|
- new java.lang.String[] { "Header", "Targets", "Source", "Stage", "PipelineSize", "MinBytesRcvd", "MaxBytesRcvd", "LatestGenerationStamp", "RequestedChecksum", },
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto.class,
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto.Builder.class);
|
|
|
- internal_static_OpTransferBlockProto_descriptor =
|
|
|
- getDescriptor().getMessageTypes().get(5);
|
|
|
- internal_static_OpTransferBlockProto_fieldAccessorTable = new
|
|
|
- com.google.protobuf.GeneratedMessage.FieldAccessorTable(
|
|
|
- internal_static_OpTransferBlockProto_descriptor,
|
|
|
- new java.lang.String[] { "Header", "Targets", },
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto.class,
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto.Builder.class);
|
|
|
- internal_static_OpReplaceBlockProto_descriptor =
|
|
|
- getDescriptor().getMessageTypes().get(6);
|
|
|
- internal_static_OpReplaceBlockProto_fieldAccessorTable = new
|
|
|
- com.google.protobuf.GeneratedMessage.FieldAccessorTable(
|
|
|
- internal_static_OpReplaceBlockProto_descriptor,
|
|
|
- new java.lang.String[] { "Header", "DelHint", "Source", },
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto.class,
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto.Builder.class);
|
|
|
- internal_static_OpCopyBlockProto_descriptor =
|
|
|
- getDescriptor().getMessageTypes().get(7);
|
|
|
- internal_static_OpCopyBlockProto_fieldAccessorTable = new
|
|
|
- com.google.protobuf.GeneratedMessage.FieldAccessorTable(
|
|
|
- internal_static_OpCopyBlockProto_descriptor,
|
|
|
- new java.lang.String[] { "Header", },
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto.class,
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto.Builder.class);
|
|
|
- internal_static_OpBlockChecksumProto_descriptor =
|
|
|
- getDescriptor().getMessageTypes().get(8);
|
|
|
- internal_static_OpBlockChecksumProto_fieldAccessorTable = new
|
|
|
- com.google.protobuf.GeneratedMessage.FieldAccessorTable(
|
|
|
- internal_static_OpBlockChecksumProto_descriptor,
|
|
|
- new java.lang.String[] { "Header", },
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto.class,
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto.Builder.class);
|
|
|
- internal_static_PacketHeaderProto_descriptor =
|
|
|
- getDescriptor().getMessageTypes().get(9);
|
|
|
- internal_static_PacketHeaderProto_fieldAccessorTable = new
|
|
|
- com.google.protobuf.GeneratedMessage.FieldAccessorTable(
|
|
|
- internal_static_PacketHeaderProto_descriptor,
|
|
|
- new java.lang.String[] { "OffsetInBlock", "Seqno", "LastPacketInBlock", "DataLen", },
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto.class,
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto.Builder.class);
|
|
|
- internal_static_PipelineAckProto_descriptor =
|
|
|
- getDescriptor().getMessageTypes().get(10);
|
|
|
- internal_static_PipelineAckProto_fieldAccessorTable = new
|
|
|
- com.google.protobuf.GeneratedMessage.FieldAccessorTable(
|
|
|
- internal_static_PipelineAckProto_descriptor,
|
|
|
- new java.lang.String[] { "Seqno", "Status", },
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto.class,
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto.Builder.class);
|
|
|
- internal_static_ReadOpChecksumInfoProto_descriptor =
|
|
|
- getDescriptor().getMessageTypes().get(11);
|
|
|
- internal_static_ReadOpChecksumInfoProto_fieldAccessorTable = new
|
|
|
- com.google.protobuf.GeneratedMessage.FieldAccessorTable(
|
|
|
- internal_static_ReadOpChecksumInfoProto_descriptor,
|
|
|
- new java.lang.String[] { "Checksum", "ChunkOffset", },
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto.class,
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto.Builder.class);
|
|
|
- internal_static_BlockOpResponseProto_descriptor =
|
|
|
- getDescriptor().getMessageTypes().get(12);
|
|
|
- internal_static_BlockOpResponseProto_fieldAccessorTable = new
|
|
|
- com.google.protobuf.GeneratedMessage.FieldAccessorTable(
|
|
|
- internal_static_BlockOpResponseProto_descriptor,
|
|
|
- new java.lang.String[] { "Status", "FirstBadLink", "ChecksumResponse", "ReadOpChecksumInfo", "Message", },
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto.class,
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto.Builder.class);
|
|
|
- internal_static_ClientReadStatusProto_descriptor =
|
|
|
- getDescriptor().getMessageTypes().get(13);
|
|
|
- internal_static_ClientReadStatusProto_fieldAccessorTable = new
|
|
|
- com.google.protobuf.GeneratedMessage.FieldAccessorTable(
|
|
|
- internal_static_ClientReadStatusProto_descriptor,
|
|
|
- new java.lang.String[] { "Status", },
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto.class,
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto.Builder.class);
|
|
|
- internal_static_DNTransferAckProto_descriptor =
|
|
|
- getDescriptor().getMessageTypes().get(14);
|
|
|
- internal_static_DNTransferAckProto_fieldAccessorTable = new
|
|
|
- com.google.protobuf.GeneratedMessage.FieldAccessorTable(
|
|
|
- internal_static_DNTransferAckProto_descriptor,
|
|
|
- new java.lang.String[] { "Status", },
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto.class,
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto.Builder.class);
|
|
|
- internal_static_OpBlockChecksumResponseProto_descriptor =
|
|
|
- getDescriptor().getMessageTypes().get(15);
|
|
|
- internal_static_OpBlockChecksumResponseProto_fieldAccessorTable = new
|
|
|
- com.google.protobuf.GeneratedMessage.FieldAccessorTable(
|
|
|
- internal_static_OpBlockChecksumResponseProto_descriptor,
|
|
|
- new java.lang.String[] { "BytesPerCrc", "CrcPerBlock", "Md5", },
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto.class,
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto.Builder.class);
|
|
|
- return null;
|
|
|
- }
|
|
|
- };
|
|
|
- com.google.protobuf.Descriptors.FileDescriptor
|
|
|
- .internalBuildGeneratedFileFrom(descriptorData,
|
|
|
- new com.google.protobuf.Descriptors.FileDescriptor[] {
|
|
|
- org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.getDescriptor(),
|
|
|
- }, assigner);
|
|
|
- }
|
|
|
-
|
|
|
- // @@protoc_insertion_point(outer_class_scope)
|
|
|
-}