@@ -0,0 +1,2516 @@
+// Generated by the protocol buffer compiler.  DO NOT EDIT!
+// source: InterDatanodeProtocol.proto
+
+package org.apache.hadoop.hdfs.protocol.proto;
+
+public final class InterDatanodeProtocolProtos {
+  private InterDatanodeProtocolProtos() {}
+  public static void registerAllExtensions(
+      com.google.protobuf.ExtensionRegistry registry) {
+  }
+  public interface InitReplicaRecoveryRequestProtoOrBuilder
+      extends com.google.protobuf.MessageOrBuilder {
+
+    // required .RecoveringBlockProto block = 1;
+    boolean hasBlock();
+    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto getBlock();
+    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProtoOrBuilder getBlockOrBuilder();
+  }
+  public static final class InitReplicaRecoveryRequestProto extends
+      com.google.protobuf.GeneratedMessage
+      implements InitReplicaRecoveryRequestProtoOrBuilder {
+    // Use InitReplicaRecoveryRequestProto.newBuilder() to construct.
+    private InitReplicaRecoveryRequestProto(Builder builder) {
+      super(builder);
+    }
+    private InitReplicaRecoveryRequestProto(boolean noInit) {}
+
+    private static final InitReplicaRecoveryRequestProto defaultInstance;
+    public static InitReplicaRecoveryRequestProto getDefaultInstance() {
+      return defaultInstance;
+    }
+
+    public InitReplicaRecoveryRequestProto getDefaultInstanceForType() {
+      return defaultInstance;
+    }
+
+    public static final com.google.protobuf.Descriptors.Descriptor
+        getDescriptor() {
+      return org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.internal_static_InitReplicaRecoveryRequestProto_descriptor;
+    }
+
+    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+        internalGetFieldAccessorTable() {
+      return org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.internal_static_InitReplicaRecoveryRequestProto_fieldAccessorTable;
+    }
+
+    private int bitField0_;
+    // required .RecoveringBlockProto block = 1;
+    public static final int BLOCK_FIELD_NUMBER = 1;
+    private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto block_;
+    public boolean hasBlock() {
+      return ((bitField0_ & 0x00000001) == 0x00000001);
+    }
+    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto getBlock() {
+      return block_;
+    }
+    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProtoOrBuilder getBlockOrBuilder() {
+      return block_;
+    }
+
+    private void initFields() {
+      block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto.getDefaultInstance();
+    }
+    private byte memoizedIsInitialized = -1;
+    public final boolean isInitialized() {
+      byte isInitialized = memoizedIsInitialized;
+      if (isInitialized != -1) return isInitialized == 1;
+
+      if (!hasBlock()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      if (!getBlock().isInitialized()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      memoizedIsInitialized = 1;
+      return true;
+    }
+
+    public void writeTo(com.google.protobuf.CodedOutputStream output)
+                        throws java.io.IOException {
+      getSerializedSize();
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        output.writeMessage(1, block_);
+      }
+      getUnknownFields().writeTo(output);
+    }
+
+    private int memoizedSerializedSize = -1;
+    public int getSerializedSize() {
+      int size = memoizedSerializedSize;
+      if (size != -1) return size;
+
+      size = 0;
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeMessageSize(1, block_);
+      }
+      size += getUnknownFields().getSerializedSize();
+      memoizedSerializedSize = size;
+      return size;
+    }
+
+    private static final long serialVersionUID = 0L;
+    @java.lang.Override
+    protected java.lang.Object writeReplace()
+        throws java.io.ObjectStreamException {
+      return super.writeReplace();
+    }
+
+    @java.lang.Override
+    public boolean equals(final java.lang.Object obj) {
+      if (obj == this) {
+        return true;
+      }
+      if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryRequestProto)) {
+        return super.equals(obj);
+      }
+      org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryRequestProto) obj;
+
+      boolean result = true;
+      result = result && (hasBlock() == other.hasBlock());
+      if (hasBlock()) {
+        result = result && getBlock()
+            .equals(other.getBlock());
+      }
+      result = result &&
+          getUnknownFields().equals(other.getUnknownFields());
+      return result;
+    }
+
+    @java.lang.Override
+    public int hashCode() {
+      int hash = 41;
+      hash = (19 * hash) + getDescriptorForType().hashCode();
+      if (hasBlock()) {
+        hash = (37 * hash) + BLOCK_FIELD_NUMBER;
+        hash = (53 * hash) + getBlock().hashCode();
+      }
+      hash = (29 * hash) + getUnknownFields().hashCode();
+      return hash;
+    }
+
+    public static org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryRequestProto parseFrom(
+        com.google.protobuf.ByteString data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return newBuilder().mergeFrom(data).buildParsed();
+    }
+    public static org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryRequestProto parseFrom(
+        com.google.protobuf.ByteString data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return newBuilder().mergeFrom(data, extensionRegistry)
+               .buildParsed();
+    }
+    public static org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryRequestProto parseFrom(byte[] data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return newBuilder().mergeFrom(data).buildParsed();
+    }
+    public static org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryRequestProto parseFrom(
+        byte[] data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return newBuilder().mergeFrom(data, extensionRegistry)
+               .buildParsed();
+    }
+    public static org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryRequestProto parseFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return newBuilder().mergeFrom(input).buildParsed();
+    }
+    public static org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryRequestProto parseFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return newBuilder().mergeFrom(input, extensionRegistry)
+               .buildParsed();
+    }
+    public static org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryRequestProto parseDelimitedFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      Builder builder = newBuilder();
+      if (builder.mergeDelimitedFrom(input)) {
+        return builder.buildParsed();
+      } else {
+        return null;
+      }
+    }
+    public static org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryRequestProto parseDelimitedFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      Builder builder = newBuilder();
+      if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
+        return builder.buildParsed();
+      } else {
+        return null;
+      }
+    }
+    public static org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryRequestProto parseFrom(
+        com.google.protobuf.CodedInputStream input)
+        throws java.io.IOException {
+      return newBuilder().mergeFrom(input).buildParsed();
+    }
+    public static org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryRequestProto parseFrom(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return newBuilder().mergeFrom(input, extensionRegistry)
+               .buildParsed();
+    }
+
+    public static Builder newBuilder() { return Builder.create(); }
+    public Builder newBuilderForType() { return newBuilder(); }
+    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryRequestProto prototype) {
+      return newBuilder().mergeFrom(prototype);
+    }
+    public Builder toBuilder() { return newBuilder(this); }
+
+    @java.lang.Override
+    protected Builder newBuilderForType(
+        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+      Builder builder = new Builder(parent);
+      return builder;
+    }
+    public static final class Builder extends
+        com.google.protobuf.GeneratedMessage.Builder<Builder>
+        implements org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryRequestProtoOrBuilder {
+      public static final com.google.protobuf.Descriptors.Descriptor
+          getDescriptor() {
+        return org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.internal_static_InitReplicaRecoveryRequestProto_descriptor;
+      }
+
+      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+          internalGetFieldAccessorTable() {
+        return org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.internal_static_InitReplicaRecoveryRequestProto_fieldAccessorTable;
+      }
+
+      // Construct using org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryRequestProto.newBuilder()
+      private Builder() {
+        maybeForceBuilderInitialization();
+      }
+
+      private Builder(BuilderParent parent) {
+        super(parent);
+        maybeForceBuilderInitialization();
+      }
+      private void maybeForceBuilderInitialization() {
+        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+          getBlockFieldBuilder();
+        }
+      }
+      private static Builder create() {
+        return new Builder();
+      }
+
+      public Builder clear() {
+        super.clear();
+        if (blockBuilder_ == null) {
+          block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto.getDefaultInstance();
+        } else {
+          blockBuilder_.clear();
+        }
+        bitField0_ = (bitField0_ & ~0x00000001);
+        return this;
+      }
+
+      public Builder clone() {
+        return create().mergeFrom(buildPartial());
+      }
+
+      public com.google.protobuf.Descriptors.Descriptor
+          getDescriptorForType() {
+        return org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryRequestProto.getDescriptor();
+      }
+
+      public org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryRequestProto getDefaultInstanceForType() {
+        return org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryRequestProto.getDefaultInstance();
+      }
+
+      public org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryRequestProto build() {
+        org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryRequestProto result = buildPartial();
+        if (!result.isInitialized()) {
+          throw newUninitializedMessageException(result);
+        }
+        return result;
+      }
+
+      private org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryRequestProto buildParsed()
+          throws com.google.protobuf.InvalidProtocolBufferException {
+        org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryRequestProto result = buildPartial();
+        if (!result.isInitialized()) {
+          throw newUninitializedMessageException(
+            result).asInvalidProtocolBufferException();
+        }
+        return result;
+      }
+
+      public org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryRequestProto buildPartial() {
+        org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryRequestProto(this);
+        int from_bitField0_ = bitField0_;
+        int to_bitField0_ = 0;
+        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+          to_bitField0_ |= 0x00000001;
+        }
+        if (blockBuilder_ == null) {
+          result.block_ = block_;
+        } else {
+          result.block_ = blockBuilder_.build();
+        }
+        result.bitField0_ = to_bitField0_;
+        onBuilt();
+        return result;
+      }
+
+      public Builder mergeFrom(com.google.protobuf.Message other) {
+        if (other instanceof org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryRequestProto) {
+          return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryRequestProto)other);
+        } else {
+          super.mergeFrom(other);
+          return this;
+        }
+      }
+
+      public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryRequestProto other) {
+        if (other == org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryRequestProto.getDefaultInstance()) return this;
+        if (other.hasBlock()) {
+          mergeBlock(other.getBlock());
+        }
+        this.mergeUnknownFields(other.getUnknownFields());
+        return this;
+      }
+
+      public final boolean isInitialized() {
+        if (!hasBlock()) {
+
+          return false;
+        }
+        if (!getBlock().isInitialized()) {
+
+          return false;
+        }
+        return true;
+      }
+
+      public Builder mergeFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws java.io.IOException {
+        com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+          com.google.protobuf.UnknownFieldSet.newBuilder(
+            this.getUnknownFields());
+        while (true) {
+          int tag = input.readTag();
+          switch (tag) {
+            case 0:
+              this.setUnknownFields(unknownFields.build());
+              onChanged();
+              return this;
+            default: {
+              if (!parseUnknownField(input, unknownFields,
+                                     extensionRegistry, tag)) {
+                this.setUnknownFields(unknownFields.build());
+                onChanged();
+                return this;
+              }
+              break;
+            }
+            case 10: {
+              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto.newBuilder();
+              if (hasBlock()) {
+                subBuilder.mergeFrom(getBlock());
+              }
+              input.readMessage(subBuilder, extensionRegistry);
+              setBlock(subBuilder.buildPartial());
+              break;
+            }
+          }
+        }
+      }
+
+      private int bitField0_;
+
+      // required .RecoveringBlockProto block = 1;
+      private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto.getDefaultInstance();
+      private com.google.protobuf.SingleFieldBuilder<
+          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProtoOrBuilder> blockBuilder_;
+      public boolean hasBlock() {
+        return ((bitField0_ & 0x00000001) == 0x00000001);
+      }
+      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto getBlock() {
+        if (blockBuilder_ == null) {
+          return block_;
+        } else {
+          return blockBuilder_.getMessage();
+        }
+      }
+      public Builder setBlock(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto value) {
+        if (blockBuilder_ == null) {
+          if (value == null) {
+            throw new NullPointerException();
+          }
+          block_ = value;
+          onChanged();
+        } else {
+          blockBuilder_.setMessage(value);
+        }
+        bitField0_ |= 0x00000001;
+        return this;
+      }
+      public Builder setBlock(
+          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto.Builder builderForValue) {
+        if (blockBuilder_ == null) {
+          block_ = builderForValue.build();
+          onChanged();
+        } else {
+          blockBuilder_.setMessage(builderForValue.build());
+        }
+        bitField0_ |= 0x00000001;
+        return this;
+      }
+      public Builder mergeBlock(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto value) {
+        if (blockBuilder_ == null) {
+          if (((bitField0_ & 0x00000001) == 0x00000001) &&
+              block_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto.getDefaultInstance()) {
+            block_ =
+              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto.newBuilder(block_).mergeFrom(value).buildPartial();
+          } else {
+            block_ = value;
+          }
+          onChanged();
+        } else {
+          blockBuilder_.mergeFrom(value);
+        }
+        bitField0_ |= 0x00000001;
+        return this;
+      }
+      public Builder clearBlock() {
+        if (blockBuilder_ == null) {
+          block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto.getDefaultInstance();
+          onChanged();
+        } else {
+          blockBuilder_.clear();
+        }
+        bitField0_ = (bitField0_ & ~0x00000001);
+        return this;
+      }
+      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto.Builder getBlockBuilder() {
+        bitField0_ |= 0x00000001;
+        onChanged();
+        return getBlockFieldBuilder().getBuilder();
+      }
+      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProtoOrBuilder getBlockOrBuilder() {
+        if (blockBuilder_ != null) {
+          return blockBuilder_.getMessageOrBuilder();
+        } else {
+          return block_;
+        }
+      }
+      private com.google.protobuf.SingleFieldBuilder<
+          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProtoOrBuilder>
+          getBlockFieldBuilder() {
+        if (blockBuilder_ == null) {
+          blockBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProtoOrBuilder>(
+                  block_,
+                  getParentForChildren(),
+                  isClean());
+          block_ = null;
+        }
+        return blockBuilder_;
+      }
+
+      // @@protoc_insertion_point(builder_scope:InitReplicaRecoveryRequestProto)
+    }
+
+    static {
+      defaultInstance = new InitReplicaRecoveryRequestProto(true);
+      defaultInstance.initFields();
+    }
+
+    // @@protoc_insertion_point(class_scope:InitReplicaRecoveryRequestProto)
+  }
+
+  public interface InitReplicaRecoveryResponseProtoOrBuilder
+      extends com.google.protobuf.MessageOrBuilder {
+
+    // required .ReplicaState state = 1;
+    boolean hasState();
+    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReplicaState getState();
+
+    // required .BlockProto block = 2;
+    boolean hasBlock();
+    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto getBlock();
+    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder getBlockOrBuilder();
+  }
+  public static final class InitReplicaRecoveryResponseProto extends
+      com.google.protobuf.GeneratedMessage
+      implements InitReplicaRecoveryResponseProtoOrBuilder {
+    // Use InitReplicaRecoveryResponseProto.newBuilder() to construct.
+    private InitReplicaRecoveryResponseProto(Builder builder) {
+      super(builder);
+    }
+    private InitReplicaRecoveryResponseProto(boolean noInit) {}
+
+    private static final InitReplicaRecoveryResponseProto defaultInstance;
+    public static InitReplicaRecoveryResponseProto getDefaultInstance() {
+      return defaultInstance;
+    }
+
+    public InitReplicaRecoveryResponseProto getDefaultInstanceForType() {
+      return defaultInstance;
+    }
+
+    public static final com.google.protobuf.Descriptors.Descriptor
+        getDescriptor() {
+      return org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.internal_static_InitReplicaRecoveryResponseProto_descriptor;
+    }
+
+    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+        internalGetFieldAccessorTable() {
+      return org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.internal_static_InitReplicaRecoveryResponseProto_fieldAccessorTable;
+    }
+
+    private int bitField0_;
+    // required .ReplicaState state = 1;
+    public static final int STATE_FIELD_NUMBER = 1;
+    private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReplicaState state_;
+    public boolean hasState() {
+      return ((bitField0_ & 0x00000001) == 0x00000001);
+    }
+    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReplicaState getState() {
+      return state_;
+    }
+
+    // required .BlockProto block = 2;
+    public static final int BLOCK_FIELD_NUMBER = 2;
+    private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto block_;
+    public boolean hasBlock() {
+      return ((bitField0_ & 0x00000002) == 0x00000002);
+    }
+    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto getBlock() {
+      return block_;
+    }
+    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder getBlockOrBuilder() {
+      return block_;
+    }
+
+    private void initFields() {
+      state_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReplicaState.FINALIZED;
+      block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.getDefaultInstance();
+    }
+    private byte memoizedIsInitialized = -1;
+    public final boolean isInitialized() {
+      byte isInitialized = memoizedIsInitialized;
+      if (isInitialized != -1) return isInitialized == 1;
+
+      if (!hasState()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      if (!hasBlock()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      if (!getBlock().isInitialized()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      memoizedIsInitialized = 1;
+      return true;
+    }
+
+    public void writeTo(com.google.protobuf.CodedOutputStream output)
+                        throws java.io.IOException {
+      getSerializedSize();
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        output.writeEnum(1, state_.getNumber());
+      }
+      if (((bitField0_ & 0x00000002) == 0x00000002)) {
+        output.writeMessage(2, block_);
+      }
+      getUnknownFields().writeTo(output);
+    }
+
+    private int memoizedSerializedSize = -1;
+    public int getSerializedSize() {
+      int size = memoizedSerializedSize;
+      if (size != -1) return size;
+
+      size = 0;
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeEnumSize(1, state_.getNumber());
+      }
+      if (((bitField0_ & 0x00000002) == 0x00000002)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeMessageSize(2, block_);
+      }
+      size += getUnknownFields().getSerializedSize();
+      memoizedSerializedSize = size;
+      return size;
+    }
+
+    private static final long serialVersionUID = 0L;
+    @java.lang.Override
+    protected java.lang.Object writeReplace()
+        throws java.io.ObjectStreamException {
+      return super.writeReplace();
+    }
+
+    @java.lang.Override
+    public boolean equals(final java.lang.Object obj) {
+      if (obj == this) {
+        return true;
+      }
+      if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto)) {
+        return super.equals(obj);
+      }
+      org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto) obj;
+
+      boolean result = true;
+      result = result && (hasState() == other.hasState());
+      if (hasState()) {
+        result = result &&
+            (getState() == other.getState());
+      }
+      result = result && (hasBlock() == other.hasBlock());
+      if (hasBlock()) {
+        result = result && getBlock()
+            .equals(other.getBlock());
+      }
+      result = result &&
+          getUnknownFields().equals(other.getUnknownFields());
+      return result;
+    }
+
+    @java.lang.Override
+    public int hashCode() {
+      int hash = 41;
+      hash = (19 * hash) + getDescriptorForType().hashCode();
+      if (hasState()) {
+        hash = (37 * hash) + STATE_FIELD_NUMBER;
+        hash = (53 * hash) + hashEnum(getState());
+      }
+      if (hasBlock()) {
+        hash = (37 * hash) + BLOCK_FIELD_NUMBER;
+        hash = (53 * hash) + getBlock().hashCode();
+      }
+      hash = (29 * hash) + getUnknownFields().hashCode();
+      return hash;
+    }
+
+    public static org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto parseFrom(
+        com.google.protobuf.ByteString data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return newBuilder().mergeFrom(data).buildParsed();
+    }
+    public static org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto parseFrom(
+        com.google.protobuf.ByteString data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return newBuilder().mergeFrom(data, extensionRegistry)
+               .buildParsed();
+    }
+    public static org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto parseFrom(byte[] data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return newBuilder().mergeFrom(data).buildParsed();
+    }
+    public static org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto parseFrom(
+        byte[] data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return newBuilder().mergeFrom(data, extensionRegistry)
+               .buildParsed();
+    }
+    public static org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto parseFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return newBuilder().mergeFrom(input).buildParsed();
+    }
+    public static org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto parseFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return newBuilder().mergeFrom(input, extensionRegistry)
+               .buildParsed();
+    }
+    public static org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto parseDelimitedFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      Builder builder = newBuilder();
+      if (builder.mergeDelimitedFrom(input)) {
+        return builder.buildParsed();
+      } else {
+        return null;
+      }
+    }
+    public static org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto parseDelimitedFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      Builder builder = newBuilder();
+      if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
+        return builder.buildParsed();
+      } else {
+        return null;
+      }
+    }
+    public static org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto parseFrom(
+        com.google.protobuf.CodedInputStream input)
+        throws java.io.IOException {
+      return newBuilder().mergeFrom(input).buildParsed();
+    }
+    public static org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto parseFrom(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return newBuilder().mergeFrom(input, extensionRegistry)
+               .buildParsed();
+    }
+
+    public static Builder newBuilder() { return Builder.create(); }
+    public Builder newBuilderForType() { return newBuilder(); }
+    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto prototype) {
+      return newBuilder().mergeFrom(prototype);
+    }
+    public Builder toBuilder() { return newBuilder(this); }
+
+    @java.lang.Override
+    protected Builder newBuilderForType(
+        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+      Builder builder = new Builder(parent);
+      return builder;
+    }
+    public static final class Builder extends
+        com.google.protobuf.GeneratedMessage.Builder<Builder>
+        implements org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProtoOrBuilder {
+      public static final com.google.protobuf.Descriptors.Descriptor
+          getDescriptor() {
+        return org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.internal_static_InitReplicaRecoveryResponseProto_descriptor;
+      }
+
+      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+          internalGetFieldAccessorTable() {
+        return org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.internal_static_InitReplicaRecoveryResponseProto_fieldAccessorTable;
+      }
+
+      // Construct using org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto.newBuilder()
+      private Builder() {
+        maybeForceBuilderInitialization();
+      }
+
+      private Builder(BuilderParent parent) {
+        super(parent);
+        maybeForceBuilderInitialization();
+      }
+      private void maybeForceBuilderInitialization() {
+        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+          getBlockFieldBuilder();
+        }
+      }
+      private static Builder create() {
+        return new Builder();
+      }
+
+      public Builder clear() {
+        super.clear();
+        state_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReplicaState.FINALIZED;
+        bitField0_ = (bitField0_ & ~0x00000001);
+        if (blockBuilder_ == null) {
+          block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.getDefaultInstance();
+        } else {
+          blockBuilder_.clear();
+        }
+        bitField0_ = (bitField0_ & ~0x00000002);
+        return this;
+      }
+
+      public Builder clone() {
+        return create().mergeFrom(buildPartial());
+      }
+
+      public com.google.protobuf.Descriptors.Descriptor
+          getDescriptorForType() {
+        return org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto.getDescriptor();
+      }
+
+      public org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto getDefaultInstanceForType() {
+        return org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto.getDefaultInstance();
+      }
+
+      public org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto build() {
+        org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto result = buildPartial();
+        if (!result.isInitialized()) {
+          throw newUninitializedMessageException(result);
+        }
+        return result;
+      }
+
+      private org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto buildParsed()
+          throws com.google.protobuf.InvalidProtocolBufferException {
+        org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto result = buildPartial();
+        if (!result.isInitialized()) {
+          throw newUninitializedMessageException(
+            result).asInvalidProtocolBufferException();
+        }
+        return result;
+      }
+
+      public org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto buildPartial() {
+        org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto(this);
+        int from_bitField0_ = bitField0_;
+        int to_bitField0_ = 0;
+        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+          to_bitField0_ |= 0x00000001;
+        }
+        result.state_ = state_;
+        if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+          to_bitField0_ |= 0x00000002;
+        }
+        if (blockBuilder_ == null) {
+          result.block_ = block_;
+        } else {
+          result.block_ = blockBuilder_.build();
+        }
+        result.bitField0_ = to_bitField0_;
+        onBuilt();
+        return result;
+      }
+
+      public Builder mergeFrom(com.google.protobuf.Message other) {
+        if (other instanceof org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto) {
+          return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto)other);
+        } else {
+          super.mergeFrom(other);
+          return this;
+        }
+      }
+
+      public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto other) {
+        if (other == org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto.getDefaultInstance()) return this;
+        if (other.hasState()) {
+          setState(other.getState());
+        }
+        if (other.hasBlock()) {
+          mergeBlock(other.getBlock());
+        }
+        this.mergeUnknownFields(other.getUnknownFields());
+        return this;
+      }
+
+      public final boolean isInitialized() {
+        if (!hasState()) {
+
+          return false;
+        }
+        if (!hasBlock()) {
+
+          return false;
+        }
+        if (!getBlock().isInitialized()) {
+
+          return false;
+        }
+        return true;
+      }
+
+      public Builder mergeFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws java.io.IOException {
+        com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+          com.google.protobuf.UnknownFieldSet.newBuilder(
+            this.getUnknownFields());
+        while (true) {
+          int tag = input.readTag();
+          switch (tag) {
+            case 0:
+              this.setUnknownFields(unknownFields.build());
+              onChanged();
+              return this;
+            default: {
+              if (!parseUnknownField(input, unknownFields,
+                                     extensionRegistry, tag)) {
+                this.setUnknownFields(unknownFields.build());
+                onChanged();
+                return this;
+              }
+              break;
+            }
+            case 8: {
+              int rawValue = input.readEnum();
+              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReplicaState value = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReplicaState.valueOf(rawValue);
+              if (value == null) {
+                unknownFields.mergeVarintField(1, rawValue);
+              } else {
+                bitField0_ |= 0x00000001;
+                state_ = value;
+              }
+              break;
+            }
+            case 18: {
+              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.newBuilder();
+              if (hasBlock()) {
+                subBuilder.mergeFrom(getBlock());
+              }
+              input.readMessage(subBuilder, extensionRegistry);
+              setBlock(subBuilder.buildPartial());
+              break;
+            }
+          }
+        }
+      }
+
+      private int bitField0_;
+
+      // required .ReplicaState state = 1;
+      private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReplicaState state_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReplicaState.FINALIZED;
+      public boolean hasState() {
+        return ((bitField0_ & 0x00000001) == 0x00000001);
+      }
+      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReplicaState getState() {
+        return state_;
+      }
+      public Builder setState(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReplicaState value) {
+        if (value == null) {
+          throw new NullPointerException();
+        }
+        bitField0_ |= 0x00000001;
+        state_ = value;
+        onChanged();
+        return this;
+      }
+      public Builder clearState() {
+        bitField0_ = (bitField0_ & ~0x00000001);
+        state_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReplicaState.FINALIZED;
+        onChanged();
+        return this;
+      }
+
+      // required .BlockProto block = 2;
+      private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.getDefaultInstance();
+      private com.google.protobuf.SingleFieldBuilder<
+          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder> blockBuilder_;
+      public boolean hasBlock() {
+        return ((bitField0_ & 0x00000002) == 0x00000002);
+      }
+      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto getBlock() {
+        if (blockBuilder_ == null) {
+          return block_;
+        } else {
+          return blockBuilder_.getMessage();
+        }
+      }
+      public Builder setBlock(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto value) {
+        if (blockBuilder_ == null) {
+          if (value == null) {
+            throw new NullPointerException();
+          }
+          block_ = value;
+          onChanged();
+        } else {
+          blockBuilder_.setMessage(value);
+        }
+        bitField0_ |= 0x00000002;
+        return this;
+      }
+      public Builder setBlock(
+          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder builderForValue) {
+        if (blockBuilder_ == null) {
+          block_ = builderForValue.build();
+          onChanged();
+        } else {
+          blockBuilder_.setMessage(builderForValue.build());
+        }
+        bitField0_ |= 0x00000002;
+        return this;
+      }
+      public Builder mergeBlock(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto value) {
+        if (blockBuilder_ == null) {
+          if (((bitField0_ & 0x00000002) == 0x00000002) &&
+              block_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.getDefaultInstance()) {
+            block_ =
+              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.newBuilder(block_).mergeFrom(value).buildPartial();
+          } else {
+            block_ = value;
+          }
+          onChanged();
+        } else {
+          blockBuilder_.mergeFrom(value);
+        }
+        bitField0_ |= 0x00000002;
+        return this;
+      }
+      public Builder clearBlock() {
+        if (blockBuilder_ == null) {
+          block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.getDefaultInstance();
+          onChanged();
+        } else {
+          blockBuilder_.clear();
+        }
+        bitField0_ = (bitField0_ & ~0x00000002);
+        return this;
+      }
+      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder getBlockBuilder() {
+        bitField0_ |= 0x00000002;
+        onChanged();
+        return getBlockFieldBuilder().getBuilder();
+      }
+      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder getBlockOrBuilder() {
+        if (blockBuilder_ != null) {
+          return blockBuilder_.getMessageOrBuilder();
+        } else {
+          return block_;
+        }
+      }
+      private com.google.protobuf.SingleFieldBuilder<
+          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder>
+          getBlockFieldBuilder() {
+        if (blockBuilder_ == null) {
+          blockBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder>(
+                  block_,
+                  getParentForChildren(),
+                  isClean());
+          block_ = null;
+        }
+        return blockBuilder_;
+      }
+
+      // @@protoc_insertion_point(builder_scope:InitReplicaRecoveryResponseProto)
+    }
+
+    static {
+      defaultInstance = new InitReplicaRecoveryResponseProto(true);
+      defaultInstance.initFields();
+    }
+
+    // @@protoc_insertion_point(class_scope:InitReplicaRecoveryResponseProto)
+  }
|
|
|
+
|
|
|
+ public interface UpdateReplicaUnderRecoveryRequestProtoOrBuilder
|
|
|
+ extends com.google.protobuf.MessageOrBuilder {
|
|
|
+
|
|
|
+ // required .ExtendedBlockProto block = 1;
|
|
|
+ boolean hasBlock();
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getBlock();
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder getBlockOrBuilder();
|
|
|
+
|
|
|
+ // required uint64 recoveryId = 2;
|
|
|
+ boolean hasRecoveryId();
|
|
|
+ long getRecoveryId();
|
|
|
+
|
|
|
+ // required uint64 newLength = 3;
|
|
|
+ boolean hasNewLength();
|
|
|
+ long getNewLength();
|
|
|
+ }
|
|
|
+ public static final class UpdateReplicaUnderRecoveryRequestProto extends
|
|
|
+ com.google.protobuf.GeneratedMessage
|
|
|
+ implements UpdateReplicaUnderRecoveryRequestProtoOrBuilder {
|
|
|
+ // Use UpdateReplicaUnderRecoveryRequestProto.newBuilder() to construct.
|
|
|
+ private UpdateReplicaUnderRecoveryRequestProto(Builder builder) {
|
|
|
+ super(builder);
|
|
|
+ }
|
|
|
+ private UpdateReplicaUnderRecoveryRequestProto(boolean noInit) {}
|
|
|
+
|
|
|
+ private static final UpdateReplicaUnderRecoveryRequestProto defaultInstance;
|
|
|
+ public static UpdateReplicaUnderRecoveryRequestProto getDefaultInstance() {
|
|
|
+ return defaultInstance;
|
|
|
+ }
|
|
|
+
|
|
|
+ public UpdateReplicaUnderRecoveryRequestProto getDefaultInstanceForType() {
|
|
|
+ return defaultInstance;
|
|
|
+ }
|
|
|
+
|
|
|
+ public static final com.google.protobuf.Descriptors.Descriptor
|
|
|
+ getDescriptor() {
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.internal_static_UpdateReplicaUnderRecoveryRequestProto_descriptor;
|
|
|
+ }
|
|
|
+
|
|
|
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
|
|
+ internalGetFieldAccessorTable() {
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.internal_static_UpdateReplicaUnderRecoveryRequestProto_fieldAccessorTable;
|
|
|
+ }
|
|
|
+
|
|
|
+ private int bitField0_;
|
|
|
+ // required .ExtendedBlockProto block = 1;
|
|
|
+ public static final int BLOCK_FIELD_NUMBER = 1;
|
|
|
+ private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto block_;
|
|
|
+ public boolean hasBlock() {
|
|
|
+ return ((bitField0_ & 0x00000001) == 0x00000001);
|
|
|
+ }
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getBlock() {
|
|
|
+ return block_;
|
|
|
+ }
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder getBlockOrBuilder() {
|
|
|
+ return block_;
|
|
|
+ }
|
|
|
+
|
|
|
+ // required uint64 recoveryId = 2;
|
|
|
+ public static final int RECOVERYID_FIELD_NUMBER = 2;
|
|
|
+ private long recoveryId_;
|
|
|
+ public boolean hasRecoveryId() {
|
|
|
+ return ((bitField0_ & 0x00000002) == 0x00000002);
|
|
|
+ }
|
|
|
+ public long getRecoveryId() {
|
|
|
+ return recoveryId_;
|
|
|
+ }
|
|
|
+
|
|
|
+ // required uint64 newLength = 3;
|
|
|
+ public static final int NEWLENGTH_FIELD_NUMBER = 3;
|
|
|
+ private long newLength_;
|
|
|
+ public boolean hasNewLength() {
|
|
|
+ return ((bitField0_ & 0x00000004) == 0x00000004);
|
|
|
+ }
|
|
|
+ public long getNewLength() {
|
|
|
+ return newLength_;
|
|
|
+ }
|
|
|
+
|
|
|
+ private void initFields() {
|
|
|
+ block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance();
|
|
|
+ recoveryId_ = 0L;
|
|
|
+ newLength_ = 0L;
|
|
|
+ }
|
|
|
+ private byte memoizedIsInitialized = -1;
|
|
|
+ public final boolean isInitialized() {
|
|
|
+ byte isInitialized = memoizedIsInitialized;
|
|
|
+ if (isInitialized != -1) return isInitialized == 1;
|
|
|
+
|
|
|
+ if (!hasBlock()) {
|
|
|
+ memoizedIsInitialized = 0;
|
|
|
+ return false;
|
|
|
+ }
|
|
|
+ if (!hasRecoveryId()) {
|
|
|
+ memoizedIsInitialized = 0;
|
|
|
+ return false;
|
|
|
+ }
|
|
|
+ if (!hasNewLength()) {
|
|
|
+ memoizedIsInitialized = 0;
|
|
|
+ return false;
|
|
|
+ }
|
|
|
+ if (!getBlock().isInitialized()) {
|
|
|
+ memoizedIsInitialized = 0;
|
|
|
+ return false;
|
|
|
+ }
|
|
|
+ memoizedIsInitialized = 1;
|
|
|
+ return true;
|
|
|
+ }
|
|
|
+
|
|
|
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
|
|
|
+ throws java.io.IOException {
|
|
|
+ getSerializedSize();
|
|
|
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
+ output.writeMessage(1, block_);
|
|
|
+ }
|
|
|
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
|
|
|
+ output.writeUInt64(2, recoveryId_);
|
|
|
+ }
|
|
|
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
|
|
|
+ output.writeUInt64(3, newLength_);
|
|
|
+ }
|
|
|
+ getUnknownFields().writeTo(output);
|
|
|
+ }
|
|
|
+
|
|
|
+ private int memoizedSerializedSize = -1;
|
|
|
+ public int getSerializedSize() {
|
|
|
+ int size = memoizedSerializedSize;
|
|
|
+ if (size != -1) return size;
|
|
|
+
|
|
|
+ size = 0;
|
|
|
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
+ size += com.google.protobuf.CodedOutputStream
|
|
|
+ .computeMessageSize(1, block_);
|
|
|
+ }
|
|
|
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
|
|
|
+ size += com.google.protobuf.CodedOutputStream
|
|
|
+ .computeUInt64Size(2, recoveryId_);
|
|
|
+ }
|
|
|
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
|
|
|
+ size += com.google.protobuf.CodedOutputStream
|
|
|
+ .computeUInt64Size(3, newLength_);
|
|
|
+ }
|
|
|
+ size += getUnknownFields().getSerializedSize();
|
|
|
+ memoizedSerializedSize = size;
|
|
|
+ return size;
|
|
|
+ }
|
|
|
+
|
|
|
+ private static final long serialVersionUID = 0L;
|
|
|
+ @java.lang.Override
|
|
|
+ protected java.lang.Object writeReplace()
|
|
|
+ throws java.io.ObjectStreamException {
|
|
|
+ return super.writeReplace();
|
|
|
+ }
|
|
|
+
|
|
|
+ @java.lang.Override
|
|
|
+ public boolean equals(final java.lang.Object obj) {
|
|
|
+ if (obj == this) {
|
|
|
+ return true;
|
|
|
+ }
|
|
|
+ if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryRequestProto)) {
|
|
|
+ return super.equals(obj);
|
|
|
+ }
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryRequestProto) obj;
|
|
|
+
|
|
|
+ boolean result = true;
|
|
|
+ result = result && (hasBlock() == other.hasBlock());
|
|
|
+ if (hasBlock()) {
|
|
|
+ result = result && getBlock()
|
|
|
+ .equals(other.getBlock());
|
|
|
+ }
|
|
|
+ result = result && (hasRecoveryId() == other.hasRecoveryId());
|
|
|
+ if (hasRecoveryId()) {
|
|
|
+ result = result && (getRecoveryId()
|
|
|
+ == other.getRecoveryId());
|
|
|
+ }
|
|
|
+ result = result && (hasNewLength() == other.hasNewLength());
|
|
|
+ if (hasNewLength()) {
|
|
|
+ result = result && (getNewLength()
|
|
|
+ == other.getNewLength());
|
|
|
+ }
|
|
|
+ result = result &&
|
|
|
+ getUnknownFields().equals(other.getUnknownFields());
|
|
|
+ return result;
|
|
|
+ }
|
|
|
+
|
|
|
+ @java.lang.Override
|
|
|
+ public int hashCode() {
|
|
|
+ int hash = 41;
|
|
|
+ hash = (19 * hash) + getDescriptorForType().hashCode();
|
|
|
+ if (hasBlock()) {
|
|
|
+ hash = (37 * hash) + BLOCK_FIELD_NUMBER;
|
|
|
+ hash = (53 * hash) + getBlock().hashCode();
|
|
|
+ }
|
|
|
+ if (hasRecoveryId()) {
|
|
|
+ hash = (37 * hash) + RECOVERYID_FIELD_NUMBER;
|
|
|
+ hash = (53 * hash) + hashLong(getRecoveryId());
|
|
|
+ }
|
|
|
+ if (hasNewLength()) {
|
|
|
+ hash = (37 * hash) + NEWLENGTH_FIELD_NUMBER;
|
|
|
+ hash = (53 * hash) + hashLong(getNewLength());
|
|
|
+ }
|
|
|
+ hash = (29 * hash) + getUnknownFields().hashCode();
|
|
|
+ return hash;
|
|
|
+ }
|
|
|
+
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryRequestProto parseFrom(
|
|
|
+ com.google.protobuf.ByteString data)
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
+ return newBuilder().mergeFrom(data).buildParsed();
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryRequestProto parseFrom(
|
|
|
+ com.google.protobuf.ByteString data,
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
+ return newBuilder().mergeFrom(data, extensionRegistry)
|
|
|
+ .buildParsed();
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryRequestProto parseFrom(byte[] data)
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
+ return newBuilder().mergeFrom(data).buildParsed();
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryRequestProto parseFrom(
|
|
|
+ byte[] data,
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
+ return newBuilder().mergeFrom(data, extensionRegistry)
|
|
|
+ .buildParsed();
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryRequestProto parseFrom(java.io.InputStream input)
|
|
|
+ throws java.io.IOException {
|
|
|
+ return newBuilder().mergeFrom(input).buildParsed();
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryRequestProto parseFrom(
|
|
|
+ java.io.InputStream input,
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
+ throws java.io.IOException {
|
|
|
+ return newBuilder().mergeFrom(input, extensionRegistry)
|
|
|
+ .buildParsed();
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryRequestProto parseDelimitedFrom(java.io.InputStream input)
|
|
|
+ throws java.io.IOException {
|
|
|
+ Builder builder = newBuilder();
|
|
|
+ if (builder.mergeDelimitedFrom(input)) {
|
|
|
+ return builder.buildParsed();
|
|
|
+ } else {
|
|
|
+ return null;
|
|
|
+ }
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryRequestProto parseDelimitedFrom(
|
|
|
+ java.io.InputStream input,
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
+ throws java.io.IOException {
|
|
|
+ Builder builder = newBuilder();
|
|
|
+ if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
|
|
|
+ return builder.buildParsed();
|
|
|
+ } else {
|
|
|
+ return null;
|
|
|
+ }
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryRequestProto parseFrom(
|
|
|
+ com.google.protobuf.CodedInputStream input)
|
|
|
+ throws java.io.IOException {
|
|
|
+ return newBuilder().mergeFrom(input).buildParsed();
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryRequestProto parseFrom(
|
|
|
+ com.google.protobuf.CodedInputStream input,
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
+ throws java.io.IOException {
|
|
|
+ return newBuilder().mergeFrom(input, extensionRegistry)
|
|
|
+ .buildParsed();
|
|
|
+ }
|
|
|
+
|
|
|
+ public static Builder newBuilder() { return Builder.create(); }
|
|
|
+ public Builder newBuilderForType() { return newBuilder(); }
|
|
|
+ public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryRequestProto prototype) {
|
|
|
+ return newBuilder().mergeFrom(prototype);
|
|
|
+ }
|
|
|
+ public Builder toBuilder() { return newBuilder(this); }
|
|
|
+
|
|
|
+ @java.lang.Override
|
|
|
+ protected Builder newBuilderForType(
|
|
|
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
|
|
|
+ Builder builder = new Builder(parent);
|
|
|
+ return builder;
|
|
|
+ }
|
|
|
+ public static final class Builder extends
|
|
|
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
|
|
|
+ implements org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryRequestProtoOrBuilder {
|
|
|
+ public static final com.google.protobuf.Descriptors.Descriptor
|
|
|
+ getDescriptor() {
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.internal_static_UpdateReplicaUnderRecoveryRequestProto_descriptor;
|
|
|
+ }
|
|
|
+
|
|
|
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
|
|
+ internalGetFieldAccessorTable() {
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.internal_static_UpdateReplicaUnderRecoveryRequestProto_fieldAccessorTable;
|
|
|
+ }
|
|
|
+
+ // Construct using org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryRequestProto.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ getBlockFieldBuilder();
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ if (blockBuilder_ == null) {
+ block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance();
+ } else {
+ blockBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000001);
+ recoveryId_ = 0L;
+ bitField0_ = (bitField0_ & ~0x00000002);
+ newLength_ = 0L;
+ bitField0_ = (bitField0_ & ~0x00000004);
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryRequestProto.getDescriptor();
+ }
+
+ public org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryRequestProto getDefaultInstanceForType() {
+ return org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryRequestProto.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryRequestProto build() {
+ org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryRequestProto result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ private org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryRequestProto buildParsed()
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryRequestProto result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(
+ result).asInvalidProtocolBufferException();
+ }
+ return result;
+ }
+
+ public org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryRequestProto buildPartial() {
+ org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryRequestProto(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ if (blockBuilder_ == null) {
+ result.block_ = block_;
+ } else {
+ result.block_ = blockBuilder_.build();
+ }
+ if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+ to_bitField0_ |= 0x00000002;
+ }
+ result.recoveryId_ = recoveryId_;
+ if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
+ to_bitField0_ |= 0x00000004;
+ }
+ result.newLength_ = newLength_;
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryRequestProto) {
+ return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryRequestProto)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryRequestProto other) {
+ if (other == org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryRequestProto.getDefaultInstance()) return this;
+ if (other.hasBlock()) {
+ mergeBlock(other.getBlock());
+ }
+ if (other.hasRecoveryId()) {
+ setRecoveryId(other.getRecoveryId());
+ }
+ if (other.hasNewLength()) {
+ setNewLength(other.getNewLength());
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ if (!hasBlock()) {
+
+ return false;
+ }
+ if (!hasRecoveryId()) {
+
+ return false;
+ }
+ if (!hasNewLength()) {
+
+ return false;
+ }
+ if (!getBlock().isInitialized()) {
+
+ return false;
+ }
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder(
+ this.getUnknownFields());
+ while (true) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ this.setUnknownFields(unknownFields.build());
+ onChanged();
+ return this;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ this.setUnknownFields(unknownFields.build());
+ onChanged();
+ return this;
+ }
+ break;
+ }
+ case 10: {
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.newBuilder();
+ if (hasBlock()) {
+ subBuilder.mergeFrom(getBlock());
+ }
+ input.readMessage(subBuilder, extensionRegistry);
+ setBlock(subBuilder.buildPartial());
+ break;
+ }
+ case 16: {
+ bitField0_ |= 0x00000002;
+ recoveryId_ = input.readUInt64();
+ break;
+ }
+ case 24: {
+ bitField0_ |= 0x00000004;
+ newLength_ = input.readUInt64();
+ break;
+ }
+ }
+ }
+ }
+
+ private int bitField0_;
+
+ // required .ExtendedBlockProto block = 1;
+ private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance();
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder> blockBuilder_;
+ public boolean hasBlock() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getBlock() {
+ if (blockBuilder_ == null) {
+ return block_;
+ } else {
+ return blockBuilder_.getMessage();
+ }
+ }
+ public Builder setBlock(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto value) {
+ if (blockBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ block_ = value;
+ onChanged();
+ } else {
+ blockBuilder_.setMessage(value);
+ }
+ bitField0_ |= 0x00000001;
+ return this;
+ }
+ public Builder setBlock(
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder builderForValue) {
+ if (blockBuilder_ == null) {
+ block_ = builderForValue.build();
+ onChanged();
+ } else {
+ blockBuilder_.setMessage(builderForValue.build());
+ }
+ bitField0_ |= 0x00000001;
+ return this;
+ }
+ public Builder mergeBlock(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto value) {
+ if (blockBuilder_ == null) {
+ if (((bitField0_ & 0x00000001) == 0x00000001) &&
+ block_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance()) {
+ block_ =
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.newBuilder(block_).mergeFrom(value).buildPartial();
+ } else {
+ block_ = value;
+ }
+ onChanged();
+ } else {
+ blockBuilder_.mergeFrom(value);
+ }
+ bitField0_ |= 0x00000001;
+ return this;
+ }
+ public Builder clearBlock() {
+ if (blockBuilder_ == null) {
+ block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance();
+ onChanged();
+ } else {
+ blockBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000001);
+ return this;
+ }
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder getBlockBuilder() {
+ bitField0_ |= 0x00000001;
+ onChanged();
+ return getBlockFieldBuilder().getBuilder();
+ }
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder getBlockOrBuilder() {
+ if (blockBuilder_ != null) {
+ return blockBuilder_.getMessageOrBuilder();
+ } else {
+ return block_;
+ }
+ }
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder>
+ getBlockFieldBuilder() {
+ if (blockBuilder_ == null) {
+ blockBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder>(
+ block_,
+ getParentForChildren(),
+ isClean());
+ block_ = null;
+ }
+ return blockBuilder_;
+ }
+
+ // required uint64 recoveryId = 2;
+ private long recoveryId_ ;
+ public boolean hasRecoveryId() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ public long getRecoveryId() {
+ return recoveryId_;
+ }
+ public Builder setRecoveryId(long value) {
+ bitField0_ |= 0x00000002;
+ recoveryId_ = value;
+ onChanged();
+ return this;
+ }
+ public Builder clearRecoveryId() {
+ bitField0_ = (bitField0_ & ~0x00000002);
+ recoveryId_ = 0L;
+ onChanged();
+ return this;
+ }
+
+ // required uint64 newLength = 3;
+ private long newLength_ ;
+ public boolean hasNewLength() {
+ return ((bitField0_ & 0x00000004) == 0x00000004);
+ }
+ public long getNewLength() {
+ return newLength_;
+ }
+ public Builder setNewLength(long value) {
+ bitField0_ |= 0x00000004;
+ newLength_ = value;
+ onChanged();
+ return this;
+ }
+ public Builder clearNewLength() {
+ bitField0_ = (bitField0_ & ~0x00000004);
+ newLength_ = 0L;
+ onChanged();
+ return this;
+ }
+
+ // @@protoc_insertion_point(builder_scope:UpdateReplicaUnderRecoveryRequestProto)
+ }
+
+ static {
+ defaultInstance = new UpdateReplicaUnderRecoveryRequestProto(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:UpdateReplicaUnderRecoveryRequestProto)
+ }
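// ---- Editor's sketch (not generated code): building and round-tripping the request. ----
// A minimal illustration of the builder API defined above. It assumes
// HdfsProtos.ExtendedBlockProto exposes poolId/blockId/generationStamp setters, as in
// the hdfs.proto of the same vintage; all field values below are made up.
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto;
import org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryRequestProto;

class UpdateRequestSketch {
  static UpdateReplicaUnderRecoveryRequestProto roundTrip() throws Exception {
    // block, recoveryId and newLength are all required; build() throws
    // an UninitializedMessageException if any of them is unset.
    UpdateReplicaUnderRecoveryRequestProto request =
        UpdateReplicaUnderRecoveryRequestProto.newBuilder()
            .setBlock(ExtendedBlockProto.newBuilder()
                .setPoolId("BP-0000000000-127.0.0.1-0") // illustrative value
                .setBlockId(42L)
                .setGenerationStamp(7L)
                .build())
            .setRecoveryId(1001L)
            .setNewLength(4096L)
            .build();
    // Round-trip through the wire format using the generated parse entry points.
    byte[] wire = request.toByteArray();
    return UpdateReplicaUnderRecoveryRequestProto.parseFrom(wire);
  }
}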
+
+ public interface UpdateReplicaUnderRecoveryResponseProtoOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // required .ExtendedBlockProto block = 1;
+ boolean hasBlock();
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getBlock();
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder getBlockOrBuilder();
+ }
+ public static final class UpdateReplicaUnderRecoveryResponseProto extends
+ com.google.protobuf.GeneratedMessage
+ implements UpdateReplicaUnderRecoveryResponseProtoOrBuilder {
+ // Use UpdateReplicaUnderRecoveryResponseProto.newBuilder() to construct.
+ private UpdateReplicaUnderRecoveryResponseProto(Builder builder) {
+ super(builder);
+ }
+ private UpdateReplicaUnderRecoveryResponseProto(boolean noInit) {}
+
+ private static final UpdateReplicaUnderRecoveryResponseProto defaultInstance;
+ public static UpdateReplicaUnderRecoveryResponseProto getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public UpdateReplicaUnderRecoveryResponseProto getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.internal_static_UpdateReplicaUnderRecoveryResponseProto_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.internal_static_UpdateReplicaUnderRecoveryResponseProto_fieldAccessorTable;
+ }
+
+ private int bitField0_;
+ // required .ExtendedBlockProto block = 1;
+ public static final int BLOCK_FIELD_NUMBER = 1;
+ private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto block_;
+ public boolean hasBlock() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getBlock() {
+ return block_;
+ }
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder getBlockOrBuilder() {
+ return block_;
+ }
+
+ private void initFields() {
+ block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance();
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ if (!hasBlock()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!getBlock().isInitialized()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeMessage(1, block_);
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(1, block_);
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryResponseProto)) {
+ return super.equals(obj);
+ }
+ org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryResponseProto) obj;
+
+ boolean result = true;
+ result = result && (hasBlock() == other.hasBlock());
+ if (hasBlock()) {
+ result = result && getBlock()
+ .equals(other.getBlock());
+ }
+ result = result &&
+ getUnknownFields().equals(other.getUnknownFields());
+ return result;
+ }
+
+ @java.lang.Override
+ public int hashCode() {
+ int hash = 41;
+ hash = (19 * hash) + getDescriptorForType().hashCode();
+ if (hasBlock()) {
+ hash = (37 * hash) + BLOCK_FIELD_NUMBER;
+ hash = (53 * hash) + getBlock().hashCode();
+ }
+ hash = (29 * hash) + getUnknownFields().hashCode();
+ return hash;
+ }
+
+ public static org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryResponseProto parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data).buildParsed();
+ }
+ public static org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryResponseProto parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryResponseProto parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data).buildParsed();
+ }
+ public static org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryResponseProto parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryResponseProto parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryResponseProto parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryResponseProto parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ Builder builder = newBuilder();
+ if (builder.mergeDelimitedFrom(input)) {
+ return builder.buildParsed();
+ } else {
+ return null;
+ }
+ }
+ public static org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryResponseProto parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ Builder builder = newBuilder();
+ if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
+ return builder.buildParsed();
+ } else {
+ return null;
+ }
+ }
+ public static org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryResponseProto parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryResponseProto parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryResponseProto prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
+ implements org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryResponseProtoOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.internal_static_UpdateReplicaUnderRecoveryResponseProto_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.internal_static_UpdateReplicaUnderRecoveryResponseProto_fieldAccessorTable;
+ }
+
+ // Construct using org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryResponseProto.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ getBlockFieldBuilder();
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ if (blockBuilder_ == null) {
+ block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance();
+ } else {
+ blockBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000001);
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryResponseProto.getDescriptor();
+ }
+
+ public org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryResponseProto getDefaultInstanceForType() {
+ return org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryResponseProto.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryResponseProto build() {
+ org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryResponseProto result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ private org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryResponseProto buildParsed()
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryResponseProto result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(
+ result).asInvalidProtocolBufferException();
+ }
+ return result;
+ }
+
+ public org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryResponseProto buildPartial() {
+ org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryResponseProto(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ if (blockBuilder_ == null) {
+ result.block_ = block_;
+ } else {
+ result.block_ = blockBuilder_.build();
+ }
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryResponseProto) {
+ return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryResponseProto)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryResponseProto other) {
+ if (other == org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryResponseProto.getDefaultInstance()) return this;
+ if (other.hasBlock()) {
+ mergeBlock(other.getBlock());
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ if (!hasBlock()) {
+
+ return false;
+ }
+ if (!getBlock().isInitialized()) {
+
+ return false;
+ }
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder(
+ this.getUnknownFields());
+ while (true) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ this.setUnknownFields(unknownFields.build());
+ onChanged();
+ return this;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ this.setUnknownFields(unknownFields.build());
+ onChanged();
+ return this;
+ }
+ break;
+ }
+ case 10: {
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.newBuilder();
+ if (hasBlock()) {
+ subBuilder.mergeFrom(getBlock());
+ }
+ input.readMessage(subBuilder, extensionRegistry);
+ setBlock(subBuilder.buildPartial());
+ break;
+ }
+ }
+ }
+ }
+
+ private int bitField0_;
+
+ // required .ExtendedBlockProto block = 1;
+ private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance();
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder> blockBuilder_;
+ public boolean hasBlock() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getBlock() {
+ if (blockBuilder_ == null) {
+ return block_;
+ } else {
+ return blockBuilder_.getMessage();
+ }
+ }
+ public Builder setBlock(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto value) {
+ if (blockBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ block_ = value;
+ onChanged();
+ } else {
+ blockBuilder_.setMessage(value);
+ }
+ bitField0_ |= 0x00000001;
+ return this;
+ }
+ public Builder setBlock(
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder builderForValue) {
+ if (blockBuilder_ == null) {
+ block_ = builderForValue.build();
+ onChanged();
+ } else {
+ blockBuilder_.setMessage(builderForValue.build());
+ }
+ bitField0_ |= 0x00000001;
+ return this;
+ }
+ public Builder mergeBlock(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto value) {
+ if (blockBuilder_ == null) {
+ if (((bitField0_ & 0x00000001) == 0x00000001) &&
+ block_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance()) {
+ block_ =
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.newBuilder(block_).mergeFrom(value).buildPartial();
+ } else {
+ block_ = value;
+ }
+ onChanged();
+ } else {
+ blockBuilder_.mergeFrom(value);
+ }
+ bitField0_ |= 0x00000001;
+ return this;
+ }
+ public Builder clearBlock() {
+ if (blockBuilder_ == null) {
+ block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance();
+ onChanged();
+ } else {
+ blockBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000001);
+ return this;
+ }
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder getBlockBuilder() {
+ bitField0_ |= 0x00000001;
+ onChanged();
+ return getBlockFieldBuilder().getBuilder();
+ }
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder getBlockOrBuilder() {
+ if (blockBuilder_ != null) {
+ return blockBuilder_.getMessageOrBuilder();
+ } else {
+ return block_;
+ }
+ }
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder>
+ getBlockFieldBuilder() {
+ if (blockBuilder_ == null) {
+ blockBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder>(
+ block_,
+ getParentForChildren(),
+ isClean());
+ block_ = null;
+ }
+ return blockBuilder_;
+ }
+
+ // @@protoc_insertion_point(builder_scope:UpdateReplicaUnderRecoveryResponseProto)
+ }
+
+ static {
+ defaultInstance = new UpdateReplicaUnderRecoveryResponseProto(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:UpdateReplicaUnderRecoveryResponseProto)
+ }
+
+ public static abstract class InterDatanodeProtocolService
+ implements com.google.protobuf.Service {
+ protected InterDatanodeProtocolService() {}
+
+ public interface Interface {
+ public abstract void initReplicaRecovery(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryRequestProto request,
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto> done);
+
+ public abstract void updateReplicaUnderRecovery(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryRequestProto request,
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryResponseProto> done);
+
+ }
+
+ public static com.google.protobuf.Service newReflectiveService(
+ final Interface impl) {
+ return new InterDatanodeProtocolService() {
+ @java.lang.Override
+ public void initReplicaRecovery(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryRequestProto request,
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto> done) {
+ impl.initReplicaRecovery(controller, request, done);
+ }
+
+ @java.lang.Override
+ public void updateReplicaUnderRecovery(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryRequestProto request,
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryResponseProto> done) {
+ impl.updateReplicaUnderRecovery(controller, request, done);
+ }
+
+ };
+ }
+
+ public static com.google.protobuf.BlockingService
+ newReflectiveBlockingService(final BlockingInterface impl) {
+ return new com.google.protobuf.BlockingService() {
+ public final com.google.protobuf.Descriptors.ServiceDescriptor
+ getDescriptorForType() {
+ return getDescriptor();
+ }
+
+ public final com.google.protobuf.Message callBlockingMethod(
+ com.google.protobuf.Descriptors.MethodDescriptor method,
+ com.google.protobuf.RpcController controller,
+ com.google.protobuf.Message request)
+ throws com.google.protobuf.ServiceException {
+ if (method.getService() != getDescriptor()) {
+ throw new java.lang.IllegalArgumentException(
+ "Service.callBlockingMethod() given method descriptor for " +
+ "wrong service type.");
+ }
+ switch(method.getIndex()) {
+ case 0:
+ return impl.initReplicaRecovery(controller, (org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryRequestProto)request);
+ case 1:
+ return impl.updateReplicaUnderRecovery(controller, (org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryRequestProto)request);
+ default:
+ throw new java.lang.AssertionError("Can't get here.");
+ }
+ }
+
+ public final com.google.protobuf.Message
+ getRequestPrototype(
+ com.google.protobuf.Descriptors.MethodDescriptor method) {
+ if (method.getService() != getDescriptor()) {
+ throw new java.lang.IllegalArgumentException(
+ "Service.getRequestPrototype() given method " +
+ "descriptor for wrong service type.");
+ }
+ switch(method.getIndex()) {
+ case 0:
+ return org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryRequestProto.getDefaultInstance();
+ case 1:
+ return org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryRequestProto.getDefaultInstance();
+ default:
+ throw new java.lang.AssertionError("Can't get here.");
+ }
+ }
+
+ public final com.google.protobuf.Message
+ getResponsePrototype(
+ com.google.protobuf.Descriptors.MethodDescriptor method) {
+ if (method.getService() != getDescriptor()) {
+ throw new java.lang.IllegalArgumentException(
+ "Service.getResponsePrototype() given method " +
+ "descriptor for wrong service type.");
+ }
+ switch(method.getIndex()) {
+ case 0:
+ return org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto.getDefaultInstance();
+ case 1:
+ return org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryResponseProto.getDefaultInstance();
+ default:
+ throw new java.lang.AssertionError("Can't get here.");
+ }
+ }
+
+ };
+ }
+
+ public abstract void initReplicaRecovery(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryRequestProto request,
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto> done);
+
+ public abstract void updateReplicaUnderRecovery(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryRequestProto request,
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryResponseProto> done);
+
+ public static final
+ com.google.protobuf.Descriptors.ServiceDescriptor
+ getDescriptor() {
+ return org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.getDescriptor().getServices().get(0);
+ }
+ public final com.google.protobuf.Descriptors.ServiceDescriptor
+ getDescriptorForType() {
+ return getDescriptor();
+ }
+
+ public final void callMethod(
+ com.google.protobuf.Descriptors.MethodDescriptor method,
+ com.google.protobuf.RpcController controller,
+ com.google.protobuf.Message request,
+ com.google.protobuf.RpcCallback<
+ com.google.protobuf.Message> done) {
+ if (method.getService() != getDescriptor()) {
+ throw new java.lang.IllegalArgumentException(
+ "Service.callMethod() given method descriptor for wrong " +
+ "service type.");
+ }
+ switch(method.getIndex()) {
+ case 0:
+ this.initReplicaRecovery(controller, (org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryRequestProto)request,
+ com.google.protobuf.RpcUtil.<org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto>specializeCallback(
+ done));
+ return;
+ case 1:
+ this.updateReplicaUnderRecovery(controller, (org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryRequestProto)request,
+ com.google.protobuf.RpcUtil.<org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryResponseProto>specializeCallback(
+ done));
+ return;
+ default:
+ throw new java.lang.AssertionError("Can't get here.");
+ }
+ }
+
+ public final com.google.protobuf.Message
+ getRequestPrototype(
+ com.google.protobuf.Descriptors.MethodDescriptor method) {
+ if (method.getService() != getDescriptor()) {
+ throw new java.lang.IllegalArgumentException(
+ "Service.getRequestPrototype() given method " +
+ "descriptor for wrong service type.");
+ }
+ switch(method.getIndex()) {
+ case 0:
+ return org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryRequestProto.getDefaultInstance();
+ case 1:
+ return org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryRequestProto.getDefaultInstance();
+ default:
+ throw new java.lang.AssertionError("Can't get here.");
+ }
+ }
+
+ public final com.google.protobuf.Message
+ getResponsePrototype(
+ com.google.protobuf.Descriptors.MethodDescriptor method) {
+ if (method.getService() != getDescriptor()) {
+ throw new java.lang.IllegalArgumentException(
+ "Service.getResponsePrototype() given method " +
+ "descriptor for wrong service type.");
+ }
+ switch(method.getIndex()) {
+ case 0:
+ return org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto.getDefaultInstance();
+ case 1:
+ return org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryResponseProto.getDefaultInstance();
+ default:
+ throw new java.lang.AssertionError("Can't get here.");
+ }
+ }
+
+ public static Stub newStub(
+ com.google.protobuf.RpcChannel channel) {
+ return new Stub(channel);
+ }
+
+ public static final class Stub extends org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InterDatanodeProtocolService implements Interface {
+ private Stub(com.google.protobuf.RpcChannel channel) {
+ this.channel = channel;
+ }
+
+ private final com.google.protobuf.RpcChannel channel;
+
+ public com.google.protobuf.RpcChannel getChannel() {
+ return channel;
+ }
+
+ public void initReplicaRecovery(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryRequestProto request,
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto> done) {
+ channel.callMethod(
+ getDescriptor().getMethods().get(0),
+ controller,
+ request,
+ org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto.getDefaultInstance(),
+ com.google.protobuf.RpcUtil.generalizeCallback(
+ done,
+ org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto.class,
+ org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto.getDefaultInstance()));
+ }
+
+ public void updateReplicaUnderRecovery(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryRequestProto request,
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryResponseProto> done) {
+ channel.callMethod(
+ getDescriptor().getMethods().get(1),
+ controller,
+ request,
+ org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryResponseProto.getDefaultInstance(),
+ com.google.protobuf.RpcUtil.generalizeCallback(
+ done,
+ org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryResponseProto.class,
+ org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryResponseProto.getDefaultInstance()));
+ }
+ }
+
+ public static BlockingInterface newBlockingStub(
+ com.google.protobuf.BlockingRpcChannel channel) {
+ return new BlockingStub(channel);
+ }
+
+ public interface BlockingInterface {
+ public org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto initReplicaRecovery(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryRequestProto request)
+ throws com.google.protobuf.ServiceException;
+
+ public org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryResponseProto updateReplicaUnderRecovery(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryRequestProto request)
+ throws com.google.protobuf.ServiceException;
+ }
+
+ private static final class BlockingStub implements BlockingInterface {
+ private BlockingStub(com.google.protobuf.BlockingRpcChannel channel) {
+ this.channel = channel;
+ }
+
+ private final com.google.protobuf.BlockingRpcChannel channel;
+
+ public org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto initReplicaRecovery(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryRequestProto request)
+ throws com.google.protobuf.ServiceException {
+ return (org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto) channel.callBlockingMethod(
+ getDescriptor().getMethods().get(0),
+ controller,
+ request,
+ org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto.getDefaultInstance());
+ }
+
+
+ public org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryResponseProto updateReplicaUnderRecovery(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryRequestProto request)
+ throws com.google.protobuf.ServiceException {
+ return (org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryResponseProto) channel.callBlockingMethod(
+ getDescriptor().getMethods().get(1),
+ controller,
+ request,
+ org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryResponseProto.getDefaultInstance());
+ }
+
+ }
+ }
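// ---- Editor's sketch (not generated code): client-side blocking call. ----
// A hedged illustration of the stub wiring above. The BlockingRpcChannel is assumed to
// come from the surrounding RPC layer (in Hadoop, the protobuf RPC engine provides one);
// passing a null controller mirrors how the generated stub merely forwards it.
import com.google.protobuf.BlockingRpcChannel;
import com.google.protobuf.ServiceException;
import org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InterDatanodeProtocolService;
import org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryResponseProto;

class BlockingCallSketch {
  static UpdateReplicaUnderRecoveryResponseProto call(
      BlockingRpcChannel channel,
      UpdateReplicaUnderRecoveryRequestProto request) throws ServiceException {
    // newBlockingStub() wraps the channel; the stub dispatches through
    // channel.callBlockingMethod() with method index 1, as BlockingStub shows above.
    InterDatanodeProtocolService.BlockingInterface proxy =
        InterDatanodeProtocolService.newBlockingStub(channel);
    return proxy.updateReplicaUnderRecovery(null, request);
  }
}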
+
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_InitReplicaRecoveryRequestProto_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_InitReplicaRecoveryRequestProto_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_InitReplicaRecoveryResponseProto_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_InitReplicaRecoveryResponseProto_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_UpdateReplicaUnderRecoveryRequestProto_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_UpdateReplicaUnderRecoveryRequestProto_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_UpdateReplicaUnderRecoveryResponseProto_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_UpdateReplicaUnderRecoveryResponseProto_fieldAccessorTable;
+
+ public static com.google.protobuf.Descriptors.FileDescriptor
+ getDescriptor() {
+ return descriptor;
+ }
+ private static com.google.protobuf.Descriptors.FileDescriptor
+ descriptor;
+ static {
+ java.lang.String[] descriptorData = {
+ "\n\033InterDatanodeProtocol.proto\032\nhdfs.prot" +
+ "o\"G\n\037InitReplicaRecoveryRequestProto\022$\n\005" +
+ "block\030\001 \002(\0132\025.RecoveringBlockProto\"\\\n In" +
+ "itReplicaRecoveryResponseProto\022\034\n\005state\030" +
+ "\001 \002(\0162\r.ReplicaState\022\032\n\005block\030\002 \002(\0132\013.Bl" +
+ "ockProto\"s\n&UpdateReplicaUnderRecoveryRe" +
+ "questProto\022\"\n\005block\030\001 \002(\0132\023.ExtendedBloc" +
+ "kProto\022\022\n\nrecoveryId\030\002 \002(\004\022\021\n\tnewLength\030" +
+ "\003 \002(\004\"M\n\'UpdateReplicaUnderRecoveryRespo" +
+ "nseProto\022\"\n\005block\030\001 \002(\0132\023.ExtendedBlockP",
+ "roto2\353\001\n\034InterDatanodeProtocolService\022Z\n" +
+ "\023initReplicaRecovery\022 .InitReplicaRecove" +
+ "ryRequestProto\032!.InitReplicaRecoveryResp" +
+ "onseProto\022o\n\032updateReplicaUnderRecovery\022" +
+ "\'.UpdateReplicaUnderRecoveryRequestProto" +
+ "\032(.UpdateReplicaUnderRecoveryResponsePro" +
+ "toBJ\n%org.apache.hadoop.hdfs.protocol.pr" +
+ "otoB\033InterDatanodeProtocolProtos\210\001\001\240\001\001"
+ };
+ com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
+ new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
+ public com.google.protobuf.ExtensionRegistry assignDescriptors(
+ com.google.protobuf.Descriptors.FileDescriptor root) {
+ descriptor = root;
+ internal_static_InitReplicaRecoveryRequestProto_descriptor =
+ getDescriptor().getMessageTypes().get(0);
+ internal_static_InitReplicaRecoveryRequestProto_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_InitReplicaRecoveryRequestProto_descriptor,
+ new java.lang.String[] { "Block", },
+ org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryRequestProto.class,
+ org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryRequestProto.Builder.class);
+ internal_static_InitReplicaRecoveryResponseProto_descriptor =
+ getDescriptor().getMessageTypes().get(1);
+ internal_static_InitReplicaRecoveryResponseProto_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_InitReplicaRecoveryResponseProto_descriptor,
+ new java.lang.String[] { "State", "Block", },
+ org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto.class,
+ org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto.Builder.class);
+ internal_static_UpdateReplicaUnderRecoveryRequestProto_descriptor =
+ getDescriptor().getMessageTypes().get(2);
+ internal_static_UpdateReplicaUnderRecoveryRequestProto_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_UpdateReplicaUnderRecoveryRequestProto_descriptor,
+ new java.lang.String[] { "Block", "RecoveryId", "NewLength", },
+ org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryRequestProto.class,
+ org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryRequestProto.Builder.class);
+ internal_static_UpdateReplicaUnderRecoveryResponseProto_descriptor =
+ getDescriptor().getMessageTypes().get(3);
+ internal_static_UpdateReplicaUnderRecoveryResponseProto_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_UpdateReplicaUnderRecoveryResponseProto_descriptor,
+ new java.lang.String[] { "Block", },
+ org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryResponseProto.class,
+ org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryResponseProto.Builder.class);
+ return null;
+ }
+ };
+ com.google.protobuf.Descriptors.FileDescriptor
+ .internalBuildGeneratedFileFrom(descriptorData,
+ new com.google.protobuf.Descriptors.FileDescriptor[] {
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.getDescriptor(),
+ }, assigner);
+ }
+
+ // @@protoc_insertion_point(outer_class_scope)
+}
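// ---- Editor's sketch (not generated code): server-side wiring. ----
// A minimal server-side skeleton under the same assumptions: an implementation of
// BlockingInterface is wrapped by newReflectiveBlockingService(), which dispatches
// callBlockingMethod() by method index as shown earlier in the service class.
import com.google.protobuf.BlockingService;
import com.google.protobuf.RpcController;
import com.google.protobuf.ServiceException;
import org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InterDatanodeProtocolService;
import org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryResponseProto;

class ServerSideSketch implements InterDatanodeProtocolService.BlockingInterface {
  @Override
  public InitReplicaRecoveryResponseProto initReplicaRecovery(
      RpcController controller, InitReplicaRecoveryRequestProto request)
      throws ServiceException {
    throw new ServiceException("recovery logic elided in this sketch");
  }

  @Override
  public UpdateReplicaUnderRecoveryResponseProto updateReplicaUnderRecovery(
      RpcController controller, UpdateReplicaUnderRecoveryRequestProto request)
      throws ServiceException {
    throw new ServiceException("recovery logic elided in this sketch");
  }

  static BlockingService export(ServerSideSketch impl) {
    return InterDatanodeProtocolService.newReflectiveBlockingService(impl);
  }
}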