@@ -0,0 +1,4162 @@
+// Generated by the protocol buffer compiler.  DO NOT EDIT!
+// source: ClientDatanodeProtocol.proto
+
+package org.apache.hadoop.hdfs.protocol.proto;
+
+public final class ClientDatanodeProtocolProtos {
+  private ClientDatanodeProtocolProtos() {}
+  public static void registerAllExtensions(
+      com.google.protobuf.ExtensionRegistry registry) {
+  }
+  public interface GetReplicaVisibleLengthRequestProtoOrBuilder
+      extends com.google.protobuf.MessageOrBuilder {
+
+    // required .ExtendedBlockProto block = 1;
+    boolean hasBlock();
+    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getBlock();
+    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder getBlockOrBuilder();
+  }
+  public static final class GetReplicaVisibleLengthRequestProto extends
+      com.google.protobuf.GeneratedMessage
+      implements GetReplicaVisibleLengthRequestProtoOrBuilder {
+    // Use GetReplicaVisibleLengthRequestProto.newBuilder() to construct.
+    private GetReplicaVisibleLengthRequestProto(Builder builder) {
+      super(builder);
+    }
+    private GetReplicaVisibleLengthRequestProto(boolean noInit) {}
+
+    private static final GetReplicaVisibleLengthRequestProto defaultInstance;
+    public static GetReplicaVisibleLengthRequestProto getDefaultInstance() {
+      return defaultInstance;
+    }
+
+    public GetReplicaVisibleLengthRequestProto getDefaultInstanceForType() {
+      return defaultInstance;
+    }
+
+    public static final com.google.protobuf.Descriptors.Descriptor
+        getDescriptor() {
+      return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_GetReplicaVisibleLengthRequestProto_descriptor;
+    }
+
+    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+        internalGetFieldAccessorTable() {
+      return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_GetReplicaVisibleLengthRequestProto_fieldAccessorTable;
+    }
+
+    private int bitField0_;
+    // required .ExtendedBlockProto block = 1;
+    public static final int BLOCK_FIELD_NUMBER = 1;
+    private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto block_;
+    public boolean hasBlock() {
+      return ((bitField0_ & 0x00000001) == 0x00000001);
+    }
+    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getBlock() {
+      return block_;
+    }
+    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder getBlockOrBuilder() {
+      return block_;
+    }
+
+    private void initFields() {
+      block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance();
+    }
+    private byte memoizedIsInitialized = -1;
+    public final boolean isInitialized() {
+      byte isInitialized = memoizedIsInitialized;
+      if (isInitialized != -1) return isInitialized == 1;
+
+      if (!hasBlock()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      if (!getBlock().isInitialized()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      memoizedIsInitialized = 1;
+      return true;
+    }
+
+    public void writeTo(com.google.protobuf.CodedOutputStream output)
+                        throws java.io.IOException {
+      getSerializedSize();
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        output.writeMessage(1, block_);
+      }
+      getUnknownFields().writeTo(output);
+    }
+
+    private int memoizedSerializedSize = -1;
+    public int getSerializedSize() {
+      int size = memoizedSerializedSize;
+      if (size != -1) return size;
+
+      size = 0;
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeMessageSize(1, block_);
+      }
+      size += getUnknownFields().getSerializedSize();
+      memoizedSerializedSize = size;
+      return size;
+    }
+
+    private static final long serialVersionUID = 0L;
+    @java.lang.Override
+    protected java.lang.Object writeReplace()
+        throws java.io.ObjectStreamException {
+      return super.writeReplace();
+    }
+
+    @java.lang.Override
+    public boolean equals(final java.lang.Object obj) {
+      if (obj == this) {
+       return true;
+      }
+      if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto)) {
+        return super.equals(obj);
+      }
+      org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto) obj;
+
+      boolean result = true;
+      result = result && (hasBlock() == other.hasBlock());
+      if (hasBlock()) {
+        result = result && getBlock()
+            .equals(other.getBlock());
+      }
+      result = result &&
+          getUnknownFields().equals(other.getUnknownFields());
+      return result;
+    }
+
+    @java.lang.Override
+    public int hashCode() {
+      int hash = 41;
+      hash = (19 * hash) + getDescriptorForType().hashCode();
+      if (hasBlock()) {
+        hash = (37 * hash) + BLOCK_FIELD_NUMBER;
+        hash = (53 * hash) + getBlock().hashCode();
+      }
+      hash = (29 * hash) + getUnknownFields().hashCode();
+      return hash;
+    }
+
+    public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto parseFrom(
+        com.google.protobuf.ByteString data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return newBuilder().mergeFrom(data).buildParsed();
+    }
+    public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto parseFrom(
+        com.google.protobuf.ByteString data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return newBuilder().mergeFrom(data, extensionRegistry)
+               .buildParsed();
+    }
+    public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto parseFrom(byte[] data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return newBuilder().mergeFrom(data).buildParsed();
+    }
+    public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto parseFrom(
+        byte[] data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return newBuilder().mergeFrom(data, extensionRegistry)
+               .buildParsed();
+    }
+    public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto parseFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return newBuilder().mergeFrom(input).buildParsed();
+    }
+    public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto parseFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return newBuilder().mergeFrom(input, extensionRegistry)
+               .buildParsed();
+    }
+    public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto parseDelimitedFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      Builder builder = newBuilder();
+      if (builder.mergeDelimitedFrom(input)) {
+        return builder.buildParsed();
+      } else {
+        return null;
+      }
+    }
+    public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto parseDelimitedFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      Builder builder = newBuilder();
+      if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
+        return builder.buildParsed();
+      } else {
+        return null;
+      }
+    }
+    public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto parseFrom(
+        com.google.protobuf.CodedInputStream input)
+        throws java.io.IOException {
+      return newBuilder().mergeFrom(input).buildParsed();
+    }
+    public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto parseFrom(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return newBuilder().mergeFrom(input, extensionRegistry)
+               .buildParsed();
+    }
+
+    public static Builder newBuilder() { return Builder.create(); }
+    public Builder newBuilderForType() { return newBuilder(); }
+    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto prototype) {
+      return newBuilder().mergeFrom(prototype);
+    }
+    public Builder toBuilder() { return newBuilder(this); }
+
+    @java.lang.Override
+    protected Builder newBuilderForType(
+        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+      Builder builder = new Builder(parent);
+      return builder;
+    }
+    public static final class Builder extends
+        com.google.protobuf.GeneratedMessage.Builder<Builder>
+       implements org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProtoOrBuilder {
+      public static final com.google.protobuf.Descriptors.Descriptor
+          getDescriptor() {
+        return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_GetReplicaVisibleLengthRequestProto_descriptor;
+      }
+
+      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+          internalGetFieldAccessorTable() {
+        return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_GetReplicaVisibleLengthRequestProto_fieldAccessorTable;
+      }
+
+      // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto.newBuilder()
+      private Builder() {
+        maybeForceBuilderInitialization();
+      }
+
+      private Builder(BuilderParent parent) {
+        super(parent);
+        maybeForceBuilderInitialization();
+      }
+      private void maybeForceBuilderInitialization() {
+        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+          getBlockFieldBuilder();
+        }
+      }
+      private static Builder create() {
+        return new Builder();
+      }
+
+      public Builder clear() {
+        super.clear();
+        if (blockBuilder_ == null) {
+          block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance();
+        } else {
+          blockBuilder_.clear();
+        }
+        bitField0_ = (bitField0_ & ~0x00000001);
+        return this;
+      }
+
+      public Builder clone() {
+        return create().mergeFrom(buildPartial());
+      }
+
+      public com.google.protobuf.Descriptors.Descriptor
+          getDescriptorForType() {
+        return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto.getDescriptor();
+      }
+
+      public org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto getDefaultInstanceForType() {
+        return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto.getDefaultInstance();
+      }
+
+      public org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto build() {
+        org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto result = buildPartial();
+        if (!result.isInitialized()) {
+          throw newUninitializedMessageException(result);
+        }
+        return result;
+      }
+
+      private org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto buildParsed()
+          throws com.google.protobuf.InvalidProtocolBufferException {
+        org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto result = buildPartial();
+        if (!result.isInitialized()) {
+          throw newUninitializedMessageException(
+            result).asInvalidProtocolBufferException();
+        }
+        return result;
+      }
+
+      public org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto buildPartial() {
+        org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto(this);
+        int from_bitField0_ = bitField0_;
+        int to_bitField0_ = 0;
+        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+          to_bitField0_ |= 0x00000001;
+        }
+        if (blockBuilder_ == null) {
+          result.block_ = block_;
+        } else {
+          result.block_ = blockBuilder_.build();
+        }
+        result.bitField0_ = to_bitField0_;
+        onBuilt();
+        return result;
+      }
+
+      public Builder mergeFrom(com.google.protobuf.Message other) {
+        if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto) {
+          return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto)other);
+        } else {
+          super.mergeFrom(other);
+          return this;
+        }
+      }
+
+      public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto other) {
+        if (other == org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto.getDefaultInstance()) return this;
+        if (other.hasBlock()) {
+          mergeBlock(other.getBlock());
+        }
+        this.mergeUnknownFields(other.getUnknownFields());
+        return this;
+      }
+
+      public final boolean isInitialized() {
+        if (!hasBlock()) {
+
+          return false;
+        }
+        if (!getBlock().isInitialized()) {
+
+          return false;
+        }
+        return true;
+      }
+
+      public Builder mergeFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws java.io.IOException {
+        com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+          com.google.protobuf.UnknownFieldSet.newBuilder(
+            this.getUnknownFields());
+        while (true) {
+          int tag = input.readTag();
+          switch (tag) {
+            case 0:
+              this.setUnknownFields(unknownFields.build());
+              onChanged();
+              return this;
+            default: {
+              if (!parseUnknownField(input, unknownFields,
+                                     extensionRegistry, tag)) {
+                this.setUnknownFields(unknownFields.build());
+                onChanged();
+                return this;
+              }
+              break;
+            }
+            case 10: {
+              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.newBuilder();
+              if (hasBlock()) {
+                subBuilder.mergeFrom(getBlock());
+              }
+              input.readMessage(subBuilder, extensionRegistry);
+              setBlock(subBuilder.buildPartial());
+              break;
+            }
+          }
+        }
+      }
+
+      private int bitField0_;
+
+      // required .ExtendedBlockProto block = 1;
+      private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance();
+      private com.google.protobuf.SingleFieldBuilder<
+          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder> blockBuilder_;
+      public boolean hasBlock() {
+        return ((bitField0_ & 0x00000001) == 0x00000001);
+      }
+      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getBlock() {
+        if (blockBuilder_ == null) {
+          return block_;
+        } else {
+          return blockBuilder_.getMessage();
+        }
+      }
+      public Builder setBlock(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto value) {
+        if (blockBuilder_ == null) {
+          if (value == null) {
+            throw new NullPointerException();
+          }
+          block_ = value;
+          onChanged();
+        } else {
+          blockBuilder_.setMessage(value);
+        }
+        bitField0_ |= 0x00000001;
+        return this;
+      }
+      public Builder setBlock(
+          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder builderForValue) {
+        if (blockBuilder_ == null) {
+          block_ = builderForValue.build();
+          onChanged();
+        } else {
+          blockBuilder_.setMessage(builderForValue.build());
+        }
+        bitField0_ |= 0x00000001;
+        return this;
+      }
+      public Builder mergeBlock(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto value) {
+        if (blockBuilder_ == null) {
+          if (((bitField0_ & 0x00000001) == 0x00000001) &&
+              block_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance()) {
+            block_ =
+              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.newBuilder(block_).mergeFrom(value).buildPartial();
+          } else {
+            block_ = value;
+          }
+          onChanged();
+        } else {
+          blockBuilder_.mergeFrom(value);
+        }
+        bitField0_ |= 0x00000001;
+        return this;
+      }
+      public Builder clearBlock() {
+        if (blockBuilder_ == null) {
+          block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance();
+          onChanged();
+        } else {
+          blockBuilder_.clear();
+        }
+        bitField0_ = (bitField0_ & ~0x00000001);
+        return this;
+      }
+      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder getBlockBuilder() {
+        bitField0_ |= 0x00000001;
+        onChanged();
+        return getBlockFieldBuilder().getBuilder();
+      }
+      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder getBlockOrBuilder() {
+        if (blockBuilder_ != null) {
+          return blockBuilder_.getMessageOrBuilder();
+        } else {
+          return block_;
+        }
+      }
+      private com.google.protobuf.SingleFieldBuilder<
+          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder>
+          getBlockFieldBuilder() {
+        if (blockBuilder_ == null) {
+          blockBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder>(
+                  block_,
+                  getParentForChildren(),
+                  isClean());
+          block_ = null;
+        }
+        return blockBuilder_;
+      }
+
+      // @@protoc_insertion_point(builder_scope:GetReplicaVisibleLengthRequestProto)
+    }
+
+    static {
+      defaultInstance = new GetReplicaVisibleLengthRequestProto(true);
+      defaultInstance.initFields();
+    }
+
+    // @@protoc_insertion_point(class_scope:GetReplicaVisibleLengthRequestProto)
+  }
+
+  public interface GetReplicaVisibleLengthResponseProtoOrBuilder
+      extends com.google.protobuf.MessageOrBuilder {
+
+    // required uint64 length = 1;
+    boolean hasLength();
+    long getLength();
+  }
+  public static final class GetReplicaVisibleLengthResponseProto extends
+      com.google.protobuf.GeneratedMessage
+      implements GetReplicaVisibleLengthResponseProtoOrBuilder {
+    // Use GetReplicaVisibleLengthResponseProto.newBuilder() to construct.
+    private GetReplicaVisibleLengthResponseProto(Builder builder) {
+      super(builder);
+    }
+    private GetReplicaVisibleLengthResponseProto(boolean noInit) {}
+
+    private static final GetReplicaVisibleLengthResponseProto defaultInstance;
+    public static GetReplicaVisibleLengthResponseProto getDefaultInstance() {
+      return defaultInstance;
+    }
+
+    public GetReplicaVisibleLengthResponseProto getDefaultInstanceForType() {
+      return defaultInstance;
+    }
+
+    public static final com.google.protobuf.Descriptors.Descriptor
+        getDescriptor() {
+      return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_GetReplicaVisibleLengthResponseProto_descriptor;
+    }
+
+    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+        internalGetFieldAccessorTable() {
+      return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_GetReplicaVisibleLengthResponseProto_fieldAccessorTable;
+    }
+
+    private int bitField0_;
+    // required uint64 length = 1;
+    public static final int LENGTH_FIELD_NUMBER = 1;
+    private long length_;
+    public boolean hasLength() {
+      return ((bitField0_ & 0x00000001) == 0x00000001);
+    }
+    public long getLength() {
+      return length_;
+    }
+
+    private void initFields() {
+      length_ = 0L;
+    }
+    private byte memoizedIsInitialized = -1;
+    public final boolean isInitialized() {
+      byte isInitialized = memoizedIsInitialized;
+      if (isInitialized != -1) return isInitialized == 1;
+
+      if (!hasLength()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      memoizedIsInitialized = 1;
+      return true;
+    }
+
+    public void writeTo(com.google.protobuf.CodedOutputStream output)
+                        throws java.io.IOException {
+      getSerializedSize();
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        output.writeUInt64(1, length_);
+      }
+      getUnknownFields().writeTo(output);
+    }
+
+    private int memoizedSerializedSize = -1;
+    public int getSerializedSize() {
+      int size = memoizedSerializedSize;
+      if (size != -1) return size;
+
+      size = 0;
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeUInt64Size(1, length_);
+      }
+      size += getUnknownFields().getSerializedSize();
+      memoizedSerializedSize = size;
+      return size;
+    }
+
+    private static final long serialVersionUID = 0L;
+    @java.lang.Override
+    protected java.lang.Object writeReplace()
+        throws java.io.ObjectStreamException {
+      return super.writeReplace();
+    }
+
+    @java.lang.Override
+    public boolean equals(final java.lang.Object obj) {
+      if (obj == this) {
+       return true;
+      }
+      if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto)) {
+        return super.equals(obj);
+      }
+      org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto) obj;
+
+      boolean result = true;
+      result = result && (hasLength() == other.hasLength());
+      if (hasLength()) {
+        result = result && (getLength()
+            == other.getLength());
+      }
+      result = result &&
+          getUnknownFields().equals(other.getUnknownFields());
+      return result;
+    }
+
+    @java.lang.Override
+    public int hashCode() {
+      int hash = 41;
+      hash = (19 * hash) + getDescriptorForType().hashCode();
+      if (hasLength()) {
+        hash = (37 * hash) + LENGTH_FIELD_NUMBER;
+        hash = (53 * hash) + hashLong(getLength());
+      }
+      hash = (29 * hash) + getUnknownFields().hashCode();
+      return hash;
+    }
+
+    public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto parseFrom(
+        com.google.protobuf.ByteString data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return newBuilder().mergeFrom(data).buildParsed();
+    }
+    public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto parseFrom(
+        com.google.protobuf.ByteString data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return newBuilder().mergeFrom(data, extensionRegistry)
+               .buildParsed();
+    }
+    public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto parseFrom(byte[] data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return newBuilder().mergeFrom(data).buildParsed();
+    }
+    public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto parseFrom(
+        byte[] data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return newBuilder().mergeFrom(data, extensionRegistry)
+               .buildParsed();
+    }
+    public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto parseFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return newBuilder().mergeFrom(input).buildParsed();
+    }
+    public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto parseFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return newBuilder().mergeFrom(input, extensionRegistry)
+               .buildParsed();
+    }
+    public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto parseDelimitedFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      Builder builder = newBuilder();
+      if (builder.mergeDelimitedFrom(input)) {
+        return builder.buildParsed();
+      } else {
+        return null;
+      }
+    }
+    public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto parseDelimitedFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      Builder builder = newBuilder();
+      if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
+        return builder.buildParsed();
+      } else {
+        return null;
+      }
+    }
+    public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto parseFrom(
+        com.google.protobuf.CodedInputStream input)
+        throws java.io.IOException {
+      return newBuilder().mergeFrom(input).buildParsed();
+    }
+    public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto parseFrom(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return newBuilder().mergeFrom(input, extensionRegistry)
+               .buildParsed();
+    }
+
+    public static Builder newBuilder() { return Builder.create(); }
+    public Builder newBuilderForType() { return newBuilder(); }
+    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto prototype) {
+      return newBuilder().mergeFrom(prototype);
+    }
+    public Builder toBuilder() { return newBuilder(this); }
+
+    @java.lang.Override
+    protected Builder newBuilderForType(
+        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+      Builder builder = new Builder(parent);
+      return builder;
+    }
+    public static final class Builder extends
+        com.google.protobuf.GeneratedMessage.Builder<Builder>
+       implements org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProtoOrBuilder {
+      public static final com.google.protobuf.Descriptors.Descriptor
+          getDescriptor() {
+        return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_GetReplicaVisibleLengthResponseProto_descriptor;
+      }
+
+      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+          internalGetFieldAccessorTable() {
+        return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_GetReplicaVisibleLengthResponseProto_fieldAccessorTable;
+      }
+
+      // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto.newBuilder()
+      private Builder() {
+        maybeForceBuilderInitialization();
+      }
+
+      private Builder(BuilderParent parent) {
+        super(parent);
+        maybeForceBuilderInitialization();
+      }
+      private void maybeForceBuilderInitialization() {
+        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+        }
+      }
+      private static Builder create() {
+        return new Builder();
+      }
+
+      public Builder clear() {
+        super.clear();
+        length_ = 0L;
+        bitField0_ = (bitField0_ & ~0x00000001);
+        return this;
+      }
+
+      public Builder clone() {
+        return create().mergeFrom(buildPartial());
+      }
+
+      public com.google.protobuf.Descriptors.Descriptor
+          getDescriptorForType() {
+        return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto.getDescriptor();
+      }
+
+      public org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto getDefaultInstanceForType() {
+        return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto.getDefaultInstance();
+      }
+
+      public org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto build() {
+        org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto result = buildPartial();
+        if (!result.isInitialized()) {
+          throw newUninitializedMessageException(result);
+        }
+        return result;
+      }
+
+      private org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto buildParsed()
+          throws com.google.protobuf.InvalidProtocolBufferException {
+        org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto result = buildPartial();
+        if (!result.isInitialized()) {
+          throw newUninitializedMessageException(
+            result).asInvalidProtocolBufferException();
+        }
+        return result;
+      }
+
+      public org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto buildPartial() {
+        org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto(this);
+        int from_bitField0_ = bitField0_;
+        int to_bitField0_ = 0;
+        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+          to_bitField0_ |= 0x00000001;
+        }
+        result.length_ = length_;
+        result.bitField0_ = to_bitField0_;
+        onBuilt();
+        return result;
+      }
+
+      public Builder mergeFrom(com.google.protobuf.Message other) {
+        if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto) {
+          return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto)other);
+        } else {
+          super.mergeFrom(other);
+          return this;
+        }
+      }
+
+      public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto other) {
+        if (other == org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto.getDefaultInstance()) return this;
+        if (other.hasLength()) {
+          setLength(other.getLength());
+        }
+        this.mergeUnknownFields(other.getUnknownFields());
+        return this;
+      }
+
+      public final boolean isInitialized() {
+        if (!hasLength()) {
+
+          return false;
+        }
+        return true;
+      }
+
+      public Builder mergeFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws java.io.IOException {
+        com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+          com.google.protobuf.UnknownFieldSet.newBuilder(
+            this.getUnknownFields());
+        while (true) {
+          int tag = input.readTag();
+          switch (tag) {
+            case 0:
+              this.setUnknownFields(unknownFields.build());
+              onChanged();
+              return this;
+            default: {
+              if (!parseUnknownField(input, unknownFields,
+                                     extensionRegistry, tag)) {
+                this.setUnknownFields(unknownFields.build());
+                onChanged();
+                return this;
+              }
+              break;
+            }
+            case 8: {
+              bitField0_ |= 0x00000001;
+              length_ = input.readUInt64();
+              break;
+            }
+          }
+        }
+      }
+
+      private int bitField0_;
+
+      // required uint64 length = 1;
+      private long length_ ;
+      public boolean hasLength() {
+        return ((bitField0_ & 0x00000001) == 0x00000001);
+      }
+      public long getLength() {
+        return length_;
+      }
+      public Builder setLength(long value) {
+        bitField0_ |= 0x00000001;
+        length_ = value;
+        onChanged();
+        return this;
+      }
+      public Builder clearLength() {
+        bitField0_ = (bitField0_ & ~0x00000001);
+        length_ = 0L;
+        onChanged();
+        return this;
+      }
+
+      // @@protoc_insertion_point(builder_scope:GetReplicaVisibleLengthResponseProto)
+    }
+
+    static {
+      defaultInstance = new GetReplicaVisibleLengthResponseProto(true);
+      defaultInstance.initFields();
+    }
+
+    // @@protoc_insertion_point(class_scope:GetReplicaVisibleLengthResponseProto)
+  }
+
+  public interface RefreshNamenodesRequestProtoOrBuilder
+      extends com.google.protobuf.MessageOrBuilder {
+  }
+  public static final class RefreshNamenodesRequestProto extends
+      com.google.protobuf.GeneratedMessage
+      implements RefreshNamenodesRequestProtoOrBuilder {
+    // Use RefreshNamenodesRequestProto.newBuilder() to construct.
+    private RefreshNamenodesRequestProto(Builder builder) {
+      super(builder);
+    }
+    private RefreshNamenodesRequestProto(boolean noInit) {}
+
+    private static final RefreshNamenodesRequestProto defaultInstance;
+    public static RefreshNamenodesRequestProto getDefaultInstance() {
+      return defaultInstance;
+    }
+
+    public RefreshNamenodesRequestProto getDefaultInstanceForType() {
+      return defaultInstance;
+    }
+
+    public static final com.google.protobuf.Descriptors.Descriptor
+        getDescriptor() {
+      return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_RefreshNamenodesRequestProto_descriptor;
+    }
+
+    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+        internalGetFieldAccessorTable() {
+      return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_RefreshNamenodesRequestProto_fieldAccessorTable;
+    }
+
+    private void initFields() {
+    }
+    private byte memoizedIsInitialized = -1;
+    public final boolean isInitialized() {
+      byte isInitialized = memoizedIsInitialized;
+      if (isInitialized != -1) return isInitialized == 1;
+
+      memoizedIsInitialized = 1;
+      return true;
+    }
+
+    public void writeTo(com.google.protobuf.CodedOutputStream output)
+                        throws java.io.IOException {
+      getSerializedSize();
+      getUnknownFields().writeTo(output);
+    }
+
+    private int memoizedSerializedSize = -1;
+    public int getSerializedSize() {
+      int size = memoizedSerializedSize;
+      if (size != -1) return size;
+
+      size = 0;
+      size += getUnknownFields().getSerializedSize();
+      memoizedSerializedSize = size;
+      return size;
+    }
+
+    private static final long serialVersionUID = 0L;
+    @java.lang.Override
+    protected java.lang.Object writeReplace()
+        throws java.io.ObjectStreamException {
+      return super.writeReplace();
+    }
+
+    @java.lang.Override
+    public boolean equals(final java.lang.Object obj) {
+      if (obj == this) {
+       return true;
+      }
+      if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto)) {
+        return super.equals(obj);
+      }
+      org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto) obj;
+
+      boolean result = true;
+      result = result &&
+          getUnknownFields().equals(other.getUnknownFields());
+      return result;
+    }
+
+    @java.lang.Override
+    public int hashCode() {
+      int hash = 41;
+      hash = (19 * hash) + getDescriptorForType().hashCode();
+      hash = (29 * hash) + getUnknownFields().hashCode();
+      return hash;
+    }
+
+    public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto parseFrom(
+        com.google.protobuf.ByteString data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return newBuilder().mergeFrom(data).buildParsed();
+    }
+    public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto parseFrom(
+        com.google.protobuf.ByteString data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return newBuilder().mergeFrom(data, extensionRegistry)
+               .buildParsed();
+    }
+    public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto parseFrom(byte[] data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return newBuilder().mergeFrom(data).buildParsed();
+    }
+    public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto parseFrom(
+        byte[] data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return newBuilder().mergeFrom(data, extensionRegistry)
+               .buildParsed();
+    }
+    public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto parseFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return newBuilder().mergeFrom(input).buildParsed();
+    }
+    public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto parseFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return newBuilder().mergeFrom(input, extensionRegistry)
+               .buildParsed();
+    }
+    public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto parseDelimitedFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      Builder builder = newBuilder();
+      if (builder.mergeDelimitedFrom(input)) {
+        return builder.buildParsed();
+      } else {
+        return null;
+      }
+    }
+    public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto parseDelimitedFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      Builder builder = newBuilder();
+      if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
+        return builder.buildParsed();
+      } else {
+        return null;
+      }
+    }
+    public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto parseFrom(
+        com.google.protobuf.CodedInputStream input)
+        throws java.io.IOException {
+      return newBuilder().mergeFrom(input).buildParsed();
+    }
+    public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto parseFrom(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return newBuilder().mergeFrom(input, extensionRegistry)
+               .buildParsed();
+    }
+
+    public static Builder newBuilder() { return Builder.create(); }
+    public Builder newBuilderForType() { return newBuilder(); }
+    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto prototype) {
+      return newBuilder().mergeFrom(prototype);
+    }
+    public Builder toBuilder() { return newBuilder(this); }
+
+    @java.lang.Override
+    protected Builder newBuilderForType(
+        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+      Builder builder = new Builder(parent);
+      return builder;
+    }
+    public static final class Builder extends
+        com.google.protobuf.GeneratedMessage.Builder<Builder>
+       implements org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProtoOrBuilder {
+      public static final com.google.protobuf.Descriptors.Descriptor
+          getDescriptor() {
+        return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_RefreshNamenodesRequestProto_descriptor;
+      }
+
+      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+          internalGetFieldAccessorTable() {
+        return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_RefreshNamenodesRequestProto_fieldAccessorTable;
+      }
+
+      // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto.newBuilder()
+      private Builder() {
+        maybeForceBuilderInitialization();
+      }
+
+      private Builder(BuilderParent parent) {
+        super(parent);
+        maybeForceBuilderInitialization();
+      }
+      private void maybeForceBuilderInitialization() {
+        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+        }
+      }
+      private static Builder create() {
+        return new Builder();
+      }
+
+      public Builder clear() {
+        super.clear();
+        return this;
+      }
+
+      public Builder clone() {
+        return create().mergeFrom(buildPartial());
+      }
+
+      public com.google.protobuf.Descriptors.Descriptor
+          getDescriptorForType() {
+        return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto.getDescriptor();
+      }
+
+      public org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto getDefaultInstanceForType() {
+        return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto.getDefaultInstance();
+      }
+
+      public org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto build() {
+        org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto result = buildPartial();
+        if (!result.isInitialized()) {
+          throw newUninitializedMessageException(result);
+        }
+        return result;
+      }
+
+      private org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto buildParsed()
+          throws com.google.protobuf.InvalidProtocolBufferException {
+        org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto result = buildPartial();
+        if (!result.isInitialized()) {
+          throw newUninitializedMessageException(
+            result).asInvalidProtocolBufferException();
+        }
+        return result;
+      }
+
+      public org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto buildPartial() {
+        org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto(this);
+        onBuilt();
+        return result;
+      }
+
+      public Builder mergeFrom(com.google.protobuf.Message other) {
+        if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto) {
+          return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto)other);
+        } else {
+          super.mergeFrom(other);
+          return this;
+        }
+      }
+
+      public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto other) {
+        if (other == org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto.getDefaultInstance()) return this;
+        this.mergeUnknownFields(other.getUnknownFields());
+        return this;
+      }
+
+      public final boolean isInitialized() {
+        return true;
+      }
+
+      public Builder mergeFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws java.io.IOException {
+        com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+          com.google.protobuf.UnknownFieldSet.newBuilder(
+            this.getUnknownFields());
+        while (true) {
+          int tag = input.readTag();
+          switch (tag) {
+            case 0:
+              this.setUnknownFields(unknownFields.build());
+              onChanged();
+              return this;
+            default: {
+              if (!parseUnknownField(input, unknownFields,
+                                     extensionRegistry, tag)) {
+                this.setUnknownFields(unknownFields.build());
+                onChanged();
+                return this;
+              }
+              break;
+            }
+          }
+        }
+      }
+
+
+      // @@protoc_insertion_point(builder_scope:RefreshNamenodesRequestProto)
+    }
+
+    static {
+      defaultInstance = new RefreshNamenodesRequestProto(true);
+      defaultInstance.initFields();
+    }
+
+    // @@protoc_insertion_point(class_scope:RefreshNamenodesRequestProto)
+  }
+
+ public interface RefreshNamenodesResponseProtoOrBuilder
|
|
|
+ extends com.google.protobuf.MessageOrBuilder {
|
|
|
+ }
|
|
|
+ public static final class RefreshNamenodesResponseProto extends
|
|
|
+ com.google.protobuf.GeneratedMessage
|
|
|
+ implements RefreshNamenodesResponseProtoOrBuilder {
|
|
|
+ // Use RefreshNamenodesResponseProto.newBuilder() to construct.
|
|
|
+ private RefreshNamenodesResponseProto(Builder builder) {
|
|
|
+ super(builder);
|
|
|
+ }
|
|
|
+ private RefreshNamenodesResponseProto(boolean noInit) {}
|
|
|
+
|
|
|
+ private static final RefreshNamenodesResponseProto defaultInstance;
|
|
|
+ public static RefreshNamenodesResponseProto getDefaultInstance() {
|
|
|
+ return defaultInstance;
|
|
|
+ }
|
|
|
+
|
|
|
+ public RefreshNamenodesResponseProto getDefaultInstanceForType() {
|
|
|
+ return defaultInstance;
|
|
|
+ }
|
|
|
+
|
|
|
+ public static final com.google.protobuf.Descriptors.Descriptor
|
|
|
+ getDescriptor() {
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_RefreshNamenodesResponseProto_descriptor;
|
|
|
+ }
|
|
|
+
|
|
|
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
|
|
+ internalGetFieldAccessorTable() {
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_RefreshNamenodesResponseProto_fieldAccessorTable;
|
|
|
+ }
|
|
|
+
|
|
|
+ private void initFields() {
|
|
|
+ }
|
|
|
+ private byte memoizedIsInitialized = -1;
|
|
|
+ public final boolean isInitialized() {
|
|
|
+ byte isInitialized = memoizedIsInitialized;
|
|
|
+ if (isInitialized != -1) return isInitialized == 1;
|
|
|
+
|
|
|
+ memoizedIsInitialized = 1;
|
|
|
+ return true;
|
|
|
+ }
|
|
|
+
|
|
|
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
|
|
|
+ throws java.io.IOException {
|
|
|
+ getSerializedSize();
|
|
|
+ getUnknownFields().writeTo(output);
|
|
|
+ }
|
|
|
+
|
|
|
+ private int memoizedSerializedSize = -1;
|
|
|
+ public int getSerializedSize() {
|
|
|
+ int size = memoizedSerializedSize;
|
|
|
+ if (size != -1) return size;
|
|
|
+
|
|
|
+ size = 0;
|
|
|
+ size += getUnknownFields().getSerializedSize();
|
|
|
+ memoizedSerializedSize = size;
|
|
|
+ return size;
|
|
|
+ }
|
|
|
+
|
|
|
+ private static final long serialVersionUID = 0L;
|
|
|
+ @java.lang.Override
|
|
|
+ protected java.lang.Object writeReplace()
|
|
|
+ throws java.io.ObjectStreamException {
|
|
|
+ return super.writeReplace();
|
|
|
+ }
|
|
|
+
|
|
|
+ @java.lang.Override
|
|
|
+ public boolean equals(final java.lang.Object obj) {
|
|
|
+ if (obj == this) {
|
|
|
+ return true;
|
|
|
+ }
|
|
|
+ if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto)) {
|
|
|
+ return super.equals(obj);
|
|
|
+ }
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto) obj;
|
|
|
+
|
|
|
+ boolean result = true;
|
|
|
+ result = result &&
|
|
|
+ getUnknownFields().equals(other.getUnknownFields());
|
|
|
+ return result;
|
|
|
+ }
|
|
|
+
|
|
|
+ @java.lang.Override
|
|
|
+ public int hashCode() {
|
|
|
+ int hash = 41;
|
|
|
+ hash = (19 * hash) + getDescriptorForType().hashCode();
|
|
|
+ hash = (29 * hash) + getUnknownFields().hashCode();
|
|
|
+ return hash;
|
|
|
+ }
|
|
|
+
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto parseFrom(
|
|
|
+ com.google.protobuf.ByteString data)
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
+ return newBuilder().mergeFrom(data).buildParsed();
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto parseFrom(
|
|
|
+ com.google.protobuf.ByteString data,
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
+ return newBuilder().mergeFrom(data, extensionRegistry)
|
|
|
+ .buildParsed();
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto parseFrom(byte[] data)
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
+ return newBuilder().mergeFrom(data).buildParsed();
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto parseFrom(
|
|
|
+ byte[] data,
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
+ return newBuilder().mergeFrom(data, extensionRegistry)
|
|
|
+ .buildParsed();
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto parseFrom(java.io.InputStream input)
|
|
|
+ throws java.io.IOException {
|
|
|
+ return newBuilder().mergeFrom(input).buildParsed();
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto parseFrom(
|
|
|
+ java.io.InputStream input,
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
+ throws java.io.IOException {
|
|
|
+ return newBuilder().mergeFrom(input, extensionRegistry)
|
|
|
+ .buildParsed();
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto parseDelimitedFrom(java.io.InputStream input)
|
|
|
+ throws java.io.IOException {
|
|
|
+ Builder builder = newBuilder();
|
|
|
+ if (builder.mergeDelimitedFrom(input)) {
|
|
|
+ return builder.buildParsed();
|
|
|
+ } else {
|
|
|
+ return null;
|
|
|
+ }
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto parseDelimitedFrom(
|
|
|
+ java.io.InputStream input,
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
+ throws java.io.IOException {
|
|
|
+ Builder builder = newBuilder();
|
|
|
+ if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
|
|
|
+ return builder.buildParsed();
|
|
|
+ } else {
|
|
|
+ return null;
|
|
|
+ }
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto parseFrom(
|
|
|
+ com.google.protobuf.CodedInputStream input)
|
|
|
+ throws java.io.IOException {
|
|
|
+ return newBuilder().mergeFrom(input).buildParsed();
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto parseFrom(
|
|
|
+ com.google.protobuf.CodedInputStream input,
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
+ throws java.io.IOException {
|
|
|
+ return newBuilder().mergeFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
+ implements org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProtoOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_RefreshNamenodesResponseProto_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_RefreshNamenodesResponseProto_fieldAccessorTable;
+ }
+
+ // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto.getDescriptor();
+ }
+
+ public org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto getDefaultInstanceForType() {
+ return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto build() {
+ org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ private org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto buildParsed()
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(
+ result).asInvalidProtocolBufferException();
+ }
+ return result;
+ }
+
+ public org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto buildPartial() {
+ org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto(this);
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto) {
+ return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto other) {
+ if (other == org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto.getDefaultInstance()) return this;
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder(
+ this.getUnknownFields());
+ while (true) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ this.setUnknownFields(unknownFields.build());
+ onChanged();
+ return this;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ this.setUnknownFields(unknownFields.build());
+ onChanged();
+ return this;
+ }
+ break;
+ }
+ }
+ }
+ }
+
+
+ // @@protoc_insertion_point(builder_scope:RefreshNamenodesResponseProto)
+ }
+
+ static {
+ defaultInstance = new RefreshNamenodesResponseProto(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:RefreshNamenodesResponseProto)
+ }
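+  // Editorial note, not part of the generated file: build() reports a missing
+  // required field with an unchecked UninitializedMessageException, while the
+  // private buildParsed() used by the parseFrom()/parseDelimitedFrom() methods
+  // rethrows that failure as a checked InvalidProtocolBufferException, which is
+  // the exception callers of the parse methods should handle.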
+
|
|
|
+ public interface DeleteBlockPoolRequestProtoOrBuilder
|
|
|
+ extends com.google.protobuf.MessageOrBuilder {
|
|
|
+
|
|
|
+ // required string blockPool = 1;
|
|
|
+ boolean hasBlockPool();
|
|
|
+ String getBlockPool();
|
|
|
+
|
|
|
+ // required bool force = 2;
|
|
|
+ boolean hasForce();
|
|
|
+ boolean getForce();
|
|
|
+ }
|
|
|
+ public static final class DeleteBlockPoolRequestProto extends
|
|
|
+ com.google.protobuf.GeneratedMessage
|
|
|
+ implements DeleteBlockPoolRequestProtoOrBuilder {
|
|
|
+ // Use DeleteBlockPoolRequestProto.newBuilder() to construct.
|
|
|
+ private DeleteBlockPoolRequestProto(Builder builder) {
|
|
|
+ super(builder);
|
|
|
+ }
|
|
|
+ private DeleteBlockPoolRequestProto(boolean noInit) {}
|
|
|
+
|
|
|
+ private static final DeleteBlockPoolRequestProto defaultInstance;
|
|
|
+ public static DeleteBlockPoolRequestProto getDefaultInstance() {
|
|
|
+ return defaultInstance;
|
|
|
+ }
|
|
|
+
|
|
|
+ public DeleteBlockPoolRequestProto getDefaultInstanceForType() {
|
|
|
+ return defaultInstance;
|
|
|
+ }
|
|
|
+
|
|
|
+ public static final com.google.protobuf.Descriptors.Descriptor
|
|
|
+ getDescriptor() {
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_DeleteBlockPoolRequestProto_descriptor;
|
|
|
+ }
|
|
|
+
|
|
|
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
|
|
+ internalGetFieldAccessorTable() {
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_DeleteBlockPoolRequestProto_fieldAccessorTable;
|
|
|
+ }
|
|
|
+
|
|
|
+ private int bitField0_;
|
|
|
+ // required string blockPool = 1;
|
|
|
+ public static final int BLOCKPOOL_FIELD_NUMBER = 1;
|
|
|
+ private java.lang.Object blockPool_;
|
|
|
+ public boolean hasBlockPool() {
|
|
|
+ return ((bitField0_ & 0x00000001) == 0x00000001);
|
|
|
+ }
|
|
|
+ public String getBlockPool() {
|
|
|
+ java.lang.Object ref = blockPool_;
|
|
|
+ if (ref instanceof String) {
|
|
|
+ return (String) ref;
|
|
|
+ } else {
|
|
|
+ com.google.protobuf.ByteString bs =
|
|
|
+ (com.google.protobuf.ByteString) ref;
|
|
|
+ String s = bs.toStringUtf8();
|
|
|
+ if (com.google.protobuf.Internal.isValidUtf8(bs)) {
|
|
|
+ blockPool_ = s;
|
|
|
+ }
|
|
|
+ return s;
|
|
|
+ }
|
|
|
+ }
|
|
|
+ private com.google.protobuf.ByteString getBlockPoolBytes() {
|
|
|
+ java.lang.Object ref = blockPool_;
|
|
|
+ if (ref instanceof String) {
|
|
|
+ com.google.protobuf.ByteString b =
|
|
|
+ com.google.protobuf.ByteString.copyFromUtf8((String) ref);
|
|
|
+ blockPool_ = b;
|
|
|
+ return b;
|
|
|
+ } else {
|
|
|
+ return (com.google.protobuf.ByteString) ref;
|
|
|
+ }
+ }
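+  // Editorial note, not part of the generated file: blockPool_ holds either a
+  // String or a ByteString. getBlockPool() decodes a ByteString lazily and
+  // caches the String form only when the bytes are valid UTF-8;
+  // getBlockPoolBytes() converts and caches in the opposite direction, so
+  // repeated serialization of the same value does not re-encode it.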
+
|
|
|
+ // required bool force = 2;
|
|
|
+ public static final int FORCE_FIELD_NUMBER = 2;
|
|
|
+ private boolean force_;
|
|
|
+ public boolean hasForce() {
|
|
|
+ return ((bitField0_ & 0x00000002) == 0x00000002);
|
|
|
+ }
|
|
|
+ public boolean getForce() {
|
|
|
+ return force_;
|
|
|
+ }
|
|
|
+
|
|
|
+ private void initFields() {
|
|
|
+ blockPool_ = "";
|
|
|
+ force_ = false;
|
|
|
+ }
|
|
|
+ private byte memoizedIsInitialized = -1;
|
|
|
+ public final boolean isInitialized() {
|
|
|
+ byte isInitialized = memoizedIsInitialized;
|
|
|
+ if (isInitialized != -1) return isInitialized == 1;
|
|
|
+
|
|
|
+ if (!hasBlockPool()) {
|
|
|
+ memoizedIsInitialized = 0;
|
|
|
+ return false;
|
|
|
+ }
|
|
|
+ if (!hasForce()) {
|
|
|
+ memoizedIsInitialized = 0;
|
|
|
+ return false;
|
|
|
+ }
|
|
|
+ memoizedIsInitialized = 1;
|
|
|
+ return true;
|
|
|
+ }
|
|
|
+
|
|
|
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
|
|
|
+ throws java.io.IOException {
|
|
|
+ getSerializedSize();
|
|
|
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
+ output.writeBytes(1, getBlockPoolBytes());
|
|
|
+ }
|
|
|
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
|
|
|
+ output.writeBool(2, force_);
|
|
|
+ }
|
|
|
+ getUnknownFields().writeTo(output);
|
|
|
+ }
|
|
|
+
|
|
|
+ private int memoizedSerializedSize = -1;
|
|
|
+ public int getSerializedSize() {
|
|
|
+ int size = memoizedSerializedSize;
|
|
|
+ if (size != -1) return size;
|
|
|
+
|
|
|
+ size = 0;
|
|
|
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
+ size += com.google.protobuf.CodedOutputStream
|
|
|
+ .computeBytesSize(1, getBlockPoolBytes());
|
|
|
+ }
|
|
|
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
|
|
|
+ size += com.google.protobuf.CodedOutputStream
|
|
|
+ .computeBoolSize(2, force_);
|
|
|
+ }
|
|
|
+ size += getUnknownFields().getSerializedSize();
|
|
|
+ memoizedSerializedSize = size;
|
|
|
+ return size;
|
|
|
+ }
|
|
|
+
|
|
|
+ private static final long serialVersionUID = 0L;
|
|
|
+ @java.lang.Override
|
|
|
+ protected java.lang.Object writeReplace()
|
|
|
+ throws java.io.ObjectStreamException {
|
|
|
+ return super.writeReplace();
|
|
|
+ }
|
|
|
+
|
|
|
+ @java.lang.Override
|
|
|
+ public boolean equals(final java.lang.Object obj) {
|
|
|
+ if (obj == this) {
|
|
|
+ return true;
|
|
|
+ }
|
|
|
+ if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto)) {
|
|
|
+ return super.equals(obj);
|
|
|
+ }
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto) obj;
|
|
|
+
|
|
|
+ boolean result = true;
|
|
|
+ result = result && (hasBlockPool() == other.hasBlockPool());
|
|
|
+ if (hasBlockPool()) {
|
|
|
+ result = result && getBlockPool()
|
|
|
+ .equals(other.getBlockPool());
|
|
|
+ }
|
|
|
+ result = result && (hasForce() == other.hasForce());
|
|
|
+ if (hasForce()) {
|
|
|
+ result = result && (getForce()
|
|
|
+ == other.getForce());
|
|
|
+ }
|
|
|
+ result = result &&
|
|
|
+ getUnknownFields().equals(other.getUnknownFields());
|
|
|
+ return result;
|
|
|
+ }
|
|
|
+
|
|
|
+ @java.lang.Override
|
|
|
+ public int hashCode() {
|
|
|
+ int hash = 41;
|
|
|
+ hash = (19 * hash) + getDescriptorForType().hashCode();
|
|
|
+ if (hasBlockPool()) {
|
|
|
+ hash = (37 * hash) + BLOCKPOOL_FIELD_NUMBER;
|
|
|
+ hash = (53 * hash) + getBlockPool().hashCode();
|
|
|
+ }
|
|
|
+ if (hasForce()) {
|
|
|
+ hash = (37 * hash) + FORCE_FIELD_NUMBER;
|
|
|
+ hash = (53 * hash) + hashBoolean(getForce());
|
|
|
+ }
|
|
|
+ hash = (29 * hash) + getUnknownFields().hashCode();
|
|
|
+ return hash;
+ }
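+  // Editorial note, not part of the generated file: the generated hashCode()
+  // folds in the message descriptor, then the field number and value of each
+  // field that is set, then the unknown-field set, using fixed odd multipliers,
+  // so messages that compare equal under equals() hash equally.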
+
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto parseFrom(
|
|
|
+ com.google.protobuf.ByteString data)
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
+ return newBuilder().mergeFrom(data).buildParsed();
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto parseFrom(
|
|
|
+ com.google.protobuf.ByteString data,
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
+ return newBuilder().mergeFrom(data, extensionRegistry)
|
|
|
+ .buildParsed();
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto parseFrom(byte[] data)
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
+ return newBuilder().mergeFrom(data).buildParsed();
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto parseFrom(
|
|
|
+ byte[] data,
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
+ return newBuilder().mergeFrom(data, extensionRegistry)
|
|
|
+ .buildParsed();
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto parseFrom(java.io.InputStream input)
|
|
|
+ throws java.io.IOException {
|
|
|
+ return newBuilder().mergeFrom(input).buildParsed();
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto parseFrom(
|
|
|
+ java.io.InputStream input,
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
+ throws java.io.IOException {
|
|
|
+ return newBuilder().mergeFrom(input, extensionRegistry)
|
|
|
+ .buildParsed();
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto parseDelimitedFrom(java.io.InputStream input)
|
|
|
+ throws java.io.IOException {
|
|
|
+ Builder builder = newBuilder();
|
|
|
+ if (builder.mergeDelimitedFrom(input)) {
|
|
|
+ return builder.buildParsed();
|
|
|
+ } else {
|
|
|
+ return null;
|
|
|
+ }
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto parseDelimitedFrom(
|
|
|
+ java.io.InputStream input,
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
+ throws java.io.IOException {
|
|
|
+ Builder builder = newBuilder();
|
|
|
+ if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
|
|
|
+ return builder.buildParsed();
|
|
|
+ } else {
|
|
|
+ return null;
|
|
|
+ }
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto parseFrom(
|
|
|
+ com.google.protobuf.CodedInputStream input)
|
|
|
+ throws java.io.IOException {
|
|
|
+ return newBuilder().mergeFrom(input).buildParsed();
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto parseFrom(
|
|
|
+ com.google.protobuf.CodedInputStream input,
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
+ throws java.io.IOException {
|
|
|
+ return newBuilder().mergeFrom(input, extensionRegistry)
|
|
|
+ .buildParsed();
|
|
|
+ }
|
|
|
+
|
|
|
+ public static Builder newBuilder() { return Builder.create(); }
|
|
|
+ public Builder newBuilderForType() { return newBuilder(); }
|
|
|
+ public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto prototype) {
|
|
|
+ return newBuilder().mergeFrom(prototype);
|
|
|
+ }
|
|
|
+ public Builder toBuilder() { return newBuilder(this); }
|
|
|
+
|
|
|
+ @java.lang.Override
|
|
|
+ protected Builder newBuilderForType(
|
|
|
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
|
|
|
+ Builder builder = new Builder(parent);
|
|
|
+ return builder;
|
|
|
+ }
|
|
|
+ public static final class Builder extends
|
|
|
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
|
|
|
+ implements org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProtoOrBuilder {
|
|
|
+ public static final com.google.protobuf.Descriptors.Descriptor
|
|
|
+ getDescriptor() {
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_DeleteBlockPoolRequestProto_descriptor;
|
|
|
+ }
|
|
|
+
|
|
|
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
|
|
+ internalGetFieldAccessorTable() {
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_DeleteBlockPoolRequestProto_fieldAccessorTable;
|
|
|
+ }
|
|
|
+
|
|
|
+ // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto.newBuilder()
|
|
|
+ private Builder() {
|
|
|
+ maybeForceBuilderInitialization();
|
|
|
+ }
|
|
|
+
|
|
|
+ private Builder(BuilderParent parent) {
|
|
|
+ super(parent);
|
|
|
+ maybeForceBuilderInitialization();
|
|
|
+ }
|
|
|
+ private void maybeForceBuilderInitialization() {
|
|
|
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
|
|
|
+ }
|
|
|
+ }
|
|
|
+ private static Builder create() {
|
|
|
+ return new Builder();
|
|
|
+ }
|
|
|
+
|
|
|
+ public Builder clear() {
|
|
|
+ super.clear();
|
|
|
+ blockPool_ = "";
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000001);
|
|
|
+ force_ = false;
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000002);
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+
|
|
|
+ public Builder clone() {
|
|
|
+ return create().mergeFrom(buildPartial());
|
|
|
+ }
|
|
|
+
|
|
|
+ public com.google.protobuf.Descriptors.Descriptor
|
|
|
+ getDescriptorForType() {
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto.getDescriptor();
|
|
|
+ }
|
|
|
+
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto getDefaultInstanceForType() {
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto.getDefaultInstance();
|
|
|
+ }
|
|
|
+
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto build() {
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto result = buildPartial();
|
|
|
+ if (!result.isInitialized()) {
|
|
|
+ throw newUninitializedMessageException(result);
|
|
|
+ }
|
|
|
+ return result;
|
|
|
+ }
|
|
|
+
|
|
|
+ private org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto buildParsed()
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto result = buildPartial();
|
|
|
+ if (!result.isInitialized()) {
|
|
|
+ throw newUninitializedMessageException(
|
|
|
+ result).asInvalidProtocolBufferException();
|
|
|
+ }
|
|
|
+ return result;
|
|
|
+ }
|
|
|
+
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto buildPartial() {
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto(this);
|
|
|
+ int from_bitField0_ = bitField0_;
|
|
|
+ int to_bitField0_ = 0;
|
|
|
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
+ to_bitField0_ |= 0x00000001;
|
|
|
+ }
|
|
|
+ result.blockPool_ = blockPool_;
|
|
|
+ if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
|
|
|
+ to_bitField0_ |= 0x00000002;
|
|
|
+ }
|
|
|
+ result.force_ = force_;
|
|
|
+ result.bitField0_ = to_bitField0_;
|
|
|
+ onBuilt();
|
|
|
+ return result;
|
|
|
+ }
|
|
|
+
|
|
|
+ public Builder mergeFrom(com.google.protobuf.Message other) {
|
|
|
+ if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto) {
|
|
|
+ return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto)other);
|
|
|
+ } else {
|
|
|
+ super.mergeFrom(other);
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+ }
|
|
|
+
|
|
|
+ public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto other) {
|
|
|
+ if (other == org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto.getDefaultInstance()) return this;
|
|
|
+ if (other.hasBlockPool()) {
|
|
|
+ setBlockPool(other.getBlockPool());
|
|
|
+ }
|
|
|
+ if (other.hasForce()) {
|
|
|
+ setForce(other.getForce());
|
|
|
+ }
|
|
|
+ this.mergeUnknownFields(other.getUnknownFields());
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+
|
|
|
+ public final boolean isInitialized() {
|
|
|
+ if (!hasBlockPool()) {
|
|
|
+
|
|
|
+ return false;
|
|
|
+ }
|
|
|
+ if (!hasForce()) {
|
|
|
+
|
|
|
+ return false;
|
|
|
+ }
|
|
|
+ return true;
|
|
|
+ }
|
|
|
+
|
|
|
+ public Builder mergeFrom(
|
|
|
+ com.google.protobuf.CodedInputStream input,
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
+ throws java.io.IOException {
|
|
|
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
|
|
|
+ com.google.protobuf.UnknownFieldSet.newBuilder(
|
|
|
+ this.getUnknownFields());
|
|
|
+ while (true) {
|
|
|
+ int tag = input.readTag();
|
|
|
+ switch (tag) {
|
|
|
+ case 0:
|
|
|
+ this.setUnknownFields(unknownFields.build());
|
|
|
+ onChanged();
|
|
|
+ return this;
|
|
|
+ default: {
|
|
|
+ if (!parseUnknownField(input, unknownFields,
|
|
|
+ extensionRegistry, tag)) {
|
|
|
+ this.setUnknownFields(unknownFields.build());
|
|
|
+ onChanged();
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+ break;
|
|
|
+ }
|
|
|
+ case 10: {
|
|
|
+ bitField0_ |= 0x00000001;
|
|
|
+ blockPool_ = input.readBytes();
|
|
|
+ break;
|
|
|
+ }
|
|
|
+ case 16: {
|
|
|
+ bitField0_ |= 0x00000002;
|
|
|
+ force_ = input.readBool();
|
|
|
+ break;
|
|
|
+ }
|
|
|
+ }
|
|
|
+ }
|
|
|
+ }
|
|
|
+
|
|
|
+ private int bitField0_;
|
|
|
+
|
|
|
+ // required string blockPool = 1;
|
|
|
+ private java.lang.Object blockPool_ = "";
|
|
|
+ public boolean hasBlockPool() {
|
|
|
+ return ((bitField0_ & 0x00000001) == 0x00000001);
|
|
|
+ }
|
|
|
+ public String getBlockPool() {
|
|
|
+ java.lang.Object ref = blockPool_;
|
|
|
+ if (!(ref instanceof String)) {
|
|
|
+ String s = ((com.google.protobuf.ByteString) ref).toStringUtf8();
|
|
|
+ blockPool_ = s;
|
|
|
+ return s;
|
|
|
+ } else {
|
|
|
+ return (String) ref;
|
|
|
+ }
|
|
|
+ }
|
|
|
+ public Builder setBlockPool(String value) {
|
|
|
+ if (value == null) {
|
|
|
+ throw new NullPointerException();
|
|
|
+ }
|
|
|
+ bitField0_ |= 0x00000001;
|
|
|
+ blockPool_ = value;
|
|
|
+ onChanged();
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+ public Builder clearBlockPool() {
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000001);
|
|
|
+ blockPool_ = getDefaultInstance().getBlockPool();
|
|
|
+ onChanged();
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+ void setBlockPool(com.google.protobuf.ByteString value) {
|
|
|
+ bitField0_ |= 0x00000001;
|
|
|
+ blockPool_ = value;
|
|
|
+ onChanged();
|
|
|
+ }
|
|
|
+
|
|
|
+ // required bool force = 2;
|
|
|
+ private boolean force_ ;
|
|
|
+ public boolean hasForce() {
|
|
|
+ return ((bitField0_ & 0x00000002) == 0x00000002);
|
|
|
+ }
|
|
|
+ public boolean getForce() {
|
|
|
+ return force_;
|
|
|
+ }
|
|
|
+ public Builder setForce(boolean value) {
|
|
|
+ bitField0_ |= 0x00000002;
|
|
|
+ force_ = value;
|
|
|
+ onChanged();
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+ public Builder clearForce() {
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000002);
|
|
|
+ force_ = false;
|
|
|
+ onChanged();
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+
|
|
|
+ // @@protoc_insertion_point(builder_scope:DeleteBlockPoolRequestProto)
|
|
|
+ }
|
|
|
+
|
|
|
+ static {
|
|
|
+ defaultInstance = new DeleteBlockPoolRequestProto(true);
|
|
|
+ defaultInstance.initFields();
|
|
|
+ }
|
|
|
+
|
|
|
+ // @@protoc_insertion_point(class_scope:DeleteBlockPoolRequestProto)
+ }
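+  // Usage sketch, editorial and not part of the generated file; the block-pool
+  // id below is a made-up example:
+  //
+  //   DeleteBlockPoolRequestProto req = DeleteBlockPoolRequestProto.newBuilder()
+  //       .setBlockPool("BP-1234567890-127.0.0.1-1300000000000")
+  //       .setForce(false)
+  //       .build();                  // throws if a required field is unset
+  //   byte[] wire = req.toByteArray();
+  //   DeleteBlockPoolRequestProto back = DeleteBlockPoolRequestProto.parseFrom(wire);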
+
|
|
|
+ public interface DeleteBlockPoolResponseProtoOrBuilder
|
|
|
+ extends com.google.protobuf.MessageOrBuilder {
|
|
|
+ }
|
|
|
+ public static final class DeleteBlockPoolResponseProto extends
|
|
|
+ com.google.protobuf.GeneratedMessage
|
|
|
+ implements DeleteBlockPoolResponseProtoOrBuilder {
|
|
|
+ // Use DeleteBlockPoolResponseProto.newBuilder() to construct.
|
|
|
+ private DeleteBlockPoolResponseProto(Builder builder) {
|
|
|
+ super(builder);
|
|
|
+ }
|
|
|
+ private DeleteBlockPoolResponseProto(boolean noInit) {}
|
|
|
+
|
|
|
+ private static final DeleteBlockPoolResponseProto defaultInstance;
|
|
|
+ public static DeleteBlockPoolResponseProto getDefaultInstance() {
|
|
|
+ return defaultInstance;
|
|
|
+ }
|
|
|
+
|
|
|
+ public DeleteBlockPoolResponseProto getDefaultInstanceForType() {
|
|
|
+ return defaultInstance;
|
|
|
+ }
|
|
|
+
|
|
|
+ public static final com.google.protobuf.Descriptors.Descriptor
|
|
|
+ getDescriptor() {
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_DeleteBlockPoolResponseProto_descriptor;
|
|
|
+ }
|
|
|
+
|
|
|
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
|
|
+ internalGetFieldAccessorTable() {
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_DeleteBlockPoolResponseProto_fieldAccessorTable;
|
|
|
+ }
|
|
|
+
|
|
|
+ private void initFields() {
|
|
|
+ }
|
|
|
+ private byte memoizedIsInitialized = -1;
|
|
|
+ public final boolean isInitialized() {
|
|
|
+ byte isInitialized = memoizedIsInitialized;
|
|
|
+ if (isInitialized != -1) return isInitialized == 1;
|
|
|
+
|
|
|
+ memoizedIsInitialized = 1;
|
|
|
+ return true;
|
|
|
+ }
|
|
|
+
|
|
|
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
|
|
|
+ throws java.io.IOException {
|
|
|
+ getSerializedSize();
|
|
|
+ getUnknownFields().writeTo(output);
|
|
|
+ }
|
|
|
+
|
|
|
+ private int memoizedSerializedSize = -1;
|
|
|
+ public int getSerializedSize() {
|
|
|
+ int size = memoizedSerializedSize;
|
|
|
+ if (size != -1) return size;
|
|
|
+
|
|
|
+ size = 0;
|
|
|
+ size += getUnknownFields().getSerializedSize();
|
|
|
+ memoizedSerializedSize = size;
|
|
|
+ return size;
|
|
|
+ }
|
|
|
+
|
|
|
+ private static final long serialVersionUID = 0L;
|
|
|
+ @java.lang.Override
|
|
|
+ protected java.lang.Object writeReplace()
|
|
|
+ throws java.io.ObjectStreamException {
|
|
|
+ return super.writeReplace();
|
|
|
+ }
|
|
|
+
|
|
|
+ @java.lang.Override
|
|
|
+ public boolean equals(final java.lang.Object obj) {
|
|
|
+ if (obj == this) {
|
|
|
+ return true;
|
|
|
+ }
|
|
|
+ if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto)) {
|
|
|
+ return super.equals(obj);
|
|
|
+ }
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto) obj;
|
|
|
+
|
|
|
+ boolean result = true;
|
|
|
+ result = result &&
|
|
|
+ getUnknownFields().equals(other.getUnknownFields());
|
|
|
+ return result;
|
|
|
+ }
|
|
|
+
|
|
|
+ @java.lang.Override
|
|
|
+ public int hashCode() {
|
|
|
+ int hash = 41;
|
|
|
+ hash = (19 * hash) + getDescriptorForType().hashCode();
|
|
|
+ hash = (29 * hash) + getUnknownFields().hashCode();
|
|
|
+ return hash;
|
|
|
+ }
|
|
|
+
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto parseFrom(
|
|
|
+ com.google.protobuf.ByteString data)
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
+ return newBuilder().mergeFrom(data).buildParsed();
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto parseFrom(
|
|
|
+ com.google.protobuf.ByteString data,
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
+ return newBuilder().mergeFrom(data, extensionRegistry)
|
|
|
+ .buildParsed();
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto parseFrom(byte[] data)
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
+ return newBuilder().mergeFrom(data).buildParsed();
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto parseFrom(
|
|
|
+ byte[] data,
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
+ return newBuilder().mergeFrom(data, extensionRegistry)
|
|
|
+ .buildParsed();
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto parseFrom(java.io.InputStream input)
|
|
|
+ throws java.io.IOException {
|
|
|
+ return newBuilder().mergeFrom(input).buildParsed();
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto parseFrom(
|
|
|
+ java.io.InputStream input,
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
+ throws java.io.IOException {
|
|
|
+ return newBuilder().mergeFrom(input, extensionRegistry)
|
|
|
+ .buildParsed();
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto parseDelimitedFrom(java.io.InputStream input)
|
|
|
+ throws java.io.IOException {
|
|
|
+ Builder builder = newBuilder();
|
|
|
+ if (builder.mergeDelimitedFrom(input)) {
|
|
|
+ return builder.buildParsed();
|
|
|
+ } else {
|
|
|
+ return null;
|
|
|
+ }
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto parseDelimitedFrom(
|
|
|
+ java.io.InputStream input,
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
+ throws java.io.IOException {
|
|
|
+ Builder builder = newBuilder();
|
|
|
+ if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
|
|
|
+ return builder.buildParsed();
|
|
|
+ } else {
|
|
|
+ return null;
|
|
|
+ }
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto parseFrom(
|
|
|
+ com.google.protobuf.CodedInputStream input)
|
|
|
+ throws java.io.IOException {
|
|
|
+ return newBuilder().mergeFrom(input).buildParsed();
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto parseFrom(
|
|
|
+ com.google.protobuf.CodedInputStream input,
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
+ throws java.io.IOException {
|
|
|
+ return newBuilder().mergeFrom(input, extensionRegistry)
|
|
|
+ .buildParsed();
|
|
|
+ }
|
|
|
+
|
|
|
+ public static Builder newBuilder() { return Builder.create(); }
|
|
|
+ public Builder newBuilderForType() { return newBuilder(); }
|
|
|
+ public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto prototype) {
|
|
|
+ return newBuilder().mergeFrom(prototype);
|
|
|
+ }
|
|
|
+ public Builder toBuilder() { return newBuilder(this); }
|
|
|
+
|
|
|
+ @java.lang.Override
|
|
|
+ protected Builder newBuilderForType(
|
|
|
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
|
|
|
+ Builder builder = new Builder(parent);
|
|
|
+ return builder;
|
|
|
+ }
|
|
|
+ public static final class Builder extends
|
|
|
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
|
|
|
+ implements org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProtoOrBuilder {
|
|
|
+ public static final com.google.protobuf.Descriptors.Descriptor
|
|
|
+ getDescriptor() {
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_DeleteBlockPoolResponseProto_descriptor;
|
|
|
+ }
|
|
|
+
|
|
|
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
|
|
+ internalGetFieldAccessorTable() {
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_DeleteBlockPoolResponseProto_fieldAccessorTable;
|
|
|
+ }
|
|
|
+
|
|
|
+ // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto.newBuilder()
|
|
|
+ private Builder() {
|
|
|
+ maybeForceBuilderInitialization();
|
|
|
+ }
|
|
|
+
|
|
|
+ private Builder(BuilderParent parent) {
|
|
|
+ super(parent);
|
|
|
+ maybeForceBuilderInitialization();
|
|
|
+ }
|
|
|
+ private void maybeForceBuilderInitialization() {
|
|
|
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
|
|
|
+ }
|
|
|
+ }
|
|
|
+ private static Builder create() {
|
|
|
+ return new Builder();
|
|
|
+ }
|
|
|
+
|
|
|
+ public Builder clear() {
|
|
|
+ super.clear();
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+
|
|
|
+ public Builder clone() {
|
|
|
+ return create().mergeFrom(buildPartial());
|
|
|
+ }
|
|
|
+
|
|
|
+ public com.google.protobuf.Descriptors.Descriptor
|
|
|
+ getDescriptorForType() {
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto.getDescriptor();
|
|
|
+ }
|
|
|
+
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto getDefaultInstanceForType() {
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto.getDefaultInstance();
|
|
|
+ }
|
|
|
+
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto build() {
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto result = buildPartial();
|
|
|
+ if (!result.isInitialized()) {
|
|
|
+ throw newUninitializedMessageException(result);
|
|
|
+ }
|
|
|
+ return result;
|
|
|
+ }
|
|
|
+
|
|
|
+ private org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto buildParsed()
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto result = buildPartial();
|
|
|
+ if (!result.isInitialized()) {
|
|
|
+ throw newUninitializedMessageException(
|
|
|
+ result).asInvalidProtocolBufferException();
|
|
|
+ }
|
|
|
+ return result;
|
|
|
+ }
|
|
|
+
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto buildPartial() {
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto(this);
|
|
|
+ onBuilt();
|
|
|
+ return result;
|
|
|
+ }
|
|
|
+
|
|
|
+ public Builder mergeFrom(com.google.protobuf.Message other) {
|
|
|
+ if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto) {
|
|
|
+ return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto)other);
|
|
|
+ } else {
|
|
|
+ super.mergeFrom(other);
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+ }
|
|
|
+
|
|
|
+ public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto other) {
|
|
|
+ if (other == org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto.getDefaultInstance()) return this;
|
|
|
+ this.mergeUnknownFields(other.getUnknownFields());
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+
|
|
|
+ public final boolean isInitialized() {
|
|
|
+ return true;
|
|
|
+ }
|
|
|
+
|
|
|
+ public Builder mergeFrom(
|
|
|
+ com.google.protobuf.CodedInputStream input,
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
+ throws java.io.IOException {
|
|
|
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
|
|
|
+ com.google.protobuf.UnknownFieldSet.newBuilder(
|
|
|
+ this.getUnknownFields());
|
|
|
+ while (true) {
|
|
|
+ int tag = input.readTag();
|
|
|
+ switch (tag) {
|
|
|
+ case 0:
|
|
|
+ this.setUnknownFields(unknownFields.build());
|
|
|
+ onChanged();
|
|
|
+ return this;
|
|
|
+ default: {
|
|
|
+ if (!parseUnknownField(input, unknownFields,
|
|
|
+ extensionRegistry, tag)) {
|
|
|
+ this.setUnknownFields(unknownFields.build());
|
|
|
+ onChanged();
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+ break;
|
|
|
+ }
|
|
|
+ }
|
|
|
+ }
|
|
|
+ }
|
|
|
+
|
|
|
+
|
|
|
+ // @@protoc_insertion_point(builder_scope:DeleteBlockPoolResponseProto)
|
|
|
+ }
|
|
|
+
|
|
|
+ static {
|
|
|
+ defaultInstance = new DeleteBlockPoolResponseProto(true);
|
|
|
+ defaultInstance.initFields();
|
|
|
+ }
|
|
|
+
|
|
|
+ // @@protoc_insertion_point(class_scope:DeleteBlockPoolResponseProto)
+ }
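+  // Editorial note, not part of the generated file: DeleteBlockPoolResponseProto
+  // declares no fields, so an instance serializes to zero bytes apart from any
+  // unknown fields, and getDefaultInstance() is normally all a caller needs:
+  //
+  //   DeleteBlockPoolResponseProto ok = DeleteBlockPoolResponseProto.getDefaultInstance();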
+
|
|
|
+ public interface GetBlockLocalPathInfoRequestProtoOrBuilder
|
|
|
+ extends com.google.protobuf.MessageOrBuilder {
|
|
|
+
|
|
|
+ // required .ExtendedBlockProto block = 1;
|
|
|
+ boolean hasBlock();
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getBlock();
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder getBlockOrBuilder();
|
|
|
+
|
|
|
+ // required .BlockTokenIdentifierProto token = 2;
|
|
|
+ boolean hasToken();
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto getToken();
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProtoOrBuilder getTokenOrBuilder();
|
|
|
+ }
|
|
|
+ public static final class GetBlockLocalPathInfoRequestProto extends
|
|
|
+ com.google.protobuf.GeneratedMessage
|
|
|
+ implements GetBlockLocalPathInfoRequestProtoOrBuilder {
|
|
|
+ // Use GetBlockLocalPathInfoRequestProto.newBuilder() to construct.
|
|
|
+ private GetBlockLocalPathInfoRequestProto(Builder builder) {
|
|
|
+ super(builder);
|
|
|
+ }
|
|
|
+ private GetBlockLocalPathInfoRequestProto(boolean noInit) {}
|
|
|
+
|
|
|
+ private static final GetBlockLocalPathInfoRequestProto defaultInstance;
|
|
|
+ public static GetBlockLocalPathInfoRequestProto getDefaultInstance() {
|
|
|
+ return defaultInstance;
|
|
|
+ }
|
|
|
+
|
|
|
+ public GetBlockLocalPathInfoRequestProto getDefaultInstanceForType() {
|
|
|
+ return defaultInstance;
|
|
|
+ }
|
|
|
+
|
|
|
+ public static final com.google.protobuf.Descriptors.Descriptor
|
|
|
+ getDescriptor() {
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_GetBlockLocalPathInfoRequestProto_descriptor;
|
|
|
+ }
|
|
|
+
|
|
|
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
|
|
+ internalGetFieldAccessorTable() {
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_GetBlockLocalPathInfoRequestProto_fieldAccessorTable;
|
|
|
+ }
|
|
|
+
|
|
|
+ private int bitField0_;
|
|
|
+ // required .ExtendedBlockProto block = 1;
|
|
|
+ public static final int BLOCK_FIELD_NUMBER = 1;
|
|
|
+ private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto block_;
|
|
|
+ public boolean hasBlock() {
|
|
|
+ return ((bitField0_ & 0x00000001) == 0x00000001);
|
|
|
+ }
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getBlock() {
|
|
|
+ return block_;
|
|
|
+ }
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder getBlockOrBuilder() {
|
|
|
+ return block_;
|
|
|
+ }
|
|
|
+
|
|
|
+ // required .BlockTokenIdentifierProto token = 2;
|
|
|
+ public static final int TOKEN_FIELD_NUMBER = 2;
|
|
|
+ private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto token_;
|
|
|
+ public boolean hasToken() {
|
|
|
+ return ((bitField0_ & 0x00000002) == 0x00000002);
|
|
|
+ }
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto getToken() {
|
|
|
+ return token_;
|
|
|
+ }
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProtoOrBuilder getTokenOrBuilder() {
|
|
|
+ return token_;
|
|
|
+ }
|
|
|
+
|
|
|
+ private void initFields() {
|
|
|
+ block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance();
|
|
|
+ token_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto.getDefaultInstance();
|
|
|
+ }
|
|
|
+ private byte memoizedIsInitialized = -1;
|
|
|
+ public final boolean isInitialized() {
|
|
|
+ byte isInitialized = memoizedIsInitialized;
|
|
|
+ if (isInitialized != -1) return isInitialized == 1;
|
|
|
+
|
|
|
+ if (!hasBlock()) {
|
|
|
+ memoizedIsInitialized = 0;
|
|
|
+ return false;
|
|
|
+ }
|
|
|
+ if (!hasToken()) {
|
|
|
+ memoizedIsInitialized = 0;
|
|
|
+ return false;
|
|
|
+ }
|
|
|
+ if (!getBlock().isInitialized()) {
|
|
|
+ memoizedIsInitialized = 0;
|
|
|
+ return false;
|
|
|
+ }
|
|
|
+ if (!getToken().isInitialized()) {
|
|
|
+ memoizedIsInitialized = 0;
|
|
|
+ return false;
|
|
|
+ }
|
|
|
+ memoizedIsInitialized = 1;
|
|
|
+ return true;
|
|
|
+ }
|
|
|
+
|
|
|
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
|
|
|
+ throws java.io.IOException {
|
|
|
+ getSerializedSize();
|
|
|
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
+ output.writeMessage(1, block_);
|
|
|
+ }
|
|
|
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
|
|
|
+ output.writeMessage(2, token_);
|
|
|
+ }
|
|
|
+ getUnknownFields().writeTo(output);
|
|
|
+ }
|
|
|
+
|
|
|
+ private int memoizedSerializedSize = -1;
|
|
|
+ public int getSerializedSize() {
|
|
|
+ int size = memoizedSerializedSize;
|
|
|
+ if (size != -1) return size;
|
|
|
+
|
|
|
+ size = 0;
|
|
|
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
+ size += com.google.protobuf.CodedOutputStream
|
|
|
+ .computeMessageSize(1, block_);
|
|
|
+ }
|
|
|
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
|
|
|
+ size += com.google.protobuf.CodedOutputStream
|
|
|
+ .computeMessageSize(2, token_);
|
|
|
+ }
|
|
|
+ size += getUnknownFields().getSerializedSize();
|
|
|
+ memoizedSerializedSize = size;
|
|
|
+ return size;
|
|
|
+ }
|
|
|
+
|
|
|
+ private static final long serialVersionUID = 0L;
|
|
|
+ @java.lang.Override
|
|
|
+ protected java.lang.Object writeReplace()
|
|
|
+ throws java.io.ObjectStreamException {
|
|
|
+ return super.writeReplace();
|
|
|
+ }
|
|
|
+
|
|
|
+ @java.lang.Override
|
|
|
+ public boolean equals(final java.lang.Object obj) {
|
|
|
+ if (obj == this) {
|
|
|
+ return true;
|
|
|
+ }
|
|
|
+ if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto)) {
|
|
|
+ return super.equals(obj);
|
|
|
+ }
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto) obj;
|
|
|
+
|
|
|
+ boolean result = true;
|
|
|
+ result = result && (hasBlock() == other.hasBlock());
|
|
|
+ if (hasBlock()) {
|
|
|
+ result = result && getBlock()
|
|
|
+ .equals(other.getBlock());
|
|
|
+ }
|
|
|
+ result = result && (hasToken() == other.hasToken());
|
|
|
+ if (hasToken()) {
|
|
|
+ result = result && getToken()
|
|
|
+ .equals(other.getToken());
|
|
|
+ }
|
|
|
+ result = result &&
|
|
|
+ getUnknownFields().equals(other.getUnknownFields());
|
|
|
+ return result;
|
|
|
+ }
|
|
|
+
|
|
|
+ @java.lang.Override
|
|
|
+ public int hashCode() {
|
|
|
+ int hash = 41;
|
|
|
+ hash = (19 * hash) + getDescriptorForType().hashCode();
|
|
|
+ if (hasBlock()) {
|
|
|
+ hash = (37 * hash) + BLOCK_FIELD_NUMBER;
|
|
|
+ hash = (53 * hash) + getBlock().hashCode();
|
|
|
+ }
|
|
|
+ if (hasToken()) {
|
|
|
+ hash = (37 * hash) + TOKEN_FIELD_NUMBER;
|
|
|
+ hash = (53 * hash) + getToken().hashCode();
|
|
|
+ }
|
|
|
+ hash = (29 * hash) + getUnknownFields().hashCode();
|
|
|
+ return hash;
|
|
|
+ }
|
|
|
+
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto parseFrom(
|
|
|
+ com.google.protobuf.ByteString data)
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
+ return newBuilder().mergeFrom(data).buildParsed();
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto parseFrom(
|
|
|
+ com.google.protobuf.ByteString data,
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
+ return newBuilder().mergeFrom(data, extensionRegistry)
|
|
|
+ .buildParsed();
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto parseFrom(byte[] data)
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
+ return newBuilder().mergeFrom(data).buildParsed();
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto parseFrom(
|
|
|
+ byte[] data,
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
+ return newBuilder().mergeFrom(data, extensionRegistry)
|
|
|
+ .buildParsed();
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto parseFrom(java.io.InputStream input)
|
|
|
+ throws java.io.IOException {
|
|
|
+ return newBuilder().mergeFrom(input).buildParsed();
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto parseFrom(
|
|
|
+ java.io.InputStream input,
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
+ throws java.io.IOException {
|
|
|
+ return newBuilder().mergeFrom(input, extensionRegistry)
|
|
|
+ .buildParsed();
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto parseDelimitedFrom(java.io.InputStream input)
|
|
|
+ throws java.io.IOException {
|
|
|
+ Builder builder = newBuilder();
|
|
|
+ if (builder.mergeDelimitedFrom(input)) {
|
|
|
+ return builder.buildParsed();
|
|
|
+ } else {
|
|
|
+ return null;
|
|
|
+ }
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto parseDelimitedFrom(
|
|
|
+ java.io.InputStream input,
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
+ throws java.io.IOException {
|
|
|
+ Builder builder = newBuilder();
|
|
|
+ if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
|
|
|
+ return builder.buildParsed();
|
|
|
+ } else {
|
|
|
+ return null;
|
|
|
+ }
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto parseFrom(
|
|
|
+ com.google.protobuf.CodedInputStream input)
|
|
|
+ throws java.io.IOException {
|
|
|
+ return newBuilder().mergeFrom(input).buildParsed();
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto parseFrom(
|
|
|
+ com.google.protobuf.CodedInputStream input,
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
+ throws java.io.IOException {
|
|
|
+ return newBuilder().mergeFrom(input, extensionRegistry)
|
|
|
+ .buildParsed();
|
|
|
+ }
|
|
|
+
|
|
|
+ public static Builder newBuilder() { return Builder.create(); }
|
|
|
+ public Builder newBuilderForType() { return newBuilder(); }
|
|
|
+ public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto prototype) {
|
|
|
+ return newBuilder().mergeFrom(prototype);
|
|
|
+ }
|
|
|
+ public Builder toBuilder() { return newBuilder(this); }
|
|
|
+
|
|
|
+ @java.lang.Override
|
|
|
+ protected Builder newBuilderForType(
|
|
|
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
|
|
|
+ Builder builder = new Builder(parent);
|
|
|
+ return builder;
|
|
|
+ }
|
|
|
+ public static final class Builder extends
|
|
|
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
|
|
|
+ implements org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProtoOrBuilder {
|
|
|
+ public static final com.google.protobuf.Descriptors.Descriptor
|
|
|
+ getDescriptor() {
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_GetBlockLocalPathInfoRequestProto_descriptor;
|
|
|
+ }
|
|
|
+
|
|
|
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
|
|
+ internalGetFieldAccessorTable() {
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_GetBlockLocalPathInfoRequestProto_fieldAccessorTable;
|
|
|
+ }
|
|
|
+
|
|
|
+ // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto.newBuilder()
|
|
|
+ private Builder() {
|
|
|
+ maybeForceBuilderInitialization();
|
|
|
+ }
|
|
|
+
|
|
|
+ private Builder(BuilderParent parent) {
|
|
|
+ super(parent);
|
|
|
+ maybeForceBuilderInitialization();
|
|
|
+ }
|
|
|
+ private void maybeForceBuilderInitialization() {
|
|
|
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
|
|
|
+ getBlockFieldBuilder();
|
|
|
+ getTokenFieldBuilder();
|
|
|
+ }
+ }
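+      // Editorial note, not part of the generated file: each nested-message
+      // field is held either directly (block_, token_) or behind a
+      // SingleFieldBuilder (blockBuilder_, tokenBuilder_) once field builders
+      // are forced on; the SingleFieldBuilder path allows in-place editing via
+      // getBlockBuilder() and forwards change notifications to this builder.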
+ private static Builder create() {
|
|
|
+ return new Builder();
|
|
|
+ }
|
|
|
+
|
|
|
+ public Builder clear() {
|
|
|
+ super.clear();
|
|
|
+ if (blockBuilder_ == null) {
|
|
|
+ block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance();
|
|
|
+ } else {
|
|
|
+ blockBuilder_.clear();
|
|
|
+ }
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000001);
|
|
|
+ if (tokenBuilder_ == null) {
|
|
|
+ token_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto.getDefaultInstance();
|
|
|
+ } else {
|
|
|
+ tokenBuilder_.clear();
|
|
|
+ }
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000002);
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+
|
|
|
+ public Builder clone() {
|
|
|
+ return create().mergeFrom(buildPartial());
|
|
|
+ }
|
|
|
+
|
|
|
+ public com.google.protobuf.Descriptors.Descriptor
|
|
|
+ getDescriptorForType() {
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto.getDescriptor();
|
|
|
+ }
|
|
|
+
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto getDefaultInstanceForType() {
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto.getDefaultInstance();
|
|
|
+ }
|
|
|
+
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto build() {
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto result = buildPartial();
|
|
|
+ if (!result.isInitialized()) {
|
|
|
+ throw newUninitializedMessageException(result);
|
|
|
+ }
|
|
|
+ return result;
|
|
|
+ }
|
|
|
+
|
|
|
+ private org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto buildParsed()
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto result = buildPartial();
|
|
|
+ if (!result.isInitialized()) {
|
|
|
+ throw newUninitializedMessageException(
|
|
|
+ result).asInvalidProtocolBufferException();
|
|
|
+ }
|
|
|
+ return result;
|
|
|
+ }
|
|
|
+
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto buildPartial() {
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto(this);
|
|
|
+ int from_bitField0_ = bitField0_;
|
|
|
+ int to_bitField0_ = 0;
|
|
|
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
+ to_bitField0_ |= 0x00000001;
|
|
|
+ }
|
|
|
+ if (blockBuilder_ == null) {
|
|
|
+ result.block_ = block_;
|
|
|
+ } else {
|
|
|
+ result.block_ = blockBuilder_.build();
|
|
|
+ }
|
|
|
+ if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
|
|
|
+ to_bitField0_ |= 0x00000002;
|
|
|
+ }
|
|
|
+ if (tokenBuilder_ == null) {
|
|
|
+ result.token_ = token_;
|
|
|
+ } else {
|
|
|
+ result.token_ = tokenBuilder_.build();
|
|
|
+ }
|
|
|
+ result.bitField0_ = to_bitField0_;
|
|
|
+ onBuilt();
|
|
|
+ return result;
+ }
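+      // Editorial note, not part of the generated file: bitField0_ packs the
+      // has-bits of this message's fields; buildPartial() copies across only
+      // the bits for fields that were actually set, which is what hasBlock()
+      // and hasToken() report on the built message.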
+
|
|
|
+ public Builder mergeFrom(com.google.protobuf.Message other) {
|
|
|
+ if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto) {
|
|
|
+ return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto)other);
|
|
|
+ } else {
|
|
|
+ super.mergeFrom(other);
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+ }
|
|
|
+
|
|
|
+ public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto other) {
|
|
|
+ if (other == org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto.getDefaultInstance()) return this;
|
|
|
+ if (other.hasBlock()) {
|
|
|
+ mergeBlock(other.getBlock());
|
|
|
+ }
|
|
|
+ if (other.hasToken()) {
|
|
|
+ mergeToken(other.getToken());
|
|
|
+ }
|
|
|
+ this.mergeUnknownFields(other.getUnknownFields());
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+
|
|
|
+ public final boolean isInitialized() {
|
|
|
+ if (!hasBlock()) {
|
|
|
+
|
|
|
+ return false;
|
|
|
+ }
|
|
|
+ if (!hasToken()) {
|
|
|
+
|
|
|
+ return false;
|
|
|
+ }
|
|
|
+ if (!getBlock().isInitialized()) {
|
|
|
+
|
|
|
+ return false;
|
|
|
+ }
|
|
|
+ if (!getToken().isInitialized()) {
|
|
|
+
|
|
|
+ return false;
|
|
|
+ }
|
|
|
+ return true;
|
|
|
+ }
|
|
|
+
|
|
|
+ public Builder mergeFrom(
|
|
|
+ com.google.protobuf.CodedInputStream input,
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
+ throws java.io.IOException {
|
|
|
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
|
|
|
+ com.google.protobuf.UnknownFieldSet.newBuilder(
|
|
|
+ this.getUnknownFields());
|
|
|
+ while (true) {
|
|
|
+ int tag = input.readTag();
|
|
|
+ switch (tag) {
|
|
|
+ case 0:
|
|
|
+ this.setUnknownFields(unknownFields.build());
|
|
|
+ onChanged();
|
|
|
+ return this;
|
|
|
+ default: {
|
|
|
+ if (!parseUnknownField(input, unknownFields,
|
|
|
+ extensionRegistry, tag)) {
|
|
|
+ this.setUnknownFields(unknownFields.build());
|
|
|
+ onChanged();
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+ break;
|
|
|
+ }
|
|
|
+ case 10: {
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.newBuilder();
|
|
|
+ if (hasBlock()) {
|
|
|
+ subBuilder.mergeFrom(getBlock());
|
|
|
+ }
|
|
|
+ input.readMessage(subBuilder, extensionRegistry);
|
|
|
+ setBlock(subBuilder.buildPartial());
|
|
|
+ break;
|
|
|
+ }
|
|
|
+ case 18: {
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto.newBuilder();
|
|
|
+ if (hasToken()) {
|
|
|
+ subBuilder.mergeFrom(getToken());
|
|
|
+ }
|
|
|
+ input.readMessage(subBuilder, extensionRegistry);
|
|
|
+ setToken(subBuilder.buildPartial());
|
|
|
+ break;
|
|
|
+ }
|
|
|
+ }
|
|
|
+ }
|
|
|
+ }
|
|
|
+
|
|
|
+ private int bitField0_;
|
|
|
+
|
|
|
+ // required .ExtendedBlockProto block = 1;
|
|
|
+ private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance();
|
|
|
+ private com.google.protobuf.SingleFieldBuilder<
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder> blockBuilder_;
|
|
|
+ public boolean hasBlock() {
|
|
|
+ return ((bitField0_ & 0x00000001) == 0x00000001);
|
|
|
+ }
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getBlock() {
|
|
|
+ if (blockBuilder_ == null) {
|
|
|
+ return block_;
|
|
|
+ } else {
|
|
|
+ return blockBuilder_.getMessage();
|
|
|
+ }
|
|
|
+ }
|
|
|
+ public Builder setBlock(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto value) {
|
|
|
+ if (blockBuilder_ == null) {
|
|
|
+ if (value == null) {
|
|
|
+ throw new NullPointerException();
|
|
|
+ }
|
|
|
+ block_ = value;
|
|
|
+ onChanged();
|
|
|
+ } else {
|
|
|
+ blockBuilder_.setMessage(value);
|
|
|
+ }
|
|
|
+ bitField0_ |= 0x00000001;
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+ public Builder setBlock(
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder builderForValue) {
|
|
|
+ if (blockBuilder_ == null) {
|
|
|
+ block_ = builderForValue.build();
|
|
|
+ onChanged();
|
|
|
+ } else {
|
|
|
+ blockBuilder_.setMessage(builderForValue.build());
|
|
|
+ }
|
|
|
+ bitField0_ |= 0x00000001;
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+ public Builder mergeBlock(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto value) {
|
|
|
+ if (blockBuilder_ == null) {
|
|
|
+ if (((bitField0_ & 0x00000001) == 0x00000001) &&
|
|
|
+ block_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance()) {
|
|
|
+ block_ =
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.newBuilder(block_).mergeFrom(value).buildPartial();
|
|
|
+ } else {
|
|
|
+ block_ = value;
|
|
|
+ }
|
|
|
+ onChanged();
|
|
|
+ } else {
|
|
|
+ blockBuilder_.mergeFrom(value);
|
|
|
+ }
|
|
|
+ bitField0_ |= 0x00000001;
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+ public Builder clearBlock() {
|
|
|
+ if (blockBuilder_ == null) {
|
|
|
+ block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance();
|
|
|
+ onChanged();
|
|
|
+ } else {
|
|
|
+ blockBuilder_.clear();
|
|
|
+ }
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000001);
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder getBlockBuilder() {
|
|
|
+ bitField0_ |= 0x00000001;
|
|
|
+ onChanged();
|
|
|
+ return getBlockFieldBuilder().getBuilder();
|
|
|
+ }
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder getBlockOrBuilder() {
|
|
|
+ if (blockBuilder_ != null) {
|
|
|
+ return blockBuilder_.getMessageOrBuilder();
|
|
|
+ } else {
|
|
|
+ return block_;
|
|
|
+ }
|
|
|
+ }
|
|
|
+ private com.google.protobuf.SingleFieldBuilder<
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder>
|
|
|
+ getBlockFieldBuilder() {
|
|
|
+ if (blockBuilder_ == null) {
|
|
|
+ blockBuilder_ = new com.google.protobuf.SingleFieldBuilder<
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder>(
|
|
|
+ block_,
|
|
|
+ getParentForChildren(),
|
|
|
+ isClean());
|
|
|
+ block_ = null;
|
|
|
+ }
|
|
|
+ return blockBuilder_;
|
|
|
+ }
|
|
|
+
|
|
|
+ // required .BlockTokenIdentifierProto token = 2;
|
|
|
+ private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto token_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto.getDefaultInstance();
|
|
|
+ private com.google.protobuf.SingleFieldBuilder<
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProtoOrBuilder> tokenBuilder_;
|
|
|
+ public boolean hasToken() {
|
|
|
+ return ((bitField0_ & 0x00000002) == 0x00000002);
|
|
|
+ }
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto getToken() {
|
|
|
+ if (tokenBuilder_ == null) {
|
|
|
+ return token_;
|
|
|
+ } else {
|
|
|
+ return tokenBuilder_.getMessage();
|
|
|
+ }
|
|
|
+ }
|
|
|
+ public Builder setToken(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto value) {
|
|
|
+ if (tokenBuilder_ == null) {
|
|
|
+ if (value == null) {
|
|
|
+ throw new NullPointerException();
|
|
|
+ }
|
|
|
+ token_ = value;
|
|
|
+ onChanged();
|
|
|
+ } else {
|
|
|
+ tokenBuilder_.setMessage(value);
|
|
|
+ }
|
|
|
+ bitField0_ |= 0x00000002;
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+ public Builder setToken(
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto.Builder builderForValue) {
|
|
|
+ if (tokenBuilder_ == null) {
|
|
|
+ token_ = builderForValue.build();
|
|
|
+ onChanged();
|
|
|
+ } else {
|
|
|
+ tokenBuilder_.setMessage(builderForValue.build());
|
|
|
+ }
|
|
|
+ bitField0_ |= 0x00000002;
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+ public Builder mergeToken(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto value) {
|
|
|
+ if (tokenBuilder_ == null) {
|
|
|
+ if (((bitField0_ & 0x00000002) == 0x00000002) &&
|
|
|
+ token_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto.getDefaultInstance()) {
|
|
|
+ token_ =
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto.newBuilder(token_).mergeFrom(value).buildPartial();
|
|
|
+ } else {
|
|
|
+ token_ = value;
|
|
|
+ }
|
|
|
+ onChanged();
|
|
|
+ } else {
|
|
|
+ tokenBuilder_.mergeFrom(value);
|
|
|
+ }
|
|
|
+ bitField0_ |= 0x00000002;
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+ public Builder clearToken() {
|
|
|
+ if (tokenBuilder_ == null) {
|
|
|
+ token_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto.getDefaultInstance();
|
|
|
+ onChanged();
|
|
|
+ } else {
|
|
|
+ tokenBuilder_.clear();
|
|
|
+ }
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000002);
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto.Builder getTokenBuilder() {
|
|
|
+ bitField0_ |= 0x00000002;
|
|
|
+ onChanged();
|
|
|
+ return getTokenFieldBuilder().getBuilder();
|
|
|
+ }
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProtoOrBuilder getTokenOrBuilder() {
|
|
|
+ if (tokenBuilder_ != null) {
|
|
|
+ return tokenBuilder_.getMessageOrBuilder();
|
|
|
+ } else {
|
|
|
+ return token_;
|
|
|
+ }
|
|
|
+ }
|
|
|
+ private com.google.protobuf.SingleFieldBuilder<
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProtoOrBuilder>
|
|
|
+ getTokenFieldBuilder() {
|
|
|
+ if (tokenBuilder_ == null) {
|
|
|
+ tokenBuilder_ = new com.google.protobuf.SingleFieldBuilder<
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProtoOrBuilder>(
|
|
|
+ token_,
|
|
|
+ getParentForChildren(),
|
|
|
+ isClean());
|
|
|
+ token_ = null;
|
|
|
+ }
|
|
|
+ return tokenBuilder_;
|
|
|
+ }
|
|
|
+
|
|
|
+ // @@protoc_insertion_point(builder_scope:GetBlockLocalPathInfoRequestProto)
|
|
|
+ }
|
|
|
+
|
|
|
+ static {
|
|
|
+ defaultInstance = new GetBlockLocalPathInfoRequestProto(true);
|
|
|
+ defaultInstance.initFields();
|
|
|
+ }
|
|
|
+
|
|
|
+ // @@protoc_insertion_point(class_scope:GetBlockLocalPathInfoRequestProto)
|
|
|
+ }
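
For orientation, and not part of the patch itself: a minimal sketch of how a caller would assemble the request message defined above, using the nested-builder pattern protoc emits. The ExtendedBlockProto and BlockTokenIdentifierProto field names (poolId, blockId, generationStamp; identifier, password, kind, service) are assumed from hdfs.proto, and all values are hypothetical.

import com.google.protobuf.ByteString;
import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto;

public class GetBlockLocalPathInfoRequestExample {
  public static void main(String[] args) {
    // Both fields are 'required', so build() would throw an
    // UninitializedMessageException if either were left unset.
    GetBlockLocalPathInfoRequestProto request =
        GetBlockLocalPathInfoRequestProto.newBuilder()
            .setBlock(ExtendedBlockProto.newBuilder()   // setBlock(Builder) overload shown above
                .setPoolId("BP-0000000000-example")     // assumed field names from hdfs.proto
                .setBlockId(1073741825L)
                .setGenerationStamp(1001L))
            .setToken(BlockTokenIdentifierProto.newBuilder()
                .setIdentifier(ByteString.EMPTY)        // empty token, for the sketch only
                .setPassword(ByteString.EMPTY)
                .setKind("HDFS_BLOCK_TOKEN")
                .setService(""))
            .build();
    System.out.println(request.getBlock().getBlockId());
  }
}

Passing the sub-builders directly to setBlock()/setToken() relies on the Builder-taking overloads generated above, which call build() internally.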
+
+  public interface GetBlockLocalPathInfoResponseProtoOrBuilder
+      extends com.google.protobuf.MessageOrBuilder {
+
+    // required .ExtendedBlockProto block = 1;
+    boolean hasBlock();
+    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getBlock();
+    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder getBlockOrBuilder();
+
+    // required string localPath = 2;
+    boolean hasLocalPath();
+    String getLocalPath();
+
+    // required string localMetaPath = 3;
+    boolean hasLocalMetaPath();
+    String getLocalMetaPath();
+  }
+  public static final class GetBlockLocalPathInfoResponseProto extends
+      com.google.protobuf.GeneratedMessage
+      implements GetBlockLocalPathInfoResponseProtoOrBuilder {
+    // Use GetBlockLocalPathInfoResponseProto.newBuilder() to construct.
+    private GetBlockLocalPathInfoResponseProto(Builder builder) {
+      super(builder);
+    }
+    private GetBlockLocalPathInfoResponseProto(boolean noInit) {}
+
+    private static final GetBlockLocalPathInfoResponseProto defaultInstance;
+    public static GetBlockLocalPathInfoResponseProto getDefaultInstance() {
+      return defaultInstance;
+    }
+
+    public GetBlockLocalPathInfoResponseProto getDefaultInstanceForType() {
+      return defaultInstance;
+    }
+
+    public static final com.google.protobuf.Descriptors.Descriptor
+        getDescriptor() {
+      return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_GetBlockLocalPathInfoResponseProto_descriptor;
+    }
+
+    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+        internalGetFieldAccessorTable() {
+      return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_GetBlockLocalPathInfoResponseProto_fieldAccessorTable;
+    }
+
+    private int bitField0_;
+    // required .ExtendedBlockProto block = 1;
+    public static final int BLOCK_FIELD_NUMBER = 1;
+    private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto block_;
+    public boolean hasBlock() {
+      return ((bitField0_ & 0x00000001) == 0x00000001);
+    }
+    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getBlock() {
+      return block_;
+    }
+    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder getBlockOrBuilder() {
+      return block_;
+    }
+
+    // required string localPath = 2;
+    public static final int LOCALPATH_FIELD_NUMBER = 2;
+    private java.lang.Object localPath_;
+    public boolean hasLocalPath() {
+      return ((bitField0_ & 0x00000002) == 0x00000002);
+    }
+    public String getLocalPath() {
+      java.lang.Object ref = localPath_;
+      if (ref instanceof String) {
+        return (String) ref;
+      } else {
+        com.google.protobuf.ByteString bs =
+            (com.google.protobuf.ByteString) ref;
+        String s = bs.toStringUtf8();
+        if (com.google.protobuf.Internal.isValidUtf8(bs)) {
+          localPath_ = s;
+        }
+        return s;
+      }
+    }
+    private com.google.protobuf.ByteString getLocalPathBytes() {
+      java.lang.Object ref = localPath_;
+      if (ref instanceof String) {
+        com.google.protobuf.ByteString b =
+            com.google.protobuf.ByteString.copyFromUtf8((String) ref);
+        localPath_ = b;
+        return b;
+      } else {
+        return (com.google.protobuf.ByteString) ref;
+      }
+    }
+
+    // required string localMetaPath = 3;
+    public static final int LOCALMETAPATH_FIELD_NUMBER = 3;
+    private java.lang.Object localMetaPath_;
+    public boolean hasLocalMetaPath() {
+      return ((bitField0_ & 0x00000004) == 0x00000004);
+    }
+    public String getLocalMetaPath() {
+      java.lang.Object ref = localMetaPath_;
+      if (ref instanceof String) {
+        return (String) ref;
+      } else {
+        com.google.protobuf.ByteString bs =
+            (com.google.protobuf.ByteString) ref;
+        String s = bs.toStringUtf8();
+        if (com.google.protobuf.Internal.isValidUtf8(bs)) {
+          localMetaPath_ = s;
+        }
+        return s;
+      }
+    }
+    private com.google.protobuf.ByteString getLocalMetaPathBytes() {
+      java.lang.Object ref = localMetaPath_;
+      if (ref instanceof String) {
+        com.google.protobuf.ByteString b =
+            com.google.protobuf.ByteString.copyFromUtf8((String) ref);
+        localMetaPath_ = b;
+        return b;
+      } else {
+        return (com.google.protobuf.ByteString) ref;
+      }
+    }
+
+    private void initFields() {
+      block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance();
+      localPath_ = "";
+      localMetaPath_ = "";
+    }
+    private byte memoizedIsInitialized = -1;
+    public final boolean isInitialized() {
+      byte isInitialized = memoizedIsInitialized;
+      if (isInitialized != -1) return isInitialized == 1;
+
+      if (!hasBlock()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      if (!hasLocalPath()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      if (!hasLocalMetaPath()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      if (!getBlock().isInitialized()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      memoizedIsInitialized = 1;
+      return true;
+    }
+
+    public void writeTo(com.google.protobuf.CodedOutputStream output)
+        throws java.io.IOException {
+      getSerializedSize();
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        output.writeMessage(1, block_);
+      }
+      if (((bitField0_ & 0x00000002) == 0x00000002)) {
+        output.writeBytes(2, getLocalPathBytes());
+      }
+      if (((bitField0_ & 0x00000004) == 0x00000004)) {
+        output.writeBytes(3, getLocalMetaPathBytes());
+      }
+      getUnknownFields().writeTo(output);
+    }
+
+    private int memoizedSerializedSize = -1;
+    public int getSerializedSize() {
+      int size = memoizedSerializedSize;
+      if (size != -1) return size;
+
+      size = 0;
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeMessageSize(1, block_);
+      }
+      if (((bitField0_ & 0x00000002) == 0x00000002)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeBytesSize(2, getLocalPathBytes());
+      }
+      if (((bitField0_ & 0x00000004) == 0x00000004)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeBytesSize(3, getLocalMetaPathBytes());
+      }
+      size += getUnknownFields().getSerializedSize();
+      memoizedSerializedSize = size;
+      return size;
+    }
+
+    private static final long serialVersionUID = 0L;
+    @java.lang.Override
+    protected java.lang.Object writeReplace()
+        throws java.io.ObjectStreamException {
+      return super.writeReplace();
+    }
+
+    @java.lang.Override
+    public boolean equals(final java.lang.Object obj) {
+      if (obj == this) {
+        return true;
+      }
+      if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto)) {
+        return super.equals(obj);
+      }
+      org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto) obj;
+
+      boolean result = true;
+      result = result && (hasBlock() == other.hasBlock());
+      if (hasBlock()) {
+        result = result && getBlock()
+            .equals(other.getBlock());
+      }
+      result = result && (hasLocalPath() == other.hasLocalPath());
+      if (hasLocalPath()) {
+        result = result && getLocalPath()
+            .equals(other.getLocalPath());
+      }
+      result = result && (hasLocalMetaPath() == other.hasLocalMetaPath());
+      if (hasLocalMetaPath()) {
+        result = result && getLocalMetaPath()
+            .equals(other.getLocalMetaPath());
+      }
+      result = result &&
+          getUnknownFields().equals(other.getUnknownFields());
+      return result;
+    }
+
+    @java.lang.Override
+    public int hashCode() {
+      int hash = 41;
+      hash = (19 * hash) + getDescriptorForType().hashCode();
+      if (hasBlock()) {
+        hash = (37 * hash) + BLOCK_FIELD_NUMBER;
+        hash = (53 * hash) + getBlock().hashCode();
+      }
+      if (hasLocalPath()) {
+        hash = (37 * hash) + LOCALPATH_FIELD_NUMBER;
+        hash = (53 * hash) + getLocalPath().hashCode();
+      }
+      if (hasLocalMetaPath()) {
+        hash = (37 * hash) + LOCALMETAPATH_FIELD_NUMBER;
+        hash = (53 * hash) + getLocalMetaPath().hashCode();
+      }
+      hash = (29 * hash) + getUnknownFields().hashCode();
+      return hash;
+    }
+
+    public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto parseFrom(
+        com.google.protobuf.ByteString data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return newBuilder().mergeFrom(data).buildParsed();
+    }
+    public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto parseFrom(
+        com.google.protobuf.ByteString data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return newBuilder().mergeFrom(data, extensionRegistry)
+               .buildParsed();
+    }
+    public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto parseFrom(byte[] data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return newBuilder().mergeFrom(data).buildParsed();
+    }
+    public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto parseFrom(
+        byte[] data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return newBuilder().mergeFrom(data, extensionRegistry)
+               .buildParsed();
+    }
+    public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto parseFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return newBuilder().mergeFrom(input).buildParsed();
+    }
+    public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto parseFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return newBuilder().mergeFrom(input, extensionRegistry)
+               .buildParsed();
+    }
+    public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto parseDelimitedFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      Builder builder = newBuilder();
+      if (builder.mergeDelimitedFrom(input)) {
+        return builder.buildParsed();
+      } else {
+        return null;
+      }
+    }
+    public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto parseDelimitedFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      Builder builder = newBuilder();
+      if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
+        return builder.buildParsed();
+      } else {
+        return null;
+      }
+    }
+    public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto parseFrom(
+        com.google.protobuf.CodedInputStream input)
+        throws java.io.IOException {
+      return newBuilder().mergeFrom(input).buildParsed();
+    }
+    public static org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto parseFrom(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return newBuilder().mergeFrom(input, extensionRegistry)
+               .buildParsed();
+    }
+
+    public static Builder newBuilder() { return Builder.create(); }
+    public Builder newBuilderForType() { return newBuilder(); }
+    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto prototype) {
+      return newBuilder().mergeFrom(prototype);
+    }
+    public Builder toBuilder() { return newBuilder(this); }
+
+    @java.lang.Override
+    protected Builder newBuilderForType(
+        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+      Builder builder = new Builder(parent);
+      return builder;
+    }
+    public static final class Builder extends
+        com.google.protobuf.GeneratedMessage.Builder<Builder>
+        implements org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProtoOrBuilder {
+      public static final com.google.protobuf.Descriptors.Descriptor
+          getDescriptor() {
+        return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_GetBlockLocalPathInfoResponseProto_descriptor;
+      }
+
+      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+          internalGetFieldAccessorTable() {
+        return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.internal_static_GetBlockLocalPathInfoResponseProto_fieldAccessorTable;
+      }
+
+      // Construct using org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto.newBuilder()
+      private Builder() {
+        maybeForceBuilderInitialization();
+      }
+
+      private Builder(BuilderParent parent) {
+        super(parent);
+        maybeForceBuilderInitialization();
+      }
+      private void maybeForceBuilderInitialization() {
+        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+          getBlockFieldBuilder();
+        }
+      }
+      private static Builder create() {
+        return new Builder();
+      }
+
+      public Builder clear() {
+        super.clear();
+        if (blockBuilder_ == null) {
+          block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance();
+        } else {
+          blockBuilder_.clear();
+        }
+        bitField0_ = (bitField0_ & ~0x00000001);
+        localPath_ = "";
+        bitField0_ = (bitField0_ & ~0x00000002);
+        localMetaPath_ = "";
+        bitField0_ = (bitField0_ & ~0x00000004);
+        return this;
+      }
+
+      public Builder clone() {
+        return create().mergeFrom(buildPartial());
+      }
+
+      public com.google.protobuf.Descriptors.Descriptor
+          getDescriptorForType() {
+        return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto.getDescriptor();
+      }
+
+      public org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto getDefaultInstanceForType() {
+        return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto.getDefaultInstance();
+      }
+
+      public org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto build() {
+        org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto result = buildPartial();
+        if (!result.isInitialized()) {
+          throw newUninitializedMessageException(result);
+        }
+        return result;
+      }
+
+      private org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto buildParsed()
+          throws com.google.protobuf.InvalidProtocolBufferException {
+        org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto result = buildPartial();
+        if (!result.isInitialized()) {
+          throw newUninitializedMessageException(
+            result).asInvalidProtocolBufferException();
+        }
+        return result;
+      }
+
+      public org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto buildPartial() {
+        org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto(this);
+        int from_bitField0_ = bitField0_;
+        int to_bitField0_ = 0;
+        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+          to_bitField0_ |= 0x00000001;
+        }
+        if (blockBuilder_ == null) {
+          result.block_ = block_;
+        } else {
+          result.block_ = blockBuilder_.build();
+        }
+        if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+          to_bitField0_ |= 0x00000002;
+        }
+        result.localPath_ = localPath_;
+        if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
+          to_bitField0_ |= 0x00000004;
+        }
+        result.localMetaPath_ = localMetaPath_;
+        result.bitField0_ = to_bitField0_;
+        onBuilt();
+        return result;
+      }
+
+      public Builder mergeFrom(com.google.protobuf.Message other) {
+        if (other instanceof org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto) {
+          return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto)other);
+        } else {
+          super.mergeFrom(other);
+          return this;
+        }
+      }
+
+      public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto other) {
+        if (other == org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto.getDefaultInstance()) return this;
+        if (other.hasBlock()) {
+          mergeBlock(other.getBlock());
+        }
+        if (other.hasLocalPath()) {
+          setLocalPath(other.getLocalPath());
+        }
+        if (other.hasLocalMetaPath()) {
+          setLocalMetaPath(other.getLocalMetaPath());
+        }
+        this.mergeUnknownFields(other.getUnknownFields());
+        return this;
+      }
+
+      public final boolean isInitialized() {
+        if (!hasBlock()) {
+
+          return false;
+        }
+        if (!hasLocalPath()) {
+
+          return false;
+        }
+        if (!hasLocalMetaPath()) {
+
+          return false;
+        }
+        if (!getBlock().isInitialized()) {
+
+          return false;
+        }
+        return true;
+      }
+
+      public Builder mergeFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws java.io.IOException {
+        com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+          com.google.protobuf.UnknownFieldSet.newBuilder(
+            this.getUnknownFields());
+        while (true) {
+          int tag = input.readTag();
+          switch (tag) {
+            case 0:
+              this.setUnknownFields(unknownFields.build());
+              onChanged();
+              return this;
+            default: {
+              if (!parseUnknownField(input, unknownFields,
+                                     extensionRegistry, tag)) {
+                this.setUnknownFields(unknownFields.build());
+                onChanged();
+                return this;
+              }
+              break;
+            }
+            case 10: {
+              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.newBuilder();
+              if (hasBlock()) {
+                subBuilder.mergeFrom(getBlock());
+              }
+              input.readMessage(subBuilder, extensionRegistry);
+              setBlock(subBuilder.buildPartial());
+              break;
+            }
+            case 18: {
+              bitField0_ |= 0x00000002;
+              localPath_ = input.readBytes();
+              break;
+            }
+            case 26: {
+              bitField0_ |= 0x00000004;
+              localMetaPath_ = input.readBytes();
+              break;
+            }
+          }
+        }
+      }
+
+      private int bitField0_;
+
+      // required .ExtendedBlockProto block = 1;
+      private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance();
+      private com.google.protobuf.SingleFieldBuilder<
+          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder> blockBuilder_;
+      public boolean hasBlock() {
+        return ((bitField0_ & 0x00000001) == 0x00000001);
+      }
+      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getBlock() {
+        if (blockBuilder_ == null) {
+          return block_;
+        } else {
+          return blockBuilder_.getMessage();
+        }
+      }
+      public Builder setBlock(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto value) {
+        if (blockBuilder_ == null) {
+          if (value == null) {
+            throw new NullPointerException();
+          }
+          block_ = value;
+          onChanged();
+        } else {
+          blockBuilder_.setMessage(value);
+        }
+        bitField0_ |= 0x00000001;
+        return this;
+      }
+      public Builder setBlock(
+          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder builderForValue) {
+        if (blockBuilder_ == null) {
+          block_ = builderForValue.build();
+          onChanged();
+        } else {
+          blockBuilder_.setMessage(builderForValue.build());
+        }
+        bitField0_ |= 0x00000001;
+        return this;
+      }
+      public Builder mergeBlock(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto value) {
+        if (blockBuilder_ == null) {
+          if (((bitField0_ & 0x00000001) == 0x00000001) &&
+              block_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance()) {
+            block_ =
+              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.newBuilder(block_).mergeFrom(value).buildPartial();
+          } else {
+            block_ = value;
+          }
+          onChanged();
+        } else {
+          blockBuilder_.mergeFrom(value);
+        }
+        bitField0_ |= 0x00000001;
+        return this;
+      }
+      public Builder clearBlock() {
+        if (blockBuilder_ == null) {
+          block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance();
+          onChanged();
+        } else {
+          blockBuilder_.clear();
+        }
+        bitField0_ = (bitField0_ & ~0x00000001);
+        return this;
+      }
+      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder getBlockBuilder() {
+        bitField0_ |= 0x00000001;
+        onChanged();
+        return getBlockFieldBuilder().getBuilder();
+      }
+      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder getBlockOrBuilder() {
+        if (blockBuilder_ != null) {
+          return blockBuilder_.getMessageOrBuilder();
+        } else {
+          return block_;
+        }
+      }
+      private com.google.protobuf.SingleFieldBuilder<
+          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder>
+          getBlockFieldBuilder() {
+        if (blockBuilder_ == null) {
+          blockBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder>(
+                  block_,
+                  getParentForChildren(),
+                  isClean());
+          block_ = null;
+        }
+        return blockBuilder_;
+      }
+
+      // required string localPath = 2;
+      private java.lang.Object localPath_ = "";
+      public boolean hasLocalPath() {
+        return ((bitField0_ & 0x00000002) == 0x00000002);
+      }
+      public String getLocalPath() {
+        java.lang.Object ref = localPath_;
+        if (!(ref instanceof String)) {
+          String s = ((com.google.protobuf.ByteString) ref).toStringUtf8();
+          localPath_ = s;
+          return s;
+        } else {
+          return (String) ref;
+        }
+      }
+      public Builder setLocalPath(String value) {
+        if (value == null) {
+          throw new NullPointerException();
+        }
+        bitField0_ |= 0x00000002;
+        localPath_ = value;
+        onChanged();
+        return this;
+      }
+      public Builder clearLocalPath() {
+        bitField0_ = (bitField0_ & ~0x00000002);
+        localPath_ = getDefaultInstance().getLocalPath();
+        onChanged();
+        return this;
+      }
+      void setLocalPath(com.google.protobuf.ByteString value) {
+        bitField0_ |= 0x00000002;
+        localPath_ = value;
+        onChanged();
+      }
+
+      // required string localMetaPath = 3;
+      private java.lang.Object localMetaPath_ = "";
+      public boolean hasLocalMetaPath() {
+        return ((bitField0_ & 0x00000004) == 0x00000004);
+      }
+      public String getLocalMetaPath() {
+        java.lang.Object ref = localMetaPath_;
+        if (!(ref instanceof String)) {
+          String s = ((com.google.protobuf.ByteString) ref).toStringUtf8();
+          localMetaPath_ = s;
+          return s;
+        } else {
+          return (String) ref;
+        }
+      }
+      public Builder setLocalMetaPath(String value) {
+        if (value == null) {
+          throw new NullPointerException();
+        }
+        bitField0_ |= 0x00000004;
+        localMetaPath_ = value;
+        onChanged();
+        return this;
+      }
+      public Builder clearLocalMetaPath() {
+        bitField0_ = (bitField0_ & ~0x00000004);
+        localMetaPath_ = getDefaultInstance().getLocalMetaPath();
+        onChanged();
+        return this;
+      }
+      void setLocalMetaPath(com.google.protobuf.ByteString value) {
+        bitField0_ |= 0x00000004;
+        localMetaPath_ = value;
+        onChanged();
+      }
+
+      // @@protoc_insertion_point(builder_scope:GetBlockLocalPathInfoResponseProto)
+    }
+
+    static {
+      defaultInstance = new GetBlockLocalPathInfoResponseProto(true);
+      defaultInstance.initFields();
+    }
+
+    // @@protoc_insertion_point(class_scope:GetBlockLocalPathInfoResponseProto)
+  }
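
Again as a sketch outside the patch: the response message round-trips through the writeTo()/getSerializedSize() pair and the static parseFrom() overloads generated above. The block field names are assumed from hdfs.proto and the paths are made up.

import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto;

public class GetBlockLocalPathInfoResponseExample {
  public static void main(String[] args) throws Exception {
    GetBlockLocalPathInfoResponseProto response =
        GetBlockLocalPathInfoResponseProto.newBuilder()
            .setBlock(ExtendedBlockProto.newBuilder()
                .setPoolId("BP-0000000000-example")   // assumed field names
                .setBlockId(1073741825L)
                .setGenerationStamp(1001L))
            .setLocalPath("/data/dn/current/blk_1073741825")          // hypothetical path
            .setLocalMetaPath("/data/dn/current/blk_1073741825.meta") // hypothetical path
            .build();
    // toByteArray() drives writeTo(); parseFrom() rebuilds the message via
    // buildParsed(), which re-checks the three 'required' fields.
    byte[] wire = response.toByteArray();
    GetBlockLocalPathInfoResponseProto parsed =
        GetBlockLocalPathInfoResponseProto.parseFrom(wire);
    System.out.println(parsed.getLocalPath().equals(response.getLocalPath())); // true
  }
}

Note that getLocalPath() on the parsed message lazily converts the wire-format ByteString to a String on first access, exactly as the generated accessor above shows.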
+
+  public static abstract class ClientDatanodeProtocolService
+      implements com.google.protobuf.Service {
+    protected ClientDatanodeProtocolService() {}
+
+    public interface Interface {
+      public abstract void getReplicaVisibleLength(
+          com.google.protobuf.RpcController controller,
+          org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto request,
+          com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto> done);
+
+      public abstract void refreshNamenode(
+          com.google.protobuf.RpcController controller,
+          org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto request,
+          com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto> done);
+
+      public abstract void deleteBlockPool(
+          com.google.protobuf.RpcController controller,
+          org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto request,
+          com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto> done);
+
+      public abstract void getBlockLocalPathInfo(
+          com.google.protobuf.RpcController controller,
+          org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto request,
+          com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto> done);
+
+    }
+
+    public static com.google.protobuf.Service newReflectiveService(
+        final Interface impl) {
+      return new ClientDatanodeProtocolService() {
+        @java.lang.Override
+        public void getReplicaVisibleLength(
+            com.google.protobuf.RpcController controller,
+            org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto request,
+            com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto> done) {
+          impl.getReplicaVisibleLength(controller, request, done);
+        }
+
+        @java.lang.Override
+        public void refreshNamenode(
+            com.google.protobuf.RpcController controller,
+            org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto request,
+            com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto> done) {
+          impl.refreshNamenode(controller, request, done);
+        }
+
+        @java.lang.Override
+        public void deleteBlockPool(
+            com.google.protobuf.RpcController controller,
+            org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto request,
+            com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto> done) {
+          impl.deleteBlockPool(controller, request, done);
+        }
+
+        @java.lang.Override
+        public void getBlockLocalPathInfo(
+            com.google.protobuf.RpcController controller,
+            org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto request,
+            com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto> done) {
+          impl.getBlockLocalPathInfo(controller, request, done);
+        }
+
+      };
+    }
+
+    public static com.google.protobuf.BlockingService
+        newReflectiveBlockingService(final BlockingInterface impl) {
+      return new com.google.protobuf.BlockingService() {
+        public final com.google.protobuf.Descriptors.ServiceDescriptor
+            getDescriptorForType() {
+          return getDescriptor();
+        }
+
+        public final com.google.protobuf.Message callBlockingMethod(
+            com.google.protobuf.Descriptors.MethodDescriptor method,
+            com.google.protobuf.RpcController controller,
+            com.google.protobuf.Message request)
+            throws com.google.protobuf.ServiceException {
+          if (method.getService() != getDescriptor()) {
+            throw new java.lang.IllegalArgumentException(
+              "Service.callBlockingMethod() given method descriptor for " +
+              "wrong service type.");
+          }
+          switch(method.getIndex()) {
+            case 0:
+              return impl.getReplicaVisibleLength(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto)request);
+            case 1:
+              return impl.refreshNamenode(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto)request);
+            case 2:
+              return impl.deleteBlockPool(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto)request);
+            case 3:
+              return impl.getBlockLocalPathInfo(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto)request);
+            default:
+              throw new java.lang.AssertionError("Can't get here.");
+          }
+        }
+
+        public final com.google.protobuf.Message
+            getRequestPrototype(
+            com.google.protobuf.Descriptors.MethodDescriptor method) {
+          if (method.getService() != getDescriptor()) {
+            throw new java.lang.IllegalArgumentException(
+              "Service.getRequestPrototype() given method " +
+              "descriptor for wrong service type.");
+          }
+          switch(method.getIndex()) {
+            case 0:
+              return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto.getDefaultInstance();
+            case 1:
+              return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto.getDefaultInstance();
+            case 2:
+              return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto.getDefaultInstance();
+            case 3:
+              return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto.getDefaultInstance();
+            default:
+              throw new java.lang.AssertionError("Can't get here.");
+          }
+        }
+
+        public final com.google.protobuf.Message
+            getResponsePrototype(
+            com.google.protobuf.Descriptors.MethodDescriptor method) {
+          if (method.getService() != getDescriptor()) {
+            throw new java.lang.IllegalArgumentException(
+              "Service.getResponsePrototype() given method " +
+              "descriptor for wrong service type.");
+          }
+          switch(method.getIndex()) {
+            case 0:
+              return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto.getDefaultInstance();
+            case 1:
+              return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto.getDefaultInstance();
+            case 2:
+              return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto.getDefaultInstance();
+            case 3:
+              return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto.getDefaultInstance();
+            default:
+              throw new java.lang.AssertionError("Can't get here.");
+          }
+        }
+
+      };
+    }
+
+    public abstract void getReplicaVisibleLength(
+        com.google.protobuf.RpcController controller,
+        org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto request,
+        com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto> done);
+
+    public abstract void refreshNamenode(
+        com.google.protobuf.RpcController controller,
+        org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto request,
+        com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto> done);
+
+    public abstract void deleteBlockPool(
+        com.google.protobuf.RpcController controller,
+        org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto request,
+        com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto> done);
+
+    public abstract void getBlockLocalPathInfo(
+        com.google.protobuf.RpcController controller,
+        org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto request,
+        com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto> done);
+
+    public static final
+        com.google.protobuf.Descriptors.ServiceDescriptor
+        getDescriptor() {
+      return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.getDescriptor().getServices().get(0);
+    }
+    public final com.google.protobuf.Descriptors.ServiceDescriptor
+        getDescriptorForType() {
+      return getDescriptor();
+    }
+
+    public final void callMethod(
+        com.google.protobuf.Descriptors.MethodDescriptor method,
+        com.google.protobuf.RpcController controller,
+        com.google.protobuf.Message request,
+        com.google.protobuf.RpcCallback<
+          com.google.protobuf.Message> done) {
+      if (method.getService() != getDescriptor()) {
+        throw new java.lang.IllegalArgumentException(
+          "Service.callMethod() given method descriptor for wrong " +
+          "service type.");
+      }
+      switch(method.getIndex()) {
+        case 0:
+          this.getReplicaVisibleLength(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto)request,
+            com.google.protobuf.RpcUtil.<org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto>specializeCallback(
+              done));
+          return;
+        case 1:
+          this.refreshNamenode(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto)request,
+            com.google.protobuf.RpcUtil.<org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto>specializeCallback(
+              done));
+          return;
+        case 2:
+          this.deleteBlockPool(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto)request,
+            com.google.protobuf.RpcUtil.<org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto>specializeCallback(
+              done));
+          return;
+        case 3:
+          this.getBlockLocalPathInfo(controller, (org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto)request,
+            com.google.protobuf.RpcUtil.<org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto>specializeCallback(
+              done));
+          return;
+        default:
+          throw new java.lang.AssertionError("Can't get here.");
+      }
+    }
+
+    public final com.google.protobuf.Message
+        getRequestPrototype(
+        com.google.protobuf.Descriptors.MethodDescriptor method) {
+      if (method.getService() != getDescriptor()) {
+        throw new java.lang.IllegalArgumentException(
+          "Service.getRequestPrototype() given method " +
+          "descriptor for wrong service type.");
+      }
+      switch(method.getIndex()) {
+        case 0:
+          return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto.getDefaultInstance();
+        case 1:
+          return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto.getDefaultInstance();
+        case 2:
+          return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto.getDefaultInstance();
+        case 3:
+          return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto.getDefaultInstance();
+        default:
+          throw new java.lang.AssertionError("Can't get here.");
+      }
+    }
+
+    public final com.google.protobuf.Message
+        getResponsePrototype(
+        com.google.protobuf.Descriptors.MethodDescriptor method) {
+      if (method.getService() != getDescriptor()) {
+        throw new java.lang.IllegalArgumentException(
+          "Service.getResponsePrototype() given method " +
+          "descriptor for wrong service type.");
+      }
+      switch(method.getIndex()) {
+        case 0:
+          return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto.getDefaultInstance();
+        case 1:
+          return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto.getDefaultInstance();
+        case 2:
+          return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto.getDefaultInstance();
+        case 3:
+          return org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto.getDefaultInstance();
+        default:
+          throw new java.lang.AssertionError("Can't get here.");
+      }
+    }
+
+    public static Stub newStub(
+        com.google.protobuf.RpcChannel channel) {
+      return new Stub(channel);
+    }
+
+    public static final class Stub extends org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ClientDatanodeProtocolService implements Interface {
+      private Stub(com.google.protobuf.RpcChannel channel) {
+        this.channel = channel;
+      }
+
+      private final com.google.protobuf.RpcChannel channel;
+
+      public com.google.protobuf.RpcChannel getChannel() {
+        return channel;
+      }
+
+      public void getReplicaVisibleLength(
+          com.google.protobuf.RpcController controller,
+          org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto request,
+          com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto> done) {
+        channel.callMethod(
+          getDescriptor().getMethods().get(0),
+          controller,
+          request,
+          org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto.getDefaultInstance(),
+          com.google.protobuf.RpcUtil.generalizeCallback(
+            done,
+            org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto.class,
+            org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto.getDefaultInstance()));
+      }
+
+      public void refreshNamenode(
+          com.google.protobuf.RpcController controller,
+          org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto request,
+          com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto> done) {
+        channel.callMethod(
+          getDescriptor().getMethods().get(1),
+          controller,
+          request,
+          org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto.getDefaultInstance(),
+          com.google.protobuf.RpcUtil.generalizeCallback(
+            done,
+            org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto.class,
+            org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto.getDefaultInstance()));
+      }
+
+      public void deleteBlockPool(
+          com.google.protobuf.RpcController controller,
+          org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto request,
+          com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto> done) {
+        channel.callMethod(
+          getDescriptor().getMethods().get(2),
+          controller,
+          request,
+          org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto.getDefaultInstance(),
+          com.google.protobuf.RpcUtil.generalizeCallback(
+            done,
+            org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto.class,
+            org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto.getDefaultInstance()));
+      }
+
+      public void getBlockLocalPathInfo(
+          com.google.protobuf.RpcController controller,
+          org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto request,
+          com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto> done) {
+        channel.callMethod(
+          getDescriptor().getMethods().get(3),
+          controller,
+          request,
+          org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto.getDefaultInstance(),
+          com.google.protobuf.RpcUtil.generalizeCallback(
+            done,
+            org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto.class,
+            org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto.getDefaultInstance()));
+      }
+    }
+
+    public static BlockingInterface newBlockingStub(
+        com.google.protobuf.BlockingRpcChannel channel) {
+      return new BlockingStub(channel);
+    }
+
+    public interface BlockingInterface {
+      public org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto getReplicaVisibleLength(
+          com.google.protobuf.RpcController controller,
+          org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto request)
+          throws com.google.protobuf.ServiceException;
+
+      public org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto refreshNamenode(
+          com.google.protobuf.RpcController controller,
+          org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto request)
+          throws com.google.protobuf.ServiceException;
+
+      public org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto deleteBlockPool(
+          com.google.protobuf.RpcController controller,
+          org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto request)
+          throws com.google.protobuf.ServiceException;
+
+      public org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto getBlockLocalPathInfo(
+          com.google.protobuf.RpcController controller,
+          org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto request)
+          throws com.google.protobuf.ServiceException;
+    }
+
+    private static final class BlockingStub implements BlockingInterface {
+      private BlockingStub(com.google.protobuf.BlockingRpcChannel channel) {
+        this.channel = channel;
+      }
+
+      private final com.google.protobuf.BlockingRpcChannel channel;
+
+      public org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto getReplicaVisibleLength(
+          com.google.protobuf.RpcController controller,
+          org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto request)
+          throws com.google.protobuf.ServiceException {
+        return (org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto) channel.callBlockingMethod(
+          getDescriptor().getMethods().get(0),
+          controller,
+          request,
+          org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto.getDefaultInstance());
+      }
+
+
+      public org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto refreshNamenode(
+          com.google.protobuf.RpcController controller,
+          org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto request)
+          throws com.google.protobuf.ServiceException {
+        return (org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto) channel.callBlockingMethod(
+          getDescriptor().getMethods().get(1),
+          controller,
+          request,
+          org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto.getDefaultInstance());
+      }
+
+
+      public org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto deleteBlockPool(
+          com.google.protobuf.RpcController controller,
+          org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto request)
+          throws com.google.protobuf.ServiceException {
+        return (org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto) channel.callBlockingMethod(
+          getDescriptor().getMethods().get(2),
+          controller,
+          request,
+          org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto.getDefaultInstance());
+      }
+
+
+      public org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto getBlockLocalPathInfo(
+          com.google.protobuf.RpcController controller,
+          org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto request)
+          throws com.google.protobuf.ServiceException {
+        return (org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto) channel.callBlockingMethod(
+          getDescriptor().getMethods().get(3),
+          controller,
+          request,
+          org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto.getDefaultInstance());
+      }
+
+    }
+  }
+
+  private static com.google.protobuf.Descriptors.Descriptor
+    internal_static_GetReplicaVisibleLengthRequestProto_descriptor;
+  private static
+    com.google.protobuf.GeneratedMessage.FieldAccessorTable
+      internal_static_GetReplicaVisibleLengthRequestProto_fieldAccessorTable;
+  private static com.google.protobuf.Descriptors.Descriptor
+    internal_static_GetReplicaVisibleLengthResponseProto_descriptor;
+  private static
+    com.google.protobuf.GeneratedMessage.FieldAccessorTable
+      internal_static_GetReplicaVisibleLengthResponseProto_fieldAccessorTable;
+  private static com.google.protobuf.Descriptors.Descriptor
+    internal_static_RefreshNamenodesRequestProto_descriptor;
+  private static
+    com.google.protobuf.GeneratedMessage.FieldAccessorTable
+      internal_static_RefreshNamenodesRequestProto_fieldAccessorTable;
+  private static com.google.protobuf.Descriptors.Descriptor
+    internal_static_RefreshNamenodesResponseProto_descriptor;
+  private static
+    com.google.protobuf.GeneratedMessage.FieldAccessorTable
+      internal_static_RefreshNamenodesResponseProto_fieldAccessorTable;
+  private static com.google.protobuf.Descriptors.Descriptor
+    internal_static_DeleteBlockPoolRequestProto_descriptor;
+  private static
+    com.google.protobuf.GeneratedMessage.FieldAccessorTable
+      internal_static_DeleteBlockPoolRequestProto_fieldAccessorTable;
+  private static com.google.protobuf.Descriptors.Descriptor
+    internal_static_DeleteBlockPoolResponseProto_descriptor;
+  private static
+    com.google.protobuf.GeneratedMessage.FieldAccessorTable
+      internal_static_DeleteBlockPoolResponseProto_fieldAccessorTable;
+  private static com.google.protobuf.Descriptors.Descriptor
+    internal_static_GetBlockLocalPathInfoRequestProto_descriptor;
+  private static
+    com.google.protobuf.GeneratedMessage.FieldAccessorTable
+      internal_static_GetBlockLocalPathInfoRequestProto_fieldAccessorTable;
+  private static com.google.protobuf.Descriptors.Descriptor
+    internal_static_GetBlockLocalPathInfoResponseProto_descriptor;
+  private static
+    com.google.protobuf.GeneratedMessage.FieldAccessorTable
+      internal_static_GetBlockLocalPathInfoResponseProto_fieldAccessorTable;
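+  // Note (editorial): the descriptor/FieldAccessorTable pairs above start out
+  // null; the static initializer at the end of this class assigns them once
+  // the file descriptor has been built.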
+
+  public static com.google.protobuf.Descriptors.FileDescriptor
+      getDescriptor() {
+    return descriptor;
+  }
+  private static com.google.protobuf.Descriptors.FileDescriptor
+      descriptor;
+  static {
+    java.lang.String[] descriptorData = {
+      "\n\034ClientDatanodeProtocol.proto\032\nhdfs.pro" +
+      "to\"I\n#GetReplicaVisibleLengthRequestProt" +
+      "o\022\"\n\005block\030\001 \002(\0132\023.ExtendedBlockProto\"6\n" +
+      "$GetReplicaVisibleLengthResponseProto\022\016\n" +
+      "\006length\030\001 \002(\004\"\036\n\034RefreshNamenodesRequest" +
+      "Proto\"\037\n\035RefreshNamenodesResponseProto\"?" +
+      "\n\033DeleteBlockPoolRequestProto\022\021\n\tblockPo" +
+      "ol\030\001 \002(\t\022\r\n\005force\030\002 \002(\010\"\036\n\034DeleteBlockPo" +
+      "olResponseProto\"r\n!GetBlockLocalPathInfo" +
+      "RequestProto\022\"\n\005block\030\001 \002(\0132\023.ExtendedBl",
+      "ockProto\022)\n\005token\030\002 \002(\0132\032.BlockTokenIden" +
+      "tifierProto\"r\n\"GetBlockLocalPathInfoResp" +
+      "onseProto\022\"\n\005block\030\001 \002(\0132\023.ExtendedBlock" +
+      "Proto\022\021\n\tlocalPath\030\002 \002(\t\022\025\n\rlocalMetaPat" +
+      "h\030\003 \002(\t2\213\003\n\035ClientDatanodeProtocolServic" +
+      "e\022f\n\027getReplicaVisibleLength\022$.GetReplic" +
+      "aVisibleLengthRequestProto\032%.GetReplicaV" +
+      "isibleLengthResponseProto\022P\n\017refreshName" +
+      "node\022\035.RefreshNamenodesRequestProto\032\036.Re" +
+      "freshNamenodesResponseProto\022N\n\017deleteBlo",
+      "ckPool\022\034.DeleteBlockPoolRequestProto\032\035.D" +
+      "eleteBlockPoolResponseProto\022`\n\025getBlockL" +
+      "ocalPathInfo\022\".GetBlockLocalPathInfoRequ" +
+      "estProto\032#.GetBlockLocalPathInfoResponse" +
+      "ProtoBK\n%org.apache.hadoop.hdfs.protocol" +
+      ".protoB\034ClientDatanodeProtocolProtos\210\001\001\240" +
+      "\001\001"
+    };
+    com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
+      new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
+        public com.google.protobuf.ExtensionRegistry assignDescriptors(
+            com.google.protobuf.Descriptors.FileDescriptor root) {
+          descriptor = root;
+          internal_static_GetReplicaVisibleLengthRequestProto_descriptor =
+            getDescriptor().getMessageTypes().get(0);
+          internal_static_GetReplicaVisibleLengthRequestProto_fieldAccessorTable = new
+            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+              internal_static_GetReplicaVisibleLengthRequestProto_descriptor,
+              new java.lang.String[] { "Block", },
+              org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto.class,
+              org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto.Builder.class);
+          internal_static_GetReplicaVisibleLengthResponseProto_descriptor =
+            getDescriptor().getMessageTypes().get(1);
+          internal_static_GetReplicaVisibleLengthResponseProto_fieldAccessorTable = new
+            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+              internal_static_GetReplicaVisibleLengthResponseProto_descriptor,
+              new java.lang.String[] { "Length", },
+              org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto.class,
+              org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto.Builder.class);
+          internal_static_RefreshNamenodesRequestProto_descriptor =
+            getDescriptor().getMessageTypes().get(2);
+          internal_static_RefreshNamenodesRequestProto_fieldAccessorTable = new
+            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+              internal_static_RefreshNamenodesRequestProto_descriptor,
+              new java.lang.String[] { },
+              org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto.class,
+              org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto.Builder.class);
+          internal_static_RefreshNamenodesResponseProto_descriptor =
+            getDescriptor().getMessageTypes().get(3);
+          internal_static_RefreshNamenodesResponseProto_fieldAccessorTable = new
+            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+              internal_static_RefreshNamenodesResponseProto_descriptor,
+              new java.lang.String[] { },
+              org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto.class,
+              org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto.Builder.class);
+          internal_static_DeleteBlockPoolRequestProto_descriptor =
+            getDescriptor().getMessageTypes().get(4);
+          internal_static_DeleteBlockPoolRequestProto_fieldAccessorTable = new
+            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+              internal_static_DeleteBlockPoolRequestProto_descriptor,
+              new java.lang.String[] { "BlockPool", "Force", },
+              org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto.class,
+              org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto.Builder.class);
+          internal_static_DeleteBlockPoolResponseProto_descriptor =
+            getDescriptor().getMessageTypes().get(5);
+          internal_static_DeleteBlockPoolResponseProto_fieldAccessorTable = new
+            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+              internal_static_DeleteBlockPoolResponseProto_descriptor,
+              new java.lang.String[] { },
+              org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto.class,
+              org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto.Builder.class);
+          internal_static_GetBlockLocalPathInfoRequestProto_descriptor =
+            getDescriptor().getMessageTypes().get(6);
+          internal_static_GetBlockLocalPathInfoRequestProto_fieldAccessorTable = new
+            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+              internal_static_GetBlockLocalPathInfoRequestProto_descriptor,
+              new java.lang.String[] { "Block", "Token", },
+              org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto.class,
+              org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto.Builder.class);
+          internal_static_GetBlockLocalPathInfoResponseProto_descriptor =
+            getDescriptor().getMessageTypes().get(7);
+          internal_static_GetBlockLocalPathInfoResponseProto_fieldAccessorTable = new
+            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+              internal_static_GetBlockLocalPathInfoResponseProto_descriptor,
+              new java.lang.String[] { "Block", "LocalPath", "LocalMetaPath", },
+              org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto.class,
+              org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto.Builder.class);
+          return null;
+        }
+      };
+    com.google.protobuf.Descriptors.FileDescriptor
+      .internalBuildGeneratedFileFrom(descriptorData,
+        new com.google.protobuf.Descriptors.FileDescriptor[] {
+          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.getDescriptor(),
+        }, assigner);
+  }
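+  // Note (editorial): descriptorData above holds the serialized
+  // FileDescriptorProto for ClientDatanodeProtocol.proto, split across string
+  // literals. internalBuildGeneratedFileFrom() parses it at class load,
+  // resolving the dependency on hdfs.proto via HdfsProtos.getDescriptor(),
+  // and the assigner callback then populates the per-message descriptors and
+  // field accessor tables declared earlier in this class.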
+
+  // @@protoc_insertion_point(outer_class_scope)
+}