|
@@ -0,0 +1,17153 @@
|
|
|
|
+// Generated by the protocol buffer compiler. DO NOT EDIT!
|
|
|
|
+// source: DatanodeProtocol.proto
|
|
|
|
+
|
|
|
|
+package org.apache.hadoop.hdfs.protocol.proto;
|
|
|
|
+
|
|
|
|
+public final class DatanodeProtocolProtos {
|
|
|
|
+ private DatanodeProtocolProtos() {}
|
|
|
|
+ public static void registerAllExtensions(
|
|
|
|
+ com.google.protobuf.ExtensionRegistry registry) {
|
|
|
|
+ }
|
|
|
|
+ public interface DatanodeRegistrationProtoOrBuilder
|
|
|
|
+ extends com.google.protobuf.MessageOrBuilder {
|
|
|
|
+
|
|
|
|
+ // required .DatanodeIDProto datanodeID = 1;
|
|
|
|
+ boolean hasDatanodeID();
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto getDatanodeID();
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder getDatanodeIDOrBuilder();
|
|
|
|
+
|
|
|
|
+ // required .StorageInfoProto storateInfo = 2;
|
|
|
|
+ boolean hasStorateInfo();
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto getStorateInfo();
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProtoOrBuilder getStorateInfoOrBuilder();
|
|
|
|
+
|
|
|
|
+ // required .ExportedBlockKeysProto keys = 3;
|
|
|
|
+ boolean hasKeys();
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto getKeys();
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProtoOrBuilder getKeysOrBuilder();
|
|
|
|
+ }
|
|
|
|
+ public static final class DatanodeRegistrationProto extends
|
|
|
|
+ com.google.protobuf.GeneratedMessage
|
|
|
|
+ implements DatanodeRegistrationProtoOrBuilder {
|
|
|
|
+ // Use DatanodeRegistrationProto.newBuilder() to construct.
|
|
|
|
+ private DatanodeRegistrationProto(Builder builder) {
|
|
|
|
+ super(builder);
|
|
|
|
+ }
|
|
|
|
+ private DatanodeRegistrationProto(boolean noInit) {}
|
|
|
|
+
|
|
|
|
+ private static final DatanodeRegistrationProto defaultInstance;
|
|
|
|
+ public static DatanodeRegistrationProto getDefaultInstance() {
|
|
|
|
+ return defaultInstance;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public DatanodeRegistrationProto getDefaultInstanceForType() {
|
|
|
|
+ return defaultInstance;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public static final com.google.protobuf.Descriptors.Descriptor
|
|
|
|
+ getDescriptor() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_DatanodeRegistrationProto_descriptor;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
|
|
|
+ internalGetFieldAccessorTable() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_DatanodeRegistrationProto_fieldAccessorTable;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private int bitField0_;
|
|
|
|
+ // required .DatanodeIDProto datanodeID = 1;
|
|
|
|
+ public static final int DATANODEID_FIELD_NUMBER = 1;
|
|
|
|
+ private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto datanodeID_;
|
|
|
|
+ public boolean hasDatanodeID() {
|
|
|
|
+ return ((bitField0_ & 0x00000001) == 0x00000001);
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto getDatanodeID() {
|
|
|
|
+ return datanodeID_;
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder getDatanodeIDOrBuilder() {
|
|
|
|
+ return datanodeID_;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ // required .StorageInfoProto storateInfo = 2;
|
|
|
|
+ public static final int STORATEINFO_FIELD_NUMBER = 2;
|
|
|
|
+ private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto storateInfo_;
|
|
|
|
+ public boolean hasStorateInfo() {
|
|
|
|
+ return ((bitField0_ & 0x00000002) == 0x00000002);
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto getStorateInfo() {
|
|
|
|
+ return storateInfo_;
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProtoOrBuilder getStorateInfoOrBuilder() {
|
|
|
|
+ return storateInfo_;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ // required .ExportedBlockKeysProto keys = 3;
|
|
|
|
+ public static final int KEYS_FIELD_NUMBER = 3;
|
|
|
|
+ private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto keys_;
|
|
|
|
+ public boolean hasKeys() {
|
|
|
|
+ return ((bitField0_ & 0x00000004) == 0x00000004);
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto getKeys() {
|
|
|
|
+ return keys_;
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProtoOrBuilder getKeysOrBuilder() {
|
|
|
|
+ return keys_;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private void initFields() {
|
|
|
|
+ datanodeID_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.getDefaultInstance();
|
|
|
|
+ storateInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.getDefaultInstance();
|
|
|
|
+ keys_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto.getDefaultInstance();
|
|
|
|
+ }
|
|
|
|
+ private byte memoizedIsInitialized = -1;
|
|
|
|
+ public final boolean isInitialized() {
|
|
|
|
+ byte isInitialized = memoizedIsInitialized;
|
|
|
|
+ if (isInitialized != -1) return isInitialized == 1;
|
|
|
|
+
|
|
|
|
+ if (!hasDatanodeID()) {
|
|
|
|
+ memoizedIsInitialized = 0;
|
|
|
|
+ return false;
|
|
|
|
+ }
|
|
|
|
+ if (!hasStorateInfo()) {
|
|
|
|
+ memoizedIsInitialized = 0;
|
|
|
|
+ return false;
|
|
|
|
+ }
|
|
|
|
+ if (!hasKeys()) {
|
|
|
|
+ memoizedIsInitialized = 0;
|
|
|
|
+ return false;
|
|
|
|
+ }
|
|
|
|
+ if (!getDatanodeID().isInitialized()) {
|
|
|
|
+ memoizedIsInitialized = 0;
|
|
|
|
+ return false;
|
|
|
|
+ }
|
|
|
|
+ if (!getStorateInfo().isInitialized()) {
|
|
|
|
+ memoizedIsInitialized = 0;
|
|
|
|
+ return false;
|
|
|
|
+ }
|
|
|
|
+ if (!getKeys().isInitialized()) {
|
|
|
|
+ memoizedIsInitialized = 0;
|
|
|
|
+ return false;
|
|
|
|
+ }
|
|
|
|
+ memoizedIsInitialized = 1;
|
|
|
|
+ return true;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ getSerializedSize();
|
|
|
|
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
|
+ output.writeMessage(1, datanodeID_);
|
|
|
|
+ }
|
|
|
|
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
|
|
|
|
+ output.writeMessage(2, storateInfo_);
|
|
|
|
+ }
|
|
|
|
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
|
|
|
|
+ output.writeMessage(3, keys_);
|
|
|
|
+ }
|
|
|
|
+ getUnknownFields().writeTo(output);
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private int memoizedSerializedSize = -1;
|
|
|
|
+ public int getSerializedSize() {
|
|
|
|
+ int size = memoizedSerializedSize;
|
|
|
|
+ if (size != -1) return size;
|
|
|
|
+
|
|
|
|
+ size = 0;
|
|
|
|
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
|
+ size += com.google.protobuf.CodedOutputStream
|
|
|
|
+ .computeMessageSize(1, datanodeID_);
|
|
|
|
+ }
|
|
|
|
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
|
|
|
|
+ size += com.google.protobuf.CodedOutputStream
|
|
|
|
+ .computeMessageSize(2, storateInfo_);
|
|
|
|
+ }
|
|
|
|
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
|
|
|
|
+ size += com.google.protobuf.CodedOutputStream
|
|
|
|
+ .computeMessageSize(3, keys_);
|
|
|
|
+ }
|
|
|
|
+ size += getUnknownFields().getSerializedSize();
|
|
|
|
+ memoizedSerializedSize = size;
|
|
|
|
+ return size;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private static final long serialVersionUID = 0L;
|
|
|
|
+ @java.lang.Override
|
|
|
|
+ protected java.lang.Object writeReplace()
|
|
|
|
+ throws java.io.ObjectStreamException {
|
|
|
|
+ return super.writeReplace();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ @java.lang.Override
|
|
|
|
+ public boolean equals(final java.lang.Object obj) {
|
|
|
|
+ if (obj == this) {
|
|
|
|
+ return true;
|
|
|
|
+ }
|
|
|
|
+ if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto)) {
|
|
|
|
+ return super.equals(obj);
|
|
|
|
+ }
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto other = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto) obj;
|
|
|
|
+
|
|
|
|
+ boolean result = true;
|
|
|
|
+ result = result && (hasDatanodeID() == other.hasDatanodeID());
|
|
|
|
+ if (hasDatanodeID()) {
|
|
|
|
+ result = result && getDatanodeID()
|
|
|
|
+ .equals(other.getDatanodeID());
|
|
|
|
+ }
|
|
|
|
+ result = result && (hasStorateInfo() == other.hasStorateInfo());
|
|
|
|
+ if (hasStorateInfo()) {
|
|
|
|
+ result = result && getStorateInfo()
|
|
|
|
+ .equals(other.getStorateInfo());
|
|
|
|
+ }
|
|
|
|
+ result = result && (hasKeys() == other.hasKeys());
|
|
|
|
+ if (hasKeys()) {
|
|
|
|
+ result = result && getKeys()
|
|
|
|
+ .equals(other.getKeys());
|
|
|
|
+ }
|
|
|
|
+ result = result &&
|
|
|
|
+ getUnknownFields().equals(other.getUnknownFields());
|
|
|
|
+ return result;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ @java.lang.Override
|
|
|
|
+ public int hashCode() {
|
|
|
|
+ int hash = 41;
|
|
|
|
+ hash = (19 * hash) + getDescriptorForType().hashCode();
|
|
|
|
+ if (hasDatanodeID()) {
|
|
|
|
+ hash = (37 * hash) + DATANODEID_FIELD_NUMBER;
|
|
|
|
+ hash = (53 * hash) + getDatanodeID().hashCode();
|
|
|
|
+ }
|
|
|
|
+ if (hasStorateInfo()) {
|
|
|
|
+ hash = (37 * hash) + STORATEINFO_FIELD_NUMBER;
|
|
|
|
+ hash = (53 * hash) + getStorateInfo().hashCode();
|
|
|
|
+ }
|
|
|
|
+ if (hasKeys()) {
|
|
|
|
+ hash = (37 * hash) + KEYS_FIELD_NUMBER;
|
|
|
|
+ hash = (53 * hash) + getKeys().hashCode();
|
|
|
|
+ }
|
|
|
|
+ hash = (29 * hash) + getUnknownFields().hashCode();
|
|
|
|
+ return hash;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto parseFrom(
|
|
|
|
+ com.google.protobuf.ByteString data)
|
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
|
+ return newBuilder().mergeFrom(data).buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto parseFrom(
|
|
|
|
+ com.google.protobuf.ByteString data,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
|
+ return newBuilder().mergeFrom(data, extensionRegistry)
|
|
|
|
+ .buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto parseFrom(byte[] data)
|
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
|
+ return newBuilder().mergeFrom(data).buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto parseFrom(
|
|
|
|
+ byte[] data,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
|
+ return newBuilder().mergeFrom(data, extensionRegistry)
|
|
|
|
+ .buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto parseFrom(java.io.InputStream input)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ return newBuilder().mergeFrom(input).buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto parseFrom(
|
|
|
|
+ java.io.InputStream input,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ return newBuilder().mergeFrom(input, extensionRegistry)
|
|
|
|
+ .buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto parseDelimitedFrom(java.io.InputStream input)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ Builder builder = newBuilder();
|
|
|
|
+ if (builder.mergeDelimitedFrom(input)) {
|
|
|
|
+ return builder.buildParsed();
|
|
|
|
+ } else {
|
|
|
|
+ return null;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto parseDelimitedFrom(
|
|
|
|
+ java.io.InputStream input,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ Builder builder = newBuilder();
|
|
|
|
+ if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
|
|
|
|
+ return builder.buildParsed();
|
|
|
|
+ } else {
|
|
|
|
+ return null;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto parseFrom(
|
|
|
|
+ com.google.protobuf.CodedInputStream input)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ return newBuilder().mergeFrom(input).buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto parseFrom(
|
|
|
|
+ com.google.protobuf.CodedInputStream input,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ return newBuilder().mergeFrom(input, extensionRegistry)
|
|
|
|
+ .buildParsed();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public static Builder newBuilder() { return Builder.create(); }
|
|
|
|
+ public Builder newBuilderForType() { return newBuilder(); }
|
|
|
|
+ public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto prototype) {
|
|
|
|
+ return newBuilder().mergeFrom(prototype);
|
|
|
|
+ }
|
|
|
|
+ public Builder toBuilder() { return newBuilder(this); }
|
|
|
|
+
|
|
|
|
+ @java.lang.Override
|
|
|
|
+ protected Builder newBuilderForType(
|
|
|
|
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
|
|
|
|
+ Builder builder = new Builder(parent);
|
|
|
|
+ return builder;
|
|
|
|
+ }
|
|
|
|
+ public static final class Builder extends
|
|
|
|
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
|
|
|
|
+ implements org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder {
|
|
|
|
+ public static final com.google.protobuf.Descriptors.Descriptor
|
|
|
|
+ getDescriptor() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_DatanodeRegistrationProto_descriptor;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
|
|
|
+ internalGetFieldAccessorTable() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_DatanodeRegistrationProto_fieldAccessorTable;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ // Construct using org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.newBuilder()
|
|
|
|
+ private Builder() {
|
|
|
|
+ maybeForceBuilderInitialization();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private Builder(BuilderParent parent) {
|
|
|
|
+ super(parent);
|
|
|
|
+ maybeForceBuilderInitialization();
|
|
|
|
+ }
|
|
|
|
+ private void maybeForceBuilderInitialization() {
|
|
|
|
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
|
|
|
|
+ getDatanodeIDFieldBuilder();
|
|
|
|
+ getStorateInfoFieldBuilder();
|
|
|
|
+ getKeysFieldBuilder();
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ private static Builder create() {
|
|
|
|
+ return new Builder();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public Builder clear() {
|
|
|
|
+ super.clear();
|
|
|
|
+ if (datanodeIDBuilder_ == null) {
|
|
|
|
+ datanodeID_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.getDefaultInstance();
|
|
|
|
+ } else {
|
|
|
|
+ datanodeIDBuilder_.clear();
|
|
|
|
+ }
|
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000001);
|
|
|
|
+ if (storateInfoBuilder_ == null) {
|
|
|
|
+ storateInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.getDefaultInstance();
|
|
|
|
+ } else {
|
|
|
|
+ storateInfoBuilder_.clear();
|
|
|
|
+ }
|
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000002);
|
|
|
|
+ if (keysBuilder_ == null) {
|
|
|
|
+ keys_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto.getDefaultInstance();
|
|
|
|
+ } else {
|
|
|
|
+ keysBuilder_.clear();
|
|
|
|
+ }
|
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000004);
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public Builder clone() {
|
|
|
|
+ return create().mergeFrom(buildPartial());
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public com.google.protobuf.Descriptors.Descriptor
|
|
|
|
+ getDescriptorForType() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDescriptor();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto getDefaultInstanceForType() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto build() {
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto result = buildPartial();
|
|
|
|
+ if (!result.isInitialized()) {
|
|
|
|
+ throw newUninitializedMessageException(result);
|
|
|
|
+ }
|
|
|
|
+ return result;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto buildParsed()
|
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto result = buildPartial();
|
|
|
|
+ if (!result.isInitialized()) {
|
|
|
|
+ throw newUninitializedMessageException(
|
|
|
|
+ result).asInvalidProtocolBufferException();
|
|
|
|
+ }
|
|
|
|
+ return result;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto buildPartial() {
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto result = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto(this);
|
|
|
|
+ int from_bitField0_ = bitField0_;
|
|
|
|
+ int to_bitField0_ = 0;
|
|
|
|
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
|
+ to_bitField0_ |= 0x00000001;
|
|
|
|
+ }
|
|
|
|
+ if (datanodeIDBuilder_ == null) {
|
|
|
|
+ result.datanodeID_ = datanodeID_;
|
|
|
|
+ } else {
|
|
|
|
+ result.datanodeID_ = datanodeIDBuilder_.build();
|
|
|
|
+ }
|
|
|
|
+ if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
|
|
|
|
+ to_bitField0_ |= 0x00000002;
|
|
|
|
+ }
|
|
|
|
+ if (storateInfoBuilder_ == null) {
|
|
|
|
+ result.storateInfo_ = storateInfo_;
|
|
|
|
+ } else {
|
|
|
|
+ result.storateInfo_ = storateInfoBuilder_.build();
|
|
|
|
+ }
|
|
|
|
+ if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
|
|
|
|
+ to_bitField0_ |= 0x00000004;
|
|
|
|
+ }
|
|
|
|
+ if (keysBuilder_ == null) {
|
|
|
|
+ result.keys_ = keys_;
|
|
|
|
+ } else {
|
|
|
|
+ result.keys_ = keysBuilder_.build();
|
|
|
|
+ }
|
|
|
|
+ result.bitField0_ = to_bitField0_;
|
|
|
|
+ onBuilt();
|
|
|
|
+ return result;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public Builder mergeFrom(com.google.protobuf.Message other) {
|
|
|
|
+ if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto) {
|
|
|
|
+ return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto)other);
|
|
|
|
+ } else {
|
|
|
|
+ super.mergeFrom(other);
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto other) {
|
|
|
|
+ if (other == org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance()) return this;
|
|
|
|
+ if (other.hasDatanodeID()) {
|
|
|
|
+ mergeDatanodeID(other.getDatanodeID());
|
|
|
|
+ }
|
|
|
|
+ if (other.hasStorateInfo()) {
|
|
|
|
+ mergeStorateInfo(other.getStorateInfo());
|
|
|
|
+ }
|
|
|
|
+ if (other.hasKeys()) {
|
|
|
|
+ mergeKeys(other.getKeys());
|
|
|
|
+ }
|
|
|
|
+ this.mergeUnknownFields(other.getUnknownFields());
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public final boolean isInitialized() {
|
|
|
|
+ if (!hasDatanodeID()) {
|
|
|
|
+
|
|
|
|
+ return false;
|
|
|
|
+ }
|
|
|
|
+ if (!hasStorateInfo()) {
|
|
|
|
+
|
|
|
|
+ return false;
|
|
|
|
+ }
|
|
|
|
+ if (!hasKeys()) {
|
|
|
|
+
|
|
|
|
+ return false;
|
|
|
|
+ }
|
|
|
|
+ if (!getDatanodeID().isInitialized()) {
|
|
|
|
+
|
|
|
|
+ return false;
|
|
|
|
+ }
|
|
|
|
+ if (!getStorateInfo().isInitialized()) {
|
|
|
|
+
|
|
|
|
+ return false;
|
|
|
|
+ }
|
|
|
|
+ if (!getKeys().isInitialized()) {
|
|
|
|
+
|
|
|
|
+ return false;
|
|
|
|
+ }
|
|
|
|
+ return true;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public Builder mergeFrom(
|
|
|
|
+ com.google.protobuf.CodedInputStream input,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
|
|
|
|
+ com.google.protobuf.UnknownFieldSet.newBuilder(
|
|
|
|
+ this.getUnknownFields());
|
|
|
|
+ while (true) {
|
|
|
|
+ int tag = input.readTag();
|
|
|
|
+ switch (tag) {
|
|
|
|
+ case 0:
|
|
|
|
+ this.setUnknownFields(unknownFields.build());
|
|
|
|
+ onChanged();
|
|
|
|
+ return this;
|
|
|
|
+ default: {
|
|
|
|
+ if (!parseUnknownField(input, unknownFields,
|
|
|
|
+ extensionRegistry, tag)) {
|
|
|
|
+ this.setUnknownFields(unknownFields.build());
|
|
|
|
+ onChanged();
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ break;
|
|
|
|
+ }
|
|
|
|
+ case 10: {
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.newBuilder();
|
|
|
|
+ if (hasDatanodeID()) {
|
|
|
|
+ subBuilder.mergeFrom(getDatanodeID());
|
|
|
|
+ }
|
|
|
|
+ input.readMessage(subBuilder, extensionRegistry);
|
|
|
|
+ setDatanodeID(subBuilder.buildPartial());
|
|
|
|
+ break;
|
|
|
|
+ }
|
|
|
|
+ case 18: {
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.newBuilder();
|
|
|
|
+ if (hasStorateInfo()) {
|
|
|
|
+ subBuilder.mergeFrom(getStorateInfo());
|
|
|
|
+ }
|
|
|
|
+ input.readMessage(subBuilder, extensionRegistry);
|
|
|
|
+ setStorateInfo(subBuilder.buildPartial());
|
|
|
|
+ break;
|
|
|
|
+ }
|
|
|
|
+ case 26: {
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto.newBuilder();
|
|
|
|
+ if (hasKeys()) {
|
|
|
|
+ subBuilder.mergeFrom(getKeys());
|
|
|
|
+ }
|
|
|
|
+ input.readMessage(subBuilder, extensionRegistry);
|
|
|
|
+ setKeys(subBuilder.buildPartial());
|
|
|
|
+ break;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private int bitField0_;
|
|
|
|
+
|
|
|
|
+ // required .DatanodeIDProto datanodeID = 1;
|
|
|
|
+ private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto datanodeID_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.getDefaultInstance();
|
|
|
|
+ private com.google.protobuf.SingleFieldBuilder<
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder> datanodeIDBuilder_;
|
|
|
|
+ public boolean hasDatanodeID() {
|
|
|
|
+ return ((bitField0_ & 0x00000001) == 0x00000001);
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto getDatanodeID() {
|
|
|
|
+ if (datanodeIDBuilder_ == null) {
|
|
|
|
+ return datanodeID_;
|
|
|
|
+ } else {
|
|
|
|
+ return datanodeIDBuilder_.getMessage();
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ public Builder setDatanodeID(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto value) {
|
|
|
|
+ if (datanodeIDBuilder_ == null) {
|
|
|
|
+ if (value == null) {
|
|
|
|
+ throw new NullPointerException();
|
|
|
|
+ }
|
|
|
|
+ datanodeID_ = value;
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ datanodeIDBuilder_.setMessage(value);
|
|
|
|
+ }
|
|
|
|
+ bitField0_ |= 0x00000001;
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder setDatanodeID(
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder builderForValue) {
|
|
|
|
+ if (datanodeIDBuilder_ == null) {
|
|
|
|
+ datanodeID_ = builderForValue.build();
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ datanodeIDBuilder_.setMessage(builderForValue.build());
|
|
|
|
+ }
|
|
|
|
+ bitField0_ |= 0x00000001;
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder mergeDatanodeID(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto value) {
|
|
|
|
+ if (datanodeIDBuilder_ == null) {
|
|
|
|
+ if (((bitField0_ & 0x00000001) == 0x00000001) &&
|
|
|
|
+ datanodeID_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.getDefaultInstance()) {
|
|
|
|
+ datanodeID_ =
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.newBuilder(datanodeID_).mergeFrom(value).buildPartial();
|
|
|
|
+ } else {
|
|
|
|
+ datanodeID_ = value;
|
|
|
|
+ }
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ datanodeIDBuilder_.mergeFrom(value);
|
|
|
|
+ }
|
|
|
|
+ bitField0_ |= 0x00000001;
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder clearDatanodeID() {
|
|
|
|
+ if (datanodeIDBuilder_ == null) {
|
|
|
|
+ datanodeID_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.getDefaultInstance();
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ datanodeIDBuilder_.clear();
|
|
|
|
+ }
|
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000001);
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder getDatanodeIDBuilder() {
|
|
|
|
+ bitField0_ |= 0x00000001;
|
|
|
|
+ onChanged();
|
|
|
|
+ return getDatanodeIDFieldBuilder().getBuilder();
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder getDatanodeIDOrBuilder() {
|
|
|
|
+ if (datanodeIDBuilder_ != null) {
|
|
|
|
+ return datanodeIDBuilder_.getMessageOrBuilder();
|
|
|
|
+ } else {
|
|
|
|
+ return datanodeID_;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ private com.google.protobuf.SingleFieldBuilder<
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder>
|
|
|
|
+ getDatanodeIDFieldBuilder() {
|
|
|
|
+ if (datanodeIDBuilder_ == null) {
|
|
|
|
+ datanodeIDBuilder_ = new com.google.protobuf.SingleFieldBuilder<
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder>(
|
|
|
|
+ datanodeID_,
|
|
|
|
+ getParentForChildren(),
|
|
|
|
+ isClean());
|
|
|
|
+ datanodeID_ = null;
|
|
|
|
+ }
|
|
|
|
+ return datanodeIDBuilder_;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ // required .StorageInfoProto storateInfo = 2;
|
|
|
|
+ private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto storateInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.getDefaultInstance();
|
|
|
|
+ private com.google.protobuf.SingleFieldBuilder<
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProtoOrBuilder> storateInfoBuilder_;
|
|
|
|
+ public boolean hasStorateInfo() {
|
|
|
|
+ return ((bitField0_ & 0x00000002) == 0x00000002);
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto getStorateInfo() {
|
|
|
|
+ if (storateInfoBuilder_ == null) {
|
|
|
|
+ return storateInfo_;
|
|
|
|
+ } else {
|
|
|
|
+ return storateInfoBuilder_.getMessage();
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ public Builder setStorateInfo(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto value) {
|
|
|
|
+ if (storateInfoBuilder_ == null) {
|
|
|
|
+ if (value == null) {
|
|
|
|
+ throw new NullPointerException();
|
|
|
|
+ }
|
|
|
|
+ storateInfo_ = value;
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ storateInfoBuilder_.setMessage(value);
|
|
|
|
+ }
|
|
|
|
+ bitField0_ |= 0x00000002;
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder setStorateInfo(
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.Builder builderForValue) {
|
|
|
|
+ if (storateInfoBuilder_ == null) {
|
|
|
|
+ storateInfo_ = builderForValue.build();
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ storateInfoBuilder_.setMessage(builderForValue.build());
|
|
|
|
+ }
|
|
|
|
+ bitField0_ |= 0x00000002;
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder mergeStorateInfo(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto value) {
|
|
|
|
+ if (storateInfoBuilder_ == null) {
|
|
|
|
+ if (((bitField0_ & 0x00000002) == 0x00000002) &&
|
|
|
|
+ storateInfo_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.getDefaultInstance()) {
|
|
|
|
+ storateInfo_ =
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.newBuilder(storateInfo_).mergeFrom(value).buildPartial();
|
|
|
|
+ } else {
|
|
|
|
+ storateInfo_ = value;
|
|
|
|
+ }
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ storateInfoBuilder_.mergeFrom(value);
|
|
|
|
+ }
|
|
|
|
+ bitField0_ |= 0x00000002;
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder clearStorateInfo() {
|
|
|
|
+ if (storateInfoBuilder_ == null) {
|
|
|
|
+ storateInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.getDefaultInstance();
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ storateInfoBuilder_.clear();
|
|
|
|
+ }
|
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000002);
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.Builder getStorateInfoBuilder() {
|
|
|
|
+ bitField0_ |= 0x00000002;
|
|
|
|
+ onChanged();
|
|
|
|
+ return getStorateInfoFieldBuilder().getBuilder();
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProtoOrBuilder getStorateInfoOrBuilder() {
|
|
|
|
+ if (storateInfoBuilder_ != null) {
|
|
|
|
+ return storateInfoBuilder_.getMessageOrBuilder();
|
|
|
|
+ } else {
|
|
|
|
+ return storateInfo_;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ private com.google.protobuf.SingleFieldBuilder<
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProtoOrBuilder>
|
|
|
|
+ getStorateInfoFieldBuilder() {
|
|
|
|
+ if (storateInfoBuilder_ == null) {
|
|
|
|
+ storateInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder<
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProtoOrBuilder>(
|
|
|
|
+ storateInfo_,
|
|
|
|
+ getParentForChildren(),
|
|
|
|
+ isClean());
|
|
|
|
+ storateInfo_ = null;
|
|
|
|
+ }
|
|
|
|
+ return storateInfoBuilder_;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ // required .ExportedBlockKeysProto keys = 3;
|
|
|
|
+ private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto keys_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto.getDefaultInstance();
|
|
|
|
+ private com.google.protobuf.SingleFieldBuilder<
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProtoOrBuilder> keysBuilder_;
|
|
|
|
+ public boolean hasKeys() {
|
|
|
|
+ return ((bitField0_ & 0x00000004) == 0x00000004);
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto getKeys() {
|
|
|
|
+ if (keysBuilder_ == null) {
|
|
|
|
+ return keys_;
|
|
|
|
+ } else {
|
|
|
|
+ return keysBuilder_.getMessage();
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ public Builder setKeys(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto value) {
|
|
|
|
+ if (keysBuilder_ == null) {
|
|
|
|
+ if (value == null) {
|
|
|
|
+ throw new NullPointerException();
|
|
|
|
+ }
|
|
|
|
+ keys_ = value;
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ keysBuilder_.setMessage(value);
|
|
|
|
+ }
|
|
|
|
+ bitField0_ |= 0x00000004;
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder setKeys(
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto.Builder builderForValue) {
|
|
|
|
+ if (keysBuilder_ == null) {
|
|
|
|
+ keys_ = builderForValue.build();
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ keysBuilder_.setMessage(builderForValue.build());
|
|
|
|
+ }
|
|
|
|
+ bitField0_ |= 0x00000004;
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder mergeKeys(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto value) {
|
|
|
|
+ if (keysBuilder_ == null) {
|
|
|
|
+ if (((bitField0_ & 0x00000004) == 0x00000004) &&
|
|
|
|
+ keys_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto.getDefaultInstance()) {
|
|
|
|
+ keys_ =
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto.newBuilder(keys_).mergeFrom(value).buildPartial();
|
|
|
|
+ } else {
|
|
|
|
+ keys_ = value;
|
|
|
|
+ }
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ keysBuilder_.mergeFrom(value);
|
|
|
|
+ }
|
|
|
|
+ bitField0_ |= 0x00000004;
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder clearKeys() {
|
|
|
|
+ if (keysBuilder_ == null) {
|
|
|
|
+ keys_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto.getDefaultInstance();
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ keysBuilder_.clear();
|
|
|
|
+ }
|
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000004);
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto.Builder getKeysBuilder() {
|
|
|
|
+ bitField0_ |= 0x00000004;
|
|
|
|
+ onChanged();
|
|
|
|
+ return getKeysFieldBuilder().getBuilder();
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProtoOrBuilder getKeysOrBuilder() {
|
|
|
|
+ if (keysBuilder_ != null) {
|
|
|
|
+ return keysBuilder_.getMessageOrBuilder();
|
|
|
|
+ } else {
|
|
|
|
+ return keys_;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ private com.google.protobuf.SingleFieldBuilder<
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProtoOrBuilder>
|
|
|
|
+ getKeysFieldBuilder() {
|
|
|
|
+ if (keysBuilder_ == null) {
|
|
|
|
+ keysBuilder_ = new com.google.protobuf.SingleFieldBuilder<
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProtoOrBuilder>(
|
|
|
|
+ keys_,
|
|
|
|
+ getParentForChildren(),
|
|
|
|
+ isClean());
|
|
|
|
+ keys_ = null;
|
|
|
|
+ }
|
|
|
|
+ return keysBuilder_;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ // @@protoc_insertion_point(builder_scope:DatanodeRegistrationProto)
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ static {
|
|
|
|
+ defaultInstance = new DatanodeRegistrationProto(true);
|
|
|
|
+ defaultInstance.initFields();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ // @@protoc_insertion_point(class_scope:DatanodeRegistrationProto)
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public interface DatanodeCommandProtoOrBuilder
|
|
|
|
+ extends com.google.protobuf.MessageOrBuilder {
|
|
|
|
+
|
|
|
|
+ // required .DatanodeCommandProto.Type cmdType = 1;
|
|
|
|
+ boolean hasCmdType();
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Type getCmdType();
|
|
|
|
+
|
|
|
|
+ // optional .BalancerBandwidthCommandProto balancerCmd = 2;
|
|
|
|
+ boolean hasBalancerCmd();
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto getBalancerCmd();
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProtoOrBuilder getBalancerCmdOrBuilder();
|
|
|
|
+
|
|
|
|
+ // optional .BlockCommandProto blkCmd = 3;
|
|
|
|
+ boolean hasBlkCmd();
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto getBlkCmd();
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProtoOrBuilder getBlkCmdOrBuilder();
|
|
|
|
+
|
|
|
|
+ // optional .BlockRecoveryCommndProto recoveryCmd = 4;
|
|
|
|
+ boolean hasRecoveryCmd();
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommndProto getRecoveryCmd();
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommndProtoOrBuilder getRecoveryCmdOrBuilder();
|
|
|
|
+
|
|
|
|
+ // optional .FinalizeCommandProto finalizeCmd = 5;
|
|
|
|
+ boolean hasFinalizeCmd();
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto getFinalizeCmd();
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProtoOrBuilder getFinalizeCmdOrBuilder();
|
|
|
|
+
|
|
|
|
+ // optional .KeyUpdateCommandProto keyUpdateCmd = 6;
|
|
|
|
+ boolean hasKeyUpdateCmd();
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto getKeyUpdateCmd();
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProtoOrBuilder getKeyUpdateCmdOrBuilder();
|
|
|
|
+
|
|
|
|
+ // optional .RegisterCommandProto registerCmd = 7;
|
|
|
|
+ boolean hasRegisterCmd();
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto getRegisterCmd();
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProtoOrBuilder getRegisterCmdOrBuilder();
|
|
|
|
+
|
|
|
|
+ // optional .UpgradeCommandProto upgradeCmd = 8;
|
|
|
|
+ boolean hasUpgradeCmd();
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto getUpgradeCmd();
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProtoOrBuilder getUpgradeCmdOrBuilder();
|
|
|
|
+ }
|
|
|
|
+ public static final class DatanodeCommandProto extends
|
|
|
|
+ com.google.protobuf.GeneratedMessage
|
|
|
|
+ implements DatanodeCommandProtoOrBuilder {
|
|
|
|
+ // Use DatanodeCommandProto.newBuilder() to construct.
|
|
|
|
+ private DatanodeCommandProto(Builder builder) {
|
|
|
|
+ super(builder);
|
|
|
|
+ }
|
|
|
|
+ private DatanodeCommandProto(boolean noInit) {}
|
|
|
|
+
|
|
|
|
+ private static final DatanodeCommandProto defaultInstance;
|
|
|
|
+ public static DatanodeCommandProto getDefaultInstance() {
|
|
|
|
+ return defaultInstance;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public DatanodeCommandProto getDefaultInstanceForType() {
|
|
|
|
+ return defaultInstance;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public static final com.google.protobuf.Descriptors.Descriptor
|
|
|
|
+ getDescriptor() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_DatanodeCommandProto_descriptor;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
|
|
|
+ internalGetFieldAccessorTable() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_DatanodeCommandProto_fieldAccessorTable;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public enum Type
|
|
|
|
+ implements com.google.protobuf.ProtocolMessageEnum {
|
|
|
|
+ BalancerBandwidthCommand(0, 0),
|
|
|
|
+ BlockCommand(1, 1),
|
|
|
|
+ BlockRecoveryCommand(2, 2),
|
|
|
|
+ FinalizeCommand(3, 3),
|
|
|
|
+ KeyUpdateCommand(4, 4),
|
|
|
|
+ RegisterCommand(5, 5),
|
|
|
|
+ UpgradeCommand(6, 6),
|
|
|
|
+ ;
|
|
|
|
+
|
|
|
|
+ public static final int BalancerBandwidthCommand_VALUE = 0;
|
|
|
|
+ public static final int BlockCommand_VALUE = 1;
|
|
|
|
+ public static final int BlockRecoveryCommand_VALUE = 2;
|
|
|
|
+ public static final int FinalizeCommand_VALUE = 3;
|
|
|
|
+ public static final int KeyUpdateCommand_VALUE = 4;
|
|
|
|
+ public static final int RegisterCommand_VALUE = 5;
|
|
|
|
+ public static final int UpgradeCommand_VALUE = 6;
|
|
|
|
+
|
|
|
|
+
|
|
|
|
+ public final int getNumber() { return value; }
|
|
|
|
+
|
|
|
|
+ public static Type valueOf(int value) {
|
|
|
|
+ switch (value) {
|
|
|
|
+ case 0: return BalancerBandwidthCommand;
|
|
|
|
+ case 1: return BlockCommand;
|
|
|
|
+ case 2: return BlockRecoveryCommand;
|
|
|
|
+ case 3: return FinalizeCommand;
|
|
|
|
+ case 4: return KeyUpdateCommand;
|
|
|
|
+ case 5: return RegisterCommand;
|
|
|
|
+ case 6: return UpgradeCommand;
|
|
|
|
+ default: return null;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public static com.google.protobuf.Internal.EnumLiteMap<Type>
|
|
|
|
+ internalGetValueMap() {
|
|
|
|
+ return internalValueMap;
|
|
|
|
+ }
|
|
|
|
+ private static com.google.protobuf.Internal.EnumLiteMap<Type>
|
|
|
|
+ internalValueMap =
|
|
|
|
+ new com.google.protobuf.Internal.EnumLiteMap<Type>() {
|
|
|
|
+ public Type findValueByNumber(int number) {
|
|
|
|
+ return Type.valueOf(number);
|
|
|
|
+ }
|
|
|
|
+ };
|
|
|
|
+
|
|
|
|
+ public final com.google.protobuf.Descriptors.EnumValueDescriptor
|
|
|
|
+ getValueDescriptor() {
|
|
|
|
+ return getDescriptor().getValues().get(index);
|
|
|
|
+ }
|
|
|
|
+ public final com.google.protobuf.Descriptors.EnumDescriptor
|
|
|
|
+ getDescriptorForType() {
|
|
|
|
+ return getDescriptor();
|
|
|
|
+ }
|
|
|
|
+ public static final com.google.protobuf.Descriptors.EnumDescriptor
|
|
|
|
+ getDescriptor() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.getDescriptor().getEnumTypes().get(0);
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private static final Type[] VALUES = {
|
|
|
|
+ BalancerBandwidthCommand, BlockCommand, BlockRecoveryCommand, FinalizeCommand, KeyUpdateCommand, RegisterCommand, UpgradeCommand,
|
|
|
|
+ };
|
|
|
|
+
|
|
|
|
+ public static Type valueOf(
|
|
|
|
+ com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
|
|
|
|
+ if (desc.getType() != getDescriptor()) {
|
|
|
|
+ throw new java.lang.IllegalArgumentException(
|
|
|
|
+ "EnumValueDescriptor is not for this type.");
|
|
|
|
+ }
|
|
|
|
+ return VALUES[desc.getIndex()];
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private final int index;
|
|
|
|
+ private final int value;
|
|
|
|
+
|
|
|
|
+ private Type(int index, int value) {
|
|
|
|
+ this.index = index;
|
|
|
|
+ this.value = value;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ // @@protoc_insertion_point(enum_scope:DatanodeCommandProto.Type)
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private int bitField0_;
|
|
|
|
+ // required .DatanodeCommandProto.Type cmdType = 1;
|
|
|
|
+ public static final int CMDTYPE_FIELD_NUMBER = 1;
|
|
|
|
+ private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Type cmdType_;
|
|
|
|
+ public boolean hasCmdType() {
|
|
|
|
+ return ((bitField0_ & 0x00000001) == 0x00000001);
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Type getCmdType() {
|
|
|
|
+ return cmdType_;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ // optional .BalancerBandwidthCommandProto balancerCmd = 2;
|
|
|
|
+ public static final int BALANCERCMD_FIELD_NUMBER = 2;
|
|
|
|
+ private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto balancerCmd_;
|
|
|
|
+ public boolean hasBalancerCmd() {
|
|
|
|
+ return ((bitField0_ & 0x00000002) == 0x00000002);
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto getBalancerCmd() {
|
|
|
|
+ return balancerCmd_;
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProtoOrBuilder getBalancerCmdOrBuilder() {
|
|
|
|
+ return balancerCmd_;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ // optional .BlockCommandProto blkCmd = 3;
|
|
|
|
+ public static final int BLKCMD_FIELD_NUMBER = 3;
|
|
|
|
+ private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto blkCmd_;
|
|
|
|
+ public boolean hasBlkCmd() {
|
|
|
|
+ return ((bitField0_ & 0x00000004) == 0x00000004);
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto getBlkCmd() {
|
|
|
|
+ return blkCmd_;
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProtoOrBuilder getBlkCmdOrBuilder() {
|
|
|
|
+ return blkCmd_;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ // optional .BlockRecoveryCommndProto recoveryCmd = 4;
|
|
|
|
+ public static final int RECOVERYCMD_FIELD_NUMBER = 4;
|
|
|
|
+ private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommndProto recoveryCmd_;
|
|
|
|
+ public boolean hasRecoveryCmd() {
|
|
|
|
+ return ((bitField0_ & 0x00000008) == 0x00000008);
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommndProto getRecoveryCmd() {
|
|
|
|
+ return recoveryCmd_;
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommndProtoOrBuilder getRecoveryCmdOrBuilder() {
|
|
|
|
+ return recoveryCmd_;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ // optional .FinalizeCommandProto finalizeCmd = 5;
|
|
|
|
+ public static final int FINALIZECMD_FIELD_NUMBER = 5;
|
|
|
|
+ private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto finalizeCmd_;
|
|
|
|
+ public boolean hasFinalizeCmd() {
|
|
|
|
+ return ((bitField0_ & 0x00000010) == 0x00000010);
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto getFinalizeCmd() {
|
|
|
|
+ return finalizeCmd_;
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProtoOrBuilder getFinalizeCmdOrBuilder() {
|
|
|
|
+ return finalizeCmd_;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ // optional .KeyUpdateCommandProto keyUpdateCmd = 6;
|
|
|
|
+ public static final int KEYUPDATECMD_FIELD_NUMBER = 6;
|
|
|
|
+ private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto keyUpdateCmd_;
|
|
|
|
+ public boolean hasKeyUpdateCmd() {
|
|
|
|
+ return ((bitField0_ & 0x00000020) == 0x00000020);
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto getKeyUpdateCmd() {
|
|
|
|
+ return keyUpdateCmd_;
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProtoOrBuilder getKeyUpdateCmdOrBuilder() {
|
|
|
|
+ return keyUpdateCmd_;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ // optional .RegisterCommandProto registerCmd = 7;
|
|
|
|
+ public static final int REGISTERCMD_FIELD_NUMBER = 7;
|
|
|
|
+ private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto registerCmd_;
|
|
|
|
+ public boolean hasRegisterCmd() {
|
|
|
|
+ return ((bitField0_ & 0x00000040) == 0x00000040);
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto getRegisterCmd() {
|
|
|
|
+ return registerCmd_;
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProtoOrBuilder getRegisterCmdOrBuilder() {
|
|
|
|
+ return registerCmd_;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ // optional .UpgradeCommandProto upgradeCmd = 8;
|
|
|
|
+ public static final int UPGRADECMD_FIELD_NUMBER = 8;
|
|
|
|
+ private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto upgradeCmd_;
|
|
|
|
+ public boolean hasUpgradeCmd() {
|
|
|
|
+ return ((bitField0_ & 0x00000080) == 0x00000080);
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto getUpgradeCmd() {
|
|
|
|
+ return upgradeCmd_;
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProtoOrBuilder getUpgradeCmdOrBuilder() {
|
|
|
|
+ return upgradeCmd_;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private void initFields() {
|
|
|
|
+ cmdType_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Type.BalancerBandwidthCommand;
|
|
|
|
+ balancerCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto.getDefaultInstance();
|
|
|
|
+ blkCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.getDefaultInstance();
|
|
|
|
+ recoveryCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommndProto.getDefaultInstance();
|
|
|
|
+ finalizeCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto.getDefaultInstance();
|
|
|
|
+ keyUpdateCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto.getDefaultInstance();
|
|
|
|
+ registerCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto.getDefaultInstance();
|
|
|
|
+ upgradeCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.getDefaultInstance();
|
|
|
|
+ }
|
|
|
|
+ private byte memoizedIsInitialized = -1;
|
|
|
|
+ public final boolean isInitialized() {
|
|
|
|
+ byte isInitialized = memoizedIsInitialized;
|
|
|
|
+ if (isInitialized != -1) return isInitialized == 1;
|
|
|
|
+
|
|
|
|
+ if (!hasCmdType()) {
|
|
|
|
+ memoizedIsInitialized = 0;
|
|
|
|
+ return false;
|
|
|
|
+ }
|
|
|
|
+ if (hasBalancerCmd()) {
|
|
|
|
+ if (!getBalancerCmd().isInitialized()) {
|
|
|
|
+ memoizedIsInitialized = 0;
|
|
|
|
+ return false;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ if (hasBlkCmd()) {
|
|
|
|
+ if (!getBlkCmd().isInitialized()) {
|
|
|
|
+ memoizedIsInitialized = 0;
|
|
|
|
+ return false;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ if (hasRecoveryCmd()) {
|
|
|
|
+ if (!getRecoveryCmd().isInitialized()) {
|
|
|
|
+ memoizedIsInitialized = 0;
|
|
|
|
+ return false;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ if (hasFinalizeCmd()) {
|
|
|
|
+ if (!getFinalizeCmd().isInitialized()) {
|
|
|
|
+ memoizedIsInitialized = 0;
|
|
|
|
+ return false;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ if (hasKeyUpdateCmd()) {
|
|
|
|
+ if (!getKeyUpdateCmd().isInitialized()) {
|
|
|
|
+ memoizedIsInitialized = 0;
|
|
|
|
+ return false;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ if (hasUpgradeCmd()) {
|
|
|
|
+ if (!getUpgradeCmd().isInitialized()) {
|
|
|
|
+ memoizedIsInitialized = 0;
|
|
|
|
+ return false;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ memoizedIsInitialized = 1;
|
|
|
|
+ return true;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ getSerializedSize();
|
|
|
|
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
|
+ output.writeEnum(1, cmdType_.getNumber());
|
|
|
|
+ }
|
|
|
|
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
|
|
|
|
+ output.writeMessage(2, balancerCmd_);
|
|
|
|
+ }
|
|
|
|
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
|
|
|
|
+ output.writeMessage(3, blkCmd_);
|
|
|
|
+ }
|
|
|
|
+ if (((bitField0_ & 0x00000008) == 0x00000008)) {
|
|
|
|
+ output.writeMessage(4, recoveryCmd_);
|
|
|
|
+ }
|
|
|
|
+ if (((bitField0_ & 0x00000010) == 0x00000010)) {
|
|
|
|
+ output.writeMessage(5, finalizeCmd_);
|
|
|
|
+ }
|
|
|
|
+ if (((bitField0_ & 0x00000020) == 0x00000020)) {
|
|
|
|
+ output.writeMessage(6, keyUpdateCmd_);
|
|
|
|
+ }
|
|
|
|
+ if (((bitField0_ & 0x00000040) == 0x00000040)) {
|
|
|
|
+ output.writeMessage(7, registerCmd_);
|
|
|
|
+ }
|
|
|
|
+ if (((bitField0_ & 0x00000080) == 0x00000080)) {
|
|
|
|
+ output.writeMessage(8, upgradeCmd_);
|
|
|
|
+ }
|
|
|
|
+ getUnknownFields().writeTo(output);
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private int memoizedSerializedSize = -1;
|
|
|
|
+ public int getSerializedSize() {
|
|
|
|
+ int size = memoizedSerializedSize;
|
|
|
|
+ if (size != -1) return size;
|
|
|
|
+
|
|
|
|
+ size = 0;
|
|
|
|
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
|
+ size += com.google.protobuf.CodedOutputStream
|
|
|
|
+ .computeEnumSize(1, cmdType_.getNumber());
|
|
|
|
+ }
|
|
|
|
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
|
|
|
|
+ size += com.google.protobuf.CodedOutputStream
|
|
|
|
+ .computeMessageSize(2, balancerCmd_);
|
|
|
|
+ }
|
|
|
|
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
|
|
|
|
+ size += com.google.protobuf.CodedOutputStream
|
|
|
|
+ .computeMessageSize(3, blkCmd_);
|
|
|
|
+ }
|
|
|
|
+ if (((bitField0_ & 0x00000008) == 0x00000008)) {
|
|
|
|
+ size += com.google.protobuf.CodedOutputStream
|
|
|
|
+ .computeMessageSize(4, recoveryCmd_);
|
|
|
|
+ }
|
|
|
|
+ if (((bitField0_ & 0x00000010) == 0x00000010)) {
|
|
|
|
+ size += com.google.protobuf.CodedOutputStream
|
|
|
|
+ .computeMessageSize(5, finalizeCmd_);
|
|
|
|
+ }
|
|
|
|
+ if (((bitField0_ & 0x00000020) == 0x00000020)) {
|
|
|
|
+ size += com.google.protobuf.CodedOutputStream
|
|
|
|
+ .computeMessageSize(6, keyUpdateCmd_);
|
|
|
|
+ }
|
|
|
|
+ if (((bitField0_ & 0x00000040) == 0x00000040)) {
|
|
|
|
+ size += com.google.protobuf.CodedOutputStream
|
|
|
|
+ .computeMessageSize(7, registerCmd_);
|
|
|
|
+ }
|
|
|
|
+ if (((bitField0_ & 0x00000080) == 0x00000080)) {
|
|
|
|
+ size += com.google.protobuf.CodedOutputStream
|
|
|
|
+ .computeMessageSize(8, upgradeCmd_);
|
|
|
|
+ }
|
|
|
|
+ size += getUnknownFields().getSerializedSize();
|
|
|
|
+ memoizedSerializedSize = size;
|
|
|
|
+ return size;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private static final long serialVersionUID = 0L;
|
|
|
|
+ @java.lang.Override
|
|
|
|
+ protected java.lang.Object writeReplace()
|
|
|
|
+ throws java.io.ObjectStreamException {
|
|
|
|
+ return super.writeReplace();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ @java.lang.Override
|
|
|
|
+ public boolean equals(final java.lang.Object obj) {
|
|
|
|
+ if (obj == this) {
|
|
|
|
+ return true;
|
|
|
|
+ }
|
|
|
|
+ if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto)) {
|
|
|
|
+ return super.equals(obj);
|
|
|
|
+ }
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto other = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto) obj;
|
|
|
|
+
|
|
|
|
+ boolean result = true;
|
|
|
|
+ result = result && (hasCmdType() == other.hasCmdType());
|
|
|
|
+ if (hasCmdType()) {
|
|
|
|
+ result = result &&
|
|
|
|
+ (getCmdType() == other.getCmdType());
|
|
|
|
+ }
|
|
|
|
+ result = result && (hasBalancerCmd() == other.hasBalancerCmd());
|
|
|
|
+ if (hasBalancerCmd()) {
|
|
|
|
+ result = result && getBalancerCmd()
|
|
|
|
+ .equals(other.getBalancerCmd());
|
|
|
|
+ }
|
|
|
|
+ result = result && (hasBlkCmd() == other.hasBlkCmd());
|
|
|
|
+ if (hasBlkCmd()) {
|
|
|
|
+ result = result && getBlkCmd()
|
|
|
|
+ .equals(other.getBlkCmd());
|
|
|
|
+ }
|
|
|
|
+ result = result && (hasRecoveryCmd() == other.hasRecoveryCmd());
|
|
|
|
+ if (hasRecoveryCmd()) {
|
|
|
|
+ result = result && getRecoveryCmd()
|
|
|
|
+ .equals(other.getRecoveryCmd());
|
|
|
|
+ }
|
|
|
|
+ result = result && (hasFinalizeCmd() == other.hasFinalizeCmd());
|
|
|
|
+ if (hasFinalizeCmd()) {
|
|
|
|
+ result = result && getFinalizeCmd()
|
|
|
|
+ .equals(other.getFinalizeCmd());
|
|
|
|
+ }
|
|
|
|
+ result = result && (hasKeyUpdateCmd() == other.hasKeyUpdateCmd());
|
|
|
|
+ if (hasKeyUpdateCmd()) {
|
|
|
|
+ result = result && getKeyUpdateCmd()
|
|
|
|
+ .equals(other.getKeyUpdateCmd());
|
|
|
|
+ }
|
|
|
|
+ result = result && (hasRegisterCmd() == other.hasRegisterCmd());
|
|
|
|
+ if (hasRegisterCmd()) {
|
|
|
|
+ result = result && getRegisterCmd()
|
|
|
|
+ .equals(other.getRegisterCmd());
|
|
|
|
+ }
|
|
|
|
+ result = result && (hasUpgradeCmd() == other.hasUpgradeCmd());
|
|
|
|
+ if (hasUpgradeCmd()) {
|
|
|
|
+ result = result && getUpgradeCmd()
|
|
|
|
+ .equals(other.getUpgradeCmd());
|
|
|
|
+ }
|
|
|
|
+ result = result &&
|
|
|
|
+ getUnknownFields().equals(other.getUnknownFields());
|
|
|
|
+ return result;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ @java.lang.Override
|
|
|
|
+ public int hashCode() {
|
|
|
|
+ int hash = 41;
|
|
|
|
+ hash = (19 * hash) + getDescriptorForType().hashCode();
|
|
|
|
+ if (hasCmdType()) {
|
|
|
|
+ hash = (37 * hash) + CMDTYPE_FIELD_NUMBER;
|
|
|
|
+ hash = (53 * hash) + hashEnum(getCmdType());
|
|
|
|
+ }
|
|
|
|
+ if (hasBalancerCmd()) {
|
|
|
|
+ hash = (37 * hash) + BALANCERCMD_FIELD_NUMBER;
|
|
|
|
+ hash = (53 * hash) + getBalancerCmd().hashCode();
|
|
|
|
+ }
|
|
|
|
+ if (hasBlkCmd()) {
|
|
|
|
+ hash = (37 * hash) + BLKCMD_FIELD_NUMBER;
|
|
|
|
+ hash = (53 * hash) + getBlkCmd().hashCode();
|
|
|
|
+ }
|
|
|
|
+ if (hasRecoveryCmd()) {
|
|
|
|
+ hash = (37 * hash) + RECOVERYCMD_FIELD_NUMBER;
|
|
|
|
+ hash = (53 * hash) + getRecoveryCmd().hashCode();
|
|
|
|
+ }
|
|
|
|
+ if (hasFinalizeCmd()) {
|
|
|
|
+ hash = (37 * hash) + FINALIZECMD_FIELD_NUMBER;
|
|
|
|
+ hash = (53 * hash) + getFinalizeCmd().hashCode();
|
|
|
|
+ }
|
|
|
|
+ if (hasKeyUpdateCmd()) {
|
|
|
|
+ hash = (37 * hash) + KEYUPDATECMD_FIELD_NUMBER;
|
|
|
|
+ hash = (53 * hash) + getKeyUpdateCmd().hashCode();
|
|
|
|
+ }
|
|
|
|
+ if (hasRegisterCmd()) {
|
|
|
|
+ hash = (37 * hash) + REGISTERCMD_FIELD_NUMBER;
|
|
|
|
+ hash = (53 * hash) + getRegisterCmd().hashCode();
|
|
|
|
+ }
|
|
|
|
+ if (hasUpgradeCmd()) {
|
|
|
|
+ hash = (37 * hash) + UPGRADECMD_FIELD_NUMBER;
|
|
|
|
+ hash = (53 * hash) + getUpgradeCmd().hashCode();
|
|
|
|
+ }
|
|
|
|
+ hash = (29 * hash) + getUnknownFields().hashCode();
|
|
|
|
+ return hash;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto parseFrom(
|
|
|
|
+ com.google.protobuf.ByteString data)
|
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
|
+ return newBuilder().mergeFrom(data).buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto parseFrom(
|
|
|
|
+ com.google.protobuf.ByteString data,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
|
+ return newBuilder().mergeFrom(data, extensionRegistry)
|
|
|
|
+ .buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto parseFrom(byte[] data)
|
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
|
+ return newBuilder().mergeFrom(data).buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto parseFrom(
|
|
|
|
+ byte[] data,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
|
+ return newBuilder().mergeFrom(data, extensionRegistry)
|
|
|
|
+ .buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto parseFrom(java.io.InputStream input)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ return newBuilder().mergeFrom(input).buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto parseFrom(
|
|
|
|
+ java.io.InputStream input,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ return newBuilder().mergeFrom(input, extensionRegistry)
|
|
|
|
+ .buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto parseDelimitedFrom(java.io.InputStream input)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ Builder builder = newBuilder();
|
|
|
|
+ if (builder.mergeDelimitedFrom(input)) {
|
|
|
|
+ return builder.buildParsed();
|
|
|
|
+ } else {
|
|
|
|
+ return null;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto parseDelimitedFrom(
|
|
|
|
+ java.io.InputStream input,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ Builder builder = newBuilder();
|
|
|
|
+ if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
|
|
|
|
+ return builder.buildParsed();
|
|
|
|
+ } else {
|
|
|
|
+ return null;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto parseFrom(
|
|
|
|
+ com.google.protobuf.CodedInputStream input)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ return newBuilder().mergeFrom(input).buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto parseFrom(
|
|
|
|
+ com.google.protobuf.CodedInputStream input,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ return newBuilder().mergeFrom(input, extensionRegistry)
|
|
|
|
+ .buildParsed();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public static Builder newBuilder() { return Builder.create(); }
|
|
|
|
+ public Builder newBuilderForType() { return newBuilder(); }
|
|
|
|
+ public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto prototype) {
|
|
|
|
+ return newBuilder().mergeFrom(prototype);
|
|
|
|
+ }
|
|
|
|
+ public Builder toBuilder() { return newBuilder(this); }
|
|
|
|
+
|
|
|
|
+ @java.lang.Override
|
|
|
|
+ protected Builder newBuilderForType(
|
|
|
|
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
|
|
|
|
+ Builder builder = new Builder(parent);
|
|
|
|
+ return builder;
|
|
|
|
+ }
|
|
|
|
+ public static final class Builder extends
|
|
|
|
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
|
|
|
|
+ implements org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProtoOrBuilder {
|
|
|
|
+ public static final com.google.protobuf.Descriptors.Descriptor
|
|
|
|
+ getDescriptor() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_DatanodeCommandProto_descriptor;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
|
|
|
+ internalGetFieldAccessorTable() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_DatanodeCommandProto_fieldAccessorTable;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ // Construct using org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.newBuilder()
|
|
|
|
+ private Builder() {
|
|
|
|
+ maybeForceBuilderInitialization();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private Builder(BuilderParent parent) {
|
|
|
|
+ super(parent);
|
|
|
|
+ maybeForceBuilderInitialization();
|
|
|
|
+ }
|
|
|
|
+ private void maybeForceBuilderInitialization() {
|
|
|
|
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
|
|
|
|
+ getBalancerCmdFieldBuilder();
|
|
|
|
+ getBlkCmdFieldBuilder();
|
|
|
|
+ getRecoveryCmdFieldBuilder();
|
|
|
|
+ getFinalizeCmdFieldBuilder();
|
|
|
|
+ getKeyUpdateCmdFieldBuilder();
|
|
|
|
+ getRegisterCmdFieldBuilder();
|
|
|
|
+ getUpgradeCmdFieldBuilder();
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ private static Builder create() {
|
|
|
|
+ return new Builder();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public Builder clear() {
|
|
|
|
+ super.clear();
|
|
|
|
+ cmdType_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Type.BalancerBandwidthCommand;
|
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000001);
|
|
|
|
+ if (balancerCmdBuilder_ == null) {
|
|
|
|
+ balancerCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto.getDefaultInstance();
|
|
|
|
+ } else {
|
|
|
|
+ balancerCmdBuilder_.clear();
|
|
|
|
+ }
|
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000002);
|
|
|
|
+ if (blkCmdBuilder_ == null) {
|
|
|
|
+ blkCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.getDefaultInstance();
|
|
|
|
+ } else {
|
|
|
|
+ blkCmdBuilder_.clear();
|
|
|
|
+ }
|
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000004);
|
|
|
|
+ if (recoveryCmdBuilder_ == null) {
|
|
|
|
+ recoveryCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommndProto.getDefaultInstance();
|
|
|
|
+ } else {
|
|
|
|
+ recoveryCmdBuilder_.clear();
|
|
|
|
+ }
|
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000008);
|
|
|
|
+ if (finalizeCmdBuilder_ == null) {
|
|
|
|
+ finalizeCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto.getDefaultInstance();
|
|
|
|
+ } else {
|
|
|
|
+ finalizeCmdBuilder_.clear();
|
|
|
|
+ }
|
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000010);
|
|
|
|
+ if (keyUpdateCmdBuilder_ == null) {
|
|
|
|
+ keyUpdateCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto.getDefaultInstance();
|
|
|
|
+ } else {
|
|
|
|
+ keyUpdateCmdBuilder_.clear();
|
|
|
|
+ }
|
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000020);
|
|
|
|
+ if (registerCmdBuilder_ == null) {
|
|
|
|
+ registerCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto.getDefaultInstance();
|
|
|
|
+ } else {
|
|
|
|
+ registerCmdBuilder_.clear();
|
|
|
|
+ }
|
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000040);
|
|
|
|
+ if (upgradeCmdBuilder_ == null) {
|
|
|
|
+ upgradeCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.getDefaultInstance();
|
|
|
|
+ } else {
|
|
|
|
+ upgradeCmdBuilder_.clear();
|
|
|
|
+ }
|
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000080);
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public Builder clone() {
|
|
|
|
+ return create().mergeFrom(buildPartial());
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public com.google.protobuf.Descriptors.Descriptor
|
|
|
|
+ getDescriptorForType() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.getDescriptor();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto getDefaultInstanceForType() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.getDefaultInstance();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto build() {
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto result = buildPartial();
|
|
|
|
+ if (!result.isInitialized()) {
|
|
|
|
+ throw newUninitializedMessageException(result);
|
|
|
|
+ }
|
|
|
|
+ return result;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto buildParsed()
|
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto result = buildPartial();
|
|
|
|
+ if (!result.isInitialized()) {
|
|
|
|
+ throw newUninitializedMessageException(
|
|
|
|
+ result).asInvalidProtocolBufferException();
|
|
|
|
+ }
|
|
|
|
+ return result;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto buildPartial() {
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto result = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto(this);
|
|
|
|
+ int from_bitField0_ = bitField0_;
|
|
|
|
+ int to_bitField0_ = 0;
|
|
|
|
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
|
+ to_bitField0_ |= 0x00000001;
|
|
|
|
+ }
|
|
|
|
+ result.cmdType_ = cmdType_;
|
|
|
|
+ if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
|
|
|
|
+ to_bitField0_ |= 0x00000002;
|
|
|
|
+ }
|
|
|
|
+ if (balancerCmdBuilder_ == null) {
|
|
|
|
+ result.balancerCmd_ = balancerCmd_;
|
|
|
|
+ } else {
|
|
|
|
+ result.balancerCmd_ = balancerCmdBuilder_.build();
|
|
|
|
+ }
|
|
|
|
+ if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
|
|
|
|
+ to_bitField0_ |= 0x00000004;
|
|
|
|
+ }
|
|
|
|
+ if (blkCmdBuilder_ == null) {
|
|
|
|
+ result.blkCmd_ = blkCmd_;
|
|
|
|
+ } else {
|
|
|
|
+ result.blkCmd_ = blkCmdBuilder_.build();
|
|
|
|
+ }
|
|
|
|
+ if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
|
|
|
|
+ to_bitField0_ |= 0x00000008;
|
|
|
|
+ }
|
|
|
|
+ if (recoveryCmdBuilder_ == null) {
|
|
|
|
+ result.recoveryCmd_ = recoveryCmd_;
|
|
|
|
+ } else {
|
|
|
|
+ result.recoveryCmd_ = recoveryCmdBuilder_.build();
|
|
|
|
+ }
|
|
|
|
+ if (((from_bitField0_ & 0x00000010) == 0x00000010)) {
|
|
|
|
+ to_bitField0_ |= 0x00000010;
|
|
|
|
+ }
|
|
|
|
+ if (finalizeCmdBuilder_ == null) {
|
|
|
|
+ result.finalizeCmd_ = finalizeCmd_;
|
|
|
|
+ } else {
|
|
|
|
+ result.finalizeCmd_ = finalizeCmdBuilder_.build();
|
|
|
|
+ }
|
|
|
|
+ if (((from_bitField0_ & 0x00000020) == 0x00000020)) {
|
|
|
|
+ to_bitField0_ |= 0x00000020;
|
|
|
|
+ }
|
|
|
|
+ if (keyUpdateCmdBuilder_ == null) {
|
|
|
|
+ result.keyUpdateCmd_ = keyUpdateCmd_;
|
|
|
|
+ } else {
|
|
|
|
+ result.keyUpdateCmd_ = keyUpdateCmdBuilder_.build();
|
|
|
|
+ }
|
|
|
|
+ if (((from_bitField0_ & 0x00000040) == 0x00000040)) {
|
|
|
|
+ to_bitField0_ |= 0x00000040;
|
|
|
|
+ }
|
|
|
|
+ if (registerCmdBuilder_ == null) {
|
|
|
|
+ result.registerCmd_ = registerCmd_;
|
|
|
|
+ } else {
|
|
|
|
+ result.registerCmd_ = registerCmdBuilder_.build();
|
|
|
|
+ }
|
|
|
|
+ if (((from_bitField0_ & 0x00000080) == 0x00000080)) {
|
|
|
|
+ to_bitField0_ |= 0x00000080;
|
|
|
|
+ }
|
|
|
|
+ if (upgradeCmdBuilder_ == null) {
|
|
|
|
+ result.upgradeCmd_ = upgradeCmd_;
|
|
|
|
+ } else {
|
|
|
|
+ result.upgradeCmd_ = upgradeCmdBuilder_.build();
|
|
|
|
+ }
|
|
|
|
+ result.bitField0_ = to_bitField0_;
|
|
|
|
+ onBuilt();
|
|
|
|
+ return result;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public Builder mergeFrom(com.google.protobuf.Message other) {
|
|
|
|
+ if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto) {
|
|
|
|
+ return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto)other);
|
|
|
|
+ } else {
|
|
|
|
+ super.mergeFrom(other);
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto other) {
|
|
|
|
+ if (other == org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.getDefaultInstance()) return this;
|
|
|
|
+ if (other.hasCmdType()) {
|
|
|
|
+ setCmdType(other.getCmdType());
|
|
|
|
+ }
|
|
|
|
+ if (other.hasBalancerCmd()) {
|
|
|
|
+ mergeBalancerCmd(other.getBalancerCmd());
|
|
|
|
+ }
|
|
|
|
+ if (other.hasBlkCmd()) {
|
|
|
|
+ mergeBlkCmd(other.getBlkCmd());
|
|
|
|
+ }
|
|
|
|
+ if (other.hasRecoveryCmd()) {
|
|
|
|
+ mergeRecoveryCmd(other.getRecoveryCmd());
|
|
|
|
+ }
|
|
|
|
+ if (other.hasFinalizeCmd()) {
|
|
|
|
+ mergeFinalizeCmd(other.getFinalizeCmd());
|
|
|
|
+ }
|
|
|
|
+ if (other.hasKeyUpdateCmd()) {
|
|
|
|
+ mergeKeyUpdateCmd(other.getKeyUpdateCmd());
|
|
|
|
+ }
|
|
|
|
+ if (other.hasRegisterCmd()) {
|
|
|
|
+ mergeRegisterCmd(other.getRegisterCmd());
|
|
|
|
+ }
|
|
|
|
+ if (other.hasUpgradeCmd()) {
|
|
|
|
+ mergeUpgradeCmd(other.getUpgradeCmd());
|
|
|
|
+ }
|
|
|
|
+ this.mergeUnknownFields(other.getUnknownFields());
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public final boolean isInitialized() {
|
|
|
|
+ if (!hasCmdType()) {
|
|
|
|
+
|
|
|
|
+ return false;
|
|
|
|
+ }
|
|
|
|
+ if (hasBalancerCmd()) {
|
|
|
|
+ if (!getBalancerCmd().isInitialized()) {
|
|
|
|
+
|
|
|
|
+ return false;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ if (hasBlkCmd()) {
|
|
|
|
+ if (!getBlkCmd().isInitialized()) {
|
|
|
|
+
|
|
|
|
+ return false;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ if (hasRecoveryCmd()) {
|
|
|
|
+ if (!getRecoveryCmd().isInitialized()) {
|
|
|
|
+
|
|
|
|
+ return false;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ if (hasFinalizeCmd()) {
|
|
|
|
+ if (!getFinalizeCmd().isInitialized()) {
|
|
|
|
+
|
|
|
|
+ return false;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ if (hasKeyUpdateCmd()) {
|
|
|
|
+ if (!getKeyUpdateCmd().isInitialized()) {
|
|
|
|
+
|
|
|
|
+ return false;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ if (hasUpgradeCmd()) {
|
|
|
|
+ if (!getUpgradeCmd().isInitialized()) {
|
|
|
|
+
|
|
|
|
+ return false;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ return true;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public Builder mergeFrom(
|
|
|
|
+ com.google.protobuf.CodedInputStream input,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
|
|
|
|
+ com.google.protobuf.UnknownFieldSet.newBuilder(
|
|
|
|
+ this.getUnknownFields());
|
|
|
|
+ while (true) {
|
|
|
|
+ int tag = input.readTag();
|
|
|
|
+ switch (tag) {
|
|
|
|
+ case 0:
|
|
|
|
+ this.setUnknownFields(unknownFields.build());
|
|
|
|
+ onChanged();
|
|
|
|
+ return this;
|
|
|
|
+ default: {
|
|
|
|
+ if (!parseUnknownField(input, unknownFields,
|
|
|
|
+ extensionRegistry, tag)) {
|
|
|
|
+ this.setUnknownFields(unknownFields.build());
|
|
|
|
+ onChanged();
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ break;
|
|
|
|
+ }
|
|
|
|
+ case 8: {
|
|
|
|
+ int rawValue = input.readEnum();
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Type value = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Type.valueOf(rawValue);
|
|
|
|
+ if (value == null) {
|
|
|
|
+ unknownFields.mergeVarintField(1, rawValue);
|
|
|
|
+ } else {
|
|
|
|
+ bitField0_ |= 0x00000001;
|
|
|
|
+ cmdType_ = value;
|
|
|
|
+ }
|
|
|
|
+ break;
|
|
|
|
+ }
|
|
|
|
+ case 18: {
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto.newBuilder();
|
|
|
|
+ if (hasBalancerCmd()) {
|
|
|
|
+ subBuilder.mergeFrom(getBalancerCmd());
|
|
|
|
+ }
|
|
|
|
+ input.readMessage(subBuilder, extensionRegistry);
|
|
|
|
+ setBalancerCmd(subBuilder.buildPartial());
|
|
|
|
+ break;
|
|
|
|
+ }
|
|
|
|
+ case 26: {
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.newBuilder();
|
|
|
|
+ if (hasBlkCmd()) {
|
|
|
|
+ subBuilder.mergeFrom(getBlkCmd());
|
|
|
|
+ }
|
|
|
|
+ input.readMessage(subBuilder, extensionRegistry);
|
|
|
|
+ setBlkCmd(subBuilder.buildPartial());
|
|
|
|
+ break;
|
|
|
|
+ }
|
|
|
|
+ case 34: {
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommndProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommndProto.newBuilder();
|
|
|
|
+ if (hasRecoveryCmd()) {
|
|
|
|
+ subBuilder.mergeFrom(getRecoveryCmd());
|
|
|
|
+ }
|
|
|
|
+ input.readMessage(subBuilder, extensionRegistry);
|
|
|
|
+ setRecoveryCmd(subBuilder.buildPartial());
|
|
|
|
+ break;
|
|
|
|
+ }
|
|
|
|
+ case 42: {
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto.newBuilder();
|
|
|
|
+ if (hasFinalizeCmd()) {
|
|
|
|
+ subBuilder.mergeFrom(getFinalizeCmd());
|
|
|
|
+ }
|
|
|
|
+ input.readMessage(subBuilder, extensionRegistry);
|
|
|
|
+ setFinalizeCmd(subBuilder.buildPartial());
|
|
|
|
+ break;
|
|
|
|
+ }
|
|
|
|
+ case 50: {
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto.newBuilder();
|
|
|
|
+ if (hasKeyUpdateCmd()) {
|
|
|
|
+ subBuilder.mergeFrom(getKeyUpdateCmd());
|
|
|
|
+ }
|
|
|
|
+ input.readMessage(subBuilder, extensionRegistry);
|
|
|
|
+ setKeyUpdateCmd(subBuilder.buildPartial());
|
|
|
|
+ break;
|
|
|
|
+ }
|
|
|
|
+ case 58: {
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto.newBuilder();
|
|
|
|
+ if (hasRegisterCmd()) {
|
|
|
|
+ subBuilder.mergeFrom(getRegisterCmd());
|
|
|
|
+ }
|
|
|
|
+ input.readMessage(subBuilder, extensionRegistry);
|
|
|
|
+ setRegisterCmd(subBuilder.buildPartial());
|
|
|
|
+ break;
|
|
|
|
+ }
|
|
|
|
+ case 66: {
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.newBuilder();
|
|
|
|
+ if (hasUpgradeCmd()) {
|
|
|
|
+ subBuilder.mergeFrom(getUpgradeCmd());
|
|
|
|
+ }
|
|
|
|
+ input.readMessage(subBuilder, extensionRegistry);
|
|
|
|
+ setUpgradeCmd(subBuilder.buildPartial());
|
|
|
|
+ break;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private int bitField0_;
|
|
|
|
+
|
|
|
|
+ // required .DatanodeCommandProto.Type cmdType = 1;
|
|
|
|
+ private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Type cmdType_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Type.BalancerBandwidthCommand;
|
|
|
|
+ public boolean hasCmdType() {
|
|
|
|
+ return ((bitField0_ & 0x00000001) == 0x00000001);
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Type getCmdType() {
|
|
|
|
+ return cmdType_;
|
|
|
|
+ }
|
|
|
|
+ public Builder setCmdType(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Type value) {
|
|
|
|
+ if (value == null) {
|
|
|
|
+ throw new NullPointerException();
|
|
|
|
+ }
|
|
|
|
+ bitField0_ |= 0x00000001;
|
|
|
|
+ cmdType_ = value;
|
|
|
|
+ onChanged();
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder clearCmdType() {
|
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000001);
|
|
|
|
+ cmdType_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Type.BalancerBandwidthCommand;
|
|
|
|
+ onChanged();
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ // optional .BalancerBandwidthCommandProto balancerCmd = 2;
|
|
|
|
+ private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto balancerCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto.getDefaultInstance();
|
|
|
|
+ private com.google.protobuf.SingleFieldBuilder<
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProtoOrBuilder> balancerCmdBuilder_;
|
|
|
|
+ public boolean hasBalancerCmd() {
|
|
|
|
+ return ((bitField0_ & 0x00000002) == 0x00000002);
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto getBalancerCmd() {
|
|
|
|
+ if (balancerCmdBuilder_ == null) {
|
|
|
|
+ return balancerCmd_;
|
|
|
|
+ } else {
|
|
|
|
+ return balancerCmdBuilder_.getMessage();
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ public Builder setBalancerCmd(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto value) {
|
|
|
|
+ if (balancerCmdBuilder_ == null) {
|
|
|
|
+ if (value == null) {
|
|
|
|
+ throw new NullPointerException();
|
|
|
|
+ }
|
|
|
|
+ balancerCmd_ = value;
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ balancerCmdBuilder_.setMessage(value);
|
|
|
|
+ }
|
|
|
|
+ bitField0_ |= 0x00000002;
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder setBalancerCmd(
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto.Builder builderForValue) {
|
|
|
|
+ if (balancerCmdBuilder_ == null) {
|
|
|
|
+ balancerCmd_ = builderForValue.build();
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ balancerCmdBuilder_.setMessage(builderForValue.build());
|
|
|
|
+ }
|
|
|
|
+ bitField0_ |= 0x00000002;
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder mergeBalancerCmd(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto value) {
|
|
|
|
+ if (balancerCmdBuilder_ == null) {
|
|
|
|
+ if (((bitField0_ & 0x00000002) == 0x00000002) &&
|
|
|
|
+ balancerCmd_ != org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto.getDefaultInstance()) {
|
|
|
|
+ balancerCmd_ =
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto.newBuilder(balancerCmd_).mergeFrom(value).buildPartial();
|
|
|
|
+ } else {
|
|
|
|
+ balancerCmd_ = value;
|
|
|
|
+ }
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ balancerCmdBuilder_.mergeFrom(value);
|
|
|
|
+ }
|
|
|
|
+ bitField0_ |= 0x00000002;
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder clearBalancerCmd() {
|
|
|
|
+ if (balancerCmdBuilder_ == null) {
|
|
|
|
+ balancerCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto.getDefaultInstance();
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ balancerCmdBuilder_.clear();
|
|
|
|
+ }
|
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000002);
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto.Builder getBalancerCmdBuilder() {
|
|
|
|
+ bitField0_ |= 0x00000002;
|
|
|
|
+ onChanged();
|
|
|
|
+ return getBalancerCmdFieldBuilder().getBuilder();
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProtoOrBuilder getBalancerCmdOrBuilder() {
|
|
|
|
+ if (balancerCmdBuilder_ != null) {
|
|
|
|
+ return balancerCmdBuilder_.getMessageOrBuilder();
|
|
|
|
+ } else {
|
|
|
|
+ return balancerCmd_;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ private com.google.protobuf.SingleFieldBuilder<
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProtoOrBuilder>
|
|
|
|
+ getBalancerCmdFieldBuilder() {
|
|
|
|
+ if (balancerCmdBuilder_ == null) {
|
|
|
|
+ balancerCmdBuilder_ = new com.google.protobuf.SingleFieldBuilder<
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProtoOrBuilder>(
|
|
|
|
+ balancerCmd_,
|
|
|
|
+ getParentForChildren(),
|
|
|
|
+ isClean());
|
|
|
|
+ balancerCmd_ = null;
|
|
|
|
+ }
|
|
|
|
+ return balancerCmdBuilder_;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ // optional .BlockCommandProto blkCmd = 3;
|
|
|
|
+ private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto blkCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.getDefaultInstance();
|
|
|
|
+ private com.google.protobuf.SingleFieldBuilder<
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProtoOrBuilder> blkCmdBuilder_;
|
|
|
|
+ public boolean hasBlkCmd() {
|
|
|
|
+ return ((bitField0_ & 0x00000004) == 0x00000004);
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto getBlkCmd() {
|
|
|
|
+ if (blkCmdBuilder_ == null) {
|
|
|
|
+ return blkCmd_;
|
|
|
|
+ } else {
|
|
|
|
+ return blkCmdBuilder_.getMessage();
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ public Builder setBlkCmd(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto value) {
|
|
|
|
+ if (blkCmdBuilder_ == null) {
|
|
|
|
+ if (value == null) {
|
|
|
|
+ throw new NullPointerException();
|
|
|
|
+ }
|
|
|
|
+ blkCmd_ = value;
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ blkCmdBuilder_.setMessage(value);
|
|
|
|
+ }
|
|
|
|
+ bitField0_ |= 0x00000004;
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder setBlkCmd(
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.Builder builderForValue) {
|
|
|
|
+ if (blkCmdBuilder_ == null) {
|
|
|
|
+ blkCmd_ = builderForValue.build();
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ blkCmdBuilder_.setMessage(builderForValue.build());
|
|
|
|
+ }
|
|
|
|
+ bitField0_ |= 0x00000004;
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder mergeBlkCmd(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto value) {
|
|
|
|
+ if (blkCmdBuilder_ == null) {
|
|
|
|
+ if (((bitField0_ & 0x00000004) == 0x00000004) &&
|
|
|
|
+ blkCmd_ != org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.getDefaultInstance()) {
|
|
|
|
+ blkCmd_ =
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.newBuilder(blkCmd_).mergeFrom(value).buildPartial();
|
|
|
|
+ } else {
|
|
|
|
+ blkCmd_ = value;
|
|
|
|
+ }
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ blkCmdBuilder_.mergeFrom(value);
|
|
|
|
+ }
|
|
|
|
+ bitField0_ |= 0x00000004;
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder clearBlkCmd() {
|
|
|
|
+ if (blkCmdBuilder_ == null) {
|
|
|
|
+ blkCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.getDefaultInstance();
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ blkCmdBuilder_.clear();
|
|
|
|
+ }
|
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000004);
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.Builder getBlkCmdBuilder() {
|
|
|
|
+ bitField0_ |= 0x00000004;
|
|
|
|
+ onChanged();
|
|
|
|
+ return getBlkCmdFieldBuilder().getBuilder();
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProtoOrBuilder getBlkCmdOrBuilder() {
|
|
|
|
+ if (blkCmdBuilder_ != null) {
|
|
|
|
+ return blkCmdBuilder_.getMessageOrBuilder();
|
|
|
|
+ } else {
|
|
|
|
+ return blkCmd_;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ private com.google.protobuf.SingleFieldBuilder<
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProtoOrBuilder>
|
|
|
|
+ getBlkCmdFieldBuilder() {
|
|
|
|
+ if (blkCmdBuilder_ == null) {
|
|
|
|
+ blkCmdBuilder_ = new com.google.protobuf.SingleFieldBuilder<
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProtoOrBuilder>(
|
|
|
|
+ blkCmd_,
|
|
|
|
+ getParentForChildren(),
|
|
|
|
+ isClean());
|
|
|
|
+ blkCmd_ = null;
|
|
|
|
+ }
|
|
|
|
+ return blkCmdBuilder_;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ // optional .BlockRecoveryCommndProto recoveryCmd = 4;
|
|
|
|
+ private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommndProto recoveryCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommndProto.getDefaultInstance();
|
|
|
|
+ private com.google.protobuf.SingleFieldBuilder<
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommndProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommndProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommndProtoOrBuilder> recoveryCmdBuilder_;
|
|
|
|
+ public boolean hasRecoveryCmd() {
|
|
|
|
+ return ((bitField0_ & 0x00000008) == 0x00000008);
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommndProto getRecoveryCmd() {
|
|
|
|
+ if (recoveryCmdBuilder_ == null) {
|
|
|
|
+ return recoveryCmd_;
|
|
|
|
+ } else {
|
|
|
|
+ return recoveryCmdBuilder_.getMessage();
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ public Builder setRecoveryCmd(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommndProto value) {
|
|
|
|
+ if (recoveryCmdBuilder_ == null) {
|
|
|
|
+ if (value == null) {
|
|
|
|
+ throw new NullPointerException();
|
|
|
|
+ }
|
|
|
|
+ recoveryCmd_ = value;
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ recoveryCmdBuilder_.setMessage(value);
|
|
|
|
+ }
|
|
|
|
+ bitField0_ |= 0x00000008;
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder setRecoveryCmd(
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommndProto.Builder builderForValue) {
|
|
|
|
+ if (recoveryCmdBuilder_ == null) {
|
|
|
|
+ recoveryCmd_ = builderForValue.build();
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ recoveryCmdBuilder_.setMessage(builderForValue.build());
|
|
|
|
+ }
|
|
|
|
+ bitField0_ |= 0x00000008;
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder mergeRecoveryCmd(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommndProto value) {
|
|
|
|
+ if (recoveryCmdBuilder_ == null) {
|
|
|
|
+ if (((bitField0_ & 0x00000008) == 0x00000008) &&
|
|
|
|
+ recoveryCmd_ != org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommndProto.getDefaultInstance()) {
|
|
|
|
+ recoveryCmd_ =
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommndProto.newBuilder(recoveryCmd_).mergeFrom(value).buildPartial();
|
|
|
|
+ } else {
|
|
|
|
+ recoveryCmd_ = value;
|
|
|
|
+ }
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ recoveryCmdBuilder_.mergeFrom(value);
|
|
|
|
+ }
|
|
|
|
+ bitField0_ |= 0x00000008;
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder clearRecoveryCmd() {
|
|
|
|
+ if (recoveryCmdBuilder_ == null) {
|
|
|
|
+ recoveryCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommndProto.getDefaultInstance();
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ recoveryCmdBuilder_.clear();
|
|
|
|
+ }
|
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000008);
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommndProto.Builder getRecoveryCmdBuilder() {
|
|
|
|
+ bitField0_ |= 0x00000008;
|
|
|
|
+ onChanged();
|
|
|
|
+ return getRecoveryCmdFieldBuilder().getBuilder();
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommndProtoOrBuilder getRecoveryCmdOrBuilder() {
|
|
|
|
+ if (recoveryCmdBuilder_ != null) {
|
|
|
|
+ return recoveryCmdBuilder_.getMessageOrBuilder();
|
|
|
|
+ } else {
|
|
|
|
+ return recoveryCmd_;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ private com.google.protobuf.SingleFieldBuilder<
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommndProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommndProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommndProtoOrBuilder>
|
|
|
|
+ getRecoveryCmdFieldBuilder() {
|
|
|
|
+ if (recoveryCmdBuilder_ == null) {
|
|
|
|
+ recoveryCmdBuilder_ = new com.google.protobuf.SingleFieldBuilder<
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommndProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommndProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommndProtoOrBuilder>(
|
|
|
|
+ recoveryCmd_,
|
|
|
|
+ getParentForChildren(),
|
|
|
|
+ isClean());
|
|
|
|
+ recoveryCmd_ = null;
|
|
|
|
+ }
|
|
|
|
+ return recoveryCmdBuilder_;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ // optional .FinalizeCommandProto finalizeCmd = 5;
|
|
|
|
+ private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto finalizeCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto.getDefaultInstance();
|
|
|
|
+ private com.google.protobuf.SingleFieldBuilder<
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProtoOrBuilder> finalizeCmdBuilder_;
|
|
|
|
+ public boolean hasFinalizeCmd() {
|
|
|
|
+ return ((bitField0_ & 0x00000010) == 0x00000010);
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto getFinalizeCmd() {
|
|
|
|
+ if (finalizeCmdBuilder_ == null) {
|
|
|
|
+ return finalizeCmd_;
|
|
|
|
+ } else {
|
|
|
|
+ return finalizeCmdBuilder_.getMessage();
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ public Builder setFinalizeCmd(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto value) {
|
|
|
|
+ if (finalizeCmdBuilder_ == null) {
|
|
|
|
+ if (value == null) {
|
|
|
|
+ throw new NullPointerException();
|
|
|
|
+ }
|
|
|
|
+ finalizeCmd_ = value;
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ finalizeCmdBuilder_.setMessage(value);
|
|
|
|
+ }
|
|
|
|
+ bitField0_ |= 0x00000010;
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder setFinalizeCmd(
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto.Builder builderForValue) {
|
|
|
|
+ if (finalizeCmdBuilder_ == null) {
|
|
|
|
+ finalizeCmd_ = builderForValue.build();
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ finalizeCmdBuilder_.setMessage(builderForValue.build());
|
|
|
|
+ }
|
|
|
|
+ bitField0_ |= 0x00000010;
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder mergeFinalizeCmd(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto value) {
|
|
|
|
+ if (finalizeCmdBuilder_ == null) {
|
|
|
|
+ if (((bitField0_ & 0x00000010) == 0x00000010) &&
|
|
|
|
+ finalizeCmd_ != org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto.getDefaultInstance()) {
|
|
|
|
+ finalizeCmd_ =
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto.newBuilder(finalizeCmd_).mergeFrom(value).buildPartial();
|
|
|
|
+ } else {
|
|
|
|
+ finalizeCmd_ = value;
|
|
|
|
+ }
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ finalizeCmdBuilder_.mergeFrom(value);
|
|
|
|
+ }
|
|
|
|
+ bitField0_ |= 0x00000010;
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder clearFinalizeCmd() {
|
|
|
|
+ if (finalizeCmdBuilder_ == null) {
|
|
|
|
+ finalizeCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto.getDefaultInstance();
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ finalizeCmdBuilder_.clear();
|
|
|
|
+ }
|
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000010);
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto.Builder getFinalizeCmdBuilder() {
|
|
|
|
+ bitField0_ |= 0x00000010;
|
|
|
|
+ onChanged();
|
|
|
|
+ return getFinalizeCmdFieldBuilder().getBuilder();
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProtoOrBuilder getFinalizeCmdOrBuilder() {
|
|
|
|
+ if (finalizeCmdBuilder_ != null) {
|
|
|
|
+ return finalizeCmdBuilder_.getMessageOrBuilder();
|
|
|
|
+ } else {
|
|
|
|
+ return finalizeCmd_;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ private com.google.protobuf.SingleFieldBuilder<
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProtoOrBuilder>
|
|
|
|
+ getFinalizeCmdFieldBuilder() {
|
|
|
|
+ if (finalizeCmdBuilder_ == null) {
|
|
|
|
+ finalizeCmdBuilder_ = new com.google.protobuf.SingleFieldBuilder<
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProtoOrBuilder>(
|
|
|
|
+ finalizeCmd_,
|
|
|
|
+ getParentForChildren(),
|
|
|
|
+ isClean());
|
|
|
|
+ finalizeCmd_ = null;
|
|
|
|
+ }
|
|
|
|
+ return finalizeCmdBuilder_;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ // optional .KeyUpdateCommandProto keyUpdateCmd = 6;
|
|
|
|
+ private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto keyUpdateCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto.getDefaultInstance();
|
|
|
|
+ private com.google.protobuf.SingleFieldBuilder<
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProtoOrBuilder> keyUpdateCmdBuilder_;
|
|
|
|
+ public boolean hasKeyUpdateCmd() {
|
|
|
|
+ return ((bitField0_ & 0x00000020) == 0x00000020);
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto getKeyUpdateCmd() {
|
|
|
|
+ if (keyUpdateCmdBuilder_ == null) {
|
|
|
|
+ return keyUpdateCmd_;
|
|
|
|
+ } else {
|
|
|
|
+ return keyUpdateCmdBuilder_.getMessage();
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ public Builder setKeyUpdateCmd(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto value) {
|
|
|
|
+ if (keyUpdateCmdBuilder_ == null) {
|
|
|
|
+ if (value == null) {
|
|
|
|
+ throw new NullPointerException();
|
|
|
|
+ }
|
|
|
|
+ keyUpdateCmd_ = value;
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ keyUpdateCmdBuilder_.setMessage(value);
|
|
|
|
+ }
|
|
|
|
+ bitField0_ |= 0x00000020;
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder setKeyUpdateCmd(
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto.Builder builderForValue) {
|
|
|
|
+ if (keyUpdateCmdBuilder_ == null) {
|
|
|
|
+ keyUpdateCmd_ = builderForValue.build();
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ keyUpdateCmdBuilder_.setMessage(builderForValue.build());
|
|
|
|
+ }
|
|
|
|
+ bitField0_ |= 0x00000020;
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder mergeKeyUpdateCmd(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto value) {
|
|
|
|
+ if (keyUpdateCmdBuilder_ == null) {
|
|
|
|
+ if (((bitField0_ & 0x00000020) == 0x00000020) &&
|
|
|
|
+ keyUpdateCmd_ != org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto.getDefaultInstance()) {
|
|
|
|
+ keyUpdateCmd_ =
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto.newBuilder(keyUpdateCmd_).mergeFrom(value).buildPartial();
|
|
|
|
+ } else {
|
|
|
|
+ keyUpdateCmd_ = value;
|
|
|
|
+ }
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ keyUpdateCmdBuilder_.mergeFrom(value);
|
|
|
|
+ }
|
|
|
|
+ bitField0_ |= 0x00000020;
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder clearKeyUpdateCmd() {
|
|
|
|
+ if (keyUpdateCmdBuilder_ == null) {
|
|
|
|
+ keyUpdateCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto.getDefaultInstance();
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ keyUpdateCmdBuilder_.clear();
|
|
|
|
+ }
|
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000020);
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto.Builder getKeyUpdateCmdBuilder() {
|
|
|
|
+ bitField0_ |= 0x00000020;
|
|
|
|
+ onChanged();
|
|
|
|
+ return getKeyUpdateCmdFieldBuilder().getBuilder();
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProtoOrBuilder getKeyUpdateCmdOrBuilder() {
|
|
|
|
+ if (keyUpdateCmdBuilder_ != null) {
|
|
|
|
+ return keyUpdateCmdBuilder_.getMessageOrBuilder();
|
|
|
|
+ } else {
|
|
|
|
+ return keyUpdateCmd_;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ private com.google.protobuf.SingleFieldBuilder<
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProtoOrBuilder>
|
|
|
|
+ getKeyUpdateCmdFieldBuilder() {
|
|
|
|
+ if (keyUpdateCmdBuilder_ == null) {
|
|
|
|
+ keyUpdateCmdBuilder_ = new com.google.protobuf.SingleFieldBuilder<
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProtoOrBuilder>(
|
|
|
|
+ keyUpdateCmd_,
|
|
|
|
+ getParentForChildren(),
|
|
|
|
+ isClean());
|
|
|
|
+ keyUpdateCmd_ = null;
|
|
|
|
+ }
|
|
|
|
+ return keyUpdateCmdBuilder_;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ // optional .RegisterCommandProto registerCmd = 7;
|
|
|
|
+ private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto registerCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto.getDefaultInstance();
|
|
|
|
+ private com.google.protobuf.SingleFieldBuilder<
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProtoOrBuilder> registerCmdBuilder_;
|
|
|
|
+ public boolean hasRegisterCmd() {
|
|
|
|
+ return ((bitField0_ & 0x00000040) == 0x00000040);
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto getRegisterCmd() {
|
|
|
|
+ if (registerCmdBuilder_ == null) {
|
|
|
|
+ return registerCmd_;
|
|
|
|
+ } else {
|
|
|
|
+ return registerCmdBuilder_.getMessage();
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ public Builder setRegisterCmd(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto value) {
|
|
|
|
+ if (registerCmdBuilder_ == null) {
|
|
|
|
+ if (value == null) {
|
|
|
|
+ throw new NullPointerException();
|
|
|
|
+ }
|
|
|
|
+ registerCmd_ = value;
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ registerCmdBuilder_.setMessage(value);
|
|
|
|
+ }
|
|
|
|
+ bitField0_ |= 0x00000040;
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder setRegisterCmd(
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto.Builder builderForValue) {
|
|
|
|
+ if (registerCmdBuilder_ == null) {
|
|
|
|
+ registerCmd_ = builderForValue.build();
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ registerCmdBuilder_.setMessage(builderForValue.build());
|
|
|
|
+ }
|
|
|
|
+ bitField0_ |= 0x00000040;
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder mergeRegisterCmd(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto value) {
|
|
|
|
+ if (registerCmdBuilder_ == null) {
|
|
|
|
+ if (((bitField0_ & 0x00000040) == 0x00000040) &&
|
|
|
|
+ registerCmd_ != org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto.getDefaultInstance()) {
|
|
|
|
+ registerCmd_ =
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto.newBuilder(registerCmd_).mergeFrom(value).buildPartial();
|
|
|
|
+ } else {
|
|
|
|
+ registerCmd_ = value;
|
|
|
|
+ }
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ registerCmdBuilder_.mergeFrom(value);
|
|
|
|
+ }
|
|
|
|
+ bitField0_ |= 0x00000040;
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder clearRegisterCmd() {
|
|
|
|
+ if (registerCmdBuilder_ == null) {
|
|
|
|
+ registerCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto.getDefaultInstance();
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ registerCmdBuilder_.clear();
|
|
|
|
+ }
|
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000040);
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto.Builder getRegisterCmdBuilder() {
|
|
|
|
+ bitField0_ |= 0x00000040;
|
|
|
|
+ onChanged();
|
|
|
|
+ return getRegisterCmdFieldBuilder().getBuilder();
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProtoOrBuilder getRegisterCmdOrBuilder() {
|
|
|
|
+ if (registerCmdBuilder_ != null) {
|
|
|
|
+ return registerCmdBuilder_.getMessageOrBuilder();
|
|
|
|
+ } else {
|
|
|
|
+ return registerCmd_;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ private com.google.protobuf.SingleFieldBuilder<
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProtoOrBuilder>
|
|
|
|
+ getRegisterCmdFieldBuilder() {
|
|
|
|
+ if (registerCmdBuilder_ == null) {
|
|
|
|
+ registerCmdBuilder_ = new com.google.protobuf.SingleFieldBuilder<
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProtoOrBuilder>(
|
|
|
|
+ registerCmd_,
|
|
|
|
+ getParentForChildren(),
|
|
|
|
+ isClean());
|
|
|
|
+ registerCmd_ = null;
|
|
|
|
+ }
|
|
|
|
+ return registerCmdBuilder_;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ // optional .UpgradeCommandProto upgradeCmd = 8;
|
|
|
|
+ private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto upgradeCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.getDefaultInstance();
|
|
|
|
+ private com.google.protobuf.SingleFieldBuilder<
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProtoOrBuilder> upgradeCmdBuilder_;
|
|
|
|
+ public boolean hasUpgradeCmd() {
|
|
|
|
+ return ((bitField0_ & 0x00000080) == 0x00000080);
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto getUpgradeCmd() {
|
|
|
|
+ if (upgradeCmdBuilder_ == null) {
|
|
|
|
+ return upgradeCmd_;
|
|
|
|
+ } else {
|
|
|
|
+ return upgradeCmdBuilder_.getMessage();
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ public Builder setUpgradeCmd(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto value) {
|
|
|
|
+ if (upgradeCmdBuilder_ == null) {
|
|
|
|
+ if (value == null) {
|
|
|
|
+ throw new NullPointerException();
|
|
|
|
+ }
|
|
|
|
+ upgradeCmd_ = value;
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ upgradeCmdBuilder_.setMessage(value);
|
|
|
|
+ }
|
|
|
|
+ bitField0_ |= 0x00000080;
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder setUpgradeCmd(
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.Builder builderForValue) {
|
|
|
|
+ if (upgradeCmdBuilder_ == null) {
|
|
|
|
+ upgradeCmd_ = builderForValue.build();
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ upgradeCmdBuilder_.setMessage(builderForValue.build());
|
|
|
|
+ }
|
|
|
|
+ bitField0_ |= 0x00000080;
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder mergeUpgradeCmd(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto value) {
|
|
|
|
+ if (upgradeCmdBuilder_ == null) {
|
|
|
|
+ if (((bitField0_ & 0x00000080) == 0x00000080) &&
|
|
|
|
+ upgradeCmd_ != org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.getDefaultInstance()) {
|
|
|
|
+ upgradeCmd_ =
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.newBuilder(upgradeCmd_).mergeFrom(value).buildPartial();
|
|
|
|
+ } else {
|
|
|
|
+ upgradeCmd_ = value;
|
|
|
|
+ }
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ upgradeCmdBuilder_.mergeFrom(value);
|
|
|
|
+ }
|
|
|
|
+ bitField0_ |= 0x00000080;
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder clearUpgradeCmd() {
|
|
|
|
+ if (upgradeCmdBuilder_ == null) {
|
|
|
|
+ upgradeCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.getDefaultInstance();
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ upgradeCmdBuilder_.clear();
|
|
|
|
+ }
|
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000080);
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.Builder getUpgradeCmdBuilder() {
|
|
|
|
+ bitField0_ |= 0x00000080;
|
|
|
|
+ onChanged();
|
|
|
|
+ return getUpgradeCmdFieldBuilder().getBuilder();
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProtoOrBuilder getUpgradeCmdOrBuilder() {
|
|
|
|
+ if (upgradeCmdBuilder_ != null) {
|
|
|
|
+ return upgradeCmdBuilder_.getMessageOrBuilder();
|
|
|
|
+ } else {
|
|
|
|
+ return upgradeCmd_;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ private com.google.protobuf.SingleFieldBuilder<
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProtoOrBuilder>
|
|
|
|
+ getUpgradeCmdFieldBuilder() {
|
|
|
|
+ if (upgradeCmdBuilder_ == null) {
|
|
|
|
+ upgradeCmdBuilder_ = new com.google.protobuf.SingleFieldBuilder<
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProtoOrBuilder>(
|
|
|
|
+ upgradeCmd_,
|
|
|
|
+ getParentForChildren(),
|
|
|
|
+ isClean());
|
|
|
|
+ upgradeCmd_ = null;
|
|
|
|
+ }
|
|
|
|
+ return upgradeCmdBuilder_;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ // @@protoc_insertion_point(builder_scope:DatanodeCommandProto)
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ static {
|
|
|
|
+ defaultInstance = new DatanodeCommandProto(true);
|
|
|
|
+ defaultInstance.initFields();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ // @@protoc_insertion_point(class_scope:DatanodeCommandProto)
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public interface BalancerBandwidthCommandProtoOrBuilder
|
|
|
|
+ extends com.google.protobuf.MessageOrBuilder {
|
|
|
|
+
|
|
|
|
+ // required uint64 bandwidth = 1;
|
|
|
|
+ boolean hasBandwidth();
|
|
|
|
+ long getBandwidth();
|
|
|
|
+ }
|
|
|
|
+ public static final class BalancerBandwidthCommandProto extends
|
|
|
|
+ com.google.protobuf.GeneratedMessage
|
|
|
|
+ implements BalancerBandwidthCommandProtoOrBuilder {
|
|
|
|
+ // Use BalancerBandwidthCommandProto.newBuilder() to construct.
|
|
|
|
+ private BalancerBandwidthCommandProto(Builder builder) {
|
|
|
|
+ super(builder);
|
|
|
|
+ }
|
|
|
|
+ private BalancerBandwidthCommandProto(boolean noInit) {}
|
|
|
|
+
|
|
|
|
+ private static final BalancerBandwidthCommandProto defaultInstance;
|
|
|
|
+ public static BalancerBandwidthCommandProto getDefaultInstance() {
|
|
|
|
+ return defaultInstance;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public BalancerBandwidthCommandProto getDefaultInstanceForType() {
|
|
|
|
+ return defaultInstance;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public static final com.google.protobuf.Descriptors.Descriptor
|
|
|
|
+ getDescriptor() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_BalancerBandwidthCommandProto_descriptor;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
|
|
|
+ internalGetFieldAccessorTable() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_BalancerBandwidthCommandProto_fieldAccessorTable;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private int bitField0_;
|
|
|
|
+ // required uint64 bandwidth = 1;
|
|
|
|
+ public static final int BANDWIDTH_FIELD_NUMBER = 1;
|
|
|
|
+ private long bandwidth_;
|
|
|
|
+ public boolean hasBandwidth() {
|
|
|
|
+ return ((bitField0_ & 0x00000001) == 0x00000001);
|
|
|
|
+ }
|
|
|
|
+ public long getBandwidth() {
|
|
|
|
+ return bandwidth_;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private void initFields() {
|
|
|
|
+ bandwidth_ = 0L;
|
|
|
|
+ }
|
|
|
|
+ private byte memoizedIsInitialized = -1;
|
|
|
|
+ public final boolean isInitialized() {
|
|
|
|
+ byte isInitialized = memoizedIsInitialized;
|
|
|
|
+ if (isInitialized != -1) return isInitialized == 1;
|
|
|
|
+
|
|
|
|
+ if (!hasBandwidth()) {
|
|
|
|
+ memoizedIsInitialized = 0;
|
|
|
|
+ return false;
|
|
|
|
+ }
|
|
|
|
+ memoizedIsInitialized = 1;
|
|
|
|
+ return true;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ getSerializedSize();
|
|
|
|
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
|
+ output.writeUInt64(1, bandwidth_);
|
|
|
|
+ }
|
|
|
|
+ getUnknownFields().writeTo(output);
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private int memoizedSerializedSize = -1;
|
|
|
|
+ public int getSerializedSize() {
|
|
|
|
+ int size = memoizedSerializedSize;
|
|
|
|
+ if (size != -1) return size;
|
|
|
|
+
|
|
|
|
+ size = 0;
|
|
|
|
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
|
+ size += com.google.protobuf.CodedOutputStream
|
|
|
|
+ .computeUInt64Size(1, bandwidth_);
|
|
|
|
+ }
|
|
|
|
+ size += getUnknownFields().getSerializedSize();
|
|
|
|
+ memoizedSerializedSize = size;
|
|
|
|
+ return size;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private static final long serialVersionUID = 0L;
|
|
|
|
+ @java.lang.Override
|
|
|
|
+ protected java.lang.Object writeReplace()
|
|
|
|
+ throws java.io.ObjectStreamException {
|
|
|
|
+ return super.writeReplace();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ @java.lang.Override
|
|
|
|
+ public boolean equals(final java.lang.Object obj) {
|
|
|
|
+ if (obj == this) {
|
|
|
|
+ return true;
|
|
|
|
+ }
|
|
|
|
+ if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto)) {
|
|
|
|
+ return super.equals(obj);
|
|
|
|
+ }
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto other = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto) obj;
|
|
|
|
+
|
|
|
|
+ boolean result = true;
|
|
|
|
+ result = result && (hasBandwidth() == other.hasBandwidth());
|
|
|
|
+ if (hasBandwidth()) {
|
|
|
|
+ result = result && (getBandwidth()
|
|
|
|
+ == other.getBandwidth());
|
|
|
|
+ }
|
|
|
|
+ result = result &&
|
|
|
|
+ getUnknownFields().equals(other.getUnknownFields());
|
|
|
|
+ return result;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ @java.lang.Override
|
|
|
|
+ public int hashCode() {
|
|
|
|
+ int hash = 41;
|
|
|
|
+ hash = (19 * hash) + getDescriptorForType().hashCode();
|
|
|
|
+ if (hasBandwidth()) {
|
|
|
|
+ hash = (37 * hash) + BANDWIDTH_FIELD_NUMBER;
|
|
|
|
+ hash = (53 * hash) + hashLong(getBandwidth());
|
|
|
|
+ }
|
|
|
|
+ hash = (29 * hash) + getUnknownFields().hashCode();
|
|
|
|
+ return hash;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto parseFrom(
|
|
|
|
+ com.google.protobuf.ByteString data)
|
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
|
+ return newBuilder().mergeFrom(data).buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto parseFrom(
|
|
|
|
+ com.google.protobuf.ByteString data,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
|
+ return newBuilder().mergeFrom(data, extensionRegistry)
|
|
|
|
+ .buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto parseFrom(byte[] data)
|
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
|
+ return newBuilder().mergeFrom(data).buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto parseFrom(
|
|
|
|
+ byte[] data,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
|
+ return newBuilder().mergeFrom(data, extensionRegistry)
|
|
|
|
+ .buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto parseFrom(java.io.InputStream input)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ return newBuilder().mergeFrom(input).buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto parseFrom(
|
|
|
|
+ java.io.InputStream input,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ return newBuilder().mergeFrom(input, extensionRegistry)
|
|
|
|
+ .buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto parseDelimitedFrom(java.io.InputStream input)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ Builder builder = newBuilder();
|
|
|
|
+ if (builder.mergeDelimitedFrom(input)) {
|
|
|
|
+ return builder.buildParsed();
|
|
|
|
+ } else {
|
|
|
|
+ return null;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto parseDelimitedFrom(
|
|
|
|
+ java.io.InputStream input,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ Builder builder = newBuilder();
|
|
|
|
+ if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
|
|
|
|
+ return builder.buildParsed();
|
|
|
|
+ } else {
|
|
|
|
+ return null;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto parseFrom(
|
|
|
|
+ com.google.protobuf.CodedInputStream input)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ return newBuilder().mergeFrom(input).buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto parseFrom(
|
|
|
|
+ com.google.protobuf.CodedInputStream input,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ return newBuilder().mergeFrom(input, extensionRegistry)
|
|
|
|
+ .buildParsed();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public static Builder newBuilder() { return Builder.create(); }
|
|
|
|
+ public Builder newBuilderForType() { return newBuilder(); }
|
|
|
|
+ public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto prototype) {
|
|
|
|
+ return newBuilder().mergeFrom(prototype);
|
|
|
|
+ }
|
|
|
|
+ public Builder toBuilder() { return newBuilder(this); }
|
|
|
|
+
|
|
|
|
+ @java.lang.Override
|
|
|
|
+ protected Builder newBuilderForType(
|
|
|
|
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
|
|
|
|
+ Builder builder = new Builder(parent);
|
|
|
|
+ return builder;
|
|
|
|
+ }
|
|
|
|
+ public static final class Builder extends
|
|
|
|
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
|
|
|
|
+ implements org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProtoOrBuilder {
|
|
|
|
+ public static final com.google.protobuf.Descriptors.Descriptor
|
|
|
|
+ getDescriptor() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_BalancerBandwidthCommandProto_descriptor;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
|
|
|
+ internalGetFieldAccessorTable() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_BalancerBandwidthCommandProto_fieldAccessorTable;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ // Construct using org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto.newBuilder()
|
|
|
|
+ private Builder() {
|
|
|
|
+ maybeForceBuilderInitialization();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private Builder(BuilderParent parent) {
|
|
|
|
+ super(parent);
|
|
|
|
+ maybeForceBuilderInitialization();
|
|
|
|
+ }
|
|
|
|
+ private void maybeForceBuilderInitialization() {
|
|
|
|
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ private static Builder create() {
|
|
|
|
+ return new Builder();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public Builder clear() {
|
|
|
|
+ super.clear();
|
|
|
|
+ bandwidth_ = 0L;
|
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000001);
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public Builder clone() {
|
|
|
|
+ return create().mergeFrom(buildPartial());
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public com.google.protobuf.Descriptors.Descriptor
|
|
|
|
+ getDescriptorForType() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto.getDescriptor();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto getDefaultInstanceForType() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto.getDefaultInstance();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto build() {
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto result = buildPartial();
|
|
|
|
+ if (!result.isInitialized()) {
|
|
|
|
+ throw newUninitializedMessageException(result);
|
|
|
|
+ }
|
|
|
|
+ return result;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto buildParsed()
|
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto result = buildPartial();
|
|
|
|
+ if (!result.isInitialized()) {
|
|
|
|
+ throw newUninitializedMessageException(
|
|
|
|
+ result).asInvalidProtocolBufferException();
|
|
|
|
+ }
|
|
|
|
+ return result;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto buildPartial() {
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto result = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto(this);
|
|
|
|
+ int from_bitField0_ = bitField0_;
|
|
|
|
+ int to_bitField0_ = 0;
|
|
|
|
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
|
+ to_bitField0_ |= 0x00000001;
|
|
|
|
+ }
|
|
|
|
+ result.bandwidth_ = bandwidth_;
|
|
|
|
+ result.bitField0_ = to_bitField0_;
|
|
|
|
+ onBuilt();
|
|
|
|
+ return result;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public Builder mergeFrom(com.google.protobuf.Message other) {
|
|
|
|
+ if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto) {
|
|
|
|
+ return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto)other);
|
|
|
|
+ } else {
|
|
|
|
+ super.mergeFrom(other);
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto other) {
|
|
|
|
+ if (other == org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto.getDefaultInstance()) return this;
|
|
|
|
+ if (other.hasBandwidth()) {
|
|
|
|
+ setBandwidth(other.getBandwidth());
|
|
|
|
+ }
|
|
|
|
+ this.mergeUnknownFields(other.getUnknownFields());
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public final boolean isInitialized() {
|
|
|
|
+ if (!hasBandwidth()) {
|
|
|
|
+
|
|
|
|
+ return false;
|
|
|
|
+ }
|
|
|
|
+ return true;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public Builder mergeFrom(
|
|
|
|
+ com.google.protobuf.CodedInputStream input,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
|
|
|
|
+ com.google.protobuf.UnknownFieldSet.newBuilder(
|
|
|
|
+ this.getUnknownFields());
|
|
|
|
+ while (true) {
|
|
|
|
+ int tag = input.readTag();
|
|
|
|
+ switch (tag) {
|
|
|
|
+ case 0:
|
|
|
|
+ this.setUnknownFields(unknownFields.build());
|
|
|
|
+ onChanged();
|
|
|
|
+ return this;
|
|
|
|
+ default: {
|
|
|
|
+ if (!parseUnknownField(input, unknownFields,
|
|
|
|
+ extensionRegistry, tag)) {
|
|
|
|
+ this.setUnknownFields(unknownFields.build());
|
|
|
|
+ onChanged();
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ break;
|
|
|
|
+ }
|
|
|
|
+ case 8: {
|
|
|
|
+ bitField0_ |= 0x00000001;
|
|
|
|
+ bandwidth_ = input.readUInt64();
|
|
|
|
+ break;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ }
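// Editor's note (not part of the generated patch): in the parse loop above,
// tag 8 is protobuf's (field_number << 3) | wire_type for field 1 with wire
// type 0 (varint), i.e. the required 'bandwidth' field; tag 0 signals end of
// input, and any other tag is kept via parseUnknownField() so unrecognized
// fields round-trip through the unknown-field set.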
|
|
|
|
+
|
|
|
|
+ private int bitField0_;
|
|
|
|
+
|
|
|
|
+ // required uint64 bandwidth = 1;
|
|
|
|
+ private long bandwidth_ ;
|
|
|
|
+ public boolean hasBandwidth() {
|
|
|
|
+ return ((bitField0_ & 0x00000001) == 0x00000001);
|
|
|
|
+ }
|
|
|
|
+ public long getBandwidth() {
|
|
|
|
+ return bandwidth_;
|
|
|
|
+ }
|
|
|
|
+ public Builder setBandwidth(long value) {
|
|
|
|
+ bitField0_ |= 0x00000001;
|
|
|
|
+ bandwidth_ = value;
|
|
|
|
+ onChanged();
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder clearBandwidth() {
|
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000001);
|
|
|
|
+ bandwidth_ = 0L;
|
|
|
|
+ onChanged();
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ // @@protoc_insertion_point(builder_scope:BalancerBandwidthCommandProto)
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ static {
|
|
|
|
+ defaultInstance = new BalancerBandwidthCommandProto(true);
|
|
|
|
+ defaultInstance.initFields();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ // @@protoc_insertion_point(class_scope:BalancerBandwidthCommandProto)
|
|
|
|
+ }
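// ---------------------------------------------------------------------------
// Editor's sketch (not part of the generated patch): a minimal round-trip
// through the BalancerBandwidthCommandProto API defined above. Kept in
// comment form so the generated file itself is untouched; the class name and
// the 10 MB/s figure are illustrative only.
//
//   import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto;
//
//   public class BalancerBandwidthExample {
//     public static void main(String[] args) throws Exception {
//       // 'bandwidth' is the only field and it is required, so build() would
//       // throw an uninitialized-message exception if it were left unset.
//       BalancerBandwidthCommandProto cmd = BalancerBandwidthCommandProto.newBuilder()
//           .setBandwidth(10L * 1024 * 1024)   // uint64 on the wire, long in Java
//           .build();
//
//       byte[] wire = cmd.toByteArray();       // serialize
//       BalancerBandwidthCommandProto parsed =
//           BalancerBandwidthCommandProto.parseFrom(wire);
//       System.out.println("bandwidth=" + parsed.getBandwidth());
//     }
//   }
// ---------------------------------------------------------------------------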
|
|
|
|
+
|
|
|
|
+ public interface BlockCommandProtoOrBuilder
|
|
|
|
+ extends com.google.protobuf.MessageOrBuilder {
|
|
|
|
+
|
|
|
|
+ // required uint32 action = 1;
|
|
|
|
+ boolean hasAction();
|
|
|
|
+ int getAction();
|
|
|
|
+
|
|
|
|
+ // required string blockPoolId = 2;
|
|
|
|
+ boolean hasBlockPoolId();
|
|
|
|
+ String getBlockPoolId();
|
|
|
|
+
|
|
|
|
+ // repeated .BlockProto blocks = 3;
|
|
|
|
+ java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto>
|
|
|
|
+ getBlocksList();
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto getBlocks(int index);
|
|
|
|
+ int getBlocksCount();
|
|
|
|
+ java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder>
|
|
|
|
+ getBlocksOrBuilderList();
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder getBlocksOrBuilder(
|
|
|
|
+ int index);
|
|
|
|
+
|
|
|
|
+ // repeated .DatanodeIDsProto targets = 4;
|
|
|
|
+ java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDsProto>
|
|
|
|
+ getTargetsList();
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDsProto getTargets(int index);
|
|
|
|
+ int getTargetsCount();
|
|
|
|
+ java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDsProtoOrBuilder>
|
|
|
|
+ getTargetsOrBuilderList();
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDsProtoOrBuilder getTargetsOrBuilder(
|
|
|
|
+ int index);
|
|
|
|
+ }
|
|
|
|
+ public static final class BlockCommandProto extends
|
|
|
|
+ com.google.protobuf.GeneratedMessage
|
|
|
|
+ implements BlockCommandProtoOrBuilder {
|
|
|
|
+ // Use BlockCommandProto.newBuilder() to construct.
|
|
|
|
+ private BlockCommandProto(Builder builder) {
|
|
|
|
+ super(builder);
|
|
|
|
+ }
|
|
|
|
+ private BlockCommandProto(boolean noInit) {}
|
|
|
|
+
|
|
|
|
+ private static final BlockCommandProto defaultInstance;
|
|
|
|
+ public static BlockCommandProto getDefaultInstance() {
|
|
|
|
+ return defaultInstance;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public BlockCommandProto getDefaultInstanceForType() {
|
|
|
|
+ return defaultInstance;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public static final com.google.protobuf.Descriptors.Descriptor
|
|
|
|
+ getDescriptor() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_BlockCommandProto_descriptor;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
|
|
|
+ internalGetFieldAccessorTable() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_BlockCommandProto_fieldAccessorTable;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public enum Action
|
|
|
|
+ implements com.google.protobuf.ProtocolMessageEnum {
|
|
|
|
+ UNKNOWN(0, 0),
|
|
|
|
+ TRANSFER(1, 1),
|
|
|
|
+ INVALIDATE(2, 2),
|
|
|
|
+ SHUTDOWN(3, 3),
|
|
|
|
+ ;
|
|
|
|
+
|
|
|
|
+ public static final int UNKNOWN_VALUE = 0;
|
|
|
|
+ public static final int TRANSFER_VALUE = 1;
|
|
|
|
+ public static final int INVALIDATE_VALUE = 2;
|
|
|
|
+ public static final int SHUTDOWN_VALUE = 3;
|
|
|
|
+
|
|
|
|
+
|
|
|
|
+ public final int getNumber() { return value; }
|
|
|
|
+
|
|
|
|
+ public static Action valueOf(int value) {
|
|
|
|
+ switch (value) {
|
|
|
|
+ case 0: return UNKNOWN;
|
|
|
|
+ case 1: return TRANSFER;
|
|
|
|
+ case 2: return INVALIDATE;
|
|
|
|
+ case 3: return SHUTDOWN;
|
|
|
|
+ default: return null;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public static com.google.protobuf.Internal.EnumLiteMap<Action>
|
|
|
|
+ internalGetValueMap() {
|
|
|
|
+ return internalValueMap;
|
|
|
|
+ }
|
|
|
|
+ private static com.google.protobuf.Internal.EnumLiteMap<Action>
|
|
|
|
+ internalValueMap =
|
|
|
|
+ new com.google.protobuf.Internal.EnumLiteMap<Action>() {
|
|
|
|
+ public Action findValueByNumber(int number) {
|
|
|
|
+ return Action.valueOf(number);
|
|
|
|
+ }
|
|
|
|
+ };
|
|
|
|
+
|
|
|
|
+ public final com.google.protobuf.Descriptors.EnumValueDescriptor
|
|
|
|
+ getValueDescriptor() {
|
|
|
|
+ return getDescriptor().getValues().get(index);
|
|
|
|
+ }
|
|
|
|
+ public final com.google.protobuf.Descriptors.EnumDescriptor
|
|
|
|
+ getDescriptorForType() {
|
|
|
|
+ return getDescriptor();
|
|
|
|
+ }
|
|
|
|
+ public static final com.google.protobuf.Descriptors.EnumDescriptor
|
|
|
|
+ getDescriptor() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.getDescriptor().getEnumTypes().get(0);
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private static final Action[] VALUES = {
|
|
|
|
+ UNKNOWN, TRANSFER, INVALIDATE, SHUTDOWN,
|
|
|
|
+ };
|
|
|
|
+
|
|
|
|
+ public static Action valueOf(
|
|
|
|
+ com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
|
|
|
|
+ if (desc.getType() != getDescriptor()) {
|
|
|
|
+ throw new java.lang.IllegalArgumentException(
|
|
|
|
+ "EnumValueDescriptor is not for this type.");
|
|
|
|
+ }
|
|
|
|
+ return VALUES[desc.getIndex()];
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private final int index;
|
|
|
|
+ private final int value;
|
|
|
|
+
|
|
|
|
+ private Action(int index, int value) {
|
|
|
|
+ this.index = index;
|
|
|
|
+ this.value = value;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ // @@protoc_insertion_point(enum_scope:BlockCommandProto.Action)
|
|
|
|
+ }
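// Editor's note (not part of the generated patch): BlockCommandProto stores
// 'action' as a plain uint32 (see getAction() below), not as the Action enum,
// so callers translate the number themselves, e.g. for some BlockCommandProto cmd:
//
//   BlockCommandProto.Action a = BlockCommandProto.Action.valueOf(cmd.getAction());
//   // a is null for numbers outside 0..3; a.getNumber() round-trips the value
//
// with UNKNOWN=0, TRANSFER=1, INVALIDATE=2 and SHUTDOWN=3 as declared above.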
|
|
|
|
+
|
|
|
|
+ private int bitField0_;
|
|
|
|
+ // required uint32 action = 1;
|
|
|
|
+ public static final int ACTION_FIELD_NUMBER = 1;
|
|
|
|
+ private int action_;
|
|
|
|
+ public boolean hasAction() {
|
|
|
|
+ return ((bitField0_ & 0x00000001) == 0x00000001);
|
|
|
|
+ }
|
|
|
|
+ public int getAction() {
|
|
|
|
+ return action_;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ // required string blockPoolId = 2;
|
|
|
|
+ public static final int BLOCKPOOLID_FIELD_NUMBER = 2;
|
|
|
|
+ private java.lang.Object blockPoolId_;
|
|
|
|
+ public boolean hasBlockPoolId() {
|
|
|
|
+ return ((bitField0_ & 0x00000002) == 0x00000002);
|
|
|
|
+ }
|
|
|
|
+ public String getBlockPoolId() {
|
|
|
|
+ java.lang.Object ref = blockPoolId_;
|
|
|
|
+ if (ref instanceof String) {
|
|
|
|
+ return (String) ref;
|
|
|
|
+ } else {
|
|
|
|
+ com.google.protobuf.ByteString bs =
|
|
|
|
+ (com.google.protobuf.ByteString) ref;
|
|
|
|
+ String s = bs.toStringUtf8();
|
|
|
|
+ if (com.google.protobuf.Internal.isValidUtf8(bs)) {
|
|
|
|
+ blockPoolId_ = s;
|
|
|
|
+ }
|
|
|
|
+ return s;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ private com.google.protobuf.ByteString getBlockPoolIdBytes() {
|
|
|
|
+ java.lang.Object ref = blockPoolId_;
|
|
|
|
+ if (ref instanceof String) {
|
|
|
|
+ com.google.protobuf.ByteString b =
|
|
|
|
+ com.google.protobuf.ByteString.copyFromUtf8((String) ref);
|
|
|
|
+ blockPoolId_ = b;
|
|
|
|
+ return b;
|
|
|
|
+ } else {
|
|
|
|
+ return (com.google.protobuf.ByteString) ref;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ // repeated .BlockProto blocks = 3;
|
|
|
|
+ public static final int BLOCKS_FIELD_NUMBER = 3;
|
|
|
|
+ private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto> blocks_;
|
|
|
|
+ public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto> getBlocksList() {
|
|
|
|
+ return blocks_;
|
|
|
|
+ }
|
|
|
|
+ public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder>
|
|
|
|
+ getBlocksOrBuilderList() {
|
|
|
|
+ return blocks_;
|
|
|
|
+ }
|
|
|
|
+ public int getBlocksCount() {
|
|
|
|
+ return blocks_.size();
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto getBlocks(int index) {
|
|
|
|
+ return blocks_.get(index);
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder getBlocksOrBuilder(
|
|
|
|
+ int index) {
|
|
|
|
+ return blocks_.get(index);
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ // repeated .DatanodeIDsProto targets = 4;
|
|
|
|
+ public static final int TARGETS_FIELD_NUMBER = 4;
|
|
|
|
+ private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDsProto> targets_;
|
|
|
|
+ public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDsProto> getTargetsList() {
|
|
|
|
+ return targets_;
|
|
|
|
+ }
|
|
|
|
+ public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDsProtoOrBuilder>
|
|
|
|
+ getTargetsOrBuilderList() {
|
|
|
|
+ return targets_;
|
|
|
|
+ }
|
|
|
|
+ public int getTargetsCount() {
|
|
|
|
+ return targets_.size();
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDsProto getTargets(int index) {
|
|
|
|
+ return targets_.get(index);
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDsProtoOrBuilder getTargetsOrBuilder(
|
|
|
|
+ int index) {
|
|
|
|
+ return targets_.get(index);
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private void initFields() {
|
|
|
|
+ action_ = 0;
|
|
|
|
+ blockPoolId_ = "";
|
|
|
|
+ blocks_ = java.util.Collections.emptyList();
|
|
|
|
+ targets_ = java.util.Collections.emptyList();
|
|
|
|
+ }
|
|
|
|
+ private byte memoizedIsInitialized = -1;
|
|
|
|
+ public final boolean isInitialized() {
|
|
|
|
+ byte isInitialized = memoizedIsInitialized;
|
|
|
|
+ if (isInitialized != -1) return isInitialized == 1;
|
|
|
|
+
|
|
|
|
+ if (!hasAction()) {
|
|
|
|
+ memoizedIsInitialized = 0;
|
|
|
|
+ return false;
|
|
|
|
+ }
|
|
|
|
+ if (!hasBlockPoolId()) {
|
|
|
|
+ memoizedIsInitialized = 0;
|
|
|
|
+ return false;
|
|
|
|
+ }
|
|
|
|
+ for (int i = 0; i < getBlocksCount(); i++) {
|
|
|
|
+ if (!getBlocks(i).isInitialized()) {
|
|
|
|
+ memoizedIsInitialized = 0;
|
|
|
|
+ return false;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ for (int i = 0; i < getTargetsCount(); i++) {
|
|
|
|
+ if (!getTargets(i).isInitialized()) {
|
|
|
|
+ memoizedIsInitialized = 0;
|
|
|
|
+ return false;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ memoizedIsInitialized = 1;
|
|
|
|
+ return true;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ getSerializedSize();
|
|
|
|
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
|
+ output.writeUInt32(1, action_);
|
|
|
|
+ }
|
|
|
|
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
|
|
|
|
+ output.writeBytes(2, getBlockPoolIdBytes());
|
|
|
|
+ }
|
|
|
|
+ for (int i = 0; i < blocks_.size(); i++) {
|
|
|
|
+ output.writeMessage(3, blocks_.get(i));
|
|
|
|
+ }
|
|
|
|
+ for (int i = 0; i < targets_.size(); i++) {
|
|
|
|
+ output.writeMessage(4, targets_.get(i));
|
|
|
|
+ }
|
|
|
|
+ getUnknownFields().writeTo(output);
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private int memoizedSerializedSize = -1;
|
|
|
|
+ public int getSerializedSize() {
|
|
|
|
+ int size = memoizedSerializedSize;
|
|
|
|
+ if (size != -1) return size;
|
|
|
|
+
|
|
|
|
+ size = 0;
|
|
|
|
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
|
+ size += com.google.protobuf.CodedOutputStream
|
|
|
|
+ .computeUInt32Size(1, action_);
|
|
|
|
+ }
|
|
|
|
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
|
|
|
|
+ size += com.google.protobuf.CodedOutputStream
|
|
|
|
+ .computeBytesSize(2, getBlockPoolIdBytes());
|
|
|
|
+ }
|
|
|
|
+ for (int i = 0; i < blocks_.size(); i++) {
|
|
|
|
+ size += com.google.protobuf.CodedOutputStream
|
|
|
|
+ .computeMessageSize(3, blocks_.get(i));
|
|
|
|
+ }
|
|
|
|
+ for (int i = 0; i < targets_.size(); i++) {
|
|
|
|
+ size += com.google.protobuf.CodedOutputStream
|
|
|
|
+ .computeMessageSize(4, targets_.get(i));
|
|
|
|
+ }
|
|
|
|
+ size += getUnknownFields().getSerializedSize();
|
|
|
|
+ memoizedSerializedSize = size;
|
|
|
|
+ return size;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private static final long serialVersionUID = 0L;
|
|
|
|
+ @java.lang.Override
|
|
|
|
+ protected java.lang.Object writeReplace()
|
|
|
|
+ throws java.io.ObjectStreamException {
|
|
|
|
+ return super.writeReplace();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ @java.lang.Override
|
|
|
|
+ public boolean equals(final java.lang.Object obj) {
|
|
|
|
+ if (obj == this) {
|
|
|
|
+ return true;
|
|
|
|
+ }
|
|
|
|
+ if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto)) {
|
|
|
|
+ return super.equals(obj);
|
|
|
|
+ }
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto other = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto) obj;
|
|
|
|
+
|
|
|
|
+ boolean result = true;
|
|
|
|
+ result = result && (hasAction() == other.hasAction());
|
|
|
|
+ if (hasAction()) {
|
|
|
|
+ result = result && (getAction()
|
|
|
|
+ == other.getAction());
|
|
|
|
+ }
|
|
|
|
+ result = result && (hasBlockPoolId() == other.hasBlockPoolId());
|
|
|
|
+ if (hasBlockPoolId()) {
|
|
|
|
+ result = result && getBlockPoolId()
|
|
|
|
+ .equals(other.getBlockPoolId());
|
|
|
|
+ }
|
|
|
|
+ result = result && getBlocksList()
|
|
|
|
+ .equals(other.getBlocksList());
|
|
|
|
+ result = result && getTargetsList()
|
|
|
|
+ .equals(other.getTargetsList());
|
|
|
|
+ result = result &&
|
|
|
|
+ getUnknownFields().equals(other.getUnknownFields());
|
|
|
|
+ return result;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ @java.lang.Override
|
|
|
|
+ public int hashCode() {
|
|
|
|
+ int hash = 41;
|
|
|
|
+ hash = (19 * hash) + getDescriptorForType().hashCode();
|
|
|
|
+ if (hasAction()) {
|
|
|
|
+ hash = (37 * hash) + ACTION_FIELD_NUMBER;
|
|
|
|
+ hash = (53 * hash) + getAction();
|
|
|
|
+ }
|
|
|
|
+ if (hasBlockPoolId()) {
|
|
|
|
+ hash = (37 * hash) + BLOCKPOOLID_FIELD_NUMBER;
|
|
|
|
+ hash = (53 * hash) + getBlockPoolId().hashCode();
|
|
|
|
+ }
|
|
|
|
+ if (getBlocksCount() > 0) {
|
|
|
|
+ hash = (37 * hash) + BLOCKS_FIELD_NUMBER;
|
|
|
|
+ hash = (53 * hash) + getBlocksList().hashCode();
|
|
|
|
+ }
|
|
|
|
+ if (getTargetsCount() > 0) {
|
|
|
|
+ hash = (37 * hash) + TARGETS_FIELD_NUMBER;
|
|
|
|
+ hash = (53 * hash) + getTargetsList().hashCode();
|
|
|
|
+ }
|
|
|
|
+ hash = (29 * hash) + getUnknownFields().hashCode();
|
|
|
|
+ return hash;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto parseFrom(
|
|
|
|
+ com.google.protobuf.ByteString data)
|
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
|
+ return newBuilder().mergeFrom(data).buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto parseFrom(
|
|
|
|
+ com.google.protobuf.ByteString data,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
|
+ return newBuilder().mergeFrom(data, extensionRegistry)
|
|
|
|
+ .buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto parseFrom(byte[] data)
|
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
|
+ return newBuilder().mergeFrom(data).buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto parseFrom(
|
|
|
|
+ byte[] data,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
|
+ return newBuilder().mergeFrom(data, extensionRegistry)
|
|
|
|
+ .buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto parseFrom(java.io.InputStream input)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ return newBuilder().mergeFrom(input).buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto parseFrom(
|
|
|
|
+ java.io.InputStream input,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ return newBuilder().mergeFrom(input, extensionRegistry)
|
|
|
|
+ .buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto parseDelimitedFrom(java.io.InputStream input)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ Builder builder = newBuilder();
|
|
|
|
+ if (builder.mergeDelimitedFrom(input)) {
|
|
|
|
+ return builder.buildParsed();
|
|
|
|
+ } else {
|
|
|
|
+ return null;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto parseDelimitedFrom(
|
|
|
|
+ java.io.InputStream input,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ Builder builder = newBuilder();
|
|
|
|
+ if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
|
|
|
|
+ return builder.buildParsed();
|
|
|
|
+ } else {
|
|
|
|
+ return null;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto parseFrom(
|
|
|
|
+ com.google.protobuf.CodedInputStream input)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ return newBuilder().mergeFrom(input).buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto parseFrom(
|
|
|
|
+ com.google.protobuf.CodedInputStream input,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ return newBuilder().mergeFrom(input, extensionRegistry)
|
|
|
|
+ .buildParsed();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public static Builder newBuilder() { return Builder.create(); }
|
|
|
|
+ public Builder newBuilderForType() { return newBuilder(); }
|
|
|
|
+ public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto prototype) {
|
|
|
|
+ return newBuilder().mergeFrom(prototype);
|
|
|
|
+ }
|
|
|
|
+ public Builder toBuilder() { return newBuilder(this); }
|
|
|
|
+
|
|
|
|
+ @java.lang.Override
|
|
|
|
+ protected Builder newBuilderForType(
|
|
|
|
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
|
|
|
|
+ Builder builder = new Builder(parent);
|
|
|
|
+ return builder;
|
|
|
|
+ }
|
|
|
|
+ public static final class Builder extends
|
|
|
|
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
|
|
|
|
+ implements org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProtoOrBuilder {
|
|
|
|
+ public static final com.google.protobuf.Descriptors.Descriptor
|
|
|
|
+ getDescriptor() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_BlockCommandProto_descriptor;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
|
|
|
+ internalGetFieldAccessorTable() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_BlockCommandProto_fieldAccessorTable;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ // Construct using org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.newBuilder()
|
|
|
|
+ private Builder() {
|
|
|
|
+ maybeForceBuilderInitialization();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private Builder(BuilderParent parent) {
|
|
|
|
+ super(parent);
|
|
|
|
+ maybeForceBuilderInitialization();
|
|
|
|
+ }
|
|
|
|
+ private void maybeForceBuilderInitialization() {
|
|
|
|
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
|
|
|
|
+ getBlocksFieldBuilder();
|
|
|
|
+ getTargetsFieldBuilder();
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ private static Builder create() {
|
|
|
|
+ return new Builder();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public Builder clear() {
|
|
|
|
+ super.clear();
|
|
|
|
+ action_ = 0;
|
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000001);
|
|
|
|
+ blockPoolId_ = "";
|
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000002);
|
|
|
|
+ if (blocksBuilder_ == null) {
|
|
|
|
+ blocks_ = java.util.Collections.emptyList();
|
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000004);
|
|
|
|
+ } else {
|
|
|
|
+ blocksBuilder_.clear();
|
|
|
|
+ }
|
|
|
|
+ if (targetsBuilder_ == null) {
|
|
|
|
+ targets_ = java.util.Collections.emptyList();
|
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000008);
|
|
|
|
+ } else {
|
|
|
|
+ targetsBuilder_.clear();
|
|
|
|
+ }
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public Builder clone() {
|
|
|
|
+ return create().mergeFrom(buildPartial());
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public com.google.protobuf.Descriptors.Descriptor
|
|
|
|
+ getDescriptorForType() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.getDescriptor();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto getDefaultInstanceForType() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.getDefaultInstance();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto build() {
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto result = buildPartial();
|
|
|
|
+ if (!result.isInitialized()) {
|
|
|
|
+ throw newUninitializedMessageException(result);
|
|
|
|
+ }
|
|
|
|
+ return result;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto buildParsed()
|
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto result = buildPartial();
|
|
|
|
+ if (!result.isInitialized()) {
|
|
|
|
+ throw newUninitializedMessageException(
|
|
|
|
+ result).asInvalidProtocolBufferException();
|
|
|
|
+ }
|
|
|
|
+ return result;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto buildPartial() {
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto result = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto(this);
|
|
|
|
+ int from_bitField0_ = bitField0_;
|
|
|
|
+ int to_bitField0_ = 0;
|
|
|
|
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
|
+ to_bitField0_ |= 0x00000001;
|
|
|
|
+ }
|
|
|
|
+ result.action_ = action_;
|
|
|
|
+ if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
|
|
|
|
+ to_bitField0_ |= 0x00000002;
|
|
|
|
+ }
|
|
|
|
+ result.blockPoolId_ = blockPoolId_;
|
|
|
|
+ if (blocksBuilder_ == null) {
|
|
|
|
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
|
|
|
|
+ blocks_ = java.util.Collections.unmodifiableList(blocks_);
|
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000004);
|
|
|
|
+ }
|
|
|
|
+ result.blocks_ = blocks_;
|
|
|
|
+ } else {
|
|
|
|
+ result.blocks_ = blocksBuilder_.build();
|
|
|
|
+ }
|
|
|
|
+ if (targetsBuilder_ == null) {
|
|
|
|
+ if (((bitField0_ & 0x00000008) == 0x00000008)) {
|
|
|
|
+ targets_ = java.util.Collections.unmodifiableList(targets_);
|
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000008);
|
|
|
|
+ }
|
|
|
|
+ result.targets_ = targets_;
|
|
|
|
+ } else {
|
|
|
|
+ result.targets_ = targetsBuilder_.build();
|
|
|
|
+ }
|
|
|
|
+ result.bitField0_ = to_bitField0_;
|
|
|
|
+ onBuilt();
|
|
|
|
+ return result;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public Builder mergeFrom(com.google.protobuf.Message other) {
|
|
|
|
+ if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto) {
|
|
|
|
+ return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto)other);
|
|
|
|
+ } else {
|
|
|
|
+ super.mergeFrom(other);
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto other) {
|
|
|
|
+ if (other == org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.getDefaultInstance()) return this;
|
|
|
|
+ if (other.hasAction()) {
|
|
|
|
+ setAction(other.getAction());
|
|
|
|
+ }
|
|
|
|
+ if (other.hasBlockPoolId()) {
|
|
|
|
+ setBlockPoolId(other.getBlockPoolId());
|
|
|
|
+ }
|
|
|
|
+ if (blocksBuilder_ == null) {
|
|
|
|
+ if (!other.blocks_.isEmpty()) {
|
|
|
|
+ if (blocks_.isEmpty()) {
|
|
|
|
+ blocks_ = other.blocks_;
|
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000004);
|
|
|
|
+ } else {
|
|
|
|
+ ensureBlocksIsMutable();
|
|
|
|
+ blocks_.addAll(other.blocks_);
|
|
|
|
+ }
|
|
|
|
+ onChanged();
|
|
|
|
+ }
|
|
|
|
+ } else {
|
|
|
|
+ if (!other.blocks_.isEmpty()) {
|
|
|
|
+ if (blocksBuilder_.isEmpty()) {
|
|
|
|
+ blocksBuilder_.dispose();
|
|
|
|
+ blocksBuilder_ = null;
|
|
|
|
+ blocks_ = other.blocks_;
|
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000004);
|
|
|
|
+ blocksBuilder_ =
|
|
|
|
+ com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
|
|
|
|
+ getBlocksFieldBuilder() : null;
|
|
|
|
+ } else {
|
|
|
|
+ blocksBuilder_.addAllMessages(other.blocks_);
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ if (targetsBuilder_ == null) {
|
|
|
|
+ if (!other.targets_.isEmpty()) {
|
|
|
|
+ if (targets_.isEmpty()) {
|
|
|
|
+ targets_ = other.targets_;
|
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000008);
|
|
|
|
+ } else {
|
|
|
|
+ ensureTargetsIsMutable();
|
|
|
|
+ targets_.addAll(other.targets_);
|
|
|
|
+ }
|
|
|
|
+ onChanged();
|
|
|
|
+ }
|
|
|
|
+ } else {
|
|
|
|
+ if (!other.targets_.isEmpty()) {
|
|
|
|
+ if (targetsBuilder_.isEmpty()) {
|
|
|
|
+ targetsBuilder_.dispose();
|
|
|
|
+ targetsBuilder_ = null;
|
|
|
|
+ targets_ = other.targets_;
|
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000008);
|
|
|
|
+ targetsBuilder_ =
|
|
|
|
+ com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
|
|
|
|
+ getTargetsFieldBuilder() : null;
|
|
|
|
+ } else {
|
|
|
|
+ targetsBuilder_.addAllMessages(other.targets_);
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ this.mergeUnknownFields(other.getUnknownFields());
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public final boolean isInitialized() {
|
|
|
|
+ if (!hasAction()) {
|
|
|
|
+
|
|
|
|
+ return false;
|
|
|
|
+ }
|
|
|
|
+ if (!hasBlockPoolId()) {
|
|
|
|
+
|
|
|
|
+ return false;
|
|
|
|
+ }
|
|
|
|
+ for (int i = 0; i < getBlocksCount(); i++) {
|
|
|
|
+ if (!getBlocks(i).isInitialized()) {
|
|
|
|
+
|
|
|
|
+ return false;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ for (int i = 0; i < getTargetsCount(); i++) {
|
|
|
|
+ if (!getTargets(i).isInitialized()) {
|
|
|
|
+
|
|
|
|
+ return false;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ return true;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public Builder mergeFrom(
|
|
|
|
+ com.google.protobuf.CodedInputStream input,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
|
|
|
|
+ com.google.protobuf.UnknownFieldSet.newBuilder(
|
|
|
|
+ this.getUnknownFields());
|
|
|
|
+ while (true) {
|
|
|
|
+ int tag = input.readTag();
|
|
|
|
+ switch (tag) {
|
|
|
|
+ case 0:
|
|
|
|
+ this.setUnknownFields(unknownFields.build());
|
|
|
|
+ onChanged();
|
|
|
|
+ return this;
|
|
|
|
+ default: {
|
|
|
|
+ if (!parseUnknownField(input, unknownFields,
|
|
|
|
+ extensionRegistry, tag)) {
|
|
|
|
+ this.setUnknownFields(unknownFields.build());
|
|
|
|
+ onChanged();
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ break;
|
|
|
|
+ }
|
|
|
|
+ case 8: {
|
|
|
|
+ bitField0_ |= 0x00000001;
|
|
|
|
+ action_ = input.readUInt32();
|
|
|
|
+ break;
|
|
|
|
+ }
|
|
|
|
+ case 18: {
|
|
|
|
+ bitField0_ |= 0x00000002;
|
|
|
|
+ blockPoolId_ = input.readBytes();
|
|
|
|
+ break;
|
|
|
|
+ }
|
|
|
|
+ case 26: {
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.newBuilder();
|
|
|
|
+ input.readMessage(subBuilder, extensionRegistry);
|
|
|
|
+ addBlocks(subBuilder.buildPartial());
|
|
|
|
+ break;
|
|
|
|
+ }
|
|
|
|
+ case 34: {
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDsProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDsProto.newBuilder();
|
|
|
|
+ input.readMessage(subBuilder, extensionRegistry);
|
|
|
|
+ addTargets(subBuilder.buildPartial());
|
|
|
|
+ break;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private int bitField0_;
|
|
|
|
+
|
|
|
|
+ // required uint32 action = 1;
|
|
|
|
+ private int action_ ;
|
|
|
|
+ public boolean hasAction() {
|
|
|
|
+ return ((bitField0_ & 0x00000001) == 0x00000001);
|
|
|
|
+ }
|
|
|
|
+ public int getAction() {
|
|
|
|
+ return action_;
|
|
|
|
+ }
|
|
|
|
+ public Builder setAction(int value) {
|
|
|
|
+ bitField0_ |= 0x00000001;
|
|
|
|
+ action_ = value;
|
|
|
|
+ onChanged();
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder clearAction() {
|
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000001);
|
|
|
|
+ action_ = 0;
|
|
|
|
+ onChanged();
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ // required string blockPoolId = 2;
|
|
|
|
+ private java.lang.Object blockPoolId_ = "";
|
|
|
|
+ public boolean hasBlockPoolId() {
|
|
|
|
+ return ((bitField0_ & 0x00000002) == 0x00000002);
|
|
|
|
+ }
|
|
|
|
+ public String getBlockPoolId() {
|
|
|
|
+ java.lang.Object ref = blockPoolId_;
|
|
|
|
+ if (!(ref instanceof String)) {
|
|
|
|
+ String s = ((com.google.protobuf.ByteString) ref).toStringUtf8();
|
|
|
|
+ blockPoolId_ = s;
|
|
|
|
+ return s;
|
|
|
|
+ } else {
|
|
|
|
+ return (String) ref;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ public Builder setBlockPoolId(String value) {
|
|
|
|
+ if (value == null) {
|
|
|
|
+ throw new NullPointerException();
|
|
|
|
+ }
|
|
|
|
+ bitField0_ |= 0x00000002;
|
|
|
|
+ blockPoolId_ = value;
|
|
|
|
+ onChanged();
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder clearBlockPoolId() {
|
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000002);
|
|
|
|
+ blockPoolId_ = getDefaultInstance().getBlockPoolId();
|
|
|
|
+ onChanged();
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ void setBlockPoolId(com.google.protobuf.ByteString value) {
|
|
|
|
+ bitField0_ |= 0x00000002;
|
|
|
|
+ blockPoolId_ = value;
|
|
|
|
+ onChanged();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ // repeated .BlockProto blocks = 3;
|
|
|
|
+ private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto> blocks_ =
|
|
|
|
+ java.util.Collections.emptyList();
|
|
|
|
+ private void ensureBlocksIsMutable() {
|
|
|
|
+ if (!((bitField0_ & 0x00000004) == 0x00000004)) {
|
|
|
|
+ blocks_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto>(blocks_);
|
|
|
|
+ bitField0_ |= 0x00000004;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private com.google.protobuf.RepeatedFieldBuilder<
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder> blocksBuilder_;
|
|
|
|
+
|
|
|
|
+ public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto> getBlocksList() {
|
|
|
|
+ if (blocksBuilder_ == null) {
|
|
|
|
+ return java.util.Collections.unmodifiableList(blocks_);
|
|
|
|
+ } else {
|
|
|
|
+ return blocksBuilder_.getMessageList();
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ public int getBlocksCount() {
|
|
|
|
+ if (blocksBuilder_ == null) {
|
|
|
|
+ return blocks_.size();
|
|
|
|
+ } else {
|
|
|
|
+ return blocksBuilder_.getCount();
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto getBlocks(int index) {
|
|
|
|
+ if (blocksBuilder_ == null) {
|
|
|
|
+ return blocks_.get(index);
|
|
|
|
+ } else {
|
|
|
|
+ return blocksBuilder_.getMessage(index);
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ public Builder setBlocks(
|
|
|
|
+ int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto value) {
|
|
|
|
+ if (blocksBuilder_ == null) {
|
|
|
|
+ if (value == null) {
|
|
|
|
+ throw new NullPointerException();
|
|
|
|
+ }
|
|
|
|
+ ensureBlocksIsMutable();
|
|
|
|
+ blocks_.set(index, value);
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ blocksBuilder_.setMessage(index, value);
|
|
|
|
+ }
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder setBlocks(
|
|
|
|
+ int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder builderForValue) {
|
|
|
|
+ if (blocksBuilder_ == null) {
|
|
|
|
+ ensureBlocksIsMutable();
|
|
|
|
+ blocks_.set(index, builderForValue.build());
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ blocksBuilder_.setMessage(index, builderForValue.build());
|
|
|
|
+ }
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder addBlocks(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto value) {
|
|
|
|
+ if (blocksBuilder_ == null) {
|
|
|
|
+ if (value == null) {
|
|
|
|
+ throw new NullPointerException();
|
|
|
|
+ }
|
|
|
|
+ ensureBlocksIsMutable();
|
|
|
|
+ blocks_.add(value);
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ blocksBuilder_.addMessage(value);
|
|
|
|
+ }
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder addBlocks(
|
|
|
|
+ int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto value) {
|
|
|
|
+ if (blocksBuilder_ == null) {
|
|
|
|
+ if (value == null) {
|
|
|
|
+ throw new NullPointerException();
|
|
|
|
+ }
|
|
|
|
+ ensureBlocksIsMutable();
|
|
|
|
+ blocks_.add(index, value);
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ blocksBuilder_.addMessage(index, value);
|
|
|
|
+ }
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder addBlocks(
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder builderForValue) {
|
|
|
|
+ if (blocksBuilder_ == null) {
|
|
|
|
+ ensureBlocksIsMutable();
|
|
|
|
+ blocks_.add(builderForValue.build());
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ blocksBuilder_.addMessage(builderForValue.build());
|
|
|
|
+ }
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder addBlocks(
|
|
|
|
+ int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder builderForValue) {
|
|
|
|
+ if (blocksBuilder_ == null) {
|
|
|
|
+ ensureBlocksIsMutable();
|
|
|
|
+ blocks_.add(index, builderForValue.build());
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ blocksBuilder_.addMessage(index, builderForValue.build());
|
|
|
|
+ }
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder addAllBlocks(
|
|
|
|
+ java.lang.Iterable<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto> values) {
|
|
|
|
+ if (blocksBuilder_ == null) {
|
|
|
|
+ ensureBlocksIsMutable();
|
|
|
|
+ super.addAll(values, blocks_);
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ blocksBuilder_.addAllMessages(values);
|
|
|
|
+ }
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder clearBlocks() {
|
|
|
|
+ if (blocksBuilder_ == null) {
|
|
|
|
+ blocks_ = java.util.Collections.emptyList();
|
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000004);
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ blocksBuilder_.clear();
|
|
|
|
+ }
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder removeBlocks(int index) {
|
|
|
|
+ if (blocksBuilder_ == null) {
|
|
|
|
+ ensureBlocksIsMutable();
|
|
|
|
+ blocks_.remove(index);
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ blocksBuilder_.remove(index);
|
|
|
|
+ }
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder getBlocksBuilder(
|
|
|
|
+ int index) {
|
|
|
|
+ return getBlocksFieldBuilder().getBuilder(index);
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder getBlocksOrBuilder(
|
|
|
|
+ int index) {
|
|
|
|
+ if (blocksBuilder_ == null) {
|
|
|
|
+ return blocks_.get(index); } else {
|
|
|
|
+ return blocksBuilder_.getMessageOrBuilder(index);
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder>
|
|
|
|
+ getBlocksOrBuilderList() {
|
|
|
|
+ if (blocksBuilder_ != null) {
|
|
|
|
+ return blocksBuilder_.getMessageOrBuilderList();
|
|
|
|
+ } else {
|
|
|
|
+ return java.util.Collections.unmodifiableList(blocks_);
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder addBlocksBuilder() {
|
|
|
|
+ return getBlocksFieldBuilder().addBuilder(
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.getDefaultInstance());
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder addBlocksBuilder(
|
|
|
|
+ int index) {
|
|
|
|
+ return getBlocksFieldBuilder().addBuilder(
|
|
|
|
+ index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.getDefaultInstance());
|
|
|
|
+ }
|
|
|
|
+ public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder>
|
|
|
|
+ getBlocksBuilderList() {
|
|
|
|
+ return getBlocksFieldBuilder().getBuilderList();
|
|
|
|
+ }
|
|
|
|
+ private com.google.protobuf.RepeatedFieldBuilder<
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder>
|
|
|
|
+ getBlocksFieldBuilder() {
|
|
|
|
+ if (blocksBuilder_ == null) {
|
|
|
|
+ blocksBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder>(
|
|
|
|
+ blocks_,
|
|
|
|
+ ((bitField0_ & 0x00000004) == 0x00000004),
|
|
|
|
+ getParentForChildren(),
|
|
|
|
+ isClean());
|
|
|
|
+ blocks_ = null;
|
|
|
|
+ }
|
|
|
|
+ return blocksBuilder_;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ // repeated .DatanodeIDsProto targets = 4;
|
|
|
|
+ private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDsProto> targets_ =
|
|
|
|
+ java.util.Collections.emptyList();
|
|
|
|
+ private void ensureTargetsIsMutable() {
|
|
|
|
+ if (!((bitField0_ & 0x00000008) == 0x00000008)) {
|
|
|
|
+ targets_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDsProto>(targets_);
|
|
|
|
+ bitField0_ |= 0x00000008;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private com.google.protobuf.RepeatedFieldBuilder<
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDsProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDsProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDsProtoOrBuilder> targetsBuilder_;
|
|
|
|
+
|
|
|
|
+ public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDsProto> getTargetsList() {
|
|
|
|
+ if (targetsBuilder_ == null) {
|
|
|
|
+ return java.util.Collections.unmodifiableList(targets_);
|
|
|
|
+ } else {
|
|
|
|
+ return targetsBuilder_.getMessageList();
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ public int getTargetsCount() {
|
|
|
|
+ if (targetsBuilder_ == null) {
|
|
|
|
+ return targets_.size();
|
|
|
|
+ } else {
|
|
|
|
+ return targetsBuilder_.getCount();
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDsProto getTargets(int index) {
|
|
|
|
+ if (targetsBuilder_ == null) {
|
|
|
|
+ return targets_.get(index);
|
|
|
|
+ } else {
|
|
|
|
+ return targetsBuilder_.getMessage(index);
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ public Builder setTargets(
|
|
|
|
+ int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDsProto value) {
|
|
|
|
+ if (targetsBuilder_ == null) {
|
|
|
|
+ if (value == null) {
|
|
|
|
+ throw new NullPointerException();
|
|
|
|
+ }
|
|
|
|
+ ensureTargetsIsMutable();
|
|
|
|
+ targets_.set(index, value);
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ targetsBuilder_.setMessage(index, value);
|
|
|
|
+ }
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder setTargets(
|
|
|
|
+ int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDsProto.Builder builderForValue) {
|
|
|
|
+ if (targetsBuilder_ == null) {
|
|
|
|
+ ensureTargetsIsMutable();
|
|
|
|
+ targets_.set(index, builderForValue.build());
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ targetsBuilder_.setMessage(index, builderForValue.build());
|
|
|
|
+ }
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder addTargets(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDsProto value) {
|
|
|
|
+ if (targetsBuilder_ == null) {
|
|
|
|
+ if (value == null) {
|
|
|
|
+ throw new NullPointerException();
|
|
|
|
+ }
|
|
|
|
+ ensureTargetsIsMutable();
|
|
|
|
+ targets_.add(value);
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ targetsBuilder_.addMessage(value);
|
|
|
|
+ }
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder addTargets(
|
|
|
|
+ int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDsProto value) {
|
|
|
|
+ if (targetsBuilder_ == null) {
|
|
|
|
+ if (value == null) {
|
|
|
|
+ throw new NullPointerException();
|
|
|
|
+ }
|
|
|
|
+ ensureTargetsIsMutable();
|
|
|
|
+ targets_.add(index, value);
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ targetsBuilder_.addMessage(index, value);
|
|
|
|
+ }
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder addTargets(
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDsProto.Builder builderForValue) {
|
|
|
|
+ if (targetsBuilder_ == null) {
|
|
|
|
+ ensureTargetsIsMutable();
|
|
|
|
+ targets_.add(builderForValue.build());
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ targetsBuilder_.addMessage(builderForValue.build());
|
|
|
|
+ }
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder addTargets(
|
|
|
|
+ int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDsProto.Builder builderForValue) {
|
|
|
|
+ if (targetsBuilder_ == null) {
|
|
|
|
+ ensureTargetsIsMutable();
|
|
|
|
+ targets_.add(index, builderForValue.build());
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ targetsBuilder_.addMessage(index, builderForValue.build());
|
|
|
|
+ }
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder addAllTargets(
|
|
|
|
+ java.lang.Iterable<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDsProto> values) {
|
|
|
|
+ if (targetsBuilder_ == null) {
|
|
|
|
+ ensureTargetsIsMutable();
|
|
|
|
+ super.addAll(values, targets_);
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ targetsBuilder_.addAllMessages(values);
|
|
|
|
+ }
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder clearTargets() {
|
|
|
|
+ if (targetsBuilder_ == null) {
|
|
|
|
+ targets_ = java.util.Collections.emptyList();
|
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000008);
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ targetsBuilder_.clear();
|
|
|
|
+ }
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder removeTargets(int index) {
|
|
|
|
+ if (targetsBuilder_ == null) {
|
|
|
|
+ ensureTargetsIsMutable();
|
|
|
|
+ targets_.remove(index);
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ targetsBuilder_.remove(index);
|
|
|
|
+ }
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDsProto.Builder getTargetsBuilder(
|
|
|
|
+ int index) {
|
|
|
|
+ return getTargetsFieldBuilder().getBuilder(index);
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDsProtoOrBuilder getTargetsOrBuilder(
|
|
|
|
+ int index) {
|
|
|
|
+ if (targetsBuilder_ == null) {
|
|
|
|
+ return targets_.get(index); } else {
|
|
|
|
+ return targetsBuilder_.getMessageOrBuilder(index);
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDsProtoOrBuilder>
|
|
|
|
+ getTargetsOrBuilderList() {
|
|
|
|
+ if (targetsBuilder_ != null) {
|
|
|
|
+ return targetsBuilder_.getMessageOrBuilderList();
|
|
|
|
+ } else {
|
|
|
|
+ return java.util.Collections.unmodifiableList(targets_);
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDsProto.Builder addTargetsBuilder() {
|
|
|
|
+ return getTargetsFieldBuilder().addBuilder(
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDsProto.getDefaultInstance());
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDsProto.Builder addTargetsBuilder(
|
|
|
|
+ int index) {
|
|
|
|
+ return getTargetsFieldBuilder().addBuilder(
|
|
|
|
+ index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDsProto.getDefaultInstance());
|
|
|
|
+ }
|
|
|
|
+ public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDsProto.Builder>
|
|
|
|
+ getTargetsBuilderList() {
|
|
|
|
+ return getTargetsFieldBuilder().getBuilderList();
|
|
|
|
+ }
|
|
|
|
+ private com.google.protobuf.RepeatedFieldBuilder<
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDsProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDsProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDsProtoOrBuilder>
|
|
|
|
+ getTargetsFieldBuilder() {
|
|
|
|
+ if (targetsBuilder_ == null) {
|
|
|
|
+ targetsBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDsProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDsProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDsProtoOrBuilder>(
|
|
|
|
+ targets_,
|
|
|
|
+ ((bitField0_ & 0x00000008) == 0x00000008),
|
|
|
|
+ getParentForChildren(),
|
|
|
|
+ isClean());
|
|
|
|
+ targets_ = null;
|
|
|
|
+ }
|
|
|
|
+ return targetsBuilder_;
|
|
|
|
+ }
|
|
|
|
+
+ // @@protoc_insertion_point(builder_scope:BlockCommandProto)
+ }
+
+ static {
+ defaultInstance = new BlockCommandProto(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:BlockCommandProto)
+ }
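+
+ // Editorial sketch (not emitted by protoc): the repeated `targets` field is
+ // appended through addTargets(...); buildPartial() is used here because
+ // BlockCommandProto's other (required) fields are set elsewhere and are not
+ // shown in this hunk. The helper name and parameters are hypothetical.
+ private static BlockCommandProto exampleAddTarget(
+     BlockCommandProto.Builder partiallyBuilt,
+     org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDsProto target) {
+   return partiallyBuilt
+       .addTargets(target)    // one DatanodeIDsProto entry per replication target set
+       .buildPartial();       // skips the required-field check that build() performs
+ }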
+
+ public interface BlockRecoveryCommndProtoOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // repeated .RecoveringBlockProto blocks = 1;
+ java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto>
+ getBlocksList();
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto getBlocks(int index);
+ int getBlocksCount();
+ java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProtoOrBuilder>
+ getBlocksOrBuilderList();
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProtoOrBuilder getBlocksOrBuilder(
+ int index);
+ }
+ public static final class BlockRecoveryCommndProto extends
|
|
|
|
+ com.google.protobuf.GeneratedMessage
|
|
|
|
+ implements BlockRecoveryCommndProtoOrBuilder {
|
|
|
|
+ // Use BlockRecoveryCommndProto.newBuilder() to construct.
|
|
|
|
+ private BlockRecoveryCommndProto(Builder builder) {
|
|
|
|
+ super(builder);
|
|
|
|
+ }
|
|
|
|
+ private BlockRecoveryCommndProto(boolean noInit) {}
|
|
|
|
+
|
|
|
|
+ private static final BlockRecoveryCommndProto defaultInstance;
|
|
|
|
+ public static BlockRecoveryCommndProto getDefaultInstance() {
|
|
|
|
+ return defaultInstance;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public BlockRecoveryCommndProto getDefaultInstanceForType() {
|
|
|
|
+ return defaultInstance;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public static final com.google.protobuf.Descriptors.Descriptor
|
|
|
|
+ getDescriptor() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_BlockRecoveryCommndProto_descriptor;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
|
|
|
+ internalGetFieldAccessorTable() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_BlockRecoveryCommndProto_fieldAccessorTable;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ // repeated .RecoveringBlockProto blocks = 1;
|
|
|
|
+ public static final int BLOCKS_FIELD_NUMBER = 1;
|
|
|
|
+ private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto> blocks_;
|
|
|
|
+ public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto> getBlocksList() {
|
|
|
|
+ return blocks_;
|
|
|
|
+ }
|
|
|
|
+ public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProtoOrBuilder>
|
|
|
|
+ getBlocksOrBuilderList() {
|
|
|
|
+ return blocks_;
|
|
|
|
+ }
|
|
|
|
+ public int getBlocksCount() {
|
|
|
|
+ return blocks_.size();
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto getBlocks(int index) {
|
|
|
|
+ return blocks_.get(index);
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProtoOrBuilder getBlocksOrBuilder(
|
|
|
|
+ int index) {
|
|
|
|
+ return blocks_.get(index);
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private void initFields() {
|
|
|
|
+ blocks_ = java.util.Collections.emptyList();
|
|
|
|
+ }
|
|
|
|
+ private byte memoizedIsInitialized = -1;
|
|
|
|
+ public final boolean isInitialized() {
|
|
|
|
+ byte isInitialized = memoizedIsInitialized;
|
|
|
|
+ if (isInitialized != -1) return isInitialized == 1;
|
|
|
|
+
|
|
|
|
+ for (int i = 0; i < getBlocksCount(); i++) {
|
|
|
|
+ if (!getBlocks(i).isInitialized()) {
|
|
|
|
+ memoizedIsInitialized = 0;
|
|
|
|
+ return false;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ memoizedIsInitialized = 1;
|
|
|
|
+ return true;
|
|
|
|
+ }
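+ // Editorial note: memoizedIsInitialized caches this check (-1 = not yet
+ // computed, 0 = a nested block is missing required fields, 1 = fully
+ // initialized), so repeated isInitialized() calls walk the blocks list once.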
+
|
|
|
|
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ getSerializedSize();
|
|
|
|
+ for (int i = 0; i < blocks_.size(); i++) {
|
|
|
|
+ output.writeMessage(1, blocks_.get(i));
|
|
|
|
+ }
|
|
|
|
+ getUnknownFields().writeTo(output);
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private int memoizedSerializedSize = -1;
|
|
|
|
+ public int getSerializedSize() {
|
|
|
|
+ int size = memoizedSerializedSize;
|
|
|
|
+ if (size != -1) return size;
|
|
|
|
+
|
|
|
|
+ size = 0;
|
|
|
|
+ for (int i = 0; i < blocks_.size(); i++) {
|
|
|
|
+ size += com.google.protobuf.CodedOutputStream
|
|
|
|
+ .computeMessageSize(1, blocks_.get(i));
|
|
|
|
+ }
|
|
|
|
+ size += getUnknownFields().getSerializedSize();
|
|
|
|
+ memoizedSerializedSize = size;
|
|
|
|
+ return size;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private static final long serialVersionUID = 0L;
|
|
|
|
+ @java.lang.Override
|
|
|
|
+ protected java.lang.Object writeReplace()
|
|
|
|
+ throws java.io.ObjectStreamException {
|
|
|
|
+ return super.writeReplace();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ @java.lang.Override
|
|
|
|
+ public boolean equals(final java.lang.Object obj) {
|
|
|
|
+ if (obj == this) {
|
|
|
|
+ return true;
|
|
|
|
+ }
|
|
|
|
+ if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommndProto)) {
|
|
|
|
+ return super.equals(obj);
|
|
|
|
+ }
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommndProto other = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommndProto) obj;
|
|
|
|
+
|
|
|
|
+ boolean result = true;
|
|
|
|
+ result = result && getBlocksList()
|
|
|
|
+ .equals(other.getBlocksList());
|
|
|
|
+ result = result &&
|
|
|
|
+ getUnknownFields().equals(other.getUnknownFields());
|
|
|
|
+ return result;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ @java.lang.Override
|
|
|
|
+ public int hashCode() {
|
|
|
|
+ int hash = 41;
|
|
|
|
+ hash = (19 * hash) + getDescriptorForType().hashCode();
|
|
|
|
+ if (getBlocksCount() > 0) {
|
|
|
|
+ hash = (37 * hash) + BLOCKS_FIELD_NUMBER;
|
|
|
|
+ hash = (53 * hash) + getBlocksList().hashCode();
|
|
|
|
+ }
|
|
|
|
+ hash = (29 * hash) + getUnknownFields().hashCode();
|
|
|
|
+ return hash;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommndProto parseFrom(
|
|
|
|
+ com.google.protobuf.ByteString data)
|
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
|
+ return newBuilder().mergeFrom(data).buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommndProto parseFrom(
|
|
|
|
+ com.google.protobuf.ByteString data,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
|
+ return newBuilder().mergeFrom(data, extensionRegistry)
|
|
|
|
+ .buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommndProto parseFrom(byte[] data)
|
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
|
+ return newBuilder().mergeFrom(data).buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommndProto parseFrom(
|
|
|
|
+ byte[] data,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
|
+ return newBuilder().mergeFrom(data, extensionRegistry)
|
|
|
|
+ .buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommndProto parseFrom(java.io.InputStream input)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ return newBuilder().mergeFrom(input).buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommndProto parseFrom(
|
|
|
|
+ java.io.InputStream input,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ return newBuilder().mergeFrom(input, extensionRegistry)
|
|
|
|
+ .buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommndProto parseDelimitedFrom(java.io.InputStream input)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ Builder builder = newBuilder();
|
|
|
|
+ if (builder.mergeDelimitedFrom(input)) {
|
|
|
|
+ return builder.buildParsed();
|
|
|
|
+ } else {
|
|
|
|
+ return null;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommndProto parseDelimitedFrom(
|
|
|
|
+ java.io.InputStream input,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ Builder builder = newBuilder();
|
|
|
|
+ if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
|
|
|
|
+ return builder.buildParsed();
|
|
|
|
+ } else {
|
|
|
|
+ return null;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommndProto parseFrom(
|
|
|
|
+ com.google.protobuf.CodedInputStream input)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ return newBuilder().mergeFrom(input).buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommndProto parseFrom(
|
|
|
|
+ com.google.protobuf.CodedInputStream input,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ return newBuilder().mergeFrom(input, extensionRegistry)
|
|
|
|
+ .buildParsed();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public static Builder newBuilder() { return Builder.create(); }
|
|
|
|
+ public Builder newBuilderForType() { return newBuilder(); }
|
|
|
|
+ public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommndProto prototype) {
|
|
|
|
+ return newBuilder().mergeFrom(prototype);
|
|
|
|
+ }
|
|
|
|
+ public Builder toBuilder() { return newBuilder(this); }
|
|
|
|
+
|
|
|
|
+ @java.lang.Override
|
|
|
|
+ protected Builder newBuilderForType(
|
|
|
|
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
|
|
|
|
+ Builder builder = new Builder(parent);
|
|
|
|
+ return builder;
|
|
|
|
+ }
|
|
|
|
+ public static final class Builder extends
|
|
|
|
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
|
|
|
|
+ implements org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommndProtoOrBuilder {
|
|
|
|
+ public static final com.google.protobuf.Descriptors.Descriptor
|
|
|
|
+ getDescriptor() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_BlockRecoveryCommndProto_descriptor;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
|
|
|
+ internalGetFieldAccessorTable() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_BlockRecoveryCommndProto_fieldAccessorTable;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ // Construct using org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommndProto.newBuilder()
|
|
|
|
+ private Builder() {
|
|
|
|
+ maybeForceBuilderInitialization();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private Builder(BuilderParent parent) {
|
|
|
|
+ super(parent);
|
|
|
|
+ maybeForceBuilderInitialization();
|
|
|
|
+ }
|
|
|
|
+ private void maybeForceBuilderInitialization() {
|
|
|
|
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
|
|
|
|
+ getBlocksFieldBuilder();
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ private static Builder create() {
|
|
|
|
+ return new Builder();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public Builder clear() {
|
|
|
|
+ super.clear();
|
|
|
|
+ if (blocksBuilder_ == null) {
|
|
|
|
+ blocks_ = java.util.Collections.emptyList();
|
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000001);
|
|
|
|
+ } else {
|
|
|
|
+ blocksBuilder_.clear();
|
|
|
|
+ }
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public Builder clone() {
|
|
|
|
+ return create().mergeFrom(buildPartial());
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public com.google.protobuf.Descriptors.Descriptor
|
|
|
|
+ getDescriptorForType() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommndProto.getDescriptor();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommndProto getDefaultInstanceForType() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommndProto.getDefaultInstance();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommndProto build() {
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommndProto result = buildPartial();
|
|
|
|
+ if (!result.isInitialized()) {
|
|
|
|
+ throw newUninitializedMessageException(result);
|
|
|
|
+ }
|
|
|
|
+ return result;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommndProto buildParsed()
|
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommndProto result = buildPartial();
|
|
|
|
+ if (!result.isInitialized()) {
|
|
|
|
+ throw newUninitializedMessageException(
|
|
|
|
+ result).asInvalidProtocolBufferException();
|
|
|
|
+ }
|
|
|
|
+ return result;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommndProto buildPartial() {
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommndProto result = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommndProto(this);
|
|
|
|
+ int from_bitField0_ = bitField0_;
|
|
|
|
+ if (blocksBuilder_ == null) {
|
|
|
|
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
|
+ blocks_ = java.util.Collections.unmodifiableList(blocks_);
|
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000001);
|
|
|
|
+ }
|
|
|
|
+ result.blocks_ = blocks_;
|
|
|
|
+ } else {
|
|
|
|
+ result.blocks_ = blocksBuilder_.build();
|
|
|
|
+ }
|
|
|
|
+ onBuilt();
|
|
|
|
+ return result;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public Builder mergeFrom(com.google.protobuf.Message other) {
|
|
|
|
+ if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommndProto) {
|
|
|
|
+ return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommndProto)other);
|
|
|
|
+ } else {
|
|
|
|
+ super.mergeFrom(other);
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommndProto other) {
|
|
|
|
+ if (other == org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommndProto.getDefaultInstance()) return this;
|
|
|
|
+ if (blocksBuilder_ == null) {
|
|
|
|
+ if (!other.blocks_.isEmpty()) {
|
|
|
|
+ if (blocks_.isEmpty()) {
|
|
|
|
+ blocks_ = other.blocks_;
|
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000001);
|
|
|
|
+ } else {
|
|
|
|
+ ensureBlocksIsMutable();
|
|
|
|
+ blocks_.addAll(other.blocks_);
|
|
|
|
+ }
|
|
|
|
+ onChanged();
|
|
|
|
+ }
|
|
|
|
+ } else {
|
|
|
|
+ if (!other.blocks_.isEmpty()) {
|
|
|
|
+ if (blocksBuilder_.isEmpty()) {
|
|
|
|
+ blocksBuilder_.dispose();
|
|
|
|
+ blocksBuilder_ = null;
|
|
|
|
+ blocks_ = other.blocks_;
|
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000001);
|
|
|
|
+ blocksBuilder_ =
|
|
|
|
+ com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
|
|
|
|
+ getBlocksFieldBuilder() : null;
|
|
|
|
+ } else {
|
|
|
|
+ blocksBuilder_.addAllMessages(other.blocks_);
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ this.mergeUnknownFields(other.getUnknownFields());
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public final boolean isInitialized() {
|
|
|
|
+ for (int i = 0; i < getBlocksCount(); i++) {
|
|
|
|
+ if (!getBlocks(i).isInitialized()) {
|
|
|
|
+
|
|
|
|
+ return false;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ return true;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public Builder mergeFrom(
|
|
|
|
+ com.google.protobuf.CodedInputStream input,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
|
|
|
|
+ com.google.protobuf.UnknownFieldSet.newBuilder(
|
|
|
|
+ this.getUnknownFields());
|
|
|
|
+ while (true) {
|
|
|
|
+ int tag = input.readTag();
|
|
|
|
+ switch (tag) {
|
|
|
|
+ case 0:
|
|
|
|
+ this.setUnknownFields(unknownFields.build());
|
|
|
|
+ onChanged();
|
|
|
|
+ return this;
|
|
|
|
+ default: {
|
|
|
|
+ if (!parseUnknownField(input, unknownFields,
|
|
|
|
+ extensionRegistry, tag)) {
|
|
|
|
+ this.setUnknownFields(unknownFields.build());
|
|
|
|
+ onChanged();
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ break;
|
|
|
|
+ }
|
|
|
|
+ case 10: {
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto.newBuilder();
|
|
|
|
+ input.readMessage(subBuilder, extensionRegistry);
|
|
|
|
+ addBlocks(subBuilder.buildPartial());
|
|
|
|
+ break;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ }
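+ // Editorial note: in the loop above, tag 0 means end of stream; tag 10 is
+ // field number 1 with wire type 2 (length-delimited), i.e. one serialized
+ // RecoveringBlockProto for the repeated `blocks` field; any other tag is
+ // kept via parseUnknownField() so unrecognized data round-trips intact.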
+
|
|
|
|
+ private int bitField0_;
|
|
|
|
+
|
|
|
|
+ // repeated .RecoveringBlockProto blocks = 1;
|
|
|
|
+ private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto> blocks_ =
|
|
|
|
+ java.util.Collections.emptyList();
|
|
|
|
+ private void ensureBlocksIsMutable() {
|
|
|
|
+ if (!((bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
|
+ blocks_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto>(blocks_);
|
|
|
|
+ bitField0_ |= 0x00000001;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private com.google.protobuf.RepeatedFieldBuilder<
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProtoOrBuilder> blocksBuilder_;
|
|
|
|
+
|
|
|
|
+ public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto> getBlocksList() {
|
|
|
|
+ if (blocksBuilder_ == null) {
|
|
|
|
+ return java.util.Collections.unmodifiableList(blocks_);
|
|
|
|
+ } else {
|
|
|
|
+ return blocksBuilder_.getMessageList();
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ public int getBlocksCount() {
|
|
|
|
+ if (blocksBuilder_ == null) {
|
|
|
|
+ return blocks_.size();
|
|
|
|
+ } else {
|
|
|
|
+ return blocksBuilder_.getCount();
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto getBlocks(int index) {
|
|
|
|
+ if (blocksBuilder_ == null) {
|
|
|
|
+ return blocks_.get(index);
|
|
|
|
+ } else {
|
|
|
|
+ return blocksBuilder_.getMessage(index);
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ public Builder setBlocks(
|
|
|
|
+ int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto value) {
|
|
|
|
+ if (blocksBuilder_ == null) {
|
|
|
|
+ if (value == null) {
|
|
|
|
+ throw new NullPointerException();
|
|
|
|
+ }
|
|
|
|
+ ensureBlocksIsMutable();
|
|
|
|
+ blocks_.set(index, value);
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ blocksBuilder_.setMessage(index, value);
|
|
|
|
+ }
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder setBlocks(
|
|
|
|
+ int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto.Builder builderForValue) {
|
|
|
|
+ if (blocksBuilder_ == null) {
|
|
|
|
+ ensureBlocksIsMutable();
|
|
|
|
+ blocks_.set(index, builderForValue.build());
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ blocksBuilder_.setMessage(index, builderForValue.build());
|
|
|
|
+ }
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder addBlocks(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto value) {
|
|
|
|
+ if (blocksBuilder_ == null) {
|
|
|
|
+ if (value == null) {
|
|
|
|
+ throw new NullPointerException();
|
|
|
|
+ }
|
|
|
|
+ ensureBlocksIsMutable();
|
|
|
|
+ blocks_.add(value);
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ blocksBuilder_.addMessage(value);
|
|
|
|
+ }
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder addBlocks(
|
|
|
|
+ int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto value) {
|
|
|
|
+ if (blocksBuilder_ == null) {
|
|
|
|
+ if (value == null) {
|
|
|
|
+ throw new NullPointerException();
|
|
|
|
+ }
|
|
|
|
+ ensureBlocksIsMutable();
|
|
|
|
+ blocks_.add(index, value);
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ blocksBuilder_.addMessage(index, value);
|
|
|
|
+ }
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder addBlocks(
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto.Builder builderForValue) {
|
|
|
|
+ if (blocksBuilder_ == null) {
|
|
|
|
+ ensureBlocksIsMutable();
|
|
|
|
+ blocks_.add(builderForValue.build());
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ blocksBuilder_.addMessage(builderForValue.build());
|
|
|
|
+ }
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder addBlocks(
|
|
|
|
+ int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto.Builder builderForValue) {
|
|
|
|
+ if (blocksBuilder_ == null) {
|
|
|
|
+ ensureBlocksIsMutable();
|
|
|
|
+ blocks_.add(index, builderForValue.build());
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ blocksBuilder_.addMessage(index, builderForValue.build());
|
|
|
|
+ }
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder addAllBlocks(
|
|
|
|
+ java.lang.Iterable<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto> values) {
|
|
|
|
+ if (blocksBuilder_ == null) {
|
|
|
|
+ ensureBlocksIsMutable();
|
|
|
|
+ super.addAll(values, blocks_);
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ blocksBuilder_.addAllMessages(values);
|
|
|
|
+ }
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder clearBlocks() {
|
|
|
|
+ if (blocksBuilder_ == null) {
|
|
|
|
+ blocks_ = java.util.Collections.emptyList();
|
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000001);
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ blocksBuilder_.clear();
|
|
|
|
+ }
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder removeBlocks(int index) {
|
|
|
|
+ if (blocksBuilder_ == null) {
|
|
|
|
+ ensureBlocksIsMutable();
|
|
|
|
+ blocks_.remove(index);
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ blocksBuilder_.remove(index);
|
|
|
|
+ }
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto.Builder getBlocksBuilder(
|
|
|
|
+ int index) {
|
|
|
|
+ return getBlocksFieldBuilder().getBuilder(index);
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProtoOrBuilder getBlocksOrBuilder(
|
|
|
|
+ int index) {
|
|
|
|
+ if (blocksBuilder_ == null) {
|
|
|
|
+ return blocks_.get(index); } else {
|
|
|
|
+ return blocksBuilder_.getMessageOrBuilder(index);
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProtoOrBuilder>
|
|
|
|
+ getBlocksOrBuilderList() {
|
|
|
|
+ if (blocksBuilder_ != null) {
|
|
|
|
+ return blocksBuilder_.getMessageOrBuilderList();
|
|
|
|
+ } else {
|
|
|
|
+ return java.util.Collections.unmodifiableList(blocks_);
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto.Builder addBlocksBuilder() {
|
|
|
|
+ return getBlocksFieldBuilder().addBuilder(
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto.getDefaultInstance());
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto.Builder addBlocksBuilder(
|
|
|
|
+ int index) {
|
|
|
|
+ return getBlocksFieldBuilder().addBuilder(
|
|
|
|
+ index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto.getDefaultInstance());
|
|
|
|
+ }
|
|
|
|
+ public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto.Builder>
|
|
|
|
+ getBlocksBuilderList() {
|
|
|
|
+ return getBlocksFieldBuilder().getBuilderList();
|
|
|
|
+ }
|
|
|
|
+ private com.google.protobuf.RepeatedFieldBuilder<
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProtoOrBuilder>
|
|
|
|
+ getBlocksFieldBuilder() {
|
|
|
|
+ if (blocksBuilder_ == null) {
|
|
|
|
+ blocksBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProtoOrBuilder>(
|
|
|
|
+ blocks_,
|
|
|
|
+ ((bitField0_ & 0x00000001) == 0x00000001),
|
|
|
|
+ getParentForChildren(),
|
|
|
|
+ isClean());
|
|
|
|
+ blocks_ = null;
|
|
|
|
+ }
|
|
|
|
+ return blocksBuilder_;
|
|
|
|
+ }
|
|
|
|
+
+ // @@protoc_insertion_point(builder_scope:BlockRecoveryCommndProto)
+ }
+
+ static {
+ defaultInstance = new BlockRecoveryCommndProto(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:BlockRecoveryCommndProto)
+ }
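+
+ // Editorial sketch (not emitted by protoc): illustrates populating the
+ // repeated `blocks` field through the Builder; the caller supplies an
+ // already-initialized RecoveringBlockProto (its required fields live in
+ // HdfsProtos and are not shown here). The helper name is hypothetical.
+ private static BlockRecoveryCommndProto exampleBlockRecoveryCommand(
+     org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto block) {
+   return BlockRecoveryCommndProto.newBuilder()
+       .addBlocks(block)   // repeated field: call once per block under recovery
+       .build();           // throws if any element is missing required fields
+ }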
+
+ public interface FinalizeCommandProtoOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // required string blockPoolId = 1;
+ boolean hasBlockPoolId();
+ String getBlockPoolId();
+ }
+ public static final class FinalizeCommandProto extends
|
|
|
|
+ com.google.protobuf.GeneratedMessage
|
|
|
|
+ implements FinalizeCommandProtoOrBuilder {
|
|
|
|
+ // Use FinalizeCommandProto.newBuilder() to construct.
|
|
|
|
+ private FinalizeCommandProto(Builder builder) {
|
|
|
|
+ super(builder);
|
|
|
|
+ }
|
|
|
|
+ private FinalizeCommandProto(boolean noInit) {}
|
|
|
|
+
|
|
|
|
+ private static final FinalizeCommandProto defaultInstance;
|
|
|
|
+ public static FinalizeCommandProto getDefaultInstance() {
|
|
|
|
+ return defaultInstance;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public FinalizeCommandProto getDefaultInstanceForType() {
|
|
|
|
+ return defaultInstance;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public static final com.google.protobuf.Descriptors.Descriptor
|
|
|
|
+ getDescriptor() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_FinalizeCommandProto_descriptor;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
|
|
|
+ internalGetFieldAccessorTable() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_FinalizeCommandProto_fieldAccessorTable;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private int bitField0_;
|
|
|
|
+ // required string blockPoolId = 1;
|
|
|
|
+ public static final int BLOCKPOOLID_FIELD_NUMBER = 1;
|
|
|
|
+ private java.lang.Object blockPoolId_;
|
|
|
|
+ public boolean hasBlockPoolId() {
|
|
|
|
+ return ((bitField0_ & 0x00000001) == 0x00000001);
|
|
|
|
+ }
|
|
|
|
+ public String getBlockPoolId() {
|
|
|
|
+ java.lang.Object ref = blockPoolId_;
|
|
|
|
+ if (ref instanceof String) {
|
|
|
|
+ return (String) ref;
|
|
|
|
+ } else {
|
|
|
|
+ com.google.protobuf.ByteString bs =
|
|
|
|
+ (com.google.protobuf.ByteString) ref;
|
|
|
|
+ String s = bs.toStringUtf8();
|
|
|
|
+ if (com.google.protobuf.Internal.isValidUtf8(bs)) {
|
|
|
|
+ blockPoolId_ = s;
|
|
|
|
+ }
|
|
|
|
+ return s;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ private com.google.protobuf.ByteString getBlockPoolIdBytes() {
|
|
|
|
+ java.lang.Object ref = blockPoolId_;
|
|
|
|
+ if (ref instanceof String) {
|
|
|
|
+ com.google.protobuf.ByteString b =
|
|
|
|
+ com.google.protobuf.ByteString.copyFromUtf8((String) ref);
|
|
|
|
+ blockPoolId_ = b;
|
|
|
|
+ return b;
|
|
|
|
+ } else {
|
|
|
|
+ return (com.google.protobuf.ByteString) ref;
|
|
|
|
+ }
|
|
|
|
+ }
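+ // Editorial note: blockPoolId_ holds either a decoded String or the raw
+ // ByteString; getBlockPoolId() caches the String form (only when the bytes
+ // are valid UTF-8), while getBlockPoolIdBytes() caches the UTF-8 encoded
+ // form used by writeTo() and getSerializedSize().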
+
|
|
|
|
+ private void initFields() {
|
|
|
|
+ blockPoolId_ = "";
|
|
|
|
+ }
|
|
|
|
+ private byte memoizedIsInitialized = -1;
|
|
|
|
+ public final boolean isInitialized() {
|
|
|
|
+ byte isInitialized = memoizedIsInitialized;
|
|
|
|
+ if (isInitialized != -1) return isInitialized == 1;
|
|
|
|
+
|
|
|
|
+ if (!hasBlockPoolId()) {
|
|
|
|
+ memoizedIsInitialized = 0;
|
|
|
|
+ return false;
|
|
|
|
+ }
|
|
|
|
+ memoizedIsInitialized = 1;
|
|
|
|
+ return true;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ getSerializedSize();
|
|
|
|
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
|
+ output.writeBytes(1, getBlockPoolIdBytes());
|
|
|
|
+ }
|
|
|
|
+ getUnknownFields().writeTo(output);
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private int memoizedSerializedSize = -1;
|
|
|
|
+ public int getSerializedSize() {
|
|
|
|
+ int size = memoizedSerializedSize;
|
|
|
|
+ if (size != -1) return size;
|
|
|
|
+
|
|
|
|
+ size = 0;
|
|
|
|
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
|
+ size += com.google.protobuf.CodedOutputStream
|
|
|
|
+ .computeBytesSize(1, getBlockPoolIdBytes());
|
|
|
|
+ }
|
|
|
|
+ size += getUnknownFields().getSerializedSize();
|
|
|
|
+ memoizedSerializedSize = size;
|
|
|
|
+ return size;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private static final long serialVersionUID = 0L;
|
|
|
|
+ @java.lang.Override
|
|
|
|
+ protected java.lang.Object writeReplace()
|
|
|
|
+ throws java.io.ObjectStreamException {
|
|
|
|
+ return super.writeReplace();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ @java.lang.Override
|
|
|
|
+ public boolean equals(final java.lang.Object obj) {
|
|
|
|
+ if (obj == this) {
|
|
|
|
+ return true;
|
|
|
|
+ }
|
|
|
|
+ if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto)) {
|
|
|
|
+ return super.equals(obj);
|
|
|
|
+ }
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto other = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto) obj;
|
|
|
|
+
|
|
|
|
+ boolean result = true;
|
|
|
|
+ result = result && (hasBlockPoolId() == other.hasBlockPoolId());
|
|
|
|
+ if (hasBlockPoolId()) {
|
|
|
|
+ result = result && getBlockPoolId()
|
|
|
|
+ .equals(other.getBlockPoolId());
|
|
|
|
+ }
|
|
|
|
+ result = result &&
|
|
|
|
+ getUnknownFields().equals(other.getUnknownFields());
|
|
|
|
+ return result;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ @java.lang.Override
|
|
|
|
+ public int hashCode() {
|
|
|
|
+ int hash = 41;
|
|
|
|
+ hash = (19 * hash) + getDescriptorForType().hashCode();
|
|
|
|
+ if (hasBlockPoolId()) {
|
|
|
|
+ hash = (37 * hash) + BLOCKPOOLID_FIELD_NUMBER;
|
|
|
|
+ hash = (53 * hash) + getBlockPoolId().hashCode();
|
|
|
|
+ }
|
|
|
|
+ hash = (29 * hash) + getUnknownFields().hashCode();
|
|
|
|
+ return hash;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto parseFrom(
|
|
|
|
+ com.google.protobuf.ByteString data)
|
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
|
+ return newBuilder().mergeFrom(data).buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto parseFrom(
|
|
|
|
+ com.google.protobuf.ByteString data,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
|
+ return newBuilder().mergeFrom(data, extensionRegistry)
|
|
|
|
+ .buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto parseFrom(byte[] data)
|
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
|
+ return newBuilder().mergeFrom(data).buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto parseFrom(
|
|
|
|
+ byte[] data,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
|
+ return newBuilder().mergeFrom(data, extensionRegistry)
|
|
|
|
+ .buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto parseFrom(java.io.InputStream input)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ return newBuilder().mergeFrom(input).buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto parseFrom(
|
|
|
|
+ java.io.InputStream input,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ return newBuilder().mergeFrom(input, extensionRegistry)
|
|
|
|
+ .buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto parseDelimitedFrom(java.io.InputStream input)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ Builder builder = newBuilder();
|
|
|
|
+ if (builder.mergeDelimitedFrom(input)) {
|
|
|
|
+ return builder.buildParsed();
|
|
|
|
+ } else {
|
|
|
|
+ return null;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto parseDelimitedFrom(
|
|
|
|
+ java.io.InputStream input,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ Builder builder = newBuilder();
|
|
|
|
+ if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
|
|
|
|
+ return builder.buildParsed();
|
|
|
|
+ } else {
|
|
|
|
+ return null;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto parseFrom(
|
|
|
|
+ com.google.protobuf.CodedInputStream input)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ return newBuilder().mergeFrom(input).buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto parseFrom(
|
|
|
|
+ com.google.protobuf.CodedInputStream input,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ return newBuilder().mergeFrom(input, extensionRegistry)
|
|
|
|
+ .buildParsed();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public static Builder newBuilder() { return Builder.create(); }
|
|
|
|
+ public Builder newBuilderForType() { return newBuilder(); }
|
|
|
|
+ public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto prototype) {
|
|
|
|
+ return newBuilder().mergeFrom(prototype);
|
|
|
|
+ }
|
|
|
|
+ public Builder toBuilder() { return newBuilder(this); }
|
|
|
|
+
|
|
|
|
+ @java.lang.Override
|
|
|
|
+ protected Builder newBuilderForType(
|
|
|
|
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
|
|
|
|
+ Builder builder = new Builder(parent);
|
|
|
|
+ return builder;
|
|
|
|
+ }
|
|
|
|
+ public static final class Builder extends
|
|
|
|
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
|
|
|
|
+ implements org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProtoOrBuilder {
|
|
|
|
+ public static final com.google.protobuf.Descriptors.Descriptor
|
|
|
|
+ getDescriptor() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_FinalizeCommandProto_descriptor;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
|
|
|
+ internalGetFieldAccessorTable() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_FinalizeCommandProto_fieldAccessorTable;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ // Construct using org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto.newBuilder()
|
|
|
|
+ private Builder() {
|
|
|
|
+ maybeForceBuilderInitialization();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private Builder(BuilderParent parent) {
|
|
|
|
+ super(parent);
|
|
|
|
+ maybeForceBuilderInitialization();
|
|
|
|
+ }
|
|
|
|
+ private void maybeForceBuilderInitialization() {
|
|
|
|
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ private static Builder create() {
|
|
|
|
+ return new Builder();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public Builder clear() {
|
|
|
|
+ super.clear();
|
|
|
|
+ blockPoolId_ = "";
|
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000001);
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public Builder clone() {
|
|
|
|
+ return create().mergeFrom(buildPartial());
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public com.google.protobuf.Descriptors.Descriptor
|
|
|
|
+ getDescriptorForType() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto.getDescriptor();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto getDefaultInstanceForType() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto.getDefaultInstance();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto build() {
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto result = buildPartial();
|
|
|
|
+ if (!result.isInitialized()) {
|
|
|
|
+ throw newUninitializedMessageException(result);
|
|
|
|
+ }
|
|
|
|
+ return result;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto buildParsed()
|
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto result = buildPartial();
|
|
|
|
+ if (!result.isInitialized()) {
|
|
|
|
+ throw newUninitializedMessageException(
|
|
|
|
+ result).asInvalidProtocolBufferException();
|
|
|
|
+ }
|
|
|
|
+ return result;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto buildPartial() {
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto result = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto(this);
|
|
|
|
+ int from_bitField0_ = bitField0_;
|
|
|
|
+ int to_bitField0_ = 0;
|
|
|
|
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
|
+ to_bitField0_ |= 0x00000001;
|
|
|
|
+ }
|
|
|
|
+ result.blockPoolId_ = blockPoolId_;
|
|
|
|
+ result.bitField0_ = to_bitField0_;
|
|
|
|
+ onBuilt();
|
|
|
|
+ return result;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public Builder mergeFrom(com.google.protobuf.Message other) {
|
|
|
|
+ if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto) {
|
|
|
|
+ return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto)other);
|
|
|
|
+ } else {
|
|
|
|
+ super.mergeFrom(other);
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto other) {
|
|
|
|
+ if (other == org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto.getDefaultInstance()) return this;
|
|
|
|
+ if (other.hasBlockPoolId()) {
|
|
|
|
+ setBlockPoolId(other.getBlockPoolId());
|
|
|
|
+ }
|
|
|
|
+ this.mergeUnknownFields(other.getUnknownFields());
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public final boolean isInitialized() {
|
|
|
|
+ if (!hasBlockPoolId()) {
|
|
|
|
+
|
|
|
|
+ return false;
|
|
|
|
+ }
|
|
|
|
+ return true;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public Builder mergeFrom(
|
|
|
|
+ com.google.protobuf.CodedInputStream input,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
|
|
|
|
+ com.google.protobuf.UnknownFieldSet.newBuilder(
|
|
|
|
+ this.getUnknownFields());
|
|
|
|
+ while (true) {
|
|
|
|
+ int tag = input.readTag();
|
|
|
|
+ switch (tag) {
|
|
|
|
+ case 0:
|
|
|
|
+ this.setUnknownFields(unknownFields.build());
|
|
|
|
+ onChanged();
|
|
|
|
+ return this;
|
|
|
|
+ default: {
|
|
|
|
+ if (!parseUnknownField(input, unknownFields,
|
|
|
|
+ extensionRegistry, tag)) {
|
|
|
|
+ this.setUnknownFields(unknownFields.build());
|
|
|
|
+ onChanged();
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ break;
|
|
|
|
+ }
|
|
|
|
+ case 10: {
|
|
|
|
+ bitField0_ |= 0x00000001;
|
|
|
|
+ blockPoolId_ = input.readBytes();
|
|
|
|
+ break;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private int bitField0_;
|
|
|
|
+
|
|
|
|
+ // required string blockPoolId = 1;
|
|
|
|
+ private java.lang.Object blockPoolId_ = "";
|
|
|
|
+ public boolean hasBlockPoolId() {
|
|
|
|
+ return ((bitField0_ & 0x00000001) == 0x00000001);
|
|
|
|
+ }
|
|
|
|
+ public String getBlockPoolId() {
|
|
|
|
+ java.lang.Object ref = blockPoolId_;
|
|
|
|
+ if (!(ref instanceof String)) {
|
|
|
|
+ String s = ((com.google.protobuf.ByteString) ref).toStringUtf8();
|
|
|
|
+ blockPoolId_ = s;
|
|
|
|
+ return s;
|
|
|
|
+ } else {
|
|
|
|
+ return (String) ref;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ public Builder setBlockPoolId(String value) {
|
|
|
|
+ if (value == null) {
|
|
|
|
+ throw new NullPointerException();
|
|
|
|
+ }
|
|
|
|
+ bitField0_ |= 0x00000001;
|
|
|
|
+ blockPoolId_ = value;
|
|
|
|
+ onChanged();
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder clearBlockPoolId() {
|
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000001);
|
|
|
|
+ blockPoolId_ = getDefaultInstance().getBlockPoolId();
|
|
|
|
+ onChanged();
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ void setBlockPoolId(com.google.protobuf.ByteString value) {
|
|
|
|
+ bitField0_ |= 0x00000001;
|
|
|
|
+ blockPoolId_ = value;
|
|
|
|
+ onChanged();
|
|
|
|
+ }
|
|
|
|
+
+ // @@protoc_insertion_point(builder_scope:FinalizeCommandProto)
+ }
+
+ static {
+ defaultInstance = new FinalizeCommandProto(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:FinalizeCommandProto)
+ }
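+
+ // Editorial sketch (not emitted by protoc): a FinalizeCommandProto is built
+ // through its nested Builder, and build() enforces the required blockPoolId
+ // via isInitialized(). The helper name and pool id value are hypothetical.
+ private static FinalizeCommandProto exampleFinalizeCommand() {
+   return FinalizeCommandProto.newBuilder()
+       .setBlockPoolId("BP-0000000000-127.0.0.1-0")  // hypothetical block pool id
+       .build();
+ }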
+
+ public interface KeyUpdateCommandProtoOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // required .ExportedBlockKeysProto keys = 1;
+ boolean hasKeys();
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto getKeys();
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProtoOrBuilder getKeysOrBuilder();
+ }
+ public static final class KeyUpdateCommandProto extends
|
|
|
|
+ com.google.protobuf.GeneratedMessage
|
|
|
|
+ implements KeyUpdateCommandProtoOrBuilder {
|
|
|
|
+ // Use KeyUpdateCommandProto.newBuilder() to construct.
|
|
|
|
+ private KeyUpdateCommandProto(Builder builder) {
|
|
|
|
+ super(builder);
|
|
|
|
+ }
|
|
|
|
+ private KeyUpdateCommandProto(boolean noInit) {}
|
|
|
|
+
|
|
|
|
+ private static final KeyUpdateCommandProto defaultInstance;
|
|
|
|
+ public static KeyUpdateCommandProto getDefaultInstance() {
|
|
|
|
+ return defaultInstance;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public KeyUpdateCommandProto getDefaultInstanceForType() {
|
|
|
|
+ return defaultInstance;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public static final com.google.protobuf.Descriptors.Descriptor
|
|
|
|
+ getDescriptor() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_KeyUpdateCommandProto_descriptor;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
|
|
|
+ internalGetFieldAccessorTable() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_KeyUpdateCommandProto_fieldAccessorTable;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private int bitField0_;
|
|
|
|
+ // required .ExportedBlockKeysProto keys = 1;
|
|
|
|
+ public static final int KEYS_FIELD_NUMBER = 1;
|
|
|
|
+ private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto keys_;
|
|
|
|
+ public boolean hasKeys() {
|
|
|
|
+ return ((bitField0_ & 0x00000001) == 0x00000001);
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto getKeys() {
|
|
|
|
+ return keys_;
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProtoOrBuilder getKeysOrBuilder() {
|
|
|
|
+ return keys_;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private void initFields() {
|
|
|
|
+ keys_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto.getDefaultInstance();
|
|
|
|
+ }
|
|
|
|
+ private byte memoizedIsInitialized = -1;
|
|
|
|
+ public final boolean isInitialized() {
|
|
|
|
+ byte isInitialized = memoizedIsInitialized;
|
|
|
|
+ if (isInitialized != -1) return isInitialized == 1;
|
|
|
|
+
|
|
|
|
+ if (!hasKeys()) {
|
|
|
|
+ memoizedIsInitialized = 0;
|
|
|
|
+ return false;
|
|
|
|
+ }
|
|
|
|
+ if (!getKeys().isInitialized()) {
|
|
|
|
+ memoizedIsInitialized = 0;
|
|
|
|
+ return false;
|
|
|
|
+ }
|
|
|
|
+ memoizedIsInitialized = 1;
|
|
|
|
+ return true;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ getSerializedSize();
|
|
|
|
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
|
+ output.writeMessage(1, keys_);
|
|
|
|
+ }
|
|
|
|
+ getUnknownFields().writeTo(output);
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private int memoizedSerializedSize = -1;
|
|
|
|
+ public int getSerializedSize() {
|
|
|
|
+ int size = memoizedSerializedSize;
|
|
|
|
+ if (size != -1) return size;
|
|
|
|
+
|
|
|
|
+ size = 0;
|
|
|
|
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
|
+ size += com.google.protobuf.CodedOutputStream
|
|
|
|
+ .computeMessageSize(1, keys_);
|
|
|
|
+ }
|
|
|
|
+ size += getUnknownFields().getSerializedSize();
|
|
|
|
+ memoizedSerializedSize = size;
|
|
|
|
+ return size;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private static final long serialVersionUID = 0L;
|
|
|
|
+ @java.lang.Override
|
|
|
|
+ protected java.lang.Object writeReplace()
|
|
|
|
+ throws java.io.ObjectStreamException {
|
|
|
|
+ return super.writeReplace();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ @java.lang.Override
|
|
|
|
+ public boolean equals(final java.lang.Object obj) {
|
|
|
|
+ if (obj == this) {
|
|
|
|
+ return true;
|
|
|
|
+ }
|
|
|
|
+ if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto)) {
|
|
|
|
+ return super.equals(obj);
|
|
|
|
+ }
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto other = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto) obj;
|
|
|
|
+
|
|
|
|
+ boolean result = true;
|
|
|
|
+ result = result && (hasKeys() == other.hasKeys());
|
|
|
|
+ if (hasKeys()) {
|
|
|
|
+ result = result && getKeys()
|
|
|
|
+ .equals(other.getKeys());
|
|
|
|
+ }
|
|
|
|
+ result = result &&
|
|
|
|
+ getUnknownFields().equals(other.getUnknownFields());
|
|
|
|
+ return result;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ @java.lang.Override
|
|
|
|
+ public int hashCode() {
|
|
|
|
+ int hash = 41;
|
|
|
|
+ hash = (19 * hash) + getDescriptorForType().hashCode();
|
|
|
|
+ if (hasKeys()) {
|
|
|
|
+ hash = (37 * hash) + KEYS_FIELD_NUMBER;
|
|
|
|
+ hash = (53 * hash) + getKeys().hashCode();
|
|
|
|
+ }
|
|
|
|
+ hash = (29 * hash) + getUnknownFields().hashCode();
|
|
|
|
+ return hash;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto parseFrom(
|
|
|
|
+ com.google.protobuf.ByteString data)
|
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
|
+ return newBuilder().mergeFrom(data).buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto parseFrom(
|
|
|
|
+ com.google.protobuf.ByteString data,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
|
+ return newBuilder().mergeFrom(data, extensionRegistry)
|
|
|
|
+ .buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto parseFrom(byte[] data)
|
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
|
+ return newBuilder().mergeFrom(data).buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto parseFrom(
|
|
|
|
+ byte[] data,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
|
+ return newBuilder().mergeFrom(data, extensionRegistry)
|
|
|
|
+ .buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto parseFrom(java.io.InputStream input)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ return newBuilder().mergeFrom(input).buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto parseFrom(
|
|
|
|
+ java.io.InputStream input,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ return newBuilder().mergeFrom(input, extensionRegistry)
|
|
|
|
+ .buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto parseDelimitedFrom(java.io.InputStream input)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ Builder builder = newBuilder();
|
|
|
|
+ if (builder.mergeDelimitedFrom(input)) {
|
|
|
|
+ return builder.buildParsed();
|
|
|
|
+ } else {
|
|
|
|
+ return null;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto parseDelimitedFrom(
|
|
|
|
+ java.io.InputStream input,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ Builder builder = newBuilder();
|
|
|
|
+ if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
|
|
|
|
+ return builder.buildParsed();
|
|
|
|
+ } else {
|
|
|
|
+ return null;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto parseFrom(
|
|
|
|
+ com.google.protobuf.CodedInputStream input)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ return newBuilder().mergeFrom(input).buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto parseFrom(
|
|
|
|
+ com.google.protobuf.CodedInputStream input,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ return newBuilder().mergeFrom(input, extensionRegistry)
|
|
|
|
+ .buildParsed();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public static Builder newBuilder() { return Builder.create(); }
|
|
|
|
+ public Builder newBuilderForType() { return newBuilder(); }
|
|
|
|
+ public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto prototype) {
|
|
|
|
+ return newBuilder().mergeFrom(prototype);
|
|
|
|
+ }
|
|
|
|
+ public Builder toBuilder() { return newBuilder(this); }
|
|
|
|
+
|
|
|
|
+ @java.lang.Override
|
|
|
|
+ protected Builder newBuilderForType(
|
|
|
|
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
|
|
|
|
+ Builder builder = new Builder(parent);
|
|
|
|
+ return builder;
|
|
|
|
+ }
|
|
|
|
+ public static final class Builder extends
|
|
|
|
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
|
|
|
|
+ implements org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProtoOrBuilder {
|
|
|
|
+ public static final com.google.protobuf.Descriptors.Descriptor
|
|
|
|
+ getDescriptor() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_KeyUpdateCommandProto_descriptor;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
|
|
|
+ internalGetFieldAccessorTable() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_KeyUpdateCommandProto_fieldAccessorTable;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ // Construct using org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto.newBuilder()
|
|
|
|
+ private Builder() {
|
|
|
|
+ maybeForceBuilderInitialization();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private Builder(BuilderParent parent) {
|
|
|
|
+ super(parent);
|
|
|
|
+ maybeForceBuilderInitialization();
|
|
|
|
+ }
|
|
|
|
+ private void maybeForceBuilderInitialization() {
|
|
|
|
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
|
|
|
|
+ getKeysFieldBuilder();
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ private static Builder create() {
|
|
|
|
+ return new Builder();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public Builder clear() {
|
|
|
|
+ super.clear();
|
|
|
|
+ if (keysBuilder_ == null) {
|
|
|
|
+ keys_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto.getDefaultInstance();
|
|
|
|
+ } else {
|
|
|
|
+ keysBuilder_.clear();
|
|
|
|
+ }
|
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000001);
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public Builder clone() {
|
|
|
|
+ return create().mergeFrom(buildPartial());
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public com.google.protobuf.Descriptors.Descriptor
|
|
|
|
+ getDescriptorForType() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto.getDescriptor();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto getDefaultInstanceForType() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto.getDefaultInstance();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto build() {
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto result = buildPartial();
|
|
|
|
+ if (!result.isInitialized()) {
|
|
|
|
+ throw newUninitializedMessageException(result);
|
|
|
|
+ }
|
|
|
|
+ return result;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto buildParsed()
|
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto result = buildPartial();
|
|
|
|
+ if (!result.isInitialized()) {
|
|
|
|
+ throw newUninitializedMessageException(
|
|
|
|
+ result).asInvalidProtocolBufferException();
|
|
|
|
+ }
|
|
|
|
+ return result;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto buildPartial() {
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto result = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto(this);
|
|
|
|
+ int from_bitField0_ = bitField0_;
|
|
|
|
+ int to_bitField0_ = 0;
|
|
|
|
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
|
+ to_bitField0_ |= 0x00000001;
|
|
|
|
+ }
|
|
|
|
+ if (keysBuilder_ == null) {
|
|
|
|
+ result.keys_ = keys_;
|
|
|
|
+ } else {
|
|
|
|
+ result.keys_ = keysBuilder_.build();
|
|
|
|
+ }
|
|
|
|
+ result.bitField0_ = to_bitField0_;
|
|
|
|
+ onBuilt();
|
|
|
|
+ return result;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public Builder mergeFrom(com.google.protobuf.Message other) {
|
|
|
|
+ if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto) {
|
|
|
|
+ return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto)other);
|
|
|
|
+ } else {
|
|
|
|
+ super.mergeFrom(other);
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto other) {
|
|
|
|
+ if (other == org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto.getDefaultInstance()) return this;
|
|
|
|
+ if (other.hasKeys()) {
|
|
|
|
+ mergeKeys(other.getKeys());
|
|
|
|
+ }
|
|
|
|
+ this.mergeUnknownFields(other.getUnknownFields());
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public final boolean isInitialized() {
|
|
|
|
+ if (!hasKeys()) {
|
|
|
|
+
|
|
|
|
+ return false;
|
|
|
|
+ }
|
|
|
|
+ if (!getKeys().isInitialized()) {
|
|
|
|
+
|
|
|
|
+ return false;
|
|
|
|
+ }
|
|
|
|
+ return true;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public Builder mergeFrom(
|
|
|
|
+ com.google.protobuf.CodedInputStream input,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
|
|
|
|
+ com.google.protobuf.UnknownFieldSet.newBuilder(
|
|
|
|
+ this.getUnknownFields());
|
|
|
|
+ while (true) {
|
|
|
|
+ int tag = input.readTag();
|
|
|
|
+ switch (tag) {
|
|
|
|
+ case 0:
|
|
|
|
+ this.setUnknownFields(unknownFields.build());
|
|
|
|
+ onChanged();
|
|
|
|
+ return this;
|
|
|
|
+ default: {
|
|
|
|
+ if (!parseUnknownField(input, unknownFields,
|
|
|
|
+ extensionRegistry, tag)) {
|
|
|
|
+ this.setUnknownFields(unknownFields.build());
|
|
|
|
+ onChanged();
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ break;
|
|
|
|
+ }
|
|
|
|
+ case 10: {
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto.newBuilder();
|
|
|
|
+ if (hasKeys()) {
|
|
|
|
+ subBuilder.mergeFrom(getKeys());
|
|
|
|
+ }
|
|
|
|
+ input.readMessage(subBuilder, extensionRegistry);
|
|
|
|
+ setKeys(subBuilder.buildPartial());
|
|
|
|
+ break;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private int bitField0_;
|
|
|
|
+
|
|
|
|
+ // required .ExportedBlockKeysProto keys = 1;
|
|
|
|
+ private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto keys_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto.getDefaultInstance();
|
|
|
|
+ private com.google.protobuf.SingleFieldBuilder<
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProtoOrBuilder> keysBuilder_;
|
|
|
|
+ public boolean hasKeys() {
|
|
|
|
+ return ((bitField0_ & 0x00000001) == 0x00000001);
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto getKeys() {
|
|
|
|
+ if (keysBuilder_ == null) {
|
|
|
|
+ return keys_;
|
|
|
|
+ } else {
|
|
|
|
+ return keysBuilder_.getMessage();
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ public Builder setKeys(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto value) {
|
|
|
|
+ if (keysBuilder_ == null) {
|
|
|
|
+ if (value == null) {
|
|
|
|
+ throw new NullPointerException();
|
|
|
|
+ }
|
|
|
|
+ keys_ = value;
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ keysBuilder_.setMessage(value);
|
|
|
|
+ }
|
|
|
|
+ bitField0_ |= 0x00000001;
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder setKeys(
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto.Builder builderForValue) {
|
|
|
|
+ if (keysBuilder_ == null) {
|
|
|
|
+ keys_ = builderForValue.build();
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ keysBuilder_.setMessage(builderForValue.build());
|
|
|
|
+ }
|
|
|
|
+ bitField0_ |= 0x00000001;
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder mergeKeys(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto value) {
|
|
|
|
+ if (keysBuilder_ == null) {
|
|
|
|
+ if (((bitField0_ & 0x00000001) == 0x00000001) &&
|
|
|
|
+ keys_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto.getDefaultInstance()) {
|
|
|
|
+ keys_ =
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto.newBuilder(keys_).mergeFrom(value).buildPartial();
|
|
|
|
+ } else {
|
|
|
|
+ keys_ = value;
|
|
|
|
+ }
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ keysBuilder_.mergeFrom(value);
|
|
|
|
+ }
|
|
|
|
+ bitField0_ |= 0x00000001;
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder clearKeys() {
|
|
|
|
+ if (keysBuilder_ == null) {
|
|
|
|
+ keys_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto.getDefaultInstance();
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ keysBuilder_.clear();
|
|
|
|
+ }
|
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000001);
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto.Builder getKeysBuilder() {
|
|
|
|
+ bitField0_ |= 0x00000001;
|
|
|
|
+ onChanged();
|
|
|
|
+ return getKeysFieldBuilder().getBuilder();
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProtoOrBuilder getKeysOrBuilder() {
|
|
|
|
+ if (keysBuilder_ != null) {
|
|
|
|
+ return keysBuilder_.getMessageOrBuilder();
|
|
|
|
+ } else {
|
|
|
|
+ return keys_;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ private com.google.protobuf.SingleFieldBuilder<
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProtoOrBuilder>
|
|
|
|
+ getKeysFieldBuilder() {
|
|
|
|
+ if (keysBuilder_ == null) {
|
|
|
|
+ keysBuilder_ = new com.google.protobuf.SingleFieldBuilder<
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProtoOrBuilder>(
|
|
|
|
+ keys_,
|
|
|
|
+ getParentForChildren(),
|
|
|
|
+ isClean());
|
|
|
|
+ keys_ = null;
|
|
|
|
+ }
|
|
|
|
+ return keysBuilder_;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ // @@protoc_insertion_point(builder_scope:KeyUpdateCommandProto)
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ static {
|
|
|
|
+ defaultInstance = new KeyUpdateCommandProto(true);
|
|
|
|
+ defaultInstance.initFields();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ // @@protoc_insertion_point(class_scope:KeyUpdateCommandProto)
|
|
|
|
+ }
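+  // Illustrative usage sketch (not part of the protoc output): KeyUpdateCommandProto
+  // wraps a single required ExportedBlockKeysProto field, so a populated keys message
+  // is needed before build() passes the isInitialized() check. For example:
+  //
+  //   ExportedBlockKeysProto keys = ...;  // fully populated block keys (placeholder)
+  //   KeyUpdateCommandProto cmd = KeyUpdateCommandProto.newBuilder()
+  //       .setKeys(keys)
+  //       .build();
+  //   byte[] wire = cmd.toByteArray();
+  //   KeyUpdateCommandProto parsed = KeyUpdateCommandProto.parseFrom(wire);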
|
|
|
|
+
|
|
|
|
+ public interface RegisterCommandProtoOrBuilder
|
|
|
|
+ extends com.google.protobuf.MessageOrBuilder {
|
|
|
|
+ }
|
|
|
|
+ public static final class RegisterCommandProto extends
|
|
|
|
+ com.google.protobuf.GeneratedMessage
|
|
|
|
+ implements RegisterCommandProtoOrBuilder {
|
|
|
|
+ // Use RegisterCommandProto.newBuilder() to construct.
|
|
|
|
+ private RegisterCommandProto(Builder builder) {
|
|
|
|
+ super(builder);
|
|
|
|
+ }
|
|
|
|
+ private RegisterCommandProto(boolean noInit) {}
|
|
|
|
+
|
|
|
|
+ private static final RegisterCommandProto defaultInstance;
|
|
|
|
+ public static RegisterCommandProto getDefaultInstance() {
|
|
|
|
+ return defaultInstance;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public RegisterCommandProto getDefaultInstanceForType() {
|
|
|
|
+ return defaultInstance;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public static final com.google.protobuf.Descriptors.Descriptor
|
|
|
|
+ getDescriptor() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_RegisterCommandProto_descriptor;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
|
|
|
+ internalGetFieldAccessorTable() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_RegisterCommandProto_fieldAccessorTable;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private void initFields() {
|
|
|
|
+ }
|
|
|
|
+ private byte memoizedIsInitialized = -1;
|
|
|
|
+ public final boolean isInitialized() {
|
|
|
|
+ byte isInitialized = memoizedIsInitialized;
|
|
|
|
+ if (isInitialized != -1) return isInitialized == 1;
|
|
|
|
+
|
|
|
|
+ memoizedIsInitialized = 1;
|
|
|
|
+ return true;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ getSerializedSize();
|
|
|
|
+ getUnknownFields().writeTo(output);
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private int memoizedSerializedSize = -1;
|
|
|
|
+ public int getSerializedSize() {
|
|
|
|
+ int size = memoizedSerializedSize;
|
|
|
|
+ if (size != -1) return size;
|
|
|
|
+
|
|
|
|
+ size = 0;
|
|
|
|
+ size += getUnknownFields().getSerializedSize();
|
|
|
|
+ memoizedSerializedSize = size;
|
|
|
|
+ return size;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private static final long serialVersionUID = 0L;
|
|
|
|
+ @java.lang.Override
|
|
|
|
+ protected java.lang.Object writeReplace()
|
|
|
|
+ throws java.io.ObjectStreamException {
|
|
|
|
+ return super.writeReplace();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ @java.lang.Override
|
|
|
|
+ public boolean equals(final java.lang.Object obj) {
|
|
|
|
+ if (obj == this) {
|
|
|
|
+ return true;
|
|
|
|
+ }
|
|
|
|
+ if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto)) {
|
|
|
|
+ return super.equals(obj);
|
|
|
|
+ }
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto other = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto) obj;
|
|
|
|
+
|
|
|
|
+ boolean result = true;
|
|
|
|
+ result = result &&
|
|
|
|
+ getUnknownFields().equals(other.getUnknownFields());
|
|
|
|
+ return result;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ @java.lang.Override
|
|
|
|
+ public int hashCode() {
|
|
|
|
+ int hash = 41;
|
|
|
|
+ hash = (19 * hash) + getDescriptorForType().hashCode();
|
|
|
|
+ hash = (29 * hash) + getUnknownFields().hashCode();
|
|
|
|
+ return hash;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto parseFrom(
|
|
|
|
+ com.google.protobuf.ByteString data)
|
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
|
+ return newBuilder().mergeFrom(data).buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto parseFrom(
|
|
|
|
+ com.google.protobuf.ByteString data,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
|
+ return newBuilder().mergeFrom(data, extensionRegistry)
|
|
|
|
+ .buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto parseFrom(byte[] data)
|
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
|
+ return newBuilder().mergeFrom(data).buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto parseFrom(
|
|
|
|
+ byte[] data,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
|
+ return newBuilder().mergeFrom(data, extensionRegistry)
|
|
|
|
+ .buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto parseFrom(java.io.InputStream input)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ return newBuilder().mergeFrom(input).buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto parseFrom(
|
|
|
|
+ java.io.InputStream input,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ return newBuilder().mergeFrom(input, extensionRegistry)
|
|
|
|
+ .buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto parseDelimitedFrom(java.io.InputStream input)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ Builder builder = newBuilder();
|
|
|
|
+ if (builder.mergeDelimitedFrom(input)) {
|
|
|
|
+ return builder.buildParsed();
|
|
|
|
+ } else {
|
|
|
|
+ return null;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto parseDelimitedFrom(
|
|
|
|
+ java.io.InputStream input,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ Builder builder = newBuilder();
|
|
|
|
+ if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
|
|
|
|
+ return builder.buildParsed();
|
|
|
|
+ } else {
|
|
|
|
+ return null;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto parseFrom(
|
|
|
|
+ com.google.protobuf.CodedInputStream input)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ return newBuilder().mergeFrom(input).buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto parseFrom(
|
|
|
|
+ com.google.protobuf.CodedInputStream input,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ return newBuilder().mergeFrom(input, extensionRegistry)
|
|
|
|
+ .buildParsed();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public static Builder newBuilder() { return Builder.create(); }
|
|
|
|
+ public Builder newBuilderForType() { return newBuilder(); }
|
|
|
|
+ public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto prototype) {
|
|
|
|
+ return newBuilder().mergeFrom(prototype);
|
|
|
|
+ }
|
|
|
|
+ public Builder toBuilder() { return newBuilder(this); }
|
|
|
|
+
|
|
|
|
+ @java.lang.Override
|
|
|
|
+ protected Builder newBuilderForType(
|
|
|
|
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
|
|
|
|
+ Builder builder = new Builder(parent);
|
|
|
|
+ return builder;
|
|
|
|
+ }
|
|
|
|
+ public static final class Builder extends
|
|
|
|
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
|
|
|
|
+ implements org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProtoOrBuilder {
|
|
|
|
+ public static final com.google.protobuf.Descriptors.Descriptor
|
|
|
|
+ getDescriptor() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_RegisterCommandProto_descriptor;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
|
|
|
+ internalGetFieldAccessorTable() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_RegisterCommandProto_fieldAccessorTable;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ // Construct using org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto.newBuilder()
|
|
|
|
+ private Builder() {
|
|
|
|
+ maybeForceBuilderInitialization();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private Builder(BuilderParent parent) {
|
|
|
|
+ super(parent);
|
|
|
|
+ maybeForceBuilderInitialization();
|
|
|
|
+ }
|
|
|
|
+ private void maybeForceBuilderInitialization() {
|
|
|
|
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ private static Builder create() {
|
|
|
|
+ return new Builder();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public Builder clear() {
|
|
|
|
+ super.clear();
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public Builder clone() {
|
|
|
|
+ return create().mergeFrom(buildPartial());
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public com.google.protobuf.Descriptors.Descriptor
|
|
|
|
+ getDescriptorForType() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto.getDescriptor();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto getDefaultInstanceForType() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto.getDefaultInstance();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto build() {
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto result = buildPartial();
|
|
|
|
+ if (!result.isInitialized()) {
|
|
|
|
+ throw newUninitializedMessageException(result);
|
|
|
|
+ }
|
|
|
|
+ return result;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto buildParsed()
|
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto result = buildPartial();
|
|
|
|
+ if (!result.isInitialized()) {
|
|
|
|
+ throw newUninitializedMessageException(
|
|
|
|
+ result).asInvalidProtocolBufferException();
|
|
|
|
+ }
|
|
|
|
+ return result;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto buildPartial() {
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto result = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto(this);
|
|
|
|
+ onBuilt();
|
|
|
|
+ return result;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public Builder mergeFrom(com.google.protobuf.Message other) {
|
|
|
|
+ if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto) {
|
|
|
|
+ return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto)other);
|
|
|
|
+ } else {
|
|
|
|
+ super.mergeFrom(other);
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto other) {
|
|
|
|
+ if (other == org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto.getDefaultInstance()) return this;
|
|
|
|
+ this.mergeUnknownFields(other.getUnknownFields());
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public final boolean isInitialized() {
|
|
|
|
+ return true;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public Builder mergeFrom(
|
|
|
|
+ com.google.protobuf.CodedInputStream input,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
|
|
|
|
+ com.google.protobuf.UnknownFieldSet.newBuilder(
|
|
|
|
+ this.getUnknownFields());
|
|
|
|
+ while (true) {
|
|
|
|
+ int tag = input.readTag();
|
|
|
|
+ switch (tag) {
|
|
|
|
+ case 0:
|
|
|
|
+ this.setUnknownFields(unknownFields.build());
|
|
|
|
+ onChanged();
|
|
|
|
+ return this;
|
|
|
|
+ default: {
|
|
|
|
+ if (!parseUnknownField(input, unknownFields,
|
|
|
|
+ extensionRegistry, tag)) {
|
|
|
|
+ this.setUnknownFields(unknownFields.build());
|
|
|
|
+ onChanged();
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ break;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+
|
|
|
|
+ // @@protoc_insertion_point(builder_scope:RegisterCommandProto)
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ static {
|
|
|
|
+ defaultInstance = new RegisterCommandProto(true);
|
|
|
|
+ defaultInstance.initFields();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ // @@protoc_insertion_point(class_scope:RegisterCommandProto)
|
|
|
|
+ }
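+  // Illustrative note (not part of the protoc output): RegisterCommandProto declares no
+  // fields, so it serves as a parameterless marker command; getSerializedSize() counts
+  // only unknown fields, and the shared default instance is enough for senders, e.g.
+  //
+  //   RegisterCommandProto cmd = RegisterCommandProto.getDefaultInstance();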
|
|
|
|
+
|
|
|
|
+ public interface UpgradeCommandProtoOrBuilder
|
|
|
|
+ extends com.google.protobuf.MessageOrBuilder {
|
|
|
|
+
|
|
|
|
+ // required uint32 action = 1;
|
|
|
|
+ boolean hasAction();
|
|
|
|
+ int getAction();
|
|
|
|
+
|
|
|
|
+ // required uint32 version = 2;
|
|
|
|
+ boolean hasVersion();
|
|
|
|
+ int getVersion();
|
|
|
|
+
|
|
|
|
+ // required uint32 upgradeStatus = 3;
|
|
|
|
+ boolean hasUpgradeStatus();
|
|
|
|
+ int getUpgradeStatus();
|
|
|
|
+ }
|
|
|
|
+ public static final class UpgradeCommandProto extends
|
|
|
|
+ com.google.protobuf.GeneratedMessage
|
|
|
|
+ implements UpgradeCommandProtoOrBuilder {
|
|
|
|
+ // Use UpgradeCommandProto.newBuilder() to construct.
|
|
|
|
+ private UpgradeCommandProto(Builder builder) {
|
|
|
|
+ super(builder);
|
|
|
|
+ }
|
|
|
|
+ private UpgradeCommandProto(boolean noInit) {}
|
|
|
|
+
|
|
|
|
+ private static final UpgradeCommandProto defaultInstance;
|
|
|
|
+ public static UpgradeCommandProto getDefaultInstance() {
|
|
|
|
+ return defaultInstance;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public UpgradeCommandProto getDefaultInstanceForType() {
|
|
|
|
+ return defaultInstance;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public static final com.google.protobuf.Descriptors.Descriptor
|
|
|
|
+ getDescriptor() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_UpgradeCommandProto_descriptor;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
|
|
|
+ internalGetFieldAccessorTable() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_UpgradeCommandProto_fieldAccessorTable;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public enum Action
|
|
|
|
+ implements com.google.protobuf.ProtocolMessageEnum {
|
|
|
|
+ UNKNOWN(0, 0),
|
|
|
|
+ REPORT_STATUS(1, 100),
|
|
|
|
+ START_UPGRADE(2, 101),
|
|
|
|
+ ;
|
|
|
|
+
|
|
|
|
+ public static final int UNKNOWN_VALUE = 0;
|
|
|
|
+ public static final int REPORT_STATUS_VALUE = 100;
|
|
|
|
+ public static final int START_UPGRADE_VALUE = 101;
|
|
|
|
+
|
|
|
|
+
|
|
|
|
+ public final int getNumber() { return value; }
|
|
|
|
+
|
|
|
|
+ public static Action valueOf(int value) {
|
|
|
|
+ switch (value) {
|
|
|
|
+ case 0: return UNKNOWN;
|
|
|
|
+ case 100: return REPORT_STATUS;
|
|
|
|
+ case 101: return START_UPGRADE;
|
|
|
|
+ default: return null;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public static com.google.protobuf.Internal.EnumLiteMap<Action>
|
|
|
|
+ internalGetValueMap() {
|
|
|
|
+ return internalValueMap;
|
|
|
|
+ }
|
|
|
|
+ private static com.google.protobuf.Internal.EnumLiteMap<Action>
|
|
|
|
+ internalValueMap =
|
|
|
|
+ new com.google.protobuf.Internal.EnumLiteMap<Action>() {
|
|
|
|
+ public Action findValueByNumber(int number) {
|
|
|
|
+ return Action.valueOf(number);
|
|
|
|
+ }
|
|
|
|
+ };
|
|
|
|
+
|
|
|
|
+ public final com.google.protobuf.Descriptors.EnumValueDescriptor
|
|
|
|
+ getValueDescriptor() {
|
|
|
|
+ return getDescriptor().getValues().get(index);
|
|
|
|
+ }
|
|
|
|
+ public final com.google.protobuf.Descriptors.EnumDescriptor
|
|
|
|
+ getDescriptorForType() {
|
|
|
|
+ return getDescriptor();
|
|
|
|
+ }
|
|
|
|
+ public static final com.google.protobuf.Descriptors.EnumDescriptor
|
|
|
|
+ getDescriptor() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.getDescriptor().getEnumTypes().get(0);
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private static final Action[] VALUES = {
|
|
|
|
+ UNKNOWN, REPORT_STATUS, START_UPGRADE,
|
|
|
|
+ };
|
|
|
|
+
|
|
|
|
+ public static Action valueOf(
|
|
|
|
+ com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
|
|
|
|
+ if (desc.getType() != getDescriptor()) {
|
|
|
|
+ throw new java.lang.IllegalArgumentException(
|
|
|
|
+ "EnumValueDescriptor is not for this type.");
|
|
|
|
+ }
|
|
|
|
+ return VALUES[desc.getIndex()];
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private final int index;
|
|
|
|
+ private final int value;
|
|
|
|
+
|
|
|
|
+ private Action(int index, int value) {
|
|
|
|
+ this.index = index;
|
|
|
|
+ this.value = value;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ // @@protoc_insertion_point(enum_scope:UpgradeCommandProto.Action)
|
|
|
|
+ }
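+    // Illustrative note (not part of the protoc output): Action exposes the proto enum
+    // numbers as *_VALUE constants (UNKNOWN=0, REPORT_STATUS=100, START_UPGRADE=101), and
+    // valueOf(int) returns null for any other wire value, e.g.
+    //
+    //   UpgradeCommandProto.Action a = UpgradeCommandProto.Action.valueOf(100);  // REPORT_STATUS
+    //   UpgradeCommandProto.Action b = UpgradeCommandProto.Action.valueOf(7);    // null (unrecognized)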
|
|
|
|
+
|
|
|
|
+ private int bitField0_;
|
|
|
|
+ // required uint32 action = 1;
|
|
|
|
+ public static final int ACTION_FIELD_NUMBER = 1;
|
|
|
|
+ private int action_;
|
|
|
|
+ public boolean hasAction() {
|
|
|
|
+ return ((bitField0_ & 0x00000001) == 0x00000001);
|
|
|
|
+ }
|
|
|
|
+ public int getAction() {
|
|
|
|
+ return action_;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ // required uint32 version = 2;
|
|
|
|
+ public static final int VERSION_FIELD_NUMBER = 2;
|
|
|
|
+ private int version_;
|
|
|
|
+ public boolean hasVersion() {
|
|
|
|
+ return ((bitField0_ & 0x00000002) == 0x00000002);
|
|
|
|
+ }
|
|
|
|
+ public int getVersion() {
|
|
|
|
+ return version_;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ // required uint32 upgradeStatus = 3;
|
|
|
|
+ public static final int UPGRADESTATUS_FIELD_NUMBER = 3;
|
|
|
|
+ private int upgradeStatus_;
|
|
|
|
+ public boolean hasUpgradeStatus() {
|
|
|
|
+ return ((bitField0_ & 0x00000004) == 0x00000004);
|
|
|
|
+ }
|
|
|
|
+ public int getUpgradeStatus() {
|
|
|
|
+ return upgradeStatus_;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private void initFields() {
|
|
|
|
+ action_ = 0;
|
|
|
|
+ version_ = 0;
|
|
|
|
+ upgradeStatus_ = 0;
|
|
|
|
+ }
|
|
|
|
+ private byte memoizedIsInitialized = -1;
|
|
|
|
+ public final boolean isInitialized() {
|
|
|
|
+ byte isInitialized = memoizedIsInitialized;
|
|
|
|
+ if (isInitialized != -1) return isInitialized == 1;
|
|
|
|
+
|
|
|
|
+ if (!hasAction()) {
|
|
|
|
+ memoizedIsInitialized = 0;
|
|
|
|
+ return false;
|
|
|
|
+ }
|
|
|
|
+ if (!hasVersion()) {
|
|
|
|
+ memoizedIsInitialized = 0;
|
|
|
|
+ return false;
|
|
|
|
+ }
|
|
|
|
+ if (!hasUpgradeStatus()) {
|
|
|
|
+ memoizedIsInitialized = 0;
|
|
|
|
+ return false;
|
|
|
|
+ }
|
|
|
|
+ memoizedIsInitialized = 1;
|
|
|
|
+ return true;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ getSerializedSize();
|
|
|
|
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
|
+ output.writeUInt32(1, action_);
|
|
|
|
+ }
|
|
|
|
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
|
|
|
|
+ output.writeUInt32(2, version_);
|
|
|
|
+ }
|
|
|
|
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
|
|
|
|
+ output.writeUInt32(3, upgradeStatus_);
|
|
|
|
+ }
|
|
|
|
+ getUnknownFields().writeTo(output);
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private int memoizedSerializedSize = -1;
|
|
|
|
+ public int getSerializedSize() {
|
|
|
|
+ int size = memoizedSerializedSize;
|
|
|
|
+ if (size != -1) return size;
|
|
|
|
+
|
|
|
|
+ size = 0;
|
|
|
|
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
|
+ size += com.google.protobuf.CodedOutputStream
|
|
|
|
+ .computeUInt32Size(1, action_);
|
|
|
|
+ }
|
|
|
|
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
|
|
|
|
+ size += com.google.protobuf.CodedOutputStream
|
|
|
|
+ .computeUInt32Size(2, version_);
|
|
|
|
+ }
|
|
|
|
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
|
|
|
|
+ size += com.google.protobuf.CodedOutputStream
|
|
|
|
+ .computeUInt32Size(3, upgradeStatus_);
|
|
|
|
+ }
|
|
|
|
+ size += getUnknownFields().getSerializedSize();
|
|
|
|
+ memoizedSerializedSize = size;
|
|
|
|
+ return size;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private static final long serialVersionUID = 0L;
|
|
|
|
+ @java.lang.Override
|
|
|
|
+ protected java.lang.Object writeReplace()
|
|
|
|
+ throws java.io.ObjectStreamException {
|
|
|
|
+ return super.writeReplace();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ @java.lang.Override
|
|
|
|
+ public boolean equals(final java.lang.Object obj) {
|
|
|
|
+ if (obj == this) {
|
|
|
|
+ return true;
|
|
|
|
+ }
|
|
|
|
+ if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto)) {
|
|
|
|
+ return super.equals(obj);
|
|
|
|
+ }
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto other = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto) obj;
|
|
|
|
+
|
|
|
|
+ boolean result = true;
|
|
|
|
+ result = result && (hasAction() == other.hasAction());
|
|
|
|
+ if (hasAction()) {
|
|
|
|
+ result = result && (getAction()
|
|
|
|
+ == other.getAction());
|
|
|
|
+ }
|
|
|
|
+ result = result && (hasVersion() == other.hasVersion());
|
|
|
|
+ if (hasVersion()) {
|
|
|
|
+ result = result && (getVersion()
|
|
|
|
+ == other.getVersion());
|
|
|
|
+ }
|
|
|
|
+ result = result && (hasUpgradeStatus() == other.hasUpgradeStatus());
|
|
|
|
+ if (hasUpgradeStatus()) {
|
|
|
|
+ result = result && (getUpgradeStatus()
|
|
|
|
+ == other.getUpgradeStatus());
|
|
|
|
+ }
|
|
|
|
+ result = result &&
|
|
|
|
+ getUnknownFields().equals(other.getUnknownFields());
|
|
|
|
+ return result;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ @java.lang.Override
|
|
|
|
+ public int hashCode() {
|
|
|
|
+ int hash = 41;
|
|
|
|
+ hash = (19 * hash) + getDescriptorForType().hashCode();
|
|
|
|
+ if (hasAction()) {
|
|
|
|
+ hash = (37 * hash) + ACTION_FIELD_NUMBER;
|
|
|
|
+ hash = (53 * hash) + getAction();
|
|
|
|
+ }
|
|
|
|
+ if (hasVersion()) {
|
|
|
|
+ hash = (37 * hash) + VERSION_FIELD_NUMBER;
|
|
|
|
+ hash = (53 * hash) + getVersion();
|
|
|
|
+ }
|
|
|
|
+ if (hasUpgradeStatus()) {
|
|
|
|
+ hash = (37 * hash) + UPGRADESTATUS_FIELD_NUMBER;
|
|
|
|
+ hash = (53 * hash) + getUpgradeStatus();
|
|
|
|
+ }
|
|
|
|
+ hash = (29 * hash) + getUnknownFields().hashCode();
|
|
|
|
+ return hash;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto parseFrom(
|
|
|
|
+ com.google.protobuf.ByteString data)
|
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
|
+ return newBuilder().mergeFrom(data).buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto parseFrom(
|
|
|
|
+ com.google.protobuf.ByteString data,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
|
+ return newBuilder().mergeFrom(data, extensionRegistry)
|
|
|
|
+ .buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto parseFrom(byte[] data)
|
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
|
+ return newBuilder().mergeFrom(data).buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto parseFrom(
|
|
|
|
+ byte[] data,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
|
+ return newBuilder().mergeFrom(data, extensionRegistry)
|
|
|
|
+ .buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto parseFrom(java.io.InputStream input)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ return newBuilder().mergeFrom(input).buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto parseFrom(
|
|
|
|
+ java.io.InputStream input,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ return newBuilder().mergeFrom(input, extensionRegistry)
|
|
|
|
+ .buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto parseDelimitedFrom(java.io.InputStream input)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ Builder builder = newBuilder();
|
|
|
|
+ if (builder.mergeDelimitedFrom(input)) {
|
|
|
|
+ return builder.buildParsed();
|
|
|
|
+ } else {
|
|
|
|
+ return null;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto parseDelimitedFrom(
|
|
|
|
+ java.io.InputStream input,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ Builder builder = newBuilder();
|
|
|
|
+ if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
|
|
|
|
+ return builder.buildParsed();
|
|
|
|
+ } else {
|
|
|
|
+ return null;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto parseFrom(
|
|
|
|
+ com.google.protobuf.CodedInputStream input)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ return newBuilder().mergeFrom(input).buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto parseFrom(
|
|
|
|
+ com.google.protobuf.CodedInputStream input,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ return newBuilder().mergeFrom(input, extensionRegistry)
|
|
|
|
+ .buildParsed();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public static Builder newBuilder() { return Builder.create(); }
|
|
|
|
+ public Builder newBuilderForType() { return newBuilder(); }
|
|
|
|
+ public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto prototype) {
|
|
|
|
+ return newBuilder().mergeFrom(prototype);
|
|
|
|
+ }
|
|
|
|
+ public Builder toBuilder() { return newBuilder(this); }
|
|
|
|
+
|
|
|
|
+ @java.lang.Override
|
|
|
|
+ protected Builder newBuilderForType(
|
|
|
|
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
|
|
|
|
+ Builder builder = new Builder(parent);
|
|
|
|
+ return builder;
|
|
|
|
+ }
|
|
|
|
+ public static final class Builder extends
|
|
|
|
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
|
|
|
|
+ implements org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProtoOrBuilder {
|
|
|
|
+ public static final com.google.protobuf.Descriptors.Descriptor
|
|
|
|
+ getDescriptor() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_UpgradeCommandProto_descriptor;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
|
|
|
+ internalGetFieldAccessorTable() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_UpgradeCommandProto_fieldAccessorTable;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ // Construct using org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.newBuilder()
|
|
|
|
+ private Builder() {
|
|
|
|
+ maybeForceBuilderInitialization();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private Builder(BuilderParent parent) {
|
|
|
|
+ super(parent);
|
|
|
|
+ maybeForceBuilderInitialization();
|
|
|
|
+ }
|
|
|
|
+ private void maybeForceBuilderInitialization() {
|
|
|
|
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ private static Builder create() {
|
|
|
|
+ return new Builder();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public Builder clear() {
|
|
|
|
+ super.clear();
|
|
|
|
+ action_ = 0;
|
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000001);
|
|
|
|
+ version_ = 0;
|
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000002);
|
|
|
|
+ upgradeStatus_ = 0;
|
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000004);
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public Builder clone() {
|
|
|
|
+ return create().mergeFrom(buildPartial());
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public com.google.protobuf.Descriptors.Descriptor
|
|
|
|
+ getDescriptorForType() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.getDescriptor();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto getDefaultInstanceForType() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.getDefaultInstance();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto build() {
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto result = buildPartial();
|
|
|
|
+ if (!result.isInitialized()) {
|
|
|
|
+ throw newUninitializedMessageException(result);
|
|
|
|
+ }
|
|
|
|
+ return result;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto buildParsed()
|
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto result = buildPartial();
|
|
|
|
+ if (!result.isInitialized()) {
|
|
|
|
+ throw newUninitializedMessageException(
|
|
|
|
+ result).asInvalidProtocolBufferException();
|
|
|
|
+ }
|
|
|
|
+ return result;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto buildPartial() {
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto result = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto(this);
|
|
|
|
+ int from_bitField0_ = bitField0_;
|
|
|
|
+ int to_bitField0_ = 0;
|
|
|
|
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
|
+ to_bitField0_ |= 0x00000001;
|
|
|
|
+ }
|
|
|
|
+ result.action_ = action_;
|
|
|
|
+ if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
|
|
|
|
+ to_bitField0_ |= 0x00000002;
|
|
|
|
+ }
|
|
|
|
+ result.version_ = version_;
|
|
|
|
+ if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
|
|
|
|
+ to_bitField0_ |= 0x00000004;
|
|
|
|
+ }
|
|
|
|
+ result.upgradeStatus_ = upgradeStatus_;
|
|
|
|
+ result.bitField0_ = to_bitField0_;
|
|
|
|
+ onBuilt();
|
|
|
|
+ return result;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public Builder mergeFrom(com.google.protobuf.Message other) {
|
|
|
|
+ if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto) {
|
|
|
|
+ return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto)other);
|
|
|
|
+ } else {
|
|
|
|
+ super.mergeFrom(other);
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto other) {
|
|
|
|
+ if (other == org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.getDefaultInstance()) return this;
|
|
|
|
+ if (other.hasAction()) {
|
|
|
|
+ setAction(other.getAction());
|
|
|
|
+ }
|
|
|
|
+ if (other.hasVersion()) {
|
|
|
|
+ setVersion(other.getVersion());
|
|
|
|
+ }
|
|
|
|
+ if (other.hasUpgradeStatus()) {
|
|
|
|
+ setUpgradeStatus(other.getUpgradeStatus());
|
|
|
|
+ }
|
|
|
|
+ this.mergeUnknownFields(other.getUnknownFields());
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public final boolean isInitialized() {
|
|
|
|
+ if (!hasAction()) {
|
|
|
|
+
|
|
|
|
+ return false;
|
|
|
|
+ }
|
|
|
|
+ if (!hasVersion()) {
|
|
|
|
+
|
|
|
|
+ return false;
|
|
|
|
+ }
|
|
|
|
+ if (!hasUpgradeStatus()) {
|
|
|
|
+
|
|
|
|
+ return false;
|
|
|
|
+ }
|
|
|
|
+ return true;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public Builder mergeFrom(
|
|
|
|
+ com.google.protobuf.CodedInputStream input,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
|
|
|
|
+ com.google.protobuf.UnknownFieldSet.newBuilder(
|
|
|
|
+ this.getUnknownFields());
|
|
|
|
+ while (true) {
|
|
|
|
+ int tag = input.readTag();
|
|
|
|
+ switch (tag) {
|
|
|
|
+ case 0:
|
|
|
|
+ this.setUnknownFields(unknownFields.build());
|
|
|
|
+ onChanged();
|
|
|
|
+ return this;
|
|
|
|
+ default: {
|
|
|
|
+ if (!parseUnknownField(input, unknownFields,
|
|
|
|
+ extensionRegistry, tag)) {
|
|
|
|
+ this.setUnknownFields(unknownFields.build());
|
|
|
|
+ onChanged();
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ break;
|
|
|
|
+ }
|
|
|
|
+ case 8: {
|
|
|
|
+ bitField0_ |= 0x00000001;
|
|
|
|
+ action_ = input.readUInt32();
|
|
|
|
+ break;
|
|
|
|
+ }
|
|
|
|
+ case 16: {
|
|
|
|
+ bitField0_ |= 0x00000002;
|
|
|
|
+ version_ = input.readUInt32();
|
|
|
|
+ break;
|
|
|
|
+ }
|
|
|
|
+ case 24: {
|
|
|
|
+ bitField0_ |= 0x00000004;
|
|
|
|
+ upgradeStatus_ = input.readUInt32();
|
|
|
|
+ break;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private int bitField0_;
|
|
|
|
+
|
|
|
|
+ // required uint32 action = 1;
|
|
|
|
+ private int action_ ;
|
|
|
|
+ public boolean hasAction() {
|
|
|
|
+ return ((bitField0_ & 0x00000001) == 0x00000001);
|
|
|
|
+ }
|
|
|
|
+ public int getAction() {
|
|
|
|
+ return action_;
|
|
|
|
+ }
|
|
|
|
+ public Builder setAction(int value) {
|
|
|
|
+ bitField0_ |= 0x00000001;
|
|
|
|
+ action_ = value;
|
|
|
|
+ onChanged();
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder clearAction() {
|
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000001);
|
|
|
|
+ action_ = 0;
|
|
|
|
+ onChanged();
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ // required uint32 version = 2;
|
|
|
|
+ private int version_ ;
|
|
|
|
+ public boolean hasVersion() {
|
|
|
|
+ return ((bitField0_ & 0x00000002) == 0x00000002);
|
|
|
|
+ }
|
|
|
|
+ public int getVersion() {
|
|
|
|
+ return version_;
|
|
|
|
+ }
|
|
|
|
+ public Builder setVersion(int value) {
|
|
|
|
+ bitField0_ |= 0x00000002;
|
|
|
|
+ version_ = value;
|
|
|
|
+ onChanged();
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder clearVersion() {
|
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000002);
|
|
|
|
+ version_ = 0;
|
|
|
|
+ onChanged();
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ // required uint32 upgradeStatus = 3;
|
|
|
|
+ private int upgradeStatus_ ;
|
|
|
|
+ public boolean hasUpgradeStatus() {
|
|
|
|
+ return ((bitField0_ & 0x00000004) == 0x00000004);
|
|
|
|
+ }
|
|
|
|
+ public int getUpgradeStatus() {
|
|
|
|
+ return upgradeStatus_;
|
|
|
|
+ }
|
|
|
|
+ public Builder setUpgradeStatus(int value) {
|
|
|
|
+ bitField0_ |= 0x00000004;
|
|
|
|
+ upgradeStatus_ = value;
|
|
|
|
+ onChanged();
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder clearUpgradeStatus() {
|
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000004);
|
|
|
|
+ upgradeStatus_ = 0;
|
|
|
|
+ onChanged();
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ // @@protoc_insertion_point(builder_scope:UpgradeCommandProto)
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ static {
|
|
|
|
+ defaultInstance = new UpgradeCommandProto(true);
|
|
|
|
+ defaultInstance.initFields();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ // @@protoc_insertion_point(class_scope:UpgradeCommandProto)
|
|
|
|
+ }
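+  // Illustrative usage sketch (not part of the protoc output): all three uint32 fields
+  // (action, version, upgradeStatus) are required, so each must be set before build();
+  // the field values below are hypothetical.
+  //
+  //   UpgradeCommandProto cmd = UpgradeCommandProto.newBuilder()
+  //       .setAction(UpgradeCommandProto.Action.REPORT_STATUS_VALUE)  // 100 on the wire
+  //       .setVersion(1)
+  //       .setUpgradeStatus(50)
+  //       .build();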
|
|
|
|
+
|
|
|
|
+ public interface RegisterDatanodeRequestProtoOrBuilder
|
|
|
|
+ extends com.google.protobuf.MessageOrBuilder {
|
|
|
|
+
|
|
|
|
+ // required .DatanodeRegistrationProto registration = 1;
|
|
|
|
+ boolean hasRegistration();
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto getRegistration();
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder getRegistrationOrBuilder();
|
|
|
|
+ }
|
|
|
|
+ public static final class RegisterDatanodeRequestProto extends
|
|
|
|
+ com.google.protobuf.GeneratedMessage
|
|
|
|
+ implements RegisterDatanodeRequestProtoOrBuilder {
|
|
|
|
+ // Use RegisterDatanodeRequestProto.newBuilder() to construct.
|
|
|
|
+ private RegisterDatanodeRequestProto(Builder builder) {
|
|
|
|
+ super(builder);
|
|
|
|
+ }
|
|
|
|
+ private RegisterDatanodeRequestProto(boolean noInit) {}
|
|
|
|
+
|
|
|
|
+ private static final RegisterDatanodeRequestProto defaultInstance;
|
|
|
|
+ public static RegisterDatanodeRequestProto getDefaultInstance() {
|
|
|
|
+ return defaultInstance;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public RegisterDatanodeRequestProto getDefaultInstanceForType() {
|
|
|
|
+ return defaultInstance;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public static final com.google.protobuf.Descriptors.Descriptor
|
|
|
|
+ getDescriptor() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_RegisterDatanodeRequestProto_descriptor;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
|
|
|
+ internalGetFieldAccessorTable() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_RegisterDatanodeRequestProto_fieldAccessorTable;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private int bitField0_;
|
|
|
|
+ // required .DatanodeRegistrationProto registration = 1;
|
|
|
|
+ public static final int REGISTRATION_FIELD_NUMBER = 1;
|
|
|
|
+ private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto registration_;
|
|
|
|
+ public boolean hasRegistration() {
|
|
|
|
+ return ((bitField0_ & 0x00000001) == 0x00000001);
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto getRegistration() {
|
|
|
|
+ return registration_;
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder getRegistrationOrBuilder() {
|
|
|
|
+ return registration_;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private void initFields() {
|
|
|
|
+ registration_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance();
|
|
|
|
+ }
|
|
|
|
+ private byte memoizedIsInitialized = -1;
|
|
|
|
+ public final boolean isInitialized() {
|
|
|
|
+ byte isInitialized = memoizedIsInitialized;
|
|
|
|
+ if (isInitialized != -1) return isInitialized == 1;
|
|
|
|
+
|
|
|
|
+ if (!hasRegistration()) {
|
|
|
|
+ memoizedIsInitialized = 0;
|
|
|
|
+ return false;
|
|
|
|
+ }
|
|
|
|
+ if (!getRegistration().isInitialized()) {
|
|
|
|
+ memoizedIsInitialized = 0;
|
|
|
|
+ return false;
|
|
|
|
+ }
|
|
|
|
+ memoizedIsInitialized = 1;
|
|
|
|
+ return true;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ getSerializedSize();
|
|
|
|
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
|
+ output.writeMessage(1, registration_);
|
|
|
|
+ }
|
|
|
|
+ getUnknownFields().writeTo(output);
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private int memoizedSerializedSize = -1;
|
|
|
|
+ public int getSerializedSize() {
|
|
|
|
+ int size = memoizedSerializedSize;
|
|
|
|
+ if (size != -1) return size;
|
|
|
|
+
|
|
|
|
+ size = 0;
|
|
|
|
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
|
+ size += com.google.protobuf.CodedOutputStream
|
|
|
|
+ .computeMessageSize(1, registration_);
|
|
|
|
+ }
|
|
|
|
+ size += getUnknownFields().getSerializedSize();
|
|
|
|
+ memoizedSerializedSize = size;
|
|
|
|
+ return size;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private static final long serialVersionUID = 0L;
|
|
|
|
+ @java.lang.Override
|
|
|
|
+ protected java.lang.Object writeReplace()
|
|
|
|
+ throws java.io.ObjectStreamException {
|
|
|
|
+ return super.writeReplace();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ @java.lang.Override
|
|
|
|
+ public boolean equals(final java.lang.Object obj) {
|
|
|
|
+ if (obj == this) {
|
|
|
|
+ return true;
|
|
|
|
+ }
|
|
|
|
+ if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto)) {
|
|
|
|
+ return super.equals(obj);
|
|
|
|
+ }
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto) obj;
|
|
|
|
+
|
|
|
|
+ boolean result = true;
|
|
|
|
+ result = result && (hasRegistration() == other.hasRegistration());
|
|
|
|
+ if (hasRegistration()) {
|
|
|
|
+ result = result && getRegistration()
|
|
|
|
+ .equals(other.getRegistration());
|
|
|
|
+ }
|
|
|
|
+ result = result &&
|
|
|
|
+ getUnknownFields().equals(other.getUnknownFields());
|
|
|
|
+ return result;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ @java.lang.Override
|
|
|
|
+ public int hashCode() {
|
|
|
|
+ int hash = 41;
|
|
|
|
+ hash = (19 * hash) + getDescriptorForType().hashCode();
|
|
|
|
+ if (hasRegistration()) {
|
|
|
|
+ hash = (37 * hash) + REGISTRATION_FIELD_NUMBER;
|
|
|
|
+ hash = (53 * hash) + getRegistration().hashCode();
|
|
|
|
+ }
|
|
|
|
+ hash = (29 * hash) + getUnknownFields().hashCode();
|
|
|
|
+ return hash;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto parseFrom(
|
|
|
|
+ com.google.protobuf.ByteString data)
|
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
|
+ return newBuilder().mergeFrom(data).buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto parseFrom(
|
|
|
|
+ com.google.protobuf.ByteString data,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
|
+ return newBuilder().mergeFrom(data, extensionRegistry)
|
|
|
|
+ .buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto parseFrom(byte[] data)
|
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
|
+ return newBuilder().mergeFrom(data).buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto parseFrom(
|
|
|
|
+ byte[] data,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
|
+ return newBuilder().mergeFrom(data, extensionRegistry)
|
|
|
|
+ .buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto parseFrom(java.io.InputStream input)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ return newBuilder().mergeFrom(input).buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto parseFrom(
|
|
|
|
+ java.io.InputStream input,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ return newBuilder().mergeFrom(input, extensionRegistry)
|
|
|
|
+ .buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto parseDelimitedFrom(java.io.InputStream input)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ Builder builder = newBuilder();
|
|
|
|
+ if (builder.mergeDelimitedFrom(input)) {
|
|
|
|
+ return builder.buildParsed();
|
|
|
|
+ } else {
|
|
|
|
+ return null;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto parseDelimitedFrom(
|
|
|
|
+ java.io.InputStream input,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ Builder builder = newBuilder();
|
|
|
|
+ if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
|
|
|
|
+ return builder.buildParsed();
|
|
|
|
+ } else {
|
|
|
|
+ return null;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto parseFrom(
|
|
|
|
+ com.google.protobuf.CodedInputStream input)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ return newBuilder().mergeFrom(input).buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto parseFrom(
|
|
|
|
+ com.google.protobuf.CodedInputStream input,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ return newBuilder().mergeFrom(input, extensionRegistry)
|
|
|
|
+ .buildParsed();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public static Builder newBuilder() { return Builder.create(); }
|
|
|
|
+ public Builder newBuilderForType() { return newBuilder(); }
|
|
|
|
+ public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto prototype) {
|
|
|
|
+ return newBuilder().mergeFrom(prototype);
|
|
|
|
+ }
|
|
|
|
+ public Builder toBuilder() { return newBuilder(this); }
|
|
|
|
+
|
|
|
|
+ @java.lang.Override
|
|
|
|
+ protected Builder newBuilderForType(
|
|
|
|
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
|
|
|
|
+ Builder builder = new Builder(parent);
|
|
|
|
+ return builder;
|
|
|
|
+ }
|
|
|
|
+ public static final class Builder extends
|
|
|
|
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
|
|
|
|
+ implements org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProtoOrBuilder {
|
|
|
|
+ public static final com.google.protobuf.Descriptors.Descriptor
|
|
|
|
+ getDescriptor() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_RegisterDatanodeRequestProto_descriptor;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
|
|
|
+ internalGetFieldAccessorTable() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_RegisterDatanodeRequestProto_fieldAccessorTable;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ // Construct using org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto.newBuilder()
|
|
|
|
+ private Builder() {
|
|
|
|
+ maybeForceBuilderInitialization();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private Builder(BuilderParent parent) {
|
|
|
|
+ super(parent);
|
|
|
|
+ maybeForceBuilderInitialization();
|
|
|
|
+ }
|
|
|
|
+ private void maybeForceBuilderInitialization() {
|
|
|
|
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
|
|
|
|
+ getRegistrationFieldBuilder();
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ private static Builder create() {
|
|
|
|
+ return new Builder();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public Builder clear() {
|
|
|
|
+ super.clear();
|
|
|
|
+ if (registrationBuilder_ == null) {
|
|
|
|
+ registration_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance();
|
|
|
|
+ } else {
|
|
|
|
+ registrationBuilder_.clear();
|
|
|
|
+ }
|
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000001);
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public Builder clone() {
|
|
|
|
+ return create().mergeFrom(buildPartial());
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public com.google.protobuf.Descriptors.Descriptor
|
|
|
|
+ getDescriptorForType() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto.getDescriptor();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto getDefaultInstanceForType() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto.getDefaultInstance();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto build() {
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto result = buildPartial();
|
|
|
|
+ if (!result.isInitialized()) {
|
|
|
|
+ throw newUninitializedMessageException(result);
|
|
|
|
+ }
|
|
|
|
+ return result;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto buildParsed()
|
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto result = buildPartial();
|
|
|
|
+ if (!result.isInitialized()) {
|
|
|
|
+ throw newUninitializedMessageException(
|
|
|
|
+ result).asInvalidProtocolBufferException();
|
|
|
|
+ }
|
|
|
|
+ return result;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto buildPartial() {
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto(this);
|
|
|
|
+ int from_bitField0_ = bitField0_;
|
|
|
|
+ int to_bitField0_ = 0;
|
|
|
|
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
|
+ to_bitField0_ |= 0x00000001;
|
|
|
|
+ }
|
|
|
|
+ if (registrationBuilder_ == null) {
|
|
|
|
+ result.registration_ = registration_;
|
|
|
|
+ } else {
|
|
|
|
+ result.registration_ = registrationBuilder_.build();
|
|
|
|
+ }
|
|
|
|
+ result.bitField0_ = to_bitField0_;
|
|
|
|
+ onBuilt();
|
|
|
|
+ return result;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public Builder mergeFrom(com.google.protobuf.Message other) {
|
|
|
|
+ if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto) {
|
|
|
|
+ return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto)other);
|
|
|
|
+ } else {
|
|
|
|
+ super.mergeFrom(other);
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto other) {
|
|
|
|
+ if (other == org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto.getDefaultInstance()) return this;
|
|
|
|
+ if (other.hasRegistration()) {
|
|
|
|
+ mergeRegistration(other.getRegistration());
|
|
|
|
+ }
|
|
|
|
+ this.mergeUnknownFields(other.getUnknownFields());
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public final boolean isInitialized() {
|
|
|
|
+ if (!hasRegistration()) {
|
|
|
|
+
|
|
|
|
+ return false;
|
|
|
|
+ }
|
|
|
|
+ if (!getRegistration().isInitialized()) {
|
|
|
|
+
|
|
|
|
+ return false;
|
|
|
|
+ }
|
|
|
|
+ return true;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public Builder mergeFrom(
|
|
|
|
+ com.google.protobuf.CodedInputStream input,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
|
|
|
|
+ com.google.protobuf.UnknownFieldSet.newBuilder(
|
|
|
|
+ this.getUnknownFields());
|
|
|
|
+ while (true) {
|
|
|
|
+ int tag = input.readTag();
|
|
|
|
+ switch (tag) {
|
|
|
|
+ case 0:
|
|
|
|
+ this.setUnknownFields(unknownFields.build());
|
|
|
|
+ onChanged();
|
|
|
|
+ return this;
|
|
|
|
+ default: {
|
|
|
|
+ if (!parseUnknownField(input, unknownFields,
|
|
|
|
+ extensionRegistry, tag)) {
|
|
|
|
+ this.setUnknownFields(unknownFields.build());
|
|
|
|
+ onChanged();
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ break;
|
|
|
|
+ }
|
|
|
|
+ case 10: {
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.newBuilder();
|
|
|
|
+ if (hasRegistration()) {
|
|
|
|
+ subBuilder.mergeFrom(getRegistration());
|
|
|
|
+ }
|
|
|
|
+ input.readMessage(subBuilder, extensionRegistry);
|
|
|
|
+ setRegistration(subBuilder.buildPartial());
|
|
|
|
+ break;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private int bitField0_;
|
|
|
|
+
|
|
|
|
+ // required .DatanodeRegistrationProto registration = 1;
|
|
|
|
+ private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto registration_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance();
|
|
|
|
+ private com.google.protobuf.SingleFieldBuilder<
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder> registrationBuilder_;
|
|
|
|
+ public boolean hasRegistration() {
|
|
|
|
+ return ((bitField0_ & 0x00000001) == 0x00000001);
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto getRegistration() {
|
|
|
|
+ if (registrationBuilder_ == null) {
|
|
|
|
+ return registration_;
|
|
|
|
+ } else {
|
|
|
|
+ return registrationBuilder_.getMessage();
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ public Builder setRegistration(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto value) {
|
|
|
|
+ if (registrationBuilder_ == null) {
|
|
|
|
+ if (value == null) {
|
|
|
|
+ throw new NullPointerException();
|
|
|
|
+ }
|
|
|
|
+ registration_ = value;
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ registrationBuilder_.setMessage(value);
|
|
|
|
+ }
|
|
|
|
+ bitField0_ |= 0x00000001;
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder setRegistration(
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder builderForValue) {
|
|
|
|
+ if (registrationBuilder_ == null) {
|
|
|
|
+ registration_ = builderForValue.build();
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ registrationBuilder_.setMessage(builderForValue.build());
|
|
|
|
+ }
|
|
|
|
+ bitField0_ |= 0x00000001;
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder mergeRegistration(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto value) {
|
|
|
|
+ if (registrationBuilder_ == null) {
|
|
|
|
+ if (((bitField0_ & 0x00000001) == 0x00000001) &&
|
|
|
|
+ registration_ != org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance()) {
|
|
|
|
+ registration_ =
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.newBuilder(registration_).mergeFrom(value).buildPartial();
|
|
|
|
+ } else {
|
|
|
|
+ registration_ = value;
|
|
|
|
+ }
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ registrationBuilder_.mergeFrom(value);
|
|
|
|
+ }
|
|
|
|
+ bitField0_ |= 0x00000001;
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder clearRegistration() {
|
|
|
|
+ if (registrationBuilder_ == null) {
|
|
|
|
+ registration_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance();
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ registrationBuilder_.clear();
|
|
|
|
+ }
|
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000001);
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder getRegistrationBuilder() {
|
|
|
|
+ bitField0_ |= 0x00000001;
|
|
|
|
+ onChanged();
|
|
|
|
+ return getRegistrationFieldBuilder().getBuilder();
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder getRegistrationOrBuilder() {
|
|
|
|
+ if (registrationBuilder_ != null) {
|
|
|
|
+ return registrationBuilder_.getMessageOrBuilder();
|
|
|
|
+ } else {
|
|
|
|
+ return registration_;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ private com.google.protobuf.SingleFieldBuilder<
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder>
|
|
|
|
+ getRegistrationFieldBuilder() {
|
|
|
|
+ if (registrationBuilder_ == null) {
|
|
|
|
+ registrationBuilder_ = new com.google.protobuf.SingleFieldBuilder<
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder>(
|
|
|
|
+ registration_,
|
|
|
|
+ getParentForChildren(),
|
|
|
|
+ isClean());
|
|
|
|
+ registration_ = null;
|
|
|
|
+ }
|
|
|
|
+ return registrationBuilder_;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ // @@protoc_insertion_point(builder_scope:RegisterDatanodeRequestProto)
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ static {
|
|
|
|
+ defaultInstance = new RegisterDatanodeRequestProto(true);
|
|
|
|
+ defaultInstance.initFields();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ // @@protoc_insertion_point(class_scope:RegisterDatanodeRequestProto)
+ }
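+
+  // Illustrative usage sketch (editor's note, not emitted by protoc): the
+  // generated RegisterDatanodeRequestProto is assembled through its Builder and
+  // round-trips through the parseFrom() overloads above. "reg" is a hypothetical,
+  // previously constructed DatanodeRegistrationProto.
+  //
+  //   DatanodeProtocolProtos.RegisterDatanodeRequestProto request =
+  //       DatanodeProtocolProtos.RegisterDatanodeRequestProto.newBuilder()
+  //           .setRegistration(reg)          // required field; build() throws if it is unset
+  //           .build();
+  //   byte[] wire = request.toByteArray();   // protobuf wire format
+  //   DatanodeProtocolProtos.RegisterDatanodeRequestProto parsed =
+  //       DatanodeProtocolProtos.RegisterDatanodeRequestProto.parseFrom(wire);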
+
|
|
|
|
+ public interface RegisterDatanodeResponseProtoOrBuilder
|
|
|
|
+ extends com.google.protobuf.MessageOrBuilder {
|
|
|
|
+
|
|
|
|
+ // required .DatanodeRegistrationProto registration = 1;
|
|
|
|
+ boolean hasRegistration();
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto getRegistration();
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder getRegistrationOrBuilder();
|
|
|
|
+ }
|
|
|
|
+ public static final class RegisterDatanodeResponseProto extends
|
|
|
|
+ com.google.protobuf.GeneratedMessage
|
|
|
|
+ implements RegisterDatanodeResponseProtoOrBuilder {
|
|
|
|
+ // Use RegisterDatanodeResponseProto.newBuilder() to construct.
|
|
|
|
+ private RegisterDatanodeResponseProto(Builder builder) {
|
|
|
|
+ super(builder);
|
|
|
|
+ }
|
|
|
|
+ private RegisterDatanodeResponseProto(boolean noInit) {}
|
|
|
|
+
|
|
|
|
+ private static final RegisterDatanodeResponseProto defaultInstance;
|
|
|
|
+ public static RegisterDatanodeResponseProto getDefaultInstance() {
|
|
|
|
+ return defaultInstance;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public RegisterDatanodeResponseProto getDefaultInstanceForType() {
|
|
|
|
+ return defaultInstance;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public static final com.google.protobuf.Descriptors.Descriptor
|
|
|
|
+ getDescriptor() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_RegisterDatanodeResponseProto_descriptor;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
|
|
|
+ internalGetFieldAccessorTable() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_RegisterDatanodeResponseProto_fieldAccessorTable;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private int bitField0_;
|
|
|
|
+ // required .DatanodeRegistrationProto registration = 1;
|
|
|
|
+ public static final int REGISTRATION_FIELD_NUMBER = 1;
|
|
|
|
+ private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto registration_;
|
|
|
|
+ public boolean hasRegistration() {
|
|
|
|
+ return ((bitField0_ & 0x00000001) == 0x00000001);
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto getRegistration() {
|
|
|
|
+ return registration_;
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder getRegistrationOrBuilder() {
|
|
|
|
+ return registration_;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private void initFields() {
|
|
|
|
+ registration_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance();
|
|
|
|
+ }
|
|
|
|
+ private byte memoizedIsInitialized = -1;
|
|
|
|
+ public final boolean isInitialized() {
|
|
|
|
+ byte isInitialized = memoizedIsInitialized;
|
|
|
|
+ if (isInitialized != -1) return isInitialized == 1;
|
|
|
|
+
|
|
|
|
+ if (!hasRegistration()) {
|
|
|
|
+ memoizedIsInitialized = 0;
|
|
|
|
+ return false;
|
|
|
|
+ }
|
|
|
|
+ if (!getRegistration().isInitialized()) {
|
|
|
|
+ memoizedIsInitialized = 0;
|
|
|
|
+ return false;
|
|
|
|
+ }
|
|
|
|
+ memoizedIsInitialized = 1;
|
|
|
|
+ return true;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ getSerializedSize();
|
|
|
|
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
|
+ output.writeMessage(1, registration_);
|
|
|
|
+ }
|
|
|
|
+ getUnknownFields().writeTo(output);
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private int memoizedSerializedSize = -1;
|
|
|
|
+ public int getSerializedSize() {
|
|
|
|
+ int size = memoizedSerializedSize;
|
|
|
|
+ if (size != -1) return size;
|
|
|
|
+
|
|
|
|
+ size = 0;
|
|
|
|
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
|
+ size += com.google.protobuf.CodedOutputStream
|
|
|
|
+ .computeMessageSize(1, registration_);
|
|
|
|
+ }
|
|
|
|
+ size += getUnknownFields().getSerializedSize();
|
|
|
|
+ memoizedSerializedSize = size;
|
|
|
|
+ return size;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private static final long serialVersionUID = 0L;
|
|
|
|
+ @java.lang.Override
|
|
|
|
+ protected java.lang.Object writeReplace()
|
|
|
|
+ throws java.io.ObjectStreamException {
|
|
|
|
+ return super.writeReplace();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ @java.lang.Override
|
|
|
|
+ public boolean equals(final java.lang.Object obj) {
|
|
|
|
+ if (obj == this) {
|
|
|
|
+ return true;
|
|
|
|
+ }
|
|
|
|
+ if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto)) {
|
|
|
|
+ return super.equals(obj);
|
|
|
|
+ }
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto) obj;
|
|
|
|
+
|
|
|
|
+ boolean result = true;
|
|
|
|
+ result = result && (hasRegistration() == other.hasRegistration());
|
|
|
|
+ if (hasRegistration()) {
|
|
|
|
+ result = result && getRegistration()
|
|
|
|
+ .equals(other.getRegistration());
|
|
|
|
+ }
|
|
|
|
+ result = result &&
|
|
|
|
+ getUnknownFields().equals(other.getUnknownFields());
|
|
|
|
+ return result;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ @java.lang.Override
|
|
|
|
+ public int hashCode() {
|
|
|
|
+ int hash = 41;
|
|
|
|
+ hash = (19 * hash) + getDescriptorForType().hashCode();
|
|
|
|
+ if (hasRegistration()) {
|
|
|
|
+ hash = (37 * hash) + REGISTRATION_FIELD_NUMBER;
|
|
|
|
+ hash = (53 * hash) + getRegistration().hashCode();
|
|
|
|
+ }
|
|
|
|
+ hash = (29 * hash) + getUnknownFields().hashCode();
|
|
|
|
+ return hash;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto parseFrom(
|
|
|
|
+ com.google.protobuf.ByteString data)
|
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
|
+ return newBuilder().mergeFrom(data).buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto parseFrom(
|
|
|
|
+ com.google.protobuf.ByteString data,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
|
+ return newBuilder().mergeFrom(data, extensionRegistry)
|
|
|
|
+ .buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto parseFrom(byte[] data)
|
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
|
+ return newBuilder().mergeFrom(data).buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto parseFrom(
|
|
|
|
+ byte[] data,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
|
+ return newBuilder().mergeFrom(data, extensionRegistry)
|
|
|
|
+ .buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto parseFrom(java.io.InputStream input)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ return newBuilder().mergeFrom(input).buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto parseFrom(
|
|
|
|
+ java.io.InputStream input,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ return newBuilder().mergeFrom(input, extensionRegistry)
|
|
|
|
+ .buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto parseDelimitedFrom(java.io.InputStream input)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ Builder builder = newBuilder();
|
|
|
|
+ if (builder.mergeDelimitedFrom(input)) {
|
|
|
|
+ return builder.buildParsed();
|
|
|
|
+ } else {
|
|
|
|
+ return null;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto parseDelimitedFrom(
|
|
|
|
+ java.io.InputStream input,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ Builder builder = newBuilder();
|
|
|
|
+ if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
|
|
|
|
+ return builder.buildParsed();
|
|
|
|
+ } else {
|
|
|
|
+ return null;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto parseFrom(
|
|
|
|
+ com.google.protobuf.CodedInputStream input)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ return newBuilder().mergeFrom(input).buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto parseFrom(
|
|
|
|
+ com.google.protobuf.CodedInputStream input,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ return newBuilder().mergeFrom(input, extensionRegistry)
|
|
|
|
+ .buildParsed();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public static Builder newBuilder() { return Builder.create(); }
|
|
|
|
+ public Builder newBuilderForType() { return newBuilder(); }
|
|
|
|
+ public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto prototype) {
|
|
|
|
+ return newBuilder().mergeFrom(prototype);
|
|
|
|
+ }
|
|
|
|
+ public Builder toBuilder() { return newBuilder(this); }
|
|
|
|
+
|
|
|
|
+ @java.lang.Override
|
|
|
|
+ protected Builder newBuilderForType(
|
|
|
|
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
|
|
|
|
+ Builder builder = new Builder(parent);
|
|
|
|
+ return builder;
|
|
|
|
+ }
|
|
|
|
+ public static final class Builder extends
|
|
|
|
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
|
|
|
|
+ implements org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProtoOrBuilder {
|
|
|
|
+ public static final com.google.protobuf.Descriptors.Descriptor
|
|
|
|
+ getDescriptor() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_RegisterDatanodeResponseProto_descriptor;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
|
|
|
+ internalGetFieldAccessorTable() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_RegisterDatanodeResponseProto_fieldAccessorTable;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ // Construct using org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto.newBuilder()
|
|
|
|
+ private Builder() {
|
|
|
|
+ maybeForceBuilderInitialization();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private Builder(BuilderParent parent) {
|
|
|
|
+ super(parent);
|
|
|
|
+ maybeForceBuilderInitialization();
|
|
|
|
+ }
|
|
|
|
+ private void maybeForceBuilderInitialization() {
|
|
|
|
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
|
|
|
|
+ getRegistrationFieldBuilder();
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ private static Builder create() {
|
|
|
|
+ return new Builder();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public Builder clear() {
|
|
|
|
+ super.clear();
|
|
|
|
+ if (registrationBuilder_ == null) {
|
|
|
|
+ registration_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance();
|
|
|
|
+ } else {
|
|
|
|
+ registrationBuilder_.clear();
|
|
|
|
+ }
|
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000001);
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public Builder clone() {
|
|
|
|
+ return create().mergeFrom(buildPartial());
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public com.google.protobuf.Descriptors.Descriptor
|
|
|
|
+ getDescriptorForType() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto.getDescriptor();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto getDefaultInstanceForType() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto.getDefaultInstance();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto build() {
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto result = buildPartial();
|
|
|
|
+ if (!result.isInitialized()) {
|
|
|
|
+ throw newUninitializedMessageException(result);
|
|
|
|
+ }
|
|
|
|
+ return result;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto buildParsed()
|
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto result = buildPartial();
|
|
|
|
+ if (!result.isInitialized()) {
|
|
|
|
+ throw newUninitializedMessageException(
|
|
|
|
+ result).asInvalidProtocolBufferException();
|
|
|
|
+ }
|
|
|
|
+ return result;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto buildPartial() {
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto(this);
|
|
|
|
+ int from_bitField0_ = bitField0_;
|
|
|
|
+ int to_bitField0_ = 0;
|
|
|
|
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
|
+ to_bitField0_ |= 0x00000001;
|
|
|
|
+ }
|
|
|
|
+ if (registrationBuilder_ == null) {
|
|
|
|
+ result.registration_ = registration_;
|
|
|
|
+ } else {
|
|
|
|
+ result.registration_ = registrationBuilder_.build();
|
|
|
|
+ }
|
|
|
|
+ result.bitField0_ = to_bitField0_;
|
|
|
|
+ onBuilt();
|
|
|
|
+ return result;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public Builder mergeFrom(com.google.protobuf.Message other) {
|
|
|
|
+ if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto) {
|
|
|
|
+ return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto)other);
|
|
|
|
+ } else {
|
|
|
|
+ super.mergeFrom(other);
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto other) {
|
|
|
|
+ if (other == org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto.getDefaultInstance()) return this;
|
|
|
|
+ if (other.hasRegistration()) {
|
|
|
|
+ mergeRegistration(other.getRegistration());
|
|
|
|
+ }
|
|
|
|
+ this.mergeUnknownFields(other.getUnknownFields());
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public final boolean isInitialized() {
|
|
|
|
+ if (!hasRegistration()) {
|
|
|
|
+
|
|
|
|
+ return false;
|
|
|
|
+ }
|
|
|
|
+ if (!getRegistration().isInitialized()) {
|
|
|
|
+
|
|
|
|
+ return false;
|
|
|
|
+ }
|
|
|
|
+ return true;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public Builder mergeFrom(
|
|
|
|
+ com.google.protobuf.CodedInputStream input,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
|
|
|
|
+ com.google.protobuf.UnknownFieldSet.newBuilder(
|
|
|
|
+ this.getUnknownFields());
|
|
|
|
+ while (true) {
|
|
|
|
+ int tag = input.readTag();
|
|
|
|
+ switch (tag) {
|
|
|
|
+ case 0:
|
|
|
|
+ this.setUnknownFields(unknownFields.build());
|
|
|
|
+ onChanged();
|
|
|
|
+ return this;
|
|
|
|
+ default: {
|
|
|
|
+ if (!parseUnknownField(input, unknownFields,
|
|
|
|
+ extensionRegistry, tag)) {
|
|
|
|
+ this.setUnknownFields(unknownFields.build());
|
|
|
|
+ onChanged();
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ break;
|
|
|
|
+ }
|
|
|
|
+ case 10: {
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.newBuilder();
|
|
|
|
+ if (hasRegistration()) {
|
|
|
|
+ subBuilder.mergeFrom(getRegistration());
|
|
|
|
+ }
|
|
|
|
+ input.readMessage(subBuilder, extensionRegistry);
|
|
|
|
+ setRegistration(subBuilder.buildPartial());
|
|
|
|
+ break;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private int bitField0_;
|
|
|
|
+
|
|
|
|
+ // required .DatanodeRegistrationProto registration = 1;
|
|
|
|
+ private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto registration_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance();
|
|
|
|
+ private com.google.protobuf.SingleFieldBuilder<
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder> registrationBuilder_;
|
|
|
|
+ public boolean hasRegistration() {
|
|
|
|
+ return ((bitField0_ & 0x00000001) == 0x00000001);
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto getRegistration() {
|
|
|
|
+ if (registrationBuilder_ == null) {
|
|
|
|
+ return registration_;
|
|
|
|
+ } else {
|
|
|
|
+ return registrationBuilder_.getMessage();
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ public Builder setRegistration(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto value) {
|
|
|
|
+ if (registrationBuilder_ == null) {
|
|
|
|
+ if (value == null) {
|
|
|
|
+ throw new NullPointerException();
|
|
|
|
+ }
|
|
|
|
+ registration_ = value;
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ registrationBuilder_.setMessage(value);
|
|
|
|
+ }
|
|
|
|
+ bitField0_ |= 0x00000001;
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder setRegistration(
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder builderForValue) {
|
|
|
|
+ if (registrationBuilder_ == null) {
|
|
|
|
+ registration_ = builderForValue.build();
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ registrationBuilder_.setMessage(builderForValue.build());
|
|
|
|
+ }
|
|
|
|
+ bitField0_ |= 0x00000001;
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder mergeRegistration(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto value) {
|
|
|
|
+ if (registrationBuilder_ == null) {
|
|
|
|
+ if (((bitField0_ & 0x00000001) == 0x00000001) &&
|
|
|
|
+ registration_ != org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance()) {
|
|
|
|
+ registration_ =
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.newBuilder(registration_).mergeFrom(value).buildPartial();
|
|
|
|
+ } else {
|
|
|
|
+ registration_ = value;
|
|
|
|
+ }
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ registrationBuilder_.mergeFrom(value);
|
|
|
|
+ }
|
|
|
|
+ bitField0_ |= 0x00000001;
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder clearRegistration() {
|
|
|
|
+ if (registrationBuilder_ == null) {
|
|
|
|
+ registration_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance();
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ registrationBuilder_.clear();
|
|
|
|
+ }
|
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000001);
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder getRegistrationBuilder() {
|
|
|
|
+ bitField0_ |= 0x00000001;
|
|
|
|
+ onChanged();
|
|
|
|
+ return getRegistrationFieldBuilder().getBuilder();
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder getRegistrationOrBuilder() {
|
|
|
|
+ if (registrationBuilder_ != null) {
|
|
|
|
+ return registrationBuilder_.getMessageOrBuilder();
|
|
|
|
+ } else {
|
|
|
|
+ return registration_;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ private com.google.protobuf.SingleFieldBuilder<
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder>
|
|
|
|
+ getRegistrationFieldBuilder() {
|
|
|
|
+ if (registrationBuilder_ == null) {
|
|
|
|
+ registrationBuilder_ = new com.google.protobuf.SingleFieldBuilder<
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder>(
|
|
|
|
+ registration_,
|
|
|
|
+ getParentForChildren(),
|
|
|
|
+ isClean());
|
|
|
|
+ registration_ = null;
|
|
|
|
+ }
|
|
|
|
+ return registrationBuilder_;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ // @@protoc_insertion_point(builder_scope:RegisterDatanodeResponseProto)
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ static {
|
|
|
|
+ defaultInstance = new RegisterDatanodeResponseProto(true);
|
|
|
|
+ defaultInstance.initFields();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ // @@protoc_insertion_point(class_scope:RegisterDatanodeResponseProto)
+ }
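+
+  // Illustrative usage sketch (editor's note, not emitted by protoc): the
+  // response carries the registration back to the datanode and can be framed in
+  // length-delimited form for stream transports, matching the
+  // parseDelimitedFrom() overloads above. "reg", "out" and "in" are hypothetical
+  // placeholders (a DatanodeRegistrationProto, an OutputStream, an InputStream).
+  //
+  //   DatanodeProtocolProtos.RegisterDatanodeResponseProto response =
+  //       DatanodeProtocolProtos.RegisterDatanodeResponseProto.newBuilder()
+  //           .setRegistration(reg)
+  //           .build();
+  //   response.writeDelimitedTo(out);   // length-prefixed message framing
+  //   DatanodeProtocolProtos.RegisterDatanodeResponseProto read =
+  //       DatanodeProtocolProtos.RegisterDatanodeResponseProto.parseDelimitedFrom(in);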
+
|
|
|
|
+ public interface HeartbeatRequestProtoOrBuilder
|
|
|
|
+ extends com.google.protobuf.MessageOrBuilder {
|
|
|
|
+
|
|
|
|
+ // required .DatanodeRegistrationProto registration = 1;
|
|
|
|
+ boolean hasRegistration();
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto getRegistration();
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder getRegistrationOrBuilder();
|
|
|
|
+
|
|
|
|
+ // required uint64 capacity = 2;
|
|
|
|
+ boolean hasCapacity();
|
|
|
|
+ long getCapacity();
|
|
|
|
+
|
|
|
|
+ // required uint64 dfsUsed = 3;
|
|
|
|
+ boolean hasDfsUsed();
|
|
|
|
+ long getDfsUsed();
|
|
|
|
+
|
|
|
|
+ // required uint64 remaining = 4;
|
|
|
|
+ boolean hasRemaining();
|
|
|
|
+ long getRemaining();
|
|
|
|
+
|
|
|
|
+ // required uint64 blockPoolUsed = 5;
|
|
|
|
+ boolean hasBlockPoolUsed();
|
|
|
|
+ long getBlockPoolUsed();
|
|
|
|
+
|
|
|
|
+ // required uint32 xmitsInProgress = 6;
|
|
|
|
+ boolean hasXmitsInProgress();
|
|
|
|
+ int getXmitsInProgress();
|
|
|
|
+
|
|
|
|
+ // required uint32 xceiverCount = 7;
|
|
|
|
+ boolean hasXceiverCount();
|
|
|
|
+ int getXceiverCount();
|
|
|
|
+
|
|
|
|
+ // required uint32 failedVolumes = 8;
|
|
|
|
+ boolean hasFailedVolumes();
|
|
|
|
+ int getFailedVolumes();
+ }
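+
+  // Illustrative usage sketch (editor's note, not emitted by protoc): a heartbeat
+  // request pairs the datanode registration with its storage and transfer
+  // counters; all eight fields are required, so build() fails if any is missing.
+  // The numeric values below are hypothetical placeholders and "reg" is a
+  // hypothetical DatanodeRegistrationProto.
+  //
+  //   DatanodeProtocolProtos.HeartbeatRequestProto heartbeat =
+  //       DatanodeProtocolProtos.HeartbeatRequestProto.newBuilder()
+  //           .setRegistration(reg)
+  //           .setCapacity(100L << 30)        // configured capacity in bytes
+  //           .setDfsUsed(25L << 30)          // bytes used by HDFS block data
+  //           .setRemaining(70L << 30)        // bytes still available
+  //           .setBlockPoolUsed(25L << 30)    // bytes used by this block pool
+  //           .setXmitsInProgress(0)          // block transfers in progress
+  //           .setXceiverCount(4)             // active transceiver threads
+  //           .setFailedVolumes(0)            // volumes that have failed
+  //           .build();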
+ public static final class HeartbeatRequestProto extends
|
|
|
|
+ com.google.protobuf.GeneratedMessage
|
|
|
|
+ implements HeartbeatRequestProtoOrBuilder {
|
|
|
|
+ // Use HeartbeatRequestProto.newBuilder() to construct.
|
|
|
|
+ private HeartbeatRequestProto(Builder builder) {
|
|
|
|
+ super(builder);
|
|
|
|
+ }
|
|
|
|
+ private HeartbeatRequestProto(boolean noInit) {}
|
|
|
|
+
|
|
|
|
+ private static final HeartbeatRequestProto defaultInstance;
|
|
|
|
+ public static HeartbeatRequestProto getDefaultInstance() {
|
|
|
|
+ return defaultInstance;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public HeartbeatRequestProto getDefaultInstanceForType() {
|
|
|
|
+ return defaultInstance;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public static final com.google.protobuf.Descriptors.Descriptor
|
|
|
|
+ getDescriptor() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_HeartbeatRequestProto_descriptor;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
|
|
|
+ internalGetFieldAccessorTable() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_HeartbeatRequestProto_fieldAccessorTable;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private int bitField0_;
|
|
|
|
+ // required .DatanodeRegistrationProto registration = 1;
|
|
|
|
+ public static final int REGISTRATION_FIELD_NUMBER = 1;
|
|
|
|
+ private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto registration_;
|
|
|
|
+ public boolean hasRegistration() {
|
|
|
|
+ return ((bitField0_ & 0x00000001) == 0x00000001);
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto getRegistration() {
|
|
|
|
+ return registration_;
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder getRegistrationOrBuilder() {
|
|
|
|
+ return registration_;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ // required uint64 capacity = 2;
|
|
|
|
+ public static final int CAPACITY_FIELD_NUMBER = 2;
|
|
|
|
+ private long capacity_;
|
|
|
|
+ public boolean hasCapacity() {
|
|
|
|
+ return ((bitField0_ & 0x00000002) == 0x00000002);
|
|
|
|
+ }
|
|
|
|
+ public long getCapacity() {
|
|
|
|
+ return capacity_;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ // required uint64 dfsUsed = 3;
|
|
|
|
+ public static final int DFSUSED_FIELD_NUMBER = 3;
|
|
|
|
+ private long dfsUsed_;
|
|
|
|
+ public boolean hasDfsUsed() {
|
|
|
|
+ return ((bitField0_ & 0x00000004) == 0x00000004);
|
|
|
|
+ }
|
|
|
|
+ public long getDfsUsed() {
|
|
|
|
+ return dfsUsed_;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ // required uint64 remaining = 4;
|
|
|
|
+ public static final int REMAINING_FIELD_NUMBER = 4;
|
|
|
|
+ private long remaining_;
|
|
|
|
+ public boolean hasRemaining() {
|
|
|
|
+ return ((bitField0_ & 0x00000008) == 0x00000008);
|
|
|
|
+ }
|
|
|
|
+ public long getRemaining() {
|
|
|
|
+ return remaining_;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ // required uint64 blockPoolUsed = 5;
|
|
|
|
+ public static final int BLOCKPOOLUSED_FIELD_NUMBER = 5;
|
|
|
|
+ private long blockPoolUsed_;
|
|
|
|
+ public boolean hasBlockPoolUsed() {
|
|
|
|
+ return ((bitField0_ & 0x00000010) == 0x00000010);
|
|
|
|
+ }
|
|
|
|
+ public long getBlockPoolUsed() {
|
|
|
|
+ return blockPoolUsed_;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ // required uint32 xmitsInProgress = 6;
|
|
|
|
+ public static final int XMITSINPROGRESS_FIELD_NUMBER = 6;
|
|
|
|
+ private int xmitsInProgress_;
|
|
|
|
+ public boolean hasXmitsInProgress() {
|
|
|
|
+ return ((bitField0_ & 0x00000020) == 0x00000020);
|
|
|
|
+ }
|
|
|
|
+ public int getXmitsInProgress() {
|
|
|
|
+ return xmitsInProgress_;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ // required uint32 xceiverCount = 7;
|
|
|
|
+ public static final int XCEIVERCOUNT_FIELD_NUMBER = 7;
|
|
|
|
+ private int xceiverCount_;
|
|
|
|
+ public boolean hasXceiverCount() {
|
|
|
|
+ return ((bitField0_ & 0x00000040) == 0x00000040);
|
|
|
|
+ }
|
|
|
|
+ public int getXceiverCount() {
|
|
|
|
+ return xceiverCount_;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ // required uint32 failedVolumes = 8;
|
|
|
|
+ public static final int FAILEDVOLUMES_FIELD_NUMBER = 8;
|
|
|
|
+ private int failedVolumes_;
|
|
|
|
+ public boolean hasFailedVolumes() {
|
|
|
|
+ return ((bitField0_ & 0x00000080) == 0x00000080);
|
|
|
|
+ }
|
|
|
|
+ public int getFailedVolumes() {
|
|
|
|
+ return failedVolumes_;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private void initFields() {
|
|
|
|
+ registration_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance();
|
|
|
|
+ capacity_ = 0L;
|
|
|
|
+ dfsUsed_ = 0L;
|
|
|
|
+ remaining_ = 0L;
|
|
|
|
+ blockPoolUsed_ = 0L;
|
|
|
|
+ xmitsInProgress_ = 0;
|
|
|
|
+ xceiverCount_ = 0;
|
|
|
|
+ failedVolumes_ = 0;
|
|
|
|
+ }
|
|
|
|
+ private byte memoizedIsInitialized = -1;
|
|
|
|
+ public final boolean isInitialized() {
|
|
|
|
+ byte isInitialized = memoizedIsInitialized;
|
|
|
|
+ if (isInitialized != -1) return isInitialized == 1;
|
|
|
|
+
|
|
|
|
+ if (!hasRegistration()) {
|
|
|
|
+ memoizedIsInitialized = 0;
|
|
|
|
+ return false;
|
|
|
|
+ }
|
|
|
|
+ if (!hasCapacity()) {
|
|
|
|
+ memoizedIsInitialized = 0;
|
|
|
|
+ return false;
|
|
|
|
+ }
|
|
|
|
+ if (!hasDfsUsed()) {
|
|
|
|
+ memoizedIsInitialized = 0;
|
|
|
|
+ return false;
|
|
|
|
+ }
|
|
|
|
+ if (!hasRemaining()) {
|
|
|
|
+ memoizedIsInitialized = 0;
|
|
|
|
+ return false;
|
|
|
|
+ }
|
|
|
|
+ if (!hasBlockPoolUsed()) {
|
|
|
|
+ memoizedIsInitialized = 0;
|
|
|
|
+ return false;
|
|
|
|
+ }
|
|
|
|
+ if (!hasXmitsInProgress()) {
|
|
|
|
+ memoizedIsInitialized = 0;
|
|
|
|
+ return false;
|
|
|
|
+ }
|
|
|
|
+ if (!hasXceiverCount()) {
|
|
|
|
+ memoizedIsInitialized = 0;
|
|
|
|
+ return false;
|
|
|
|
+ }
|
|
|
|
+ if (!hasFailedVolumes()) {
|
|
|
|
+ memoizedIsInitialized = 0;
|
|
|
|
+ return false;
|
|
|
|
+ }
|
|
|
|
+ if (!getRegistration().isInitialized()) {
|
|
|
|
+ memoizedIsInitialized = 0;
|
|
|
|
+ return false;
|
|
|
|
+ }
|
|
|
|
+ memoizedIsInitialized = 1;
|
|
|
|
+ return true;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ getSerializedSize();
|
|
|
|
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
|
+ output.writeMessage(1, registration_);
|
|
|
|
+ }
|
|
|
|
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
|
|
|
|
+ output.writeUInt64(2, capacity_);
|
|
|
|
+ }
|
|
|
|
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
|
|
|
|
+ output.writeUInt64(3, dfsUsed_);
|
|
|
|
+ }
|
|
|
|
+ if (((bitField0_ & 0x00000008) == 0x00000008)) {
|
|
|
|
+ output.writeUInt64(4, remaining_);
|
|
|
|
+ }
|
|
|
|
+ if (((bitField0_ & 0x00000010) == 0x00000010)) {
|
|
|
|
+ output.writeUInt64(5, blockPoolUsed_);
|
|
|
|
+ }
|
|
|
|
+ if (((bitField0_ & 0x00000020) == 0x00000020)) {
|
|
|
|
+ output.writeUInt32(6, xmitsInProgress_);
|
|
|
|
+ }
|
|
|
|
+ if (((bitField0_ & 0x00000040) == 0x00000040)) {
|
|
|
|
+ output.writeUInt32(7, xceiverCount_);
|
|
|
|
+ }
|
|
|
|
+ if (((bitField0_ & 0x00000080) == 0x00000080)) {
|
|
|
|
+ output.writeUInt32(8, failedVolumes_);
|
|
|
|
+ }
|
|
|
|
+ getUnknownFields().writeTo(output);
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private int memoizedSerializedSize = -1;
|
|
|
|
+ public int getSerializedSize() {
|
|
|
|
+ int size = memoizedSerializedSize;
|
|
|
|
+ if (size != -1) return size;
|
|
|
|
+
|
|
|
|
+ size = 0;
|
|
|
|
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
|
+ size += com.google.protobuf.CodedOutputStream
|
|
|
|
+ .computeMessageSize(1, registration_);
|
|
|
|
+ }
|
|
|
|
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
|
|
|
|
+ size += com.google.protobuf.CodedOutputStream
|
|
|
|
+ .computeUInt64Size(2, capacity_);
|
|
|
|
+ }
|
|
|
|
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
|
|
|
|
+ size += com.google.protobuf.CodedOutputStream
|
|
|
|
+ .computeUInt64Size(3, dfsUsed_);
|
|
|
|
+ }
|
|
|
|
+ if (((bitField0_ & 0x00000008) == 0x00000008)) {
|
|
|
|
+ size += com.google.protobuf.CodedOutputStream
|
|
|
|
+ .computeUInt64Size(4, remaining_);
|
|
|
|
+ }
|
|
|
|
+ if (((bitField0_ & 0x00000010) == 0x00000010)) {
|
|
|
|
+ size += com.google.protobuf.CodedOutputStream
|
|
|
|
+ .computeUInt64Size(5, blockPoolUsed_);
|
|
|
|
+ }
|
|
|
|
+ if (((bitField0_ & 0x00000020) == 0x00000020)) {
|
|
|
|
+ size += com.google.protobuf.CodedOutputStream
|
|
|
|
+ .computeUInt32Size(6, xmitsInProgress_);
|
|
|
|
+ }
|
|
|
|
+ if (((bitField0_ & 0x00000040) == 0x00000040)) {
|
|
|
|
+ size += com.google.protobuf.CodedOutputStream
|
|
|
|
+ .computeUInt32Size(7, xceiverCount_);
|
|
|
|
+ }
|
|
|
|
+ if (((bitField0_ & 0x00000080) == 0x00000080)) {
|
|
|
|
+ size += com.google.protobuf.CodedOutputStream
|
|
|
|
+ .computeUInt32Size(8, failedVolumes_);
|
|
|
|
+ }
|
|
|
|
+ size += getUnknownFields().getSerializedSize();
|
|
|
|
+ memoizedSerializedSize = size;
|
|
|
|
+ return size;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private static final long serialVersionUID = 0L;
|
|
|
|
+ @java.lang.Override
|
|
|
|
+ protected java.lang.Object writeReplace()
|
|
|
|
+ throws java.io.ObjectStreamException {
|
|
|
|
+ return super.writeReplace();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ @java.lang.Override
|
|
|
|
+ public boolean equals(final java.lang.Object obj) {
|
|
|
|
+ if (obj == this) {
|
|
|
|
+ return true;
|
|
|
|
+ }
|
|
|
|
+ if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto)) {
|
|
|
|
+ return super.equals(obj);
|
|
|
|
+ }
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto) obj;
|
|
|
|
+
|
|
|
|
+ boolean result = true;
|
|
|
|
+ result = result && (hasRegistration() == other.hasRegistration());
|
|
|
|
+ if (hasRegistration()) {
|
|
|
|
+ result = result && getRegistration()
|
|
|
|
+ .equals(other.getRegistration());
|
|
|
|
+ }
|
|
|
|
+ result = result && (hasCapacity() == other.hasCapacity());
|
|
|
|
+ if (hasCapacity()) {
|
|
|
|
+ result = result && (getCapacity()
|
|
|
|
+ == other.getCapacity());
|
|
|
|
+ }
|
|
|
|
+ result = result && (hasDfsUsed() == other.hasDfsUsed());
|
|
|
|
+ if (hasDfsUsed()) {
|
|
|
|
+ result = result && (getDfsUsed()
|
|
|
|
+ == other.getDfsUsed());
|
|
|
|
+ }
|
|
|
|
+ result = result && (hasRemaining() == other.hasRemaining());
|
|
|
|
+ if (hasRemaining()) {
|
|
|
|
+ result = result && (getRemaining()
|
|
|
|
+ == other.getRemaining());
|
|
|
|
+ }
|
|
|
|
+ result = result && (hasBlockPoolUsed() == other.hasBlockPoolUsed());
|
|
|
|
+ if (hasBlockPoolUsed()) {
|
|
|
|
+ result = result && (getBlockPoolUsed()
|
|
|
|
+ == other.getBlockPoolUsed());
|
|
|
|
+ }
|
|
|
|
+ result = result && (hasXmitsInProgress() == other.hasXmitsInProgress());
|
|
|
|
+ if (hasXmitsInProgress()) {
|
|
|
|
+ result = result && (getXmitsInProgress()
|
|
|
|
+ == other.getXmitsInProgress());
|
|
|
|
+ }
|
|
|
|
+ result = result && (hasXceiverCount() == other.hasXceiverCount());
|
|
|
|
+ if (hasXceiverCount()) {
|
|
|
|
+ result = result && (getXceiverCount()
|
|
|
|
+ == other.getXceiverCount());
|
|
|
|
+ }
|
|
|
|
+ result = result && (hasFailedVolumes() == other.hasFailedVolumes());
|
|
|
|
+ if (hasFailedVolumes()) {
|
|
|
|
+ result = result && (getFailedVolumes()
|
|
|
|
+ == other.getFailedVolumes());
|
|
|
|
+ }
|
|
|
|
+ result = result &&
|
|
|
|
+ getUnknownFields().equals(other.getUnknownFields());
|
|
|
|
+ return result;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ @java.lang.Override
|
|
|
|
+ public int hashCode() {
|
|
|
|
+ int hash = 41;
|
|
|
|
+ hash = (19 * hash) + getDescriptorForType().hashCode();
|
|
|
|
+ if (hasRegistration()) {
|
|
|
|
+ hash = (37 * hash) + REGISTRATION_FIELD_NUMBER;
|
|
|
|
+ hash = (53 * hash) + getRegistration().hashCode();
|
|
|
|
+ }
|
|
|
|
+ if (hasCapacity()) {
|
|
|
|
+ hash = (37 * hash) + CAPACITY_FIELD_NUMBER;
|
|
|
|
+ hash = (53 * hash) + hashLong(getCapacity());
|
|
|
|
+ }
|
|
|
|
+ if (hasDfsUsed()) {
|
|
|
|
+ hash = (37 * hash) + DFSUSED_FIELD_NUMBER;
|
|
|
|
+ hash = (53 * hash) + hashLong(getDfsUsed());
|
|
|
|
+ }
|
|
|
|
+ if (hasRemaining()) {
|
|
|
|
+ hash = (37 * hash) + REMAINING_FIELD_NUMBER;
|
|
|
|
+ hash = (53 * hash) + hashLong(getRemaining());
|
|
|
|
+ }
|
|
|
|
+ if (hasBlockPoolUsed()) {
|
|
|
|
+ hash = (37 * hash) + BLOCKPOOLUSED_FIELD_NUMBER;
|
|
|
|
+ hash = (53 * hash) + hashLong(getBlockPoolUsed());
|
|
|
|
+ }
|
|
|
|
+ if (hasXmitsInProgress()) {
|
|
|
|
+ hash = (37 * hash) + XMITSINPROGRESS_FIELD_NUMBER;
|
|
|
|
+ hash = (53 * hash) + getXmitsInProgress();
|
|
|
|
+ }
|
|
|
|
+ if (hasXceiverCount()) {
|
|
|
|
+ hash = (37 * hash) + XCEIVERCOUNT_FIELD_NUMBER;
|
|
|
|
+ hash = (53 * hash) + getXceiverCount();
|
|
|
|
+ }
|
|
|
|
+ if (hasFailedVolumes()) {
|
|
|
|
+ hash = (37 * hash) + FAILEDVOLUMES_FIELD_NUMBER;
|
|
|
|
+ hash = (53 * hash) + getFailedVolumes();
|
|
|
|
+ }
|
|
|
|
+ hash = (29 * hash) + getUnknownFields().hashCode();
|
|
|
|
+ return hash;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto parseFrom(
|
|
|
|
+ com.google.protobuf.ByteString data)
|
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
|
+ return newBuilder().mergeFrom(data).buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto parseFrom(
|
|
|
|
+ com.google.protobuf.ByteString data,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
|
+ return newBuilder().mergeFrom(data, extensionRegistry)
|
|
|
|
+ .buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto parseFrom(byte[] data)
|
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
|
+ return newBuilder().mergeFrom(data).buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto parseFrom(
|
|
|
|
+ byte[] data,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
|
+ return newBuilder().mergeFrom(data, extensionRegistry)
|
|
|
|
+ .buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto parseFrom(java.io.InputStream input)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ return newBuilder().mergeFrom(input).buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto parseFrom(
|
|
|
|
+ java.io.InputStream input,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ return newBuilder().mergeFrom(input, extensionRegistry)
|
|
|
|
+ .buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto parseDelimitedFrom(java.io.InputStream input)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ Builder builder = newBuilder();
|
|
|
|
+ if (builder.mergeDelimitedFrom(input)) {
|
|
|
|
+ return builder.buildParsed();
|
|
|
|
+ } else {
|
|
|
|
+ return null;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto parseDelimitedFrom(
|
|
|
|
+ java.io.InputStream input,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ Builder builder = newBuilder();
|
|
|
|
+ if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
|
|
|
|
+ return builder.buildParsed();
|
|
|
|
+ } else {
|
|
|
|
+ return null;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto parseFrom(
|
|
|
|
+ com.google.protobuf.CodedInputStream input)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ return newBuilder().mergeFrom(input).buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto parseFrom(
|
|
|
|
+ com.google.protobuf.CodedInputStream input,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ return newBuilder().mergeFrom(input, extensionRegistry)
|
|
|
|
+ .buildParsed();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public static Builder newBuilder() { return Builder.create(); }
|
|
|
|
+ public Builder newBuilderForType() { return newBuilder(); }
|
|
|
|
+ public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto prototype) {
|
|
|
|
+ return newBuilder().mergeFrom(prototype);
|
|
|
|
+ }
|
|
|
|
+ public Builder toBuilder() { return newBuilder(this); }
|
|
|
|
+
|
|
|
|
+ @java.lang.Override
|
|
|
|
+ protected Builder newBuilderForType(
|
|
|
|
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
|
|
|
|
+ Builder builder = new Builder(parent);
|
|
|
|
+ return builder;
|
|
|
|
+ }
|
|
|
|
+ public static final class Builder extends
|
|
|
|
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
|
|
|
|
+ implements org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProtoOrBuilder {
|
|
|
|
+ public static final com.google.protobuf.Descriptors.Descriptor
|
|
|
|
+ getDescriptor() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_HeartbeatRequestProto_descriptor;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
|
|
|
+ internalGetFieldAccessorTable() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_HeartbeatRequestProto_fieldAccessorTable;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ // Construct using org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto.newBuilder()
|
|
|
|
+ private Builder() {
|
|
|
|
+ maybeForceBuilderInitialization();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private Builder(BuilderParent parent) {
|
|
|
|
+ super(parent);
|
|
|
|
+ maybeForceBuilderInitialization();
|
|
|
|
+ }
|
|
|
|
+ private void maybeForceBuilderInitialization() {
|
|
|
|
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
|
|
|
|
+ getRegistrationFieldBuilder();
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ private static Builder create() {
|
|
|
|
+ return new Builder();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public Builder clear() {
|
|
|
|
+ super.clear();
|
|
|
|
+ if (registrationBuilder_ == null) {
|
|
|
|
+ registration_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance();
|
|
|
|
+ } else {
|
|
|
|
+ registrationBuilder_.clear();
|
|
|
|
+ }
|
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000001);
|
|
|
|
+ capacity_ = 0L;
|
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000002);
|
|
|
|
+ dfsUsed_ = 0L;
|
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000004);
|
|
|
|
+ remaining_ = 0L;
|
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000008);
|
|
|
|
+ blockPoolUsed_ = 0L;
|
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000010);
|
|
|
|
+ xmitsInProgress_ = 0;
|
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000020);
|
|
|
|
+ xceiverCount_ = 0;
|
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000040);
|
|
|
|
+ failedVolumes_ = 0;
|
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000080);
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public Builder clone() {
|
|
|
|
+ return create().mergeFrom(buildPartial());
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public com.google.protobuf.Descriptors.Descriptor
|
|
|
|
+ getDescriptorForType() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto.getDescriptor();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto getDefaultInstanceForType() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto.getDefaultInstance();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto build() {
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto result = buildPartial();
|
|
|
|
+ if (!result.isInitialized()) {
|
|
|
|
+ throw newUninitializedMessageException(result);
|
|
|
|
+ }
|
|
|
|
+ return result;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto buildParsed()
|
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto result = buildPartial();
|
|
|
|
+ if (!result.isInitialized()) {
|
|
|
|
+ throw newUninitializedMessageException(
|
|
|
|
+ result).asInvalidProtocolBufferException();
|
|
|
|
+ }
|
|
|
|
+ return result;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto buildPartial() {
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto(this);
|
|
|
|
+ int from_bitField0_ = bitField0_;
|
|
|
|
+ int to_bitField0_ = 0;
|
|
|
|
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
|
+ to_bitField0_ |= 0x00000001;
|
|
|
|
+ }
|
|
|
|
+ if (registrationBuilder_ == null) {
|
|
|
|
+ result.registration_ = registration_;
|
|
|
|
+ } else {
|
|
|
|
+ result.registration_ = registrationBuilder_.build();
|
|
|
|
+ }
|
|
|
|
+ if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
|
|
|
|
+ to_bitField0_ |= 0x00000002;
|
|
|
|
+ }
|
|
|
|
+ result.capacity_ = capacity_;
|
|
|
|
+ if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
|
|
|
|
+ to_bitField0_ |= 0x00000004;
|
|
|
|
+ }
|
|
|
|
+ result.dfsUsed_ = dfsUsed_;
|
|
|
|
+ if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
|
|
|
|
+ to_bitField0_ |= 0x00000008;
|
|
|
|
+ }
|
|
|
|
+ result.remaining_ = remaining_;
|
|
|
|
+ if (((from_bitField0_ & 0x00000010) == 0x00000010)) {
|
|
|
|
+ to_bitField0_ |= 0x00000010;
|
|
|
|
+ }
|
|
|
|
+ result.blockPoolUsed_ = blockPoolUsed_;
|
|
|
|
+ if (((from_bitField0_ & 0x00000020) == 0x00000020)) {
|
|
|
|
+ to_bitField0_ |= 0x00000020;
|
|
|
|
+ }
|
|
|
|
+ result.xmitsInProgress_ = xmitsInProgress_;
|
|
|
|
+ if (((from_bitField0_ & 0x00000040) == 0x00000040)) {
|
|
|
|
+ to_bitField0_ |= 0x00000040;
|
|
|
|
+ }
|
|
|
|
+ result.xceiverCount_ = xceiverCount_;
|
|
|
|
+ if (((from_bitField0_ & 0x00000080) == 0x00000080)) {
|
|
|
|
+ to_bitField0_ |= 0x00000080;
|
|
|
|
+ }
|
|
|
|
+ result.failedVolumes_ = failedVolumes_;
|
|
|
|
+ result.bitField0_ = to_bitField0_;
|
|
|
|
+ onBuilt();
|
|
|
|
+ return result;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public Builder mergeFrom(com.google.protobuf.Message other) {
|
|
|
|
+ if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto) {
|
|
|
|
+ return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto)other);
|
|
|
|
+ } else {
|
|
|
|
+ super.mergeFrom(other);
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto other) {
|
|
|
|
+ if (other == org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto.getDefaultInstance()) return this;
|
|
|
|
+ if (other.hasRegistration()) {
|
|
|
|
+ mergeRegistration(other.getRegistration());
|
|
|
|
+ }
|
|
|
|
+ if (other.hasCapacity()) {
|
|
|
|
+ setCapacity(other.getCapacity());
|
|
|
|
+ }
|
|
|
|
+ if (other.hasDfsUsed()) {
|
|
|
|
+ setDfsUsed(other.getDfsUsed());
|
|
|
|
+ }
|
|
|
|
+ if (other.hasRemaining()) {
|
|
|
|
+ setRemaining(other.getRemaining());
|
|
|
|
+ }
|
|
|
|
+ if (other.hasBlockPoolUsed()) {
|
|
|
|
+ setBlockPoolUsed(other.getBlockPoolUsed());
|
|
|
|
+ }
|
|
|
|
+ if (other.hasXmitsInProgress()) {
|
|
|
|
+ setXmitsInProgress(other.getXmitsInProgress());
|
|
|
|
+ }
|
|
|
|
+ if (other.hasXceiverCount()) {
|
|
|
|
+ setXceiverCount(other.getXceiverCount());
|
|
|
|
+ }
|
|
|
|
+ if (other.hasFailedVolumes()) {
|
|
|
|
+ setFailedVolumes(other.getFailedVolumes());
|
|
|
|
+ }
|
|
|
|
+ this.mergeUnknownFields(other.getUnknownFields());
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public final boolean isInitialized() {
|
|
|
|
+ if (!hasRegistration()) {
|
|
|
|
+
|
|
|
|
+ return false;
|
|
|
|
+ }
|
|
|
|
+ if (!hasCapacity()) {
|
|
|
|
+
|
|
|
|
+ return false;
|
|
|
|
+ }
|
|
|
|
+ if (!hasDfsUsed()) {
|
|
|
|
+
|
|
|
|
+ return false;
|
|
|
|
+ }
|
|
|
|
+ if (!hasRemaining()) {
|
|
|
|
+
|
|
|
|
+ return false;
|
|
|
|
+ }
|
|
|
|
+ if (!hasBlockPoolUsed()) {
|
|
|
|
+
|
|
|
|
+ return false;
|
|
|
|
+ }
|
|
|
|
+ if (!hasXmitsInProgress()) {
|
|
|
|
+
|
|
|
|
+ return false;
|
|
|
|
+ }
|
|
|
|
+ if (!hasXceiverCount()) {
|
|
|
|
+
|
|
|
|
+ return false;
|
|
|
|
+ }
|
|
|
|
+ if (!hasFailedVolumes()) {
|
|
|
|
+
|
|
|
|
+ return false;
|
|
|
|
+ }
|
|
|
|
+ if (!getRegistration().isInitialized()) {
|
|
|
|
+
|
|
|
|
+ return false;
|
|
|
|
+ }
|
|
|
|
+ return true;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public Builder mergeFrom(
|
|
|
|
+ com.google.protobuf.CodedInputStream input,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
|
|
|
|
+ com.google.protobuf.UnknownFieldSet.newBuilder(
|
|
|
|
+ this.getUnknownFields());
|
|
|
|
+ while (true) {
|
|
|
|
+ int tag = input.readTag();
|
|
|
|
+ switch (tag) {
|
|
|
|
+ case 0:
|
|
|
|
+ this.setUnknownFields(unknownFields.build());
|
|
|
|
+ onChanged();
|
|
|
|
+ return this;
|
|
|
|
+ default: {
|
|
|
|
+ if (!parseUnknownField(input, unknownFields,
|
|
|
|
+ extensionRegistry, tag)) {
|
|
|
|
+ this.setUnknownFields(unknownFields.build());
|
|
|
|
+ onChanged();
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ break;
|
|
|
|
+ }
|
|
|
|
+ case 10: {
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.newBuilder();
|
|
|
|
+ if (hasRegistration()) {
|
|
|
|
+ subBuilder.mergeFrom(getRegistration());
|
|
|
|
+ }
|
|
|
|
+ input.readMessage(subBuilder, extensionRegistry);
|
|
|
|
+ setRegistration(subBuilder.buildPartial());
|
|
|
|
+ break;
|
|
|
|
+ }
|
|
|
|
+ case 16: {
|
|
|
|
+ bitField0_ |= 0x00000002;
|
|
|
|
+ capacity_ = input.readUInt64();
|
|
|
|
+ break;
|
|
|
|
+ }
|
|
|
|
+ case 24: {
|
|
|
|
+ bitField0_ |= 0x00000004;
|
|
|
|
+ dfsUsed_ = input.readUInt64();
|
|
|
|
+ break;
|
|
|
|
+ }
|
|
|
|
+ case 32: {
|
|
|
|
+ bitField0_ |= 0x00000008;
|
|
|
|
+ remaining_ = input.readUInt64();
|
|
|
|
+ break;
|
|
|
|
+ }
|
|
|
|
+ case 40: {
|
|
|
|
+ bitField0_ |= 0x00000010;
|
|
|
|
+ blockPoolUsed_ = input.readUInt64();
|
|
|
|
+ break;
|
|
|
|
+ }
|
|
|
|
+ case 48: {
|
|
|
|
+ bitField0_ |= 0x00000020;
|
|
|
|
+ xmitsInProgress_ = input.readUInt32();
|
|
|
|
+ break;
|
|
|
|
+ }
|
|
|
|
+ case 56: {
|
|
|
|
+ bitField0_ |= 0x00000040;
|
|
|
|
+ xceiverCount_ = input.readUInt32();
|
|
|
|
+ break;
|
|
|
|
+ }
|
|
|
|
+ case 64: {
|
|
|
|
+ bitField0_ |= 0x00000080;
|
|
|
|
+ failedVolumes_ = input.readUInt32();
|
|
|
|
+ break;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private int bitField0_;
|
|
|
|
+
|
|
|
|
+ // required .DatanodeRegistrationProto registration = 1;
|
|
|
|
+ private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto registration_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance();
|
|
|
|
+ private com.google.protobuf.SingleFieldBuilder<
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder> registrationBuilder_;
|
|
|
|
+ public boolean hasRegistration() {
|
|
|
|
+ return ((bitField0_ & 0x00000001) == 0x00000001);
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto getRegistration() {
|
|
|
|
+ if (registrationBuilder_ == null) {
|
|
|
|
+ return registration_;
|
|
|
|
+ } else {
|
|
|
|
+ return registrationBuilder_.getMessage();
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ public Builder setRegistration(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto value) {
|
|
|
|
+ if (registrationBuilder_ == null) {
|
|
|
|
+ if (value == null) {
|
|
|
|
+ throw new NullPointerException();
|
|
|
|
+ }
|
|
|
|
+ registration_ = value;
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ registrationBuilder_.setMessage(value);
|
|
|
|
+ }
|
|
|
|
+ bitField0_ |= 0x00000001;
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder setRegistration(
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder builderForValue) {
|
|
|
|
+ if (registrationBuilder_ == null) {
|
|
|
|
+ registration_ = builderForValue.build();
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ registrationBuilder_.setMessage(builderForValue.build());
|
|
|
|
+ }
|
|
|
|
+ bitField0_ |= 0x00000001;
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder mergeRegistration(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto value) {
|
|
|
|
+ if (registrationBuilder_ == null) {
|
|
|
|
+ if (((bitField0_ & 0x00000001) == 0x00000001) &&
|
|
|
|
+ registration_ != org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance()) {
|
|
|
|
+ registration_ =
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.newBuilder(registration_).mergeFrom(value).buildPartial();
|
|
|
|
+ } else {
|
|
|
|
+ registration_ = value;
|
|
|
|
+ }
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ registrationBuilder_.mergeFrom(value);
|
|
|
|
+ }
|
|
|
|
+ bitField0_ |= 0x00000001;
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder clearRegistration() {
|
|
|
|
+ if (registrationBuilder_ == null) {
|
|
|
|
+ registration_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance();
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ registrationBuilder_.clear();
|
|
|
|
+ }
|
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000001);
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder getRegistrationBuilder() {
|
|
|
|
+ bitField0_ |= 0x00000001;
|
|
|
|
+ onChanged();
|
|
|
|
+ return getRegistrationFieldBuilder().getBuilder();
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder getRegistrationOrBuilder() {
|
|
|
|
+ if (registrationBuilder_ != null) {
|
|
|
|
+ return registrationBuilder_.getMessageOrBuilder();
|
|
|
|
+ } else {
|
|
|
|
+ return registration_;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ private com.google.protobuf.SingleFieldBuilder<
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder>
|
|
|
|
+ getRegistrationFieldBuilder() {
|
|
|
|
+ if (registrationBuilder_ == null) {
|
|
|
|
+ registrationBuilder_ = new com.google.protobuf.SingleFieldBuilder<
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder>(
|
|
|
|
+ registration_,
|
|
|
|
+ getParentForChildren(),
|
|
|
|
+ isClean());
|
|
|
|
+ registration_ = null;
|
|
|
|
+ }
|
|
|
|
+ return registrationBuilder_;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ // required uint64 capacity = 2;
|
|
|
|
+ private long capacity_ ;
|
|
|
|
+ public boolean hasCapacity() {
|
|
|
|
+ return ((bitField0_ & 0x00000002) == 0x00000002);
|
|
|
|
+ }
|
|
|
|
+ public long getCapacity() {
|
|
|
|
+ return capacity_;
|
|
|
|
+ }
|
|
|
|
+ public Builder setCapacity(long value) {
|
|
|
|
+ bitField0_ |= 0x00000002;
|
|
|
|
+ capacity_ = value;
|
|
|
|
+ onChanged();
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder clearCapacity() {
|
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000002);
|
|
|
|
+ capacity_ = 0L;
|
|
|
|
+ onChanged();
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ // required uint64 dfsUsed = 3;
|
|
|
|
+ private long dfsUsed_ ;
|
|
|
|
+ public boolean hasDfsUsed() {
|
|
|
|
+ return ((bitField0_ & 0x00000004) == 0x00000004);
|
|
|
|
+ }
|
|
|
|
+ public long getDfsUsed() {
|
|
|
|
+ return dfsUsed_;
|
|
|
|
+ }
|
|
|
|
+ public Builder setDfsUsed(long value) {
|
|
|
|
+ bitField0_ |= 0x00000004;
|
|
|
|
+ dfsUsed_ = value;
|
|
|
|
+ onChanged();
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder clearDfsUsed() {
|
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000004);
|
|
|
|
+ dfsUsed_ = 0L;
|
|
|
|
+ onChanged();
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ // required uint64 remaining = 4;
|
|
|
|
+ private long remaining_ ;
|
|
|
|
+ public boolean hasRemaining() {
|
|
|
|
+ return ((bitField0_ & 0x00000008) == 0x00000008);
|
|
|
|
+ }
|
|
|
|
+ public long getRemaining() {
|
|
|
|
+ return remaining_;
|
|
|
|
+ }
|
|
|
|
+ public Builder setRemaining(long value) {
|
|
|
|
+ bitField0_ |= 0x00000008;
|
|
|
|
+ remaining_ = value;
|
|
|
|
+ onChanged();
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder clearRemaining() {
|
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000008);
|
|
|
|
+ remaining_ = 0L;
|
|
|
|
+ onChanged();
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ // required uint64 blockPoolUsed = 5;
|
|
|
|
+ private long blockPoolUsed_ ;
|
|
|
|
+ public boolean hasBlockPoolUsed() {
|
|
|
|
+ return ((bitField0_ & 0x00000010) == 0x00000010);
|
|
|
|
+ }
|
|
|
|
+ public long getBlockPoolUsed() {
|
|
|
|
+ return blockPoolUsed_;
|
|
|
|
+ }
|
|
|
|
+ public Builder setBlockPoolUsed(long value) {
|
|
|
|
+ bitField0_ |= 0x00000010;
|
|
|
|
+ blockPoolUsed_ = value;
|
|
|
|
+ onChanged();
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder clearBlockPoolUsed() {
|
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000010);
|
|
|
|
+ blockPoolUsed_ = 0L;
|
|
|
|
+ onChanged();
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ // required uint32 xmitsInProgress = 6;
|
|
|
|
+ private int xmitsInProgress_ ;
|
|
|
|
+ public boolean hasXmitsInProgress() {
|
|
|
|
+ return ((bitField0_ & 0x00000020) == 0x00000020);
|
|
|
|
+ }
|
|
|
|
+ public int getXmitsInProgress() {
|
|
|
|
+ return xmitsInProgress_;
|
|
|
|
+ }
|
|
|
|
+ public Builder setXmitsInProgress(int value) {
|
|
|
|
+ bitField0_ |= 0x00000020;
|
|
|
|
+ xmitsInProgress_ = value;
|
|
|
|
+ onChanged();
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder clearXmitsInProgress() {
|
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000020);
|
|
|
|
+ xmitsInProgress_ = 0;
|
|
|
|
+ onChanged();
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ // required uint32 xceiverCount = 7;
|
|
|
|
+ private int xceiverCount_ ;
|
|
|
|
+ public boolean hasXceiverCount() {
|
|
|
|
+ return ((bitField0_ & 0x00000040) == 0x00000040);
|
|
|
|
+ }
|
|
|
|
+ public int getXceiverCount() {
|
|
|
|
+ return xceiverCount_;
|
|
|
|
+ }
|
|
|
|
+ public Builder setXceiverCount(int value) {
|
|
|
|
+ bitField0_ |= 0x00000040;
|
|
|
|
+ xceiverCount_ = value;
|
|
|
|
+ onChanged();
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder clearXceiverCount() {
|
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000040);
|
|
|
|
+ xceiverCount_ = 0;
|
|
|
|
+ onChanged();
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ // required uint32 failedVolumes = 8;
|
|
|
|
+ private int failedVolumes_ ;
|
|
|
|
+ public boolean hasFailedVolumes() {
|
|
|
|
+ return ((bitField0_ & 0x00000080) == 0x00000080);
|
|
|
|
+ }
|
|
|
|
+ public int getFailedVolumes() {
|
|
|
|
+ return failedVolumes_;
|
|
|
|
+ }
|
|
|
|
+ public Builder setFailedVolumes(int value) {
|
|
|
|
+ bitField0_ |= 0x00000080;
|
|
|
|
+ failedVolumes_ = value;
|
|
|
|
+ onChanged();
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder clearFailedVolumes() {
|
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000080);
|
|
|
|
+ failedVolumes_ = 0;
|
|
|
|
+ onChanged();
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ // @@protoc_insertion_point(builder_scope:HeartbeatRequestProto)
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ static {
|
|
|
|
+ defaultInstance = new HeartbeatRequestProto(true);
|
|
|
|
+ defaultInstance.initFields();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ // @@protoc_insertion_point(class_scope:HeartbeatRequestProto)
+ }
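+
+   // Editor's note: the method below is an illustrative, hand-written usage
+   // sketch, not protoc output. It shows how a datanode-side caller might
+   // assemble a HeartbeatRequestProto with the builder API defined above;
+   // the `reg` argument and the byte counts are placeholder assumptions.
+   public static HeartbeatRequestProto buildHeartbeatSketch(
+       DatanodeRegistrationProto reg) {
+     return HeartbeatRequestProto.newBuilder()
+         .setRegistration(reg)                      // required field 1
+         .setCapacity(100L * 1024 * 1024 * 1024)    // required uint64 fields 2..5
+         .setDfsUsed(40L * 1024 * 1024 * 1024)
+         .setRemaining(60L * 1024 * 1024 * 1024)
+         .setBlockPoolUsed(10L * 1024 * 1024 * 1024)
+         .setXmitsInProgress(0)                     // required uint32 fields 6..8
+         .setXceiverCount(4)
+         .setFailedVolumes(0)
+         .build();  // build() throws if any required field is still unset
+   }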
+
|
|
|
|
+ public interface HeartbeatResponseProtoOrBuilder
|
|
|
|
+ extends com.google.protobuf.MessageOrBuilder {
|
|
|
|
+
|
|
|
|
+ // repeated .DatanodeCommandProto cmds = 1;
|
|
|
|
+ java.util.List<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto>
|
|
|
|
+ getCmdsList();
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto getCmds(int index);
|
|
|
|
+ int getCmdsCount();
|
|
|
|
+ java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProtoOrBuilder>
|
|
|
|
+ getCmdsOrBuilderList();
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProtoOrBuilder getCmdsOrBuilder(
|
|
|
|
+ int index);
|
|
|
|
+ }
|
|
|
|
+ public static final class HeartbeatResponseProto extends
|
|
|
|
+ com.google.protobuf.GeneratedMessage
|
|
|
|
+ implements HeartbeatResponseProtoOrBuilder {
|
|
|
|
+ // Use HeartbeatResponseProto.newBuilder() to construct.
|
|
|
|
+ private HeartbeatResponseProto(Builder builder) {
|
|
|
|
+ super(builder);
|
|
|
|
+ }
|
|
|
|
+ private HeartbeatResponseProto(boolean noInit) {}
|
|
|
|
+
|
|
|
|
+ private static final HeartbeatResponseProto defaultInstance;
|
|
|
|
+ public static HeartbeatResponseProto getDefaultInstance() {
|
|
|
|
+ return defaultInstance;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public HeartbeatResponseProto getDefaultInstanceForType() {
|
|
|
|
+ return defaultInstance;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public static final com.google.protobuf.Descriptors.Descriptor
|
|
|
|
+ getDescriptor() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_HeartbeatResponseProto_descriptor;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
|
|
|
+ internalGetFieldAccessorTable() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_HeartbeatResponseProto_fieldAccessorTable;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ // repeated .DatanodeCommandProto cmds = 1;
|
|
|
|
+ public static final int CMDS_FIELD_NUMBER = 1;
|
|
|
|
+ private java.util.List<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto> cmds_;
|
|
|
|
+ public java.util.List<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto> getCmdsList() {
|
|
|
|
+ return cmds_;
|
|
|
|
+ }
|
|
|
|
+ public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProtoOrBuilder>
|
|
|
|
+ getCmdsOrBuilderList() {
|
|
|
|
+ return cmds_;
|
|
|
|
+ }
|
|
|
|
+ public int getCmdsCount() {
|
|
|
|
+ return cmds_.size();
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto getCmds(int index) {
|
|
|
|
+ return cmds_.get(index);
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProtoOrBuilder getCmdsOrBuilder(
|
|
|
|
+ int index) {
|
|
|
|
+ return cmds_.get(index);
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private void initFields() {
|
|
|
|
+ cmds_ = java.util.Collections.emptyList();
|
|
|
|
+ }
|
|
|
|
+ private byte memoizedIsInitialized = -1;
|
|
|
|
+ public final boolean isInitialized() {
|
|
|
|
+ byte isInitialized = memoizedIsInitialized;
|
|
|
|
+ if (isInitialized != -1) return isInitialized == 1;
|
|
|
|
+
|
|
|
|
+ for (int i = 0; i < getCmdsCount(); i++) {
|
|
|
|
+ if (!getCmds(i).isInitialized()) {
|
|
|
|
+ memoizedIsInitialized = 0;
|
|
|
|
+ return false;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ memoizedIsInitialized = 1;
|
|
|
|
+ return true;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ getSerializedSize();
|
|
|
|
+ for (int i = 0; i < cmds_.size(); i++) {
|
|
|
|
+ output.writeMessage(1, cmds_.get(i));
|
|
|
|
+ }
|
|
|
|
+ getUnknownFields().writeTo(output);
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private int memoizedSerializedSize = -1;
|
|
|
|
+ public int getSerializedSize() {
|
|
|
|
+ int size = memoizedSerializedSize;
|
|
|
|
+ if (size != -1) return size;
|
|
|
|
+
|
|
|
|
+ size = 0;
|
|
|
|
+ for (int i = 0; i < cmds_.size(); i++) {
|
|
|
|
+ size += com.google.protobuf.CodedOutputStream
|
|
|
|
+ .computeMessageSize(1, cmds_.get(i));
|
|
|
|
+ }
|
|
|
|
+ size += getUnknownFields().getSerializedSize();
|
|
|
|
+ memoizedSerializedSize = size;
|
|
|
|
+ return size;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private static final long serialVersionUID = 0L;
|
|
|
|
+ @java.lang.Override
|
|
|
|
+ protected java.lang.Object writeReplace()
|
|
|
|
+ throws java.io.ObjectStreamException {
|
|
|
|
+ return super.writeReplace();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ @java.lang.Override
|
|
|
|
+ public boolean equals(final java.lang.Object obj) {
|
|
|
|
+ if (obj == this) {
|
|
|
|
+ return true;
|
|
|
|
+ }
|
|
|
|
+ if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto)) {
|
|
|
|
+ return super.equals(obj);
|
|
|
|
+ }
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto) obj;
|
|
|
|
+
|
|
|
|
+ boolean result = true;
|
|
|
|
+ result = result && getCmdsList()
|
|
|
|
+ .equals(other.getCmdsList());
|
|
|
|
+ result = result &&
|
|
|
|
+ getUnknownFields().equals(other.getUnknownFields());
|
|
|
|
+ return result;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ @java.lang.Override
|
|
|
|
+ public int hashCode() {
|
|
|
|
+ int hash = 41;
|
|
|
|
+ hash = (19 * hash) + getDescriptorForType().hashCode();
|
|
|
|
+ if (getCmdsCount() > 0) {
|
|
|
|
+ hash = (37 * hash) + CMDS_FIELD_NUMBER;
|
|
|
|
+ hash = (53 * hash) + getCmdsList().hashCode();
|
|
|
|
+ }
|
|
|
|
+ hash = (29 * hash) + getUnknownFields().hashCode();
|
|
|
|
+ return hash;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto parseFrom(
|
|
|
|
+ com.google.protobuf.ByteString data)
|
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
|
+ return newBuilder().mergeFrom(data).buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto parseFrom(
|
|
|
|
+ com.google.protobuf.ByteString data,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
|
+ return newBuilder().mergeFrom(data, extensionRegistry)
|
|
|
|
+ .buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto parseFrom(byte[] data)
|
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
|
+ return newBuilder().mergeFrom(data).buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto parseFrom(
|
|
|
|
+ byte[] data,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
|
+ return newBuilder().mergeFrom(data, extensionRegistry)
|
|
|
|
+ .buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto parseFrom(java.io.InputStream input)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ return newBuilder().mergeFrom(input).buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto parseFrom(
|
|
|
|
+ java.io.InputStream input,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ return newBuilder().mergeFrom(input, extensionRegistry)
|
|
|
|
+ .buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto parseDelimitedFrom(java.io.InputStream input)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ Builder builder = newBuilder();
|
|
|
|
+ if (builder.mergeDelimitedFrom(input)) {
|
|
|
|
+ return builder.buildParsed();
|
|
|
|
+ } else {
|
|
|
|
+ return null;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto parseDelimitedFrom(
|
|
|
|
+ java.io.InputStream input,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ Builder builder = newBuilder();
|
|
|
|
+ if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
|
|
|
|
+ return builder.buildParsed();
|
|
|
|
+ } else {
|
|
|
|
+ return null;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto parseFrom(
|
|
|
|
+ com.google.protobuf.CodedInputStream input)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ return newBuilder().mergeFrom(input).buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto parseFrom(
|
|
|
|
+ com.google.protobuf.CodedInputStream input,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ return newBuilder().mergeFrom(input, extensionRegistry)
|
|
|
|
+ .buildParsed();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public static Builder newBuilder() { return Builder.create(); }
|
|
|
|
+ public Builder newBuilderForType() { return newBuilder(); }
|
|
|
|
+ public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto prototype) {
|
|
|
|
+ return newBuilder().mergeFrom(prototype);
|
|
|
|
+ }
|
|
|
|
+ public Builder toBuilder() { return newBuilder(this); }
|
|
|
|
+
|
|
|
|
+ @java.lang.Override
|
|
|
|
+ protected Builder newBuilderForType(
|
|
|
|
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
|
|
|
|
+ Builder builder = new Builder(parent);
|
|
|
|
+ return builder;
|
|
|
|
+ }
|
|
|
|
+ public static final class Builder extends
|
|
|
|
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
|
|
|
|
+ implements org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProtoOrBuilder {
|
|
|
|
+ public static final com.google.protobuf.Descriptors.Descriptor
|
|
|
|
+ getDescriptor() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_HeartbeatResponseProto_descriptor;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
|
|
|
+ internalGetFieldAccessorTable() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_HeartbeatResponseProto_fieldAccessorTable;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ // Construct using org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto.newBuilder()
|
|
|
|
+ private Builder() {
|
|
|
|
+ maybeForceBuilderInitialization();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private Builder(BuilderParent parent) {
|
|
|
|
+ super(parent);
|
|
|
|
+ maybeForceBuilderInitialization();
|
|
|
|
+ }
|
|
|
|
+ private void maybeForceBuilderInitialization() {
|
|
|
|
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
|
|
|
|
+ getCmdsFieldBuilder();
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ private static Builder create() {
|
|
|
|
+ return new Builder();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public Builder clear() {
|
|
|
|
+ super.clear();
|
|
|
|
+ if (cmdsBuilder_ == null) {
|
|
|
|
+ cmds_ = java.util.Collections.emptyList();
|
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000001);
|
|
|
|
+ } else {
|
|
|
|
+ cmdsBuilder_.clear();
|
|
|
|
+ }
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public Builder clone() {
|
|
|
|
+ return create().mergeFrom(buildPartial());
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public com.google.protobuf.Descriptors.Descriptor
|
|
|
|
+ getDescriptorForType() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto.getDescriptor();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto getDefaultInstanceForType() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto.getDefaultInstance();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto build() {
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto result = buildPartial();
|
|
|
|
+ if (!result.isInitialized()) {
|
|
|
|
+ throw newUninitializedMessageException(result);
|
|
|
|
+ }
|
|
|
|
+ return result;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto buildParsed()
|
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto result = buildPartial();
|
|
|
|
+ if (!result.isInitialized()) {
|
|
|
|
+ throw newUninitializedMessageException(
|
|
|
|
+ result).asInvalidProtocolBufferException();
|
|
|
|
+ }
|
|
|
|
+ return result;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto buildPartial() {
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto(this);
|
|
|
|
+ int from_bitField0_ = bitField0_;
|
|
|
|
+ if (cmdsBuilder_ == null) {
|
|
|
|
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
|
+ cmds_ = java.util.Collections.unmodifiableList(cmds_);
|
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000001);
|
|
|
|
+ }
|
|
|
|
+ result.cmds_ = cmds_;
|
|
|
|
+ } else {
|
|
|
|
+ result.cmds_ = cmdsBuilder_.build();
|
|
|
|
+ }
|
|
|
|
+ onBuilt();
|
|
|
|
+ return result;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public Builder mergeFrom(com.google.protobuf.Message other) {
|
|
|
|
+ if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto) {
|
|
|
|
+ return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto)other);
|
|
|
|
+ } else {
|
|
|
|
+ super.mergeFrom(other);
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto other) {
|
|
|
|
+ if (other == org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto.getDefaultInstance()) return this;
|
|
|
|
+ if (cmdsBuilder_ == null) {
|
|
|
|
+ if (!other.cmds_.isEmpty()) {
|
|
|
|
+ if (cmds_.isEmpty()) {
|
|
|
|
+ cmds_ = other.cmds_;
|
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000001);
|
|
|
|
+ } else {
|
|
|
|
+ ensureCmdsIsMutable();
|
|
|
|
+ cmds_.addAll(other.cmds_);
|
|
|
|
+ }
|
|
|
|
+ onChanged();
|
|
|
|
+ }
|
|
|
|
+ } else {
|
|
|
|
+ if (!other.cmds_.isEmpty()) {
|
|
|
|
+ if (cmdsBuilder_.isEmpty()) {
|
|
|
|
+ cmdsBuilder_.dispose();
|
|
|
|
+ cmdsBuilder_ = null;
|
|
|
|
+ cmds_ = other.cmds_;
|
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000001);
|
|
|
|
+ cmdsBuilder_ =
|
|
|
|
+ com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
|
|
|
|
+ getCmdsFieldBuilder() : null;
|
|
|
|
+ } else {
|
|
|
|
+ cmdsBuilder_.addAllMessages(other.cmds_);
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ this.mergeUnknownFields(other.getUnknownFields());
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public final boolean isInitialized() {
|
|
|
|
+ for (int i = 0; i < getCmdsCount(); i++) {
|
|
|
|
+ if (!getCmds(i).isInitialized()) {
|
|
|
|
+
|
|
|
|
+ return false;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ return true;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public Builder mergeFrom(
|
|
|
|
+ com.google.protobuf.CodedInputStream input,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
|
|
|
|
+ com.google.protobuf.UnknownFieldSet.newBuilder(
|
|
|
|
+ this.getUnknownFields());
|
|
|
|
+ while (true) {
|
|
|
|
+ int tag = input.readTag();
|
|
|
|
+ switch (tag) {
|
|
|
|
+ case 0:
|
|
|
|
+ this.setUnknownFields(unknownFields.build());
|
|
|
|
+ onChanged();
|
|
|
|
+ return this;
|
|
|
|
+ default: {
|
|
|
|
+ if (!parseUnknownField(input, unknownFields,
|
|
|
|
+ extensionRegistry, tag)) {
|
|
|
|
+ this.setUnknownFields(unknownFields.build());
|
|
|
|
+ onChanged();
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ break;
|
|
|
|
+ }
|
|
|
|
+ case 10: {
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.newBuilder();
|
|
|
|
+ input.readMessage(subBuilder, extensionRegistry);
|
|
|
|
+ addCmds(subBuilder.buildPartial());
|
|
|
|
+ break;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private int bitField0_;
|
|
|
|
+
|
|
|
|
+ // repeated .DatanodeCommandProto cmds = 1;
|
|
|
|
+ private java.util.List<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto> cmds_ =
|
|
|
|
+ java.util.Collections.emptyList();
|
|
|
|
+ private void ensureCmdsIsMutable() {
|
|
|
|
+ if (!((bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
|
+ cmds_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto>(cmds_);
|
|
|
|
+ bitField0_ |= 0x00000001;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private com.google.protobuf.RepeatedFieldBuilder<
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProtoOrBuilder> cmdsBuilder_;
|
|
|
|
+
|
|
|
|
+ public java.util.List<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto> getCmdsList() {
|
|
|
|
+ if (cmdsBuilder_ == null) {
|
|
|
|
+ return java.util.Collections.unmodifiableList(cmds_);
|
|
|
|
+ } else {
|
|
|
|
+ return cmdsBuilder_.getMessageList();
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ public int getCmdsCount() {
|
|
|
|
+ if (cmdsBuilder_ == null) {
|
|
|
|
+ return cmds_.size();
|
|
|
|
+ } else {
|
|
|
|
+ return cmdsBuilder_.getCount();
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto getCmds(int index) {
|
|
|
|
+ if (cmdsBuilder_ == null) {
|
|
|
|
+ return cmds_.get(index);
|
|
|
|
+ } else {
|
|
|
|
+ return cmdsBuilder_.getMessage(index);
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ public Builder setCmds(
|
|
|
|
+ int index, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto value) {
|
|
|
|
+ if (cmdsBuilder_ == null) {
|
|
|
|
+ if (value == null) {
|
|
|
|
+ throw new NullPointerException();
|
|
|
|
+ }
|
|
|
|
+ ensureCmdsIsMutable();
|
|
|
|
+ cmds_.set(index, value);
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ cmdsBuilder_.setMessage(index, value);
|
|
|
|
+ }
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder setCmds(
|
|
|
|
+ int index, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Builder builderForValue) {
|
|
|
|
+ if (cmdsBuilder_ == null) {
|
|
|
|
+ ensureCmdsIsMutable();
|
|
|
|
+ cmds_.set(index, builderForValue.build());
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ cmdsBuilder_.setMessage(index, builderForValue.build());
|
|
|
|
+ }
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder addCmds(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto value) {
|
|
|
|
+ if (cmdsBuilder_ == null) {
|
|
|
|
+ if (value == null) {
|
|
|
|
+ throw new NullPointerException();
|
|
|
|
+ }
|
|
|
|
+ ensureCmdsIsMutable();
|
|
|
|
+ cmds_.add(value);
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ cmdsBuilder_.addMessage(value);
|
|
|
|
+ }
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder addCmds(
|
|
|
|
+ int index, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto value) {
|
|
|
|
+ if (cmdsBuilder_ == null) {
|
|
|
|
+ if (value == null) {
|
|
|
|
+ throw new NullPointerException();
|
|
|
|
+ }
|
|
|
|
+ ensureCmdsIsMutable();
|
|
|
|
+ cmds_.add(index, value);
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ cmdsBuilder_.addMessage(index, value);
|
|
|
|
+ }
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder addCmds(
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Builder builderForValue) {
|
|
|
|
+ if (cmdsBuilder_ == null) {
|
|
|
|
+ ensureCmdsIsMutable();
|
|
|
|
+ cmds_.add(builderForValue.build());
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ cmdsBuilder_.addMessage(builderForValue.build());
|
|
|
|
+ }
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder addCmds(
|
|
|
|
+ int index, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Builder builderForValue) {
|
|
|
|
+ if (cmdsBuilder_ == null) {
|
|
|
|
+ ensureCmdsIsMutable();
|
|
|
|
+ cmds_.add(index, builderForValue.build());
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ cmdsBuilder_.addMessage(index, builderForValue.build());
|
|
|
|
+ }
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder addAllCmds(
|
|
|
|
+ java.lang.Iterable<? extends org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto> values) {
|
|
|
|
+ if (cmdsBuilder_ == null) {
|
|
|
|
+ ensureCmdsIsMutable();
|
|
|
|
+ super.addAll(values, cmds_);
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ cmdsBuilder_.addAllMessages(values);
|
|
|
|
+ }
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder clearCmds() {
|
|
|
|
+ if (cmdsBuilder_ == null) {
|
|
|
|
+ cmds_ = java.util.Collections.emptyList();
|
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000001);
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ cmdsBuilder_.clear();
|
|
|
|
+ }
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder removeCmds(int index) {
|
|
|
|
+ if (cmdsBuilder_ == null) {
|
|
|
|
+ ensureCmdsIsMutable();
|
|
|
|
+ cmds_.remove(index);
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ cmdsBuilder_.remove(index);
|
|
|
|
+ }
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Builder getCmdsBuilder(
|
|
|
|
+ int index) {
|
|
|
|
+ return getCmdsFieldBuilder().getBuilder(index);
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProtoOrBuilder getCmdsOrBuilder(
|
|
|
|
+ int index) {
|
|
|
|
+ if (cmdsBuilder_ == null) {
+ return cmds_.get(index);
+ } else {
+ return cmdsBuilder_.getMessageOrBuilder(index);
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProtoOrBuilder>
|
|
|
|
+ getCmdsOrBuilderList() {
|
|
|
|
+ if (cmdsBuilder_ != null) {
|
|
|
|
+ return cmdsBuilder_.getMessageOrBuilderList();
|
|
|
|
+ } else {
|
|
|
|
+ return java.util.Collections.unmodifiableList(cmds_);
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Builder addCmdsBuilder() {
|
|
|
|
+ return getCmdsFieldBuilder().addBuilder(
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.getDefaultInstance());
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Builder addCmdsBuilder(
|
|
|
|
+ int index) {
|
|
|
|
+ return getCmdsFieldBuilder().addBuilder(
|
|
|
|
+ index, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.getDefaultInstance());
|
|
|
|
+ }
|
|
|
|
+ public java.util.List<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Builder>
|
|
|
|
+ getCmdsBuilderList() {
|
|
|
|
+ return getCmdsFieldBuilder().getBuilderList();
|
|
|
|
+ }
|
|
|
|
+ private com.google.protobuf.RepeatedFieldBuilder<
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProtoOrBuilder>
|
|
|
|
+ getCmdsFieldBuilder() {
|
|
|
|
+ if (cmdsBuilder_ == null) {
|
|
|
|
+ cmdsBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProtoOrBuilder>(
|
|
|
|
+ cmds_,
|
|
|
|
+ ((bitField0_ & 0x00000001) == 0x00000001),
|
|
|
|
+ getParentForChildren(),
|
|
|
|
+ isClean());
|
|
|
|
+ cmds_ = null;
|
|
|
|
+ }
|
|
|
|
+ return cmdsBuilder_;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ // @@protoc_insertion_point(builder_scope:HeartbeatResponseProto)
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ static {
|
|
|
|
+ defaultInstance = new HeartbeatResponseProto(true);
|
|
|
|
+ defaultInstance.initFields();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ // @@protoc_insertion_point(class_scope:HeartbeatResponseProto)
+ }
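+
+   // Editor's note: illustrative, hand-written sketch, not protoc output. It
+   // shows how a caller might decode a HeartbeatResponseProto from raw bytes
+   // and walk the repeated `cmds` field; `responseBytes` is a placeholder for
+   // whatever payload the RPC layer returned.
+   public static void dumpHeartbeatResponseSketch(byte[] responseBytes)
+       throws com.google.protobuf.InvalidProtocolBufferException {
+     HeartbeatResponseProto resp = HeartbeatResponseProto.parseFrom(responseBytes);
+     // cmds is a repeated message field, so an empty list is a valid response.
+     System.out.println("commands in heartbeat response: " + resp.getCmdsCount());
+     for (DatanodeCommandProto cmd : resp.getCmdsList()) {
+       System.out.println("  command initialized: " + cmd.isInitialized());
+     }
+   }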
+
|
|
|
|
+ public interface BlockReportRequestProtoOrBuilder
|
|
|
|
+ extends com.google.protobuf.MessageOrBuilder {
|
|
|
|
+
|
|
|
|
+ // required .DatanodeRegistrationProto registration = 1;
|
|
|
|
+ boolean hasRegistration();
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto getRegistration();
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder getRegistrationOrBuilder();
|
|
|
|
+
|
|
|
|
+ // required string blockPoolId = 2;
|
|
|
|
+ boolean hasBlockPoolId();
|
|
|
|
+ String getBlockPoolId();
|
|
|
|
+
|
|
|
|
+ // repeated uint64 blocks = 3 [packed = true];
|
|
|
|
+ java.util.List<java.lang.Long> getBlocksList();
|
|
|
|
+ int getBlocksCount();
|
|
|
|
+ long getBlocks(int index);
|
|
|
|
+ }
|
|
|
|
+ public static final class BlockReportRequestProto extends
|
|
|
|
+ com.google.protobuf.GeneratedMessage
|
|
|
|
+ implements BlockReportRequestProtoOrBuilder {
|
|
|
|
+ // Use BlockReportRequestProto.newBuilder() to construct.
|
|
|
|
+ private BlockReportRequestProto(Builder builder) {
|
|
|
|
+ super(builder);
|
|
|
|
+ }
|
|
|
|
+ private BlockReportRequestProto(boolean noInit) {}
|
|
|
|
+
|
|
|
|
+ private static final BlockReportRequestProto defaultInstance;
|
|
|
|
+ public static BlockReportRequestProto getDefaultInstance() {
|
|
|
|
+ return defaultInstance;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public BlockReportRequestProto getDefaultInstanceForType() {
|
|
|
|
+ return defaultInstance;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public static final com.google.protobuf.Descriptors.Descriptor
|
|
|
|
+ getDescriptor() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_BlockReportRequestProto_descriptor;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
|
|
|
+ internalGetFieldAccessorTable() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_BlockReportRequestProto_fieldAccessorTable;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private int bitField0_;
|
|
|
|
+ // required .DatanodeRegistrationProto registration = 1;
|
|
|
|
+ public static final int REGISTRATION_FIELD_NUMBER = 1;
|
|
|
|
+ private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto registration_;
|
|
|
|
+ public boolean hasRegistration() {
|
|
|
|
+ return ((bitField0_ & 0x00000001) == 0x00000001);
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto getRegistration() {
|
|
|
|
+ return registration_;
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder getRegistrationOrBuilder() {
|
|
|
|
+ return registration_;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ // required string blockPoolId = 2;
|
|
|
|
+ public static final int BLOCKPOOLID_FIELD_NUMBER = 2;
|
|
|
|
+ private java.lang.Object blockPoolId_;
|
|
|
|
+ public boolean hasBlockPoolId() {
|
|
|
|
+ return ((bitField0_ & 0x00000002) == 0x00000002);
|
|
|
|
+ }
|
|
|
|
+ public String getBlockPoolId() {
|
|
|
|
+ java.lang.Object ref = blockPoolId_;
|
|
|
|
+ if (ref instanceof String) {
|
|
|
|
+ return (String) ref;
|
|
|
|
+ } else {
|
|
|
|
+ com.google.protobuf.ByteString bs =
|
|
|
|
+ (com.google.protobuf.ByteString) ref;
|
|
|
|
+ String s = bs.toStringUtf8();
|
|
|
|
+ if (com.google.protobuf.Internal.isValidUtf8(bs)) {
|
|
|
|
+ blockPoolId_ = s;
|
|
|
|
+ }
|
|
|
|
+ return s;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ private com.google.protobuf.ByteString getBlockPoolIdBytes() {
|
|
|
|
+ java.lang.Object ref = blockPoolId_;
|
|
|
|
+ if (ref instanceof String) {
|
|
|
|
+ com.google.protobuf.ByteString b =
|
|
|
|
+ com.google.protobuf.ByteString.copyFromUtf8((String) ref);
|
|
|
|
+ blockPoolId_ = b;
|
|
|
|
+ return b;
|
|
|
|
+ } else {
|
|
|
|
+ return (com.google.protobuf.ByteString) ref;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ // repeated uint64 blocks = 3 [packed = true];
|
|
|
|
+ public static final int BLOCKS_FIELD_NUMBER = 3;
|
|
|
|
+ private java.util.List<java.lang.Long> blocks_;
|
|
|
|
+ public java.util.List<java.lang.Long>
|
|
|
|
+ getBlocksList() {
|
|
|
|
+ return blocks_;
|
|
|
|
+ }
|
|
|
|
+ public int getBlocksCount() {
|
|
|
|
+ return blocks_.size();
|
|
|
|
+ }
|
|
|
|
+ public long getBlocks(int index) {
|
|
|
|
+ return blocks_.get(index);
|
|
|
|
+ }
|
|
|
|
+ private int blocksMemoizedSerializedSize = -1;
|
|
|
|
+
|
|
|
|
+ private void initFields() {
|
|
|
|
+ registration_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance();
|
|
|
|
+ blockPoolId_ = "";
+ blocks_ = java.util.Collections.emptyList();
+ }
|
|
|
|
+ private byte memoizedIsInitialized = -1;
|
|
|
|
+ public final boolean isInitialized() {
|
|
|
|
+ byte isInitialized = memoizedIsInitialized;
|
|
|
|
+ if (isInitialized != -1) return isInitialized == 1;
|
|
|
|
+
|
|
|
|
+ if (!hasRegistration()) {
|
|
|
|
+ memoizedIsInitialized = 0;
|
|
|
|
+ return false;
|
|
|
|
+ }
|
|
|
|
+ if (!hasBlockPoolId()) {
|
|
|
|
+ memoizedIsInitialized = 0;
|
|
|
|
+ return false;
|
|
|
|
+ }
|
|
|
|
+ if (!getRegistration().isInitialized()) {
|
|
|
|
+ memoizedIsInitialized = 0;
|
|
|
|
+ return false;
|
|
|
|
+ }
|
|
|
|
+ memoizedIsInitialized = 1;
|
|
|
|
+ return true;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ getSerializedSize();
|
|
|
|
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
|
+ output.writeMessage(1, registration_);
|
|
|
|
+ }
|
|
|
|
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
|
|
|
|
+ output.writeBytes(2, getBlockPoolIdBytes());
|
|
|
|
+ }
|
|
|
|
+ if (getBlocksList().size() > 0) {
|
|
|
|
+ output.writeRawVarint32(26);
|
|
|
|
+ output.writeRawVarint32(blocksMemoizedSerializedSize);
|
|
|
|
+ }
|
|
|
|
+ for (int i = 0; i < blocks_.size(); i++) {
|
|
|
|
+ output.writeUInt64NoTag(blocks_.get(i));
|
|
|
|
+ }
|
|
|
|
+ getUnknownFields().writeTo(output);
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private int memoizedSerializedSize = -1;
|
|
|
|
+ public int getSerializedSize() {
|
|
|
|
+ int size = memoizedSerializedSize;
|
|
|
|
+ if (size != -1) return size;
|
|
|
|
+
|
|
|
|
+ size = 0;
|
|
|
|
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
|
+ size += com.google.protobuf.CodedOutputStream
|
|
|
|
+ .computeMessageSize(1, registration_);
|
|
|
|
+ }
|
|
|
|
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
|
|
|
|
+ size += com.google.protobuf.CodedOutputStream
|
|
|
|
+ .computeBytesSize(2, getBlockPoolIdBytes());
|
|
|
|
+ }
|
|
|
|
+ {
|
|
|
|
+ int dataSize = 0;
|
|
|
|
+ for (int i = 0; i < blocks_.size(); i++) {
|
|
|
|
+ dataSize += com.google.protobuf.CodedOutputStream
|
|
|
|
+ .computeUInt64SizeNoTag(blocks_.get(i));
|
|
|
|
+ }
|
|
|
|
+ size += dataSize;
|
|
|
|
+ if (!getBlocksList().isEmpty()) {
|
|
|
|
+ size += 1;
|
|
|
|
+ size += com.google.protobuf.CodedOutputStream
|
|
|
|
+ .computeInt32SizeNoTag(dataSize);
|
|
|
|
+ }
|
|
|
|
+ blocksMemoizedSerializedSize = dataSize;
|
|
|
|
+ }
|
|
|
|
+ size += getUnknownFields().getSerializedSize();
|
|
|
|
+ memoizedSerializedSize = size;
|
|
|
|
+ return size;
+ }
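+
+   // Editor's note: hand-written sketch, not protoc output. The `blocks` field
+   // is declared `repeated uint64 blocks = 3 [packed = true]`, so it is written
+   // as one length-delimited record: a single tag byte (0x1A = field 3, wire
+   // type 2), a varint holding the payload length, then the block IDs as
+   // back-to-back varints with no per-element tags. The helper below mirrors
+   // the dataSize/blocksMemoizedSerializedSize arithmetic in getSerializedSize()
+   // for an arbitrary (placeholder) array of block IDs.
+   static int packedBlocksFieldSizeSketch(long[] blockIds) {
+     int dataSize = 0;
+     for (long id : blockIds) {
+       dataSize += com.google.protobuf.CodedOutputStream.computeUInt64SizeNoTag(id);
+     }
+     // one tag byte + the length varint + the packed payload itself
+     return 1
+         + com.google.protobuf.CodedOutputStream.computeInt32SizeNoTag(dataSize)
+         + dataSize;
+   }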
+
|
|
|
|
+ private static final long serialVersionUID = 0L;
|
|
|
|
+ @java.lang.Override
|
|
|
|
+ protected java.lang.Object writeReplace()
|
|
|
|
+ throws java.io.ObjectStreamException {
|
|
|
|
+ return super.writeReplace();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ @java.lang.Override
|
|
|
|
+ public boolean equals(final java.lang.Object obj) {
|
|
|
|
+ if (obj == this) {
|
|
|
|
+ return true;
|
|
|
|
+ }
|
|
|
|
+ if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto)) {
|
|
|
|
+ return super.equals(obj);
|
|
|
|
+ }
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto) obj;
|
|
|
|
+
|
|
|
|
+ boolean result = true;
|
|
|
|
+ result = result && (hasRegistration() == other.hasRegistration());
|
|
|
|
+ if (hasRegistration()) {
|
|
|
|
+ result = result && getRegistration()
|
|
|
|
+ .equals(other.getRegistration());
|
|
|
|
+ }
|
|
|
|
+ result = result && (hasBlockPoolId() == other.hasBlockPoolId());
|
|
|
|
+ if (hasBlockPoolId()) {
|
|
|
|
+ result = result && getBlockPoolId()
|
|
|
|
+ .equals(other.getBlockPoolId());
|
|
|
|
+ }
|
|
|
|
+ result = result && getBlocksList()
|
|
|
|
+ .equals(other.getBlocksList());
|
|
|
|
+ result = result &&
|
|
|
|
+ getUnknownFields().equals(other.getUnknownFields());
|
|
|
|
+ return result;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ @java.lang.Override
|
|
|
|
+ public int hashCode() {
|
|
|
|
+ int hash = 41;
|
|
|
|
+ hash = (19 * hash) + getDescriptorForType().hashCode();
|
|
|
|
+ if (hasRegistration()) {
|
|
|
|
+ hash = (37 * hash) + REGISTRATION_FIELD_NUMBER;
|
|
|
|
+ hash = (53 * hash) + getRegistration().hashCode();
|
|
|
|
+ }
|
|
|
|
+ if (hasBlockPoolId()) {
|
|
|
|
+ hash = (37 * hash) + BLOCKPOOLID_FIELD_NUMBER;
|
|
|
|
+ hash = (53 * hash) + getBlockPoolId().hashCode();
|
|
|
|
+ }
|
|
|
|
+ if (getBlocksCount() > 0) {
|
|
|
|
+ hash = (37 * hash) + BLOCKS_FIELD_NUMBER;
|
|
|
|
+ hash = (53 * hash) + getBlocksList().hashCode();
|
|
|
|
+ }
|
|
|
|
+ hash = (29 * hash) + getUnknownFields().hashCode();
|
|
|
|
+ return hash;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto parseFrom(
|
|
|
|
+ com.google.protobuf.ByteString data)
|
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
|
+ return newBuilder().mergeFrom(data).buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto parseFrom(
|
|
|
|
+ com.google.protobuf.ByteString data,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
|
+ return newBuilder().mergeFrom(data, extensionRegistry)
|
|
|
|
+ .buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto parseFrom(byte[] data)
|
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
|
+ return newBuilder().mergeFrom(data).buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto parseFrom(
|
|
|
|
+ byte[] data,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
|
+ return newBuilder().mergeFrom(data, extensionRegistry)
|
|
|
|
+ .buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto parseFrom(java.io.InputStream input)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ return newBuilder().mergeFrom(input).buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto parseFrom(
|
|
|
|
+ java.io.InputStream input,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ return newBuilder().mergeFrom(input, extensionRegistry)
|
|
|
|
+ .buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto parseDelimitedFrom(java.io.InputStream input)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ Builder builder = newBuilder();
|
|
|
|
+ if (builder.mergeDelimitedFrom(input)) {
|
|
|
|
+ return builder.buildParsed();
|
|
|
|
+ } else {
|
|
|
|
+ return null;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto parseDelimitedFrom(
|
|
|
|
+ java.io.InputStream input,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ Builder builder = newBuilder();
|
|
|
|
+ if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
|
|
|
|
+ return builder.buildParsed();
|
|
|
|
+ } else {
|
|
|
|
+ return null;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto parseFrom(
|
|
|
|
+ com.google.protobuf.CodedInputStream input)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ return newBuilder().mergeFrom(input).buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto parseFrom(
|
|
|
|
+ com.google.protobuf.CodedInputStream input,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ return newBuilder().mergeFrom(input, extensionRegistry)
|
|
|
|
+ .buildParsed();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public static Builder newBuilder() { return Builder.create(); }
|
|
|
|
+ public Builder newBuilderForType() { return newBuilder(); }
|
|
|
|
+ public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto prototype) {
|
|
|
|
+ return newBuilder().mergeFrom(prototype);
|
|
|
|
+ }
|
|
|
|
+ public Builder toBuilder() { return newBuilder(this); }
|
|
|
|
+
|
|
|
|
+ @java.lang.Override
|
|
|
|
+ protected Builder newBuilderForType(
|
|
|
|
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
|
|
|
|
+ Builder builder = new Builder(parent);
|
|
|
|
+ return builder;
|
|
|
|
+ }
|
|
|
|
+ public static final class Builder extends
|
|
|
|
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
|
|
|
|
+ implements org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProtoOrBuilder {
|
|
|
|
+ public static final com.google.protobuf.Descriptors.Descriptor
|
|
|
|
+ getDescriptor() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_BlockReportRequestProto_descriptor;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
|
|
|
+ internalGetFieldAccessorTable() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_BlockReportRequestProto_fieldAccessorTable;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ // Construct using org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto.newBuilder()
|
|
|
|
+ private Builder() {
|
|
|
|
+ maybeForceBuilderInitialization();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private Builder(BuilderParent parent) {
|
|
|
|
+ super(parent);
|
|
|
|
+ maybeForceBuilderInitialization();
|
|
|
|
+ }
|
|
|
|
+ private void maybeForceBuilderInitialization() {
|
|
|
|
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
|
|
|
|
+ getRegistrationFieldBuilder();
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ private static Builder create() {
|
|
|
|
+ return new Builder();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public Builder clear() {
|
|
|
|
+ super.clear();
|
|
|
|
+ if (registrationBuilder_ == null) {
|
|
|
|
+ registration_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance();
|
|
|
|
+ } else {
|
|
|
|
+ registrationBuilder_.clear();
|
|
|
|
+ }
|
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000001);
|
|
|
|
+ blockPoolId_ = "";
|
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000002);
|
|
|
|
+ blocks_ = java.util.Collections.emptyList();
|
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000004);
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public Builder clone() {
|
|
|
|
+ return create().mergeFrom(buildPartial());
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public com.google.protobuf.Descriptors.Descriptor
|
|
|
|
+ getDescriptorForType() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto.getDescriptor();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto getDefaultInstanceForType() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto.getDefaultInstance();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto build() {
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto result = buildPartial();
|
|
|
|
+ if (!result.isInitialized()) {
|
|
|
|
+ throw newUninitializedMessageException(result);
|
|
|
|
+ }
|
|
|
|
+ return result;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto buildParsed()
|
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto result = buildPartial();
|
|
|
|
+ if (!result.isInitialized()) {
|
|
|
|
+ throw newUninitializedMessageException(
|
|
|
|
+ result).asInvalidProtocolBufferException();
|
|
|
|
+ }
|
|
|
|
+ return result;
|
|
|
|
+ }
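+ // Note on the three build variants: build() enforces required fields and throws
+ // UninitializedMessageException on failure, buildParsed() rethrows the same
+ // condition as InvalidProtocolBufferException for the parseFrom() paths above,
+ // and buildPartial() performs no initialization check at all.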
|
|
|
|
+
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto buildPartial() {
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto(this);
|
|
|
|
+ int from_bitField0_ = bitField0_;
|
|
|
|
+ int to_bitField0_ = 0;
|
|
|
|
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
|
+ to_bitField0_ |= 0x00000001;
|
|
|
|
+ }
|
|
|
|
+ if (registrationBuilder_ == null) {
|
|
|
|
+ result.registration_ = registration_;
|
|
|
|
+ } else {
|
|
|
|
+ result.registration_ = registrationBuilder_.build();
|
|
|
|
+ }
|
|
|
|
+ if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
|
|
|
|
+ to_bitField0_ |= 0x00000002;
|
|
|
|
+ }
|
|
|
|
+ result.blockPoolId_ = blockPoolId_;
|
|
|
|
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
|
|
|
|
+ blocks_ = java.util.Collections.unmodifiableList(blocks_);
|
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000004);
|
|
|
|
+ }
|
|
|
|
+ result.blocks_ = blocks_;
|
|
|
|
+ result.bitField0_ = to_bitField0_;
|
|
|
|
+ onBuilt();
|
|
|
|
+ return result;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public Builder mergeFrom(com.google.protobuf.Message other) {
|
|
|
|
+ if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto) {
|
|
|
|
+ return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto)other);
|
|
|
|
+ } else {
|
|
|
|
+ super.mergeFrom(other);
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto other) {
|
|
|
|
+ if (other == org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto.getDefaultInstance()) return this;
|
|
|
|
+ if (other.hasRegistration()) {
|
|
|
|
+ mergeRegistration(other.getRegistration());
|
|
|
|
+ }
|
|
|
|
+ if (other.hasBlockPoolId()) {
|
|
|
|
+ setBlockPoolId(other.getBlockPoolId());
|
|
|
|
+ }
|
|
|
|
+ if (!other.blocks_.isEmpty()) {
|
|
|
|
+ if (blocks_.isEmpty()) {
|
|
|
|
+ blocks_ = other.blocks_;
|
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000004);
|
|
|
|
+ } else {
|
|
|
|
+ ensureBlocksIsMutable();
|
|
|
|
+ blocks_.addAll(other.blocks_);
|
|
|
|
+ }
|
|
|
|
+ onChanged();
|
|
|
|
+ }
|
|
|
|
+ this.mergeUnknownFields(other.getUnknownFields());
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public final boolean isInitialized() {
|
|
|
|
+ if (!hasRegistration()) {
|
|
|
|
+
|
|
|
|
+ return false;
|
|
|
|
+ }
|
|
|
|
+ if (!hasBlockPoolId()) {
|
|
|
|
+
|
|
|
|
+ return false;
|
|
|
|
+ }
|
|
|
|
+ if (!getRegistration().isInitialized()) {
|
|
|
|
+
|
|
|
|
+ return false;
|
|
|
|
+ }
|
|
|
|
+ return true;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public Builder mergeFrom(
|
|
|
|
+ com.google.protobuf.CodedInputStream input,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
|
|
|
|
+ com.google.protobuf.UnknownFieldSet.newBuilder(
|
|
|
|
+ this.getUnknownFields());
|
|
|
|
+ while (true) {
|
|
|
|
+ int tag = input.readTag();
|
|
|
|
+ switch (tag) {
|
|
|
|
+ case 0:
|
|
|
|
+ this.setUnknownFields(unknownFields.build());
|
|
|
|
+ onChanged();
|
|
|
|
+ return this;
|
|
|
|
+ default: {
|
|
|
|
+ if (!parseUnknownField(input, unknownFields,
|
|
|
|
+ extensionRegistry, tag)) {
|
|
|
|
+ this.setUnknownFields(unknownFields.build());
|
|
|
|
+ onChanged();
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ break;
|
|
|
|
+ }
|
|
|
|
+ case 10: {
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.newBuilder();
|
|
|
|
+ if (hasRegistration()) {
|
|
|
|
+ subBuilder.mergeFrom(getRegistration());
|
|
|
|
+ }
|
|
|
|
+ input.readMessage(subBuilder, extensionRegistry);
|
|
|
|
+ setRegistration(subBuilder.buildPartial());
|
|
|
|
+ break;
|
|
|
|
+ }
|
|
|
|
+ case 18: {
|
|
|
|
+ bitField0_ |= 0x00000002;
|
|
|
|
+ blockPoolId_ = input.readBytes();
|
|
|
|
+ break;
|
|
|
|
+ }
|
|
|
|
+ case 24: {
|
|
|
|
+ ensureBlocksIsMutable();
|
|
|
|
+ blocks_.add(input.readUInt64());
|
|
|
|
+ break;
|
|
|
|
+ }
|
|
|
|
+ case 26: {
|
|
|
|
+ int length = input.readRawVarint32();
|
|
|
|
+ int limit = input.pushLimit(length);
|
|
|
|
+ while (input.getBytesUntilLimit() > 0) {
|
|
|
|
+ addBlocks(input.readUInt64());
|
|
|
|
+ }
|
|
|
|
+ input.popLimit(limit);
|
|
|
|
+ break;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ }
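+ // Tag values in the switch above follow the protobuf wire format,
+ // tag = (field_number << 3) | wire_type: 10 is field 1 (registration,
+ // length-delimited message), 18 is field 2 (blockPoolId, length-delimited
+ // string), 24 is field 3 as a single varint, and 26 is field 3 in its
+ // packed length-delimited form. A tag of 0 marks end of input.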
|
|
|
|
+
|
|
|
|
+ private int bitField0_;
|
|
|
|
+
|
|
|
|
+ // required .DatanodeRegistrationProto registration = 1;
|
|
|
|
+ private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto registration_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance();
|
|
|
|
+ private com.google.protobuf.SingleFieldBuilder<
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder> registrationBuilder_;
|
|
|
|
+ public boolean hasRegistration() {
|
|
|
|
+ return ((bitField0_ & 0x00000001) == 0x00000001);
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto getRegistration() {
|
|
|
|
+ if (registrationBuilder_ == null) {
|
|
|
|
+ return registration_;
|
|
|
|
+ } else {
|
|
|
|
+ return registrationBuilder_.getMessage();
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ public Builder setRegistration(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto value) {
|
|
|
|
+ if (registrationBuilder_ == null) {
|
|
|
|
+ if (value == null) {
|
|
|
|
+ throw new NullPointerException();
|
|
|
|
+ }
|
|
|
|
+ registration_ = value;
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ registrationBuilder_.setMessage(value);
|
|
|
|
+ }
|
|
|
|
+ bitField0_ |= 0x00000001;
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder setRegistration(
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder builderForValue) {
|
|
|
|
+ if (registrationBuilder_ == null) {
|
|
|
|
+ registration_ = builderForValue.build();
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ registrationBuilder_.setMessage(builderForValue.build());
|
|
|
|
+ }
|
|
|
|
+ bitField0_ |= 0x00000001;
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder mergeRegistration(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto value) {
|
|
|
|
+ if (registrationBuilder_ == null) {
|
|
|
|
+ if (((bitField0_ & 0x00000001) == 0x00000001) &&
|
|
|
|
+ registration_ != org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance()) {
|
|
|
|
+ registration_ =
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.newBuilder(registration_).mergeFrom(value).buildPartial();
|
|
|
|
+ } else {
|
|
|
|
+ registration_ = value;
|
|
|
|
+ }
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ registrationBuilder_.mergeFrom(value);
|
|
|
|
+ }
|
|
|
|
+ bitField0_ |= 0x00000001;
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder clearRegistration() {
|
|
|
|
+ if (registrationBuilder_ == null) {
|
|
|
|
+ registration_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance();
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ registrationBuilder_.clear();
|
|
|
|
+ }
|
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000001);
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder getRegistrationBuilder() {
|
|
|
|
+ bitField0_ |= 0x00000001;
|
|
|
|
+ onChanged();
|
|
|
|
+ return getRegistrationFieldBuilder().getBuilder();
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder getRegistrationOrBuilder() {
|
|
|
|
+ if (registrationBuilder_ != null) {
|
|
|
|
+ return registrationBuilder_.getMessageOrBuilder();
|
|
|
|
+ } else {
|
|
|
|
+ return registration_;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ private com.google.protobuf.SingleFieldBuilder<
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder>
|
|
|
|
+ getRegistrationFieldBuilder() {
|
|
|
|
+ if (registrationBuilder_ == null) {
|
|
|
|
+ registrationBuilder_ = new com.google.protobuf.SingleFieldBuilder<
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder>(
|
|
|
|
+ registration_,
|
|
|
|
+ getParentForChildren(),
|
|
|
|
+ isClean());
|
|
|
|
+ registration_ = null;
|
|
|
|
+ }
|
|
|
|
+ return registrationBuilder_;
|
|
|
|
+ }
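+ // The SingleFieldBuilder above is created lazily: until getRegistrationFieldBuilder()
+ // runs, the field value lives in registration_; afterwards reads and writes are
+ // delegated to registrationBuilder_ and registration_ is set to null.
+ // maybeForceBuilderInitialization() forces this eagerly when alwaysUseFieldBuilders
+ // is set, as seen near the top of this Builder.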
|
|
|
|
+
|
|
|
|
+ // required string blockPoolId = 2;
|
|
|
|
+ private java.lang.Object blockPoolId_ = "";
|
|
|
|
+ public boolean hasBlockPoolId() {
|
|
|
|
+ return ((bitField0_ & 0x00000002) == 0x00000002);
|
|
|
|
+ }
|
|
|
|
+ public String getBlockPoolId() {
|
|
|
|
+ java.lang.Object ref = blockPoolId_;
|
|
|
|
+ if (!(ref instanceof String)) {
|
|
|
|
+ String s = ((com.google.protobuf.ByteString) ref).toStringUtf8();
|
|
|
|
+ blockPoolId_ = s;
|
|
|
|
+ return s;
|
|
|
|
+ } else {
|
|
|
|
+ return (String) ref;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ public Builder setBlockPoolId(String value) {
|
|
|
|
+ if (value == null) {
|
|
|
|
+ throw new NullPointerException();
|
|
|
|
+ }
|
|
|
|
+ bitField0_ |= 0x00000002;
|
|
|
|
+ blockPoolId_ = value;
|
|
|
|
+ onChanged();
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder clearBlockPoolId() {
|
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000002);
|
|
|
|
+ blockPoolId_ = getDefaultInstance().getBlockPoolId();
|
|
|
|
+ onChanged();
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ void setBlockPoolId(com.google.protobuf.ByteString value) {
|
|
|
|
+ bitField0_ |= 0x00000002;
|
|
|
|
+ blockPoolId_ = value;
|
|
|
|
+ onChanged();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ // repeated uint64 blocks = 3 [packed = true];
|
|
|
|
+ private java.util.List<java.lang.Long> blocks_ = java.util.Collections.emptyList();
|
|
|
|
+ private void ensureBlocksIsMutable() {
|
|
|
|
+ if (!((bitField0_ & 0x00000004) == 0x00000004)) {
|
|
|
|
+ blocks_ = new java.util.ArrayList<java.lang.Long>(blocks_);
|
|
|
|
+ bitField0_ |= 0x00000004;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ public java.util.List<java.lang.Long>
|
|
|
|
+ getBlocksList() {
|
|
|
|
+ return java.util.Collections.unmodifiableList(blocks_);
|
|
|
|
+ }
|
|
|
|
+ public int getBlocksCount() {
|
|
|
|
+ return blocks_.size();
|
|
|
|
+ }
|
|
|
|
+ public long getBlocks(int index) {
|
|
|
|
+ return blocks_.get(index);
|
|
|
|
+ }
|
|
|
|
+ public Builder setBlocks(
|
|
|
|
+ int index, long value) {
|
|
|
|
+ ensureBlocksIsMutable();
|
|
|
|
+ blocks_.set(index, value);
|
|
|
|
+ onChanged();
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder addBlocks(long value) {
|
|
|
|
+ ensureBlocksIsMutable();
|
|
|
|
+ blocks_.add(value);
|
|
|
|
+ onChanged();
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder addAllBlocks(
|
|
|
|
+ java.lang.Iterable<? extends java.lang.Long> values) {
|
|
|
|
+ ensureBlocksIsMutable();
|
|
|
|
+ super.addAll(values, blocks_);
|
|
|
|
+ onChanged();
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder clearBlocks() {
|
|
|
|
+ blocks_ = java.util.Collections.emptyList();
|
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000004);
|
|
|
|
+ onChanged();
|
|
|
|
+ return this;
|
|
|
|
+ }
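+ // blocks is declared "repeated uint64 blocks = 3 [packed = true]", so it is
+ // serialized in the packed (tag 26) form, while the mergeFrom() loop above also
+ // accepts the unpacked (tag 24) form for compatibility. Bulk-add sketch with
+ // illustrative values:
+ //
+ //   builder.addAllBlocks(java.util.Arrays.asList(1L, 2L, 3L));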
|
|
|
|
+
|
|
|
|
+ // @@protoc_insertion_point(builder_scope:BlockReportRequestProto)
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ static {
|
|
|
|
+ defaultInstance = new BlockReportRequestProto(true);
|
|
|
|
+ defaultInstance.initFields();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ // @@protoc_insertion_point(class_scope:BlockReportRequestProto)
|
|
|
|
+ }
|
|
|
|
+
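+ // Round-trip sketch for the message above (writeDelimitedTo comes from the
+ // protobuf runtime rather than this file; streams are illustrative):
+ //
+ //   java.io.ByteArrayOutputStream out = new java.io.ByteArrayOutputStream();
+ //   req.writeDelimitedTo(out);
+ //   BlockReportRequestProto copy = BlockReportRequestProto.parseDelimitedFrom(
+ //       new java.io.ByteArrayInputStream(out.toByteArray()));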
|
|
|
|
+ public interface BlockReportResponseProtoOrBuilder
|
|
|
|
+ extends com.google.protobuf.MessageOrBuilder {
|
|
|
|
+
|
|
|
|
+ // required .DatanodeCommandProto cmd = 1;
|
|
|
|
+ boolean hasCmd();
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto getCmd();
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProtoOrBuilder getCmdOrBuilder();
|
|
|
|
+ }
|
|
|
|
+ public static final class BlockReportResponseProto extends
|
|
|
|
+ com.google.protobuf.GeneratedMessage
|
|
|
|
+ implements BlockReportResponseProtoOrBuilder {
|
|
|
|
+ // Use BlockReportResponseProto.newBuilder() to construct.
|
|
|
|
+ private BlockReportResponseProto(Builder builder) {
|
|
|
|
+ super(builder);
|
|
|
|
+ }
|
|
|
|
+ private BlockReportResponseProto(boolean noInit) {}
|
|
|
|
+
|
|
|
|
+ private static final BlockReportResponseProto defaultInstance;
|
|
|
|
+ public static BlockReportResponseProto getDefaultInstance() {
|
|
|
|
+ return defaultInstance;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public BlockReportResponseProto getDefaultInstanceForType() {
|
|
|
|
+ return defaultInstance;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public static final com.google.protobuf.Descriptors.Descriptor
|
|
|
|
+ getDescriptor() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_BlockReportResponseProto_descriptor;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
|
|
|
+ internalGetFieldAccessorTable() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_BlockReportResponseProto_fieldAccessorTable;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private int bitField0_;
|
|
|
|
+ // required .DatanodeCommandProto cmd = 1;
|
|
|
|
+ public static final int CMD_FIELD_NUMBER = 1;
|
|
|
|
+ private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto cmd_;
|
|
|
|
+ public boolean hasCmd() {
|
|
|
|
+ return ((bitField0_ & 0x00000001) == 0x00000001);
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto getCmd() {
|
|
|
|
+ return cmd_;
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProtoOrBuilder getCmdOrBuilder() {
|
|
|
|
+ return cmd_;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private void initFields() {
|
|
|
|
+ cmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.getDefaultInstance();
|
|
|
|
+ }
|
|
|
|
+ private byte memoizedIsInitialized = -1;
|
|
|
|
+ public final boolean isInitialized() {
|
|
|
|
+ byte isInitialized = memoizedIsInitialized;
|
|
|
|
+ if (isInitialized != -1) return isInitialized == 1;
|
|
|
|
+
|
|
|
|
+ if (!hasCmd()) {
|
|
|
|
+ memoizedIsInitialized = 0;
|
|
|
|
+ return false;
|
|
|
|
+ }
|
|
|
|
+ if (!getCmd().isInitialized()) {
|
|
|
|
+ memoizedIsInitialized = 0;
|
|
|
|
+ return false;
|
|
|
|
+ }
|
|
|
|
+ memoizedIsInitialized = 1;
|
|
|
|
+ return true;
|
|
|
|
+ }
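+ // isInitialized() caches its answer in memoizedIsInitialized (-1 unknown,
+ // 0 false, 1 true) and getSerializedSize() below memoizes in the same way;
+ // this is safe because the generated message objects are immutable once built.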
|
|
|
|
+
|
|
|
|
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ getSerializedSize();
|
|
|
|
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
|
+ output.writeMessage(1, cmd_);
|
|
|
|
+ }
|
|
|
|
+ getUnknownFields().writeTo(output);
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private int memoizedSerializedSize = -1;
|
|
|
|
+ public int getSerializedSize() {
|
|
|
|
+ int size = memoizedSerializedSize;
|
|
|
|
+ if (size != -1) return size;
|
|
|
|
+
|
|
|
|
+ size = 0;
|
|
|
|
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
|
+ size += com.google.protobuf.CodedOutputStream
|
|
|
|
+ .computeMessageSize(1, cmd_);
|
|
|
|
+ }
|
|
|
|
+ size += getUnknownFields().getSerializedSize();
|
|
|
|
+ memoizedSerializedSize = size;
|
|
|
|
+ return size;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private static final long serialVersionUID = 0L;
|
|
|
|
+ @java.lang.Override
|
|
|
|
+ protected java.lang.Object writeReplace()
|
|
|
|
+ throws java.io.ObjectStreamException {
|
|
|
|
+ return super.writeReplace();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ @java.lang.Override
|
|
|
|
+ public boolean equals(final java.lang.Object obj) {
|
|
|
|
+ if (obj == this) {
|
|
|
|
+ return true;
|
|
|
|
+ }
|
|
|
|
+ if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto)) {
|
|
|
|
+ return super.equals(obj);
|
|
|
|
+ }
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto) obj;
|
|
|
|
+
|
|
|
|
+ boolean result = true;
|
|
|
|
+ result = result && (hasCmd() == other.hasCmd());
|
|
|
|
+ if (hasCmd()) {
|
|
|
|
+ result = result && getCmd()
|
|
|
|
+ .equals(other.getCmd());
|
|
|
|
+ }
|
|
|
|
+ result = result &&
|
|
|
|
+ getUnknownFields().equals(other.getUnknownFields());
|
|
|
|
+ return result;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ @java.lang.Override
|
|
|
|
+ public int hashCode() {
|
|
|
|
+ int hash = 41;
|
|
|
|
+ hash = (19 * hash) + getDescriptorForType().hashCode();
|
|
|
|
+ if (hasCmd()) {
|
|
|
|
+ hash = (37 * hash) + CMD_FIELD_NUMBER;
|
|
|
|
+ hash = (53 * hash) + getCmd().hashCode();
|
|
|
|
+ }
|
|
|
|
+ hash = (29 * hash) + getUnknownFields().hashCode();
|
|
|
|
+ return hash;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto parseFrom(
|
|
|
|
+ com.google.protobuf.ByteString data)
|
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
|
+ return newBuilder().mergeFrom(data).buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto parseFrom(
|
|
|
|
+ com.google.protobuf.ByteString data,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
|
+ return newBuilder().mergeFrom(data, extensionRegistry)
|
|
|
|
+ .buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto parseFrom(byte[] data)
|
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
|
+ return newBuilder().mergeFrom(data).buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto parseFrom(
|
|
|
|
+ byte[] data,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
|
+ return newBuilder().mergeFrom(data, extensionRegistry)
|
|
|
|
+ .buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto parseFrom(java.io.InputStream input)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ return newBuilder().mergeFrom(input).buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto parseFrom(
|
|
|
|
+ java.io.InputStream input,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ return newBuilder().mergeFrom(input, extensionRegistry)
|
|
|
|
+ .buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto parseDelimitedFrom(java.io.InputStream input)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ Builder builder = newBuilder();
|
|
|
|
+ if (builder.mergeDelimitedFrom(input)) {
|
|
|
|
+ return builder.buildParsed();
|
|
|
|
+ } else {
|
|
|
|
+ return null;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto parseDelimitedFrom(
|
|
|
|
+ java.io.InputStream input,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ Builder builder = newBuilder();
|
|
|
|
+ if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
|
|
|
|
+ return builder.buildParsed();
|
|
|
|
+ } else {
|
|
|
|
+ return null;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto parseFrom(
|
|
|
|
+ com.google.protobuf.CodedInputStream input)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ return newBuilder().mergeFrom(input).buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto parseFrom(
|
|
|
|
+ com.google.protobuf.CodedInputStream input,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ return newBuilder().mergeFrom(input, extensionRegistry)
|
|
|
|
+ .buildParsed();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public static Builder newBuilder() { return Builder.create(); }
|
|
|
|
+ public Builder newBuilderForType() { return newBuilder(); }
|
|
|
|
+ public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto prototype) {
|
|
|
|
+ return newBuilder().mergeFrom(prototype);
|
|
|
|
+ }
|
|
|
|
+ public Builder toBuilder() { return newBuilder(this); }
|
|
|
|
+
|
|
|
|
+ @java.lang.Override
|
|
|
|
+ protected Builder newBuilderForType(
|
|
|
|
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
|
|
|
|
+ Builder builder = new Builder(parent);
|
|
|
|
+ return builder;
|
|
|
|
+ }
|
|
|
|
+ public static final class Builder extends
|
|
|
|
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
|
|
|
|
+ implements org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProtoOrBuilder {
|
|
|
|
+ public static final com.google.protobuf.Descriptors.Descriptor
|
|
|
|
+ getDescriptor() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_BlockReportResponseProto_descriptor;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
|
|
|
+ internalGetFieldAccessorTable() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_BlockReportResponseProto_fieldAccessorTable;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ // Construct using org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto.newBuilder()
|
|
|
|
+ private Builder() {
|
|
|
|
+ maybeForceBuilderInitialization();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private Builder(BuilderParent parent) {
|
|
|
|
+ super(parent);
|
|
|
|
+ maybeForceBuilderInitialization();
|
|
|
|
+ }
|
|
|
|
+ private void maybeForceBuilderInitialization() {
|
|
|
|
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
|
|
|
|
+ getCmdFieldBuilder();
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ private static Builder create() {
|
|
|
|
+ return new Builder();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public Builder clear() {
|
|
|
|
+ super.clear();
|
|
|
|
+ if (cmdBuilder_ == null) {
|
|
|
|
+ cmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.getDefaultInstance();
|
|
|
|
+ } else {
|
|
|
|
+ cmdBuilder_.clear();
|
|
|
|
+ }
|
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000001);
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public Builder clone() {
|
|
|
|
+ return create().mergeFrom(buildPartial());
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public com.google.protobuf.Descriptors.Descriptor
|
|
|
|
+ getDescriptorForType() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto.getDescriptor();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto getDefaultInstanceForType() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto.getDefaultInstance();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto build() {
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto result = buildPartial();
|
|
|
|
+ if (!result.isInitialized()) {
|
|
|
|
+ throw newUninitializedMessageException(result);
|
|
|
|
+ }
|
|
|
|
+ return result;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto buildParsed()
|
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto result = buildPartial();
|
|
|
|
+ if (!result.isInitialized()) {
|
|
|
|
+ throw newUninitializedMessageException(
|
|
|
|
+ result).asInvalidProtocolBufferException();
|
|
|
|
+ }
|
|
|
|
+ return result;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto buildPartial() {
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto(this);
|
|
|
|
+ int from_bitField0_ = bitField0_;
|
|
|
|
+ int to_bitField0_ = 0;
|
|
|
|
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
|
+ to_bitField0_ |= 0x00000001;
|
|
|
|
+ }
|
|
|
|
+ if (cmdBuilder_ == null) {
|
|
|
|
+ result.cmd_ = cmd_;
|
|
|
|
+ } else {
|
|
|
|
+ result.cmd_ = cmdBuilder_.build();
|
|
|
|
+ }
|
|
|
|
+ result.bitField0_ = to_bitField0_;
|
|
|
|
+ onBuilt();
|
|
|
|
+ return result;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public Builder mergeFrom(com.google.protobuf.Message other) {
|
|
|
|
+ if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto) {
|
|
|
|
+ return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto)other);
|
|
|
|
+ } else {
|
|
|
|
+ super.mergeFrom(other);
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto other) {
|
|
|
|
+ if (other == org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto.getDefaultInstance()) return this;
|
|
|
|
+ if (other.hasCmd()) {
|
|
|
|
+ mergeCmd(other.getCmd());
|
|
|
|
+ }
|
|
|
|
+ this.mergeUnknownFields(other.getUnknownFields());
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public final boolean isInitialized() {
|
|
|
|
+ if (!hasCmd()) {
|
|
|
|
+
|
|
|
|
+ return false;
|
|
|
|
+ }
|
|
|
|
+ if (!getCmd().isInitialized()) {
|
|
|
|
+
|
|
|
|
+ return false;
|
|
|
|
+ }
|
|
|
|
+ return true;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public Builder mergeFrom(
|
|
|
|
+ com.google.protobuf.CodedInputStream input,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
|
|
|
|
+ com.google.protobuf.UnknownFieldSet.newBuilder(
|
|
|
|
+ this.getUnknownFields());
|
|
|
|
+ while (true) {
|
|
|
|
+ int tag = input.readTag();
|
|
|
|
+ switch (tag) {
|
|
|
|
+ case 0:
|
|
|
|
+ this.setUnknownFields(unknownFields.build());
|
|
|
|
+ onChanged();
|
|
|
|
+ return this;
|
|
|
|
+ default: {
|
|
|
|
+ if (!parseUnknownField(input, unknownFields,
|
|
|
|
+ extensionRegistry, tag)) {
|
|
|
|
+ this.setUnknownFields(unknownFields.build());
|
|
|
|
+ onChanged();
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ break;
|
|
|
|
+ }
|
|
|
|
+ case 10: {
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.newBuilder();
|
|
|
|
+ if (hasCmd()) {
|
|
|
|
+ subBuilder.mergeFrom(getCmd());
|
|
|
|
+ }
|
|
|
|
+ input.readMessage(subBuilder, extensionRegistry);
|
|
|
|
+ setCmd(subBuilder.buildPartial());
|
|
|
|
+ break;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private int bitField0_;
|
|
|
|
+
|
|
|
|
+ // required .DatanodeCommandProto cmd = 1;
|
|
|
|
+ private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto cmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.getDefaultInstance();
|
|
|
|
+ private com.google.protobuf.SingleFieldBuilder<
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProtoOrBuilder> cmdBuilder_;
|
|
|
|
+ public boolean hasCmd() {
|
|
|
|
+ return ((bitField0_ & 0x00000001) == 0x00000001);
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto getCmd() {
|
|
|
|
+ if (cmdBuilder_ == null) {
|
|
|
|
+ return cmd_;
|
|
|
|
+ } else {
|
|
|
|
+ return cmdBuilder_.getMessage();
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ public Builder setCmd(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto value) {
|
|
|
|
+ if (cmdBuilder_ == null) {
|
|
|
|
+ if (value == null) {
|
|
|
|
+ throw new NullPointerException();
|
|
|
|
+ }
|
|
|
|
+ cmd_ = value;
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ cmdBuilder_.setMessage(value);
|
|
|
|
+ }
|
|
|
|
+ bitField0_ |= 0x00000001;
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder setCmd(
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Builder builderForValue) {
|
|
|
|
+ if (cmdBuilder_ == null) {
|
|
|
|
+ cmd_ = builderForValue.build();
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ cmdBuilder_.setMessage(builderForValue.build());
|
|
|
|
+ }
|
|
|
|
+ bitField0_ |= 0x00000001;
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder mergeCmd(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto value) {
|
|
|
|
+ if (cmdBuilder_ == null) {
|
|
|
|
+ if (((bitField0_ & 0x00000001) == 0x00000001) &&
|
|
|
|
+ cmd_ != org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.getDefaultInstance()) {
|
|
|
|
+ cmd_ =
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.newBuilder(cmd_).mergeFrom(value).buildPartial();
|
|
|
|
+ } else {
|
|
|
|
+ cmd_ = value;
|
|
|
|
+ }
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ cmdBuilder_.mergeFrom(value);
|
|
|
|
+ }
|
|
|
|
+ bitField0_ |= 0x00000001;
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder clearCmd() {
|
|
|
|
+ if (cmdBuilder_ == null) {
|
|
|
|
+ cmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.getDefaultInstance();
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ cmdBuilder_.clear();
|
|
|
|
+ }
|
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000001);
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Builder getCmdBuilder() {
|
|
|
|
+ bitField0_ |= 0x00000001;
|
|
|
|
+ onChanged();
|
|
|
|
+ return getCmdFieldBuilder().getBuilder();
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProtoOrBuilder getCmdOrBuilder() {
|
|
|
|
+ if (cmdBuilder_ != null) {
|
|
|
|
+ return cmdBuilder_.getMessageOrBuilder();
|
|
|
|
+ } else {
|
|
|
|
+ return cmd_;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ private com.google.protobuf.SingleFieldBuilder<
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProtoOrBuilder>
|
|
|
|
+ getCmdFieldBuilder() {
|
|
|
|
+ if (cmdBuilder_ == null) {
|
|
|
|
+ cmdBuilder_ = new com.google.protobuf.SingleFieldBuilder<
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProtoOrBuilder>(
|
|
|
|
+ cmd_,
|
|
|
|
+ getParentForChildren(),
|
|
|
|
+ isClean());
|
|
|
|
+ cmd_ = null;
|
|
|
|
+ }
|
|
|
|
+ return cmdBuilder_;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ // @@protoc_insertion_point(builder_scope:BlockReportResponseProto)
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ static {
|
|
|
|
+ defaultInstance = new BlockReportResponseProto(true);
|
|
|
|
+ defaultInstance.initFields();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ // @@protoc_insertion_point(class_scope:BlockReportResponseProto)
|
|
|
|
+ }
|
|
|
|
+
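+ // ReceivedDeletedBlockInfoProto, defined next, carries a required BlockProto plus
+ // an optional deleteHint string. Construction sketch (values are illustrative):
+ //
+ //   ReceivedDeletedBlockInfoProto info = ReceivedDeletedBlockInfoProto.newBuilder()
+ //       .setBlock(blockProto)        // a previously built HdfsProtos.BlockProto
+ //       .setDeleteHint("node-xyz")   // optional hint; illustrative value
+ //       .build();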
|
|
|
|
+ public interface ReceivedDeletedBlockInfoProtoOrBuilder
|
|
|
|
+ extends com.google.protobuf.MessageOrBuilder {
|
|
|
|
+
|
|
|
|
+ // required .BlockProto block = 1;
|
|
|
|
+ boolean hasBlock();
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto getBlock();
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder getBlockOrBuilder();
|
|
|
|
+
|
|
|
|
+ // optional string deleteHint = 2;
|
|
|
|
+ boolean hasDeleteHint();
|
|
|
|
+ String getDeleteHint();
|
|
|
|
+ }
|
|
|
|
+ public static final class ReceivedDeletedBlockInfoProto extends
|
|
|
|
+ com.google.protobuf.GeneratedMessage
|
|
|
|
+ implements ReceivedDeletedBlockInfoProtoOrBuilder {
|
|
|
|
+ // Use ReceivedDeletedBlockInfoProto.newBuilder() to construct.
|
|
|
|
+ private ReceivedDeletedBlockInfoProto(Builder builder) {
|
|
|
|
+ super(builder);
|
|
|
|
+ }
|
|
|
|
+ private ReceivedDeletedBlockInfoProto(boolean noInit) {}
|
|
|
|
+
|
|
|
|
+ private static final ReceivedDeletedBlockInfoProto defaultInstance;
|
|
|
|
+ public static ReceivedDeletedBlockInfoProto getDefaultInstance() {
|
|
|
|
+ return defaultInstance;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public ReceivedDeletedBlockInfoProto getDefaultInstanceForType() {
|
|
|
|
+ return defaultInstance;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public static final com.google.protobuf.Descriptors.Descriptor
|
|
|
|
+ getDescriptor() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_ReceivedDeletedBlockInfoProto_descriptor;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
|
|
|
+ internalGetFieldAccessorTable() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_ReceivedDeletedBlockInfoProto_fieldAccessorTable;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private int bitField0_;
|
|
|
|
+ // required .BlockProto block = 1;
|
|
|
|
+ public static final int BLOCK_FIELD_NUMBER = 1;
|
|
|
|
+ private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto block_;
|
|
|
|
+ public boolean hasBlock() {
|
|
|
|
+ return ((bitField0_ & 0x00000001) == 0x00000001);
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto getBlock() {
|
|
|
|
+ return block_;
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder getBlockOrBuilder() {
|
|
|
|
+ return block_;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ // optional string deleteHint = 2;
|
|
|
|
+ public static final int DELETEHINT_FIELD_NUMBER = 2;
|
|
|
|
+ private java.lang.Object deleteHint_;
|
|
|
|
+ public boolean hasDeleteHint() {
|
|
|
|
+ return ((bitField0_ & 0x00000002) == 0x00000002);
|
|
|
|
+ }
|
|
|
|
+ public String getDeleteHint() {
|
|
|
|
+ java.lang.Object ref = deleteHint_;
|
|
|
|
+ if (ref instanceof String) {
|
|
|
|
+ return (String) ref;
|
|
|
|
+ } else {
|
|
|
|
+ com.google.protobuf.ByteString bs =
|
|
|
|
+ (com.google.protobuf.ByteString) ref;
|
|
|
|
+ String s = bs.toStringUtf8();
|
|
|
|
+ if (com.google.protobuf.Internal.isValidUtf8(bs)) {
|
|
|
|
+ deleteHint_ = s;
|
|
|
|
+ }
|
|
|
|
+ return s;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ private com.google.protobuf.ByteString getDeleteHintBytes() {
|
|
|
|
+ java.lang.Object ref = deleteHint_;
|
|
|
|
+ if (ref instanceof String) {
|
|
|
|
+ com.google.protobuf.ByteString b =
|
|
|
|
+ com.google.protobuf.ByteString.copyFromUtf8((String) ref);
|
|
|
|
+ deleteHint_ = b;
|
|
|
|
+ return b;
|
|
|
|
+ } else {
|
|
|
|
+ return (com.google.protobuf.ByteString) ref;
|
|
|
|
+ }
|
|
|
|
+ }
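+ // deleteHint_ is stored as either a String or a ByteString: getDeleteHint() above
+ // decodes UTF-8 lazily and caches the String only when the bytes are valid UTF-8,
+ // while getDeleteHintBytes() converts in the other direction for serialization.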
|
|
|
|
+
|
|
|
|
+ private void initFields() {
|
|
|
|
+ block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.getDefaultInstance();
|
|
|
|
+ deleteHint_ = "";
|
|
|
|
+ }
|
|
|
|
+ private byte memoizedIsInitialized = -1;
|
|
|
|
+ public final boolean isInitialized() {
|
|
|
|
+ byte isInitialized = memoizedIsInitialized;
|
|
|
|
+ if (isInitialized != -1) return isInitialized == 1;
|
|
|
|
+
|
|
|
|
+ if (!hasBlock()) {
|
|
|
|
+ memoizedIsInitialized = 0;
|
|
|
|
+ return false;
|
|
|
|
+ }
|
|
|
|
+ if (!getBlock().isInitialized()) {
|
|
|
|
+ memoizedIsInitialized = 0;
|
|
|
|
+ return false;
|
|
|
|
+ }
|
|
|
|
+ memoizedIsInitialized = 1;
|
|
|
|
+ return true;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ getSerializedSize();
|
|
|
|
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
|
+ output.writeMessage(1, block_);
|
|
|
|
+ }
|
|
|
|
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
|
|
|
|
+ output.writeBytes(2, getDeleteHintBytes());
|
|
|
|
+ }
|
|
|
|
+ getUnknownFields().writeTo(output);
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private int memoizedSerializedSize = -1;
|
|
|
|
+ public int getSerializedSize() {
|
|
|
|
+ int size = memoizedSerializedSize;
|
|
|
|
+ if (size != -1) return size;
|
|
|
|
+
|
|
|
|
+ size = 0;
|
|
|
|
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
|
+ size += com.google.protobuf.CodedOutputStream
|
|
|
|
+ .computeMessageSize(1, block_);
|
|
|
|
+ }
|
|
|
|
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
|
|
|
|
+ size += com.google.protobuf.CodedOutputStream
|
|
|
|
+ .computeBytesSize(2, getDeleteHintBytes());
|
|
|
|
+ }
|
|
|
|
+ size += getUnknownFields().getSerializedSize();
|
|
|
|
+ memoizedSerializedSize = size;
|
|
|
|
+ return size;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private static final long serialVersionUID = 0L;
|
|
|
|
+ @java.lang.Override
|
|
|
|
+ protected java.lang.Object writeReplace()
|
|
|
|
+ throws java.io.ObjectStreamException {
|
|
|
|
+ return super.writeReplace();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ @java.lang.Override
|
|
|
|
+ public boolean equals(final java.lang.Object obj) {
|
|
|
|
+ if (obj == this) {
|
|
|
|
+ return true;
|
|
|
|
+ }
|
|
|
|
+ if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto)) {
|
|
|
|
+ return super.equals(obj);
|
|
|
|
+ }
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto other = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto) obj;
|
|
|
|
+
|
|
|
|
+ boolean result = true;
|
|
|
|
+ result = result && (hasBlock() == other.hasBlock());
|
|
|
|
+ if (hasBlock()) {
|
|
|
|
+ result = result && getBlock()
|
|
|
|
+ .equals(other.getBlock());
|
|
|
|
+ }
|
|
|
|
+ result = result && (hasDeleteHint() == other.hasDeleteHint());
|
|
|
|
+ if (hasDeleteHint()) {
|
|
|
|
+ result = result && getDeleteHint()
|
|
|
|
+ .equals(other.getDeleteHint());
|
|
|
|
+ }
|
|
|
|
+ result = result &&
|
|
|
|
+ getUnknownFields().equals(other.getUnknownFields());
|
|
|
|
+ return result;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ @java.lang.Override
|
|
|
|
+ public int hashCode() {
|
|
|
|
+ int hash = 41;
|
|
|
|
+ hash = (19 * hash) + getDescriptorForType().hashCode();
|
|
|
|
+ if (hasBlock()) {
|
|
|
|
+ hash = (37 * hash) + BLOCK_FIELD_NUMBER;
|
|
|
|
+ hash = (53 * hash) + getBlock().hashCode();
|
|
|
|
+ }
|
|
|
|
+ if (hasDeleteHint()) {
|
|
|
|
+ hash = (37 * hash) + DELETEHINT_FIELD_NUMBER;
|
|
|
|
+ hash = (53 * hash) + getDeleteHint().hashCode();
|
|
|
|
+ }
|
|
|
|
+ hash = (29 * hash) + getUnknownFields().hashCode();
|
|
|
|
+ return hash;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto parseFrom(
|
|
|
|
+ com.google.protobuf.ByteString data)
|
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
|
+ return newBuilder().mergeFrom(data).buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto parseFrom(
|
|
|
|
+ com.google.protobuf.ByteString data,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
|
+ return newBuilder().mergeFrom(data, extensionRegistry)
|
|
|
|
+ .buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto parseFrom(byte[] data)
|
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
|
+ return newBuilder().mergeFrom(data).buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto parseFrom(
|
|
|
|
+ byte[] data,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
|
+ return newBuilder().mergeFrom(data, extensionRegistry)
|
|
|
|
+ .buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto parseFrom(java.io.InputStream input)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ return newBuilder().mergeFrom(input).buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto parseFrom(
|
|
|
|
+ java.io.InputStream input,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ return newBuilder().mergeFrom(input, extensionRegistry)
|
|
|
|
+ .buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto parseDelimitedFrom(java.io.InputStream input)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ Builder builder = newBuilder();
|
|
|
|
+ if (builder.mergeDelimitedFrom(input)) {
|
|
|
|
+ return builder.buildParsed();
|
|
|
|
+ } else {
|
|
|
|
+ return null;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto parseDelimitedFrom(
|
|
|
|
+ java.io.InputStream input,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ Builder builder = newBuilder();
|
|
|
|
+ if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
|
|
|
|
+ return builder.buildParsed();
|
|
|
|
+ } else {
|
|
|
|
+ return null;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto parseFrom(
|
|
|
|
+ com.google.protobuf.CodedInputStream input)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ return newBuilder().mergeFrom(input).buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto parseFrom(
|
|
|
|
+ com.google.protobuf.CodedInputStream input,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ return newBuilder().mergeFrom(input, extensionRegistry)
|
|
|
|
+ .buildParsed();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public static Builder newBuilder() { return Builder.create(); }
|
|
|
|
+ public Builder newBuilderForType() { return newBuilder(); }
|
|
|
|
+ public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto prototype) {
|
|
|
|
+ return newBuilder().mergeFrom(prototype);
|
|
|
|
+ }
|
|
|
|
+ public Builder toBuilder() { return newBuilder(this); }
|
|
|
|
+
|
|
|
|
+ @java.lang.Override
|
|
|
|
+ protected Builder newBuilderForType(
|
|
|
|
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
|
|
|
|
+ Builder builder = new Builder(parent);
|
|
|
|
+ return builder;
|
|
|
|
+ }
|
|
|
|
+ public static final class Builder extends
|
|
|
|
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
|
|
|
|
+ implements org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProtoOrBuilder {
|
|
|
|
+ public static final com.google.protobuf.Descriptors.Descriptor
|
|
|
|
+ getDescriptor() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_ReceivedDeletedBlockInfoProto_descriptor;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
|
|
|
+ internalGetFieldAccessorTable() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_ReceivedDeletedBlockInfoProto_fieldAccessorTable;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ // Construct using org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto.newBuilder()
|
|
|
|
+ private Builder() {
|
|
|
|
+ maybeForceBuilderInitialization();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private Builder(BuilderParent parent) {
|
|
|
|
+ super(parent);
|
|
|
|
+ maybeForceBuilderInitialization();
|
|
|
|
+ }
|
|
|
|
+ private void maybeForceBuilderInitialization() {
|
|
|
|
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
|
|
|
|
+ getBlockFieldBuilder();
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ private static Builder create() {
|
|
|
|
+ return new Builder();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public Builder clear() {
|
|
|
|
+ super.clear();
|
|
|
|
+ if (blockBuilder_ == null) {
|
|
|
|
+ block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.getDefaultInstance();
|
|
|
|
+ } else {
|
|
|
|
+ blockBuilder_.clear();
|
|
|
|
+ }
|
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000001);
|
|
|
|
+ deleteHint_ = "";
|
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000002);
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public Builder clone() {
|
|
|
|
+ return create().mergeFrom(buildPartial());
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public com.google.protobuf.Descriptors.Descriptor
|
|
|
|
+ getDescriptorForType() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto.getDescriptor();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto getDefaultInstanceForType() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto.getDefaultInstance();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto build() {
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto result = buildPartial();
|
|
|
|
+ if (!result.isInitialized()) {
|
|
|
|
+ throw newUninitializedMessageException(result);
|
|
|
|
+ }
|
|
|
|
+ return result;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto buildParsed()
|
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto result = buildPartial();
|
|
|
|
+ if (!result.isInitialized()) {
|
|
|
|
+ throw newUninitializedMessageException(
|
|
|
|
+ result).asInvalidProtocolBufferException();
|
|
|
|
+ }
|
|
|
|
+ return result;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto buildPartial() {
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto result = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto(this);
|
|
|
|
+ int from_bitField0_ = bitField0_;
|
|
|
|
+ int to_bitField0_ = 0;
|
|
|
|
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
|
+ to_bitField0_ |= 0x00000001;
|
|
|
|
+ }
|
|
|
|
+ if (blockBuilder_ == null) {
|
|
|
|
+ result.block_ = block_;
|
|
|
|
+ } else {
|
|
|
|
+ result.block_ = blockBuilder_.build();
|
|
|
|
+ }
|
|
|
|
+ if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
|
|
|
|
+ to_bitField0_ |= 0x00000002;
|
|
|
|
+ }
|
|
|
|
+ result.deleteHint_ = deleteHint_;
|
|
|
|
+ result.bitField0_ = to_bitField0_;
|
|
|
|
+ onBuilt();
|
|
|
|
+ return result;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public Builder mergeFrom(com.google.protobuf.Message other) {
|
|
|
|
+ if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto) {
|
|
|
|
+ return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto)other);
|
|
|
|
+ } else {
|
|
|
|
+ super.mergeFrom(other);
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto other) {
|
|
|
|
+ if (other == org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto.getDefaultInstance()) return this;
|
|
|
|
+ if (other.hasBlock()) {
|
|
|
|
+ mergeBlock(other.getBlock());
|
|
|
|
+ }
|
|
|
|
+ if (other.hasDeleteHint()) {
|
|
|
|
+ setDeleteHint(other.getDeleteHint());
|
|
|
|
+ }
|
|
|
|
+ this.mergeUnknownFields(other.getUnknownFields());
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public final boolean isInitialized() {
|
|
|
|
+ if (!hasBlock()) {
|
|
|
|
+
|
|
|
|
+ return false;
|
|
|
|
+ }
|
|
|
|
+ if (!getBlock().isInitialized()) {
|
|
|
|
+
|
|
|
|
+ return false;
|
|
|
|
+ }
|
|
|
|
+ return true;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public Builder mergeFrom(
|
|
|
|
+ com.google.protobuf.CodedInputStream input,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
|
|
|
|
+ com.google.protobuf.UnknownFieldSet.newBuilder(
|
|
|
|
+ this.getUnknownFields());
|
|
|
|
+ while (true) {
|
|
|
|
+ int tag = input.readTag();
|
|
|
|
+ switch (tag) {
|
|
|
|
+ case 0:
|
|
|
|
+ this.setUnknownFields(unknownFields.build());
|
|
|
|
+ onChanged();
|
|
|
|
+ return this;
|
|
|
|
+ default: {
|
|
|
|
+ if (!parseUnknownField(input, unknownFields,
|
|
|
|
+ extensionRegistry, tag)) {
|
|
|
|
+ this.setUnknownFields(unknownFields.build());
|
|
|
|
+ onChanged();
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ break;
|
|
|
|
+ }
|
|
|
|
+ case 10: {
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.newBuilder();
|
|
|
|
+ if (hasBlock()) {
|
|
|
|
+ subBuilder.mergeFrom(getBlock());
|
|
|
|
+ }
|
|
|
|
+ input.readMessage(subBuilder, extensionRegistry);
|
|
|
|
+ setBlock(subBuilder.buildPartial());
|
|
|
|
+ break;
|
|
|
|
+ }
|
|
|
|
+ case 18: {
|
|
|
|
+ bitField0_ |= 0x00000002;
|
|
|
|
+ deleteHint_ = input.readBytes();
|
|
|
|
+ break;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private int bitField0_;
|
|
|
|
+
|
|
|
|
+ // required .BlockProto block = 1;
|
|
|
|
+ private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.getDefaultInstance();
|
|
|
|
+ private com.google.protobuf.SingleFieldBuilder<
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder> blockBuilder_;
|
|
|
|
+ public boolean hasBlock() {
|
|
|
|
+ return ((bitField0_ & 0x00000001) == 0x00000001);
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto getBlock() {
|
|
|
|
+ if (blockBuilder_ == null) {
|
|
|
|
+ return block_;
|
|
|
|
+ } else {
|
|
|
|
+ return blockBuilder_.getMessage();
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ public Builder setBlock(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto value) {
|
|
|
|
+ if (blockBuilder_ == null) {
|
|
|
|
+ if (value == null) {
|
|
|
|
+ throw new NullPointerException();
|
|
|
|
+ }
|
|
|
|
+ block_ = value;
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ blockBuilder_.setMessage(value);
|
|
|
|
+ }
|
|
|
|
+ bitField0_ |= 0x00000001;
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder setBlock(
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder builderForValue) {
|
|
|
|
+ if (blockBuilder_ == null) {
|
|
|
|
+ block_ = builderForValue.build();
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ blockBuilder_.setMessage(builderForValue.build());
|
|
|
|
+ }
|
|
|
|
+ bitField0_ |= 0x00000001;
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder mergeBlock(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto value) {
|
|
|
|
+ if (blockBuilder_ == null) {
|
|
|
|
+ if (((bitField0_ & 0x00000001) == 0x00000001) &&
|
|
|
|
+ block_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.getDefaultInstance()) {
|
|
|
|
+ block_ =
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.newBuilder(block_).mergeFrom(value).buildPartial();
|
|
|
|
+ } else {
|
|
|
|
+ block_ = value;
|
|
|
|
+ }
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ blockBuilder_.mergeFrom(value);
|
|
|
|
+ }
|
|
|
|
+ bitField0_ |= 0x00000001;
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder clearBlock() {
|
|
|
|
+ if (blockBuilder_ == null) {
|
|
|
|
+ block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.getDefaultInstance();
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ blockBuilder_.clear();
|
|
|
|
+ }
|
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000001);
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder getBlockBuilder() {
|
|
|
|
+ bitField0_ |= 0x00000001;
|
|
|
|
+ onChanged();
|
|
|
|
+ return getBlockFieldBuilder().getBuilder();
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder getBlockOrBuilder() {
|
|
|
|
+ if (blockBuilder_ != null) {
|
|
|
|
+ return blockBuilder_.getMessageOrBuilder();
|
|
|
|
+ } else {
|
|
|
|
+ return block_;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ private com.google.protobuf.SingleFieldBuilder<
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder>
|
|
|
|
+ getBlockFieldBuilder() {
|
|
|
|
+ if (blockBuilder_ == null) {
|
|
|
|
+ blockBuilder_ = new com.google.protobuf.SingleFieldBuilder<
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder>(
|
|
|
|
+ block_,
|
|
|
|
+ getParentForChildren(),
|
|
|
|
+ isClean());
|
|
|
|
+ block_ = null;
|
|
|
|
+ }
|
|
|
|
+ return blockBuilder_;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ // optional string deleteHint = 2;
|
|
|
|
+ private java.lang.Object deleteHint_ = "";
|
|
|
|
+ public boolean hasDeleteHint() {
|
|
|
|
+ return ((bitField0_ & 0x00000002) == 0x00000002);
|
|
|
|
+ }
|
|
|
|
+ public String getDeleteHint() {
|
|
|
|
+ java.lang.Object ref = deleteHint_;
|
|
|
|
+ if (!(ref instanceof String)) {
|
|
|
|
+ String s = ((com.google.protobuf.ByteString) ref).toStringUtf8();
|
|
|
|
+ deleteHint_ = s;
|
|
|
|
+ return s;
|
|
|
|
+ } else {
|
|
|
|
+ return (String) ref;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ public Builder setDeleteHint(String value) {
|
|
|
|
+ if (value == null) {
|
|
|
|
+ throw new NullPointerException();
|
|
|
|
+ }
|
|
|
|
+ bitField0_ |= 0x00000002;
|
|
|
|
+ deleteHint_ = value;
|
|
|
|
+ onChanged();
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder clearDeleteHint() {
|
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000002);
|
|
|
|
+ deleteHint_ = getDefaultInstance().getDeleteHint();
|
|
|
|
+ onChanged();
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ void setDeleteHint(com.google.protobuf.ByteString value) {
|
|
|
|
+ bitField0_ |= 0x00000002;
|
|
|
|
+ deleteHint_ = value;
|
|
|
|
+ onChanged();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ // @@protoc_insertion_point(builder_scope:ReceivedDeletedBlockInfoProto)
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ static {
|
|
|
|
+ defaultInstance = new ReceivedDeletedBlockInfoProto(true);
|
|
|
|
+ defaultInstance.initFields();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ // @@protoc_insertion_point(class_scope:ReceivedDeletedBlockInfoProto)
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public interface BlockReceivedAndDeletedRequestProtoOrBuilder
|
|
|
|
+ extends com.google.protobuf.MessageOrBuilder {
|
|
|
|
+
|
|
|
|
+ // required .DatanodeRegistrationProto registration = 1;
|
|
|
|
+ boolean hasRegistration();
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto getRegistration();
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder getRegistrationOrBuilder();
|
|
|
|
+
|
|
|
|
+ // required string blockPoolId = 2;
|
|
|
|
+ boolean hasBlockPoolId();
|
|
|
|
+ String getBlockPoolId();
|
|
|
|
+
|
|
|
|
+ // repeated .ReceivedDeletedBlockInfoProto blocks = 3;
|
|
|
|
+ java.util.List<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto>
|
|
|
|
+ getBlocksList();
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto getBlocks(int index);
|
|
|
|
+ int getBlocksCount();
|
|
|
|
+ java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProtoOrBuilder>
|
|
|
|
+ getBlocksOrBuilderList();
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProtoOrBuilder getBlocksOrBuilder(
|
|
|
|
+ int index);
|
|
|
|
+ }
|
|
|
|
+ public static final class BlockReceivedAndDeletedRequestProto extends
|
|
|
|
+ com.google.protobuf.GeneratedMessage
|
|
|
|
+ implements BlockReceivedAndDeletedRequestProtoOrBuilder {
|
|
|
|
+ // Use BlockReceivedAndDeletedRequestProto.newBuilder() to construct.
|
|
|
|
+ private BlockReceivedAndDeletedRequestProto(Builder builder) {
|
|
|
|
+ super(builder);
|
|
|
|
+ }
|
|
|
|
+ private BlockReceivedAndDeletedRequestProto(boolean noInit) {}
|
|
|
|
+
|
|
|
|
+ private static final BlockReceivedAndDeletedRequestProto defaultInstance;
|
|
|
|
+ public static BlockReceivedAndDeletedRequestProto getDefaultInstance() {
|
|
|
|
+ return defaultInstance;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public BlockReceivedAndDeletedRequestProto getDefaultInstanceForType() {
|
|
|
|
+ return defaultInstance;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public static final com.google.protobuf.Descriptors.Descriptor
|
|
|
|
+ getDescriptor() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_BlockReceivedAndDeletedRequestProto_descriptor;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
|
|
|
+ internalGetFieldAccessorTable() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_BlockReceivedAndDeletedRequestProto_fieldAccessorTable;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private int bitField0_;
|
|
|
|
+ // required .DatanodeRegistrationProto registration = 1;
|
|
|
|
+ public static final int REGISTRATION_FIELD_NUMBER = 1;
|
|
|
|
+ private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto registration_;
|
|
|
|
+ public boolean hasRegistration() {
|
|
|
|
+ return ((bitField0_ & 0x00000001) == 0x00000001);
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto getRegistration() {
|
|
|
|
+ return registration_;
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder getRegistrationOrBuilder() {
|
|
|
|
+ return registration_;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ // required string blockPoolId = 2;
|
|
|
|
+ public static final int BLOCKPOOLID_FIELD_NUMBER = 2;
|
|
|
|
+ private java.lang.Object blockPoolId_;
|
|
|
|
+ public boolean hasBlockPoolId() {
|
|
|
|
+ return ((bitField0_ & 0x00000002) == 0x00000002);
|
|
|
|
+ }
|
|
|
|
+ public String getBlockPoolId() {
|
|
|
|
+ java.lang.Object ref = blockPoolId_;
|
|
|
|
+ if (ref instanceof String) {
|
|
|
|
+ return (String) ref;
|
|
|
|
+ } else {
|
|
|
|
+ com.google.protobuf.ByteString bs =
|
|
|
|
+ (com.google.protobuf.ByteString) ref;
|
|
|
|
+ String s = bs.toStringUtf8();
|
|
|
|
+ if (com.google.protobuf.Internal.isValidUtf8(bs)) {
|
|
|
|
+ blockPoolId_ = s;
|
|
|
|
+ }
|
|
|
|
+ return s;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ private com.google.protobuf.ByteString getBlockPoolIdBytes() {
|
|
|
|
+ java.lang.Object ref = blockPoolId_;
|
|
|
|
+ if (ref instanceof String) {
|
|
|
|
+ com.google.protobuf.ByteString b =
|
|
|
|
+ com.google.protobuf.ByteString.copyFromUtf8((String) ref);
|
|
|
|
+ blockPoolId_ = b;
|
|
|
|
+ return b;
|
|
|
|
+ } else {
|
|
|
|
+ return (com.google.protobuf.ByteString) ref;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ // repeated .ReceivedDeletedBlockInfoProto blocks = 3;
|
|
|
|
+ public static final int BLOCKS_FIELD_NUMBER = 3;
|
|
|
|
+ private java.util.List<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto> blocks_;
|
|
|
|
+ public java.util.List<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto> getBlocksList() {
|
|
|
|
+ return blocks_;
|
|
|
|
+ }
|
|
|
|
+ public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProtoOrBuilder>
|
|
|
|
+ getBlocksOrBuilderList() {
|
|
|
|
+ return blocks_;
|
|
|
|
+ }
|
|
|
|
+ public int getBlocksCount() {
|
|
|
|
+ return blocks_.size();
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto getBlocks(int index) {
|
|
|
|
+ return blocks_.get(index);
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProtoOrBuilder getBlocksOrBuilder(
|
|
|
|
+ int index) {
|
|
|
|
+ return blocks_.get(index);
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private void initFields() {
|
|
|
|
+ registration_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance();
|
|
|
|
+ blockPoolId_ = "";
|
|
|
|
+ blocks_ = java.util.Collections.emptyList();
|
|
|
|
+ }
|
|
|
|
+ private byte memoizedIsInitialized = -1;
|
|
|
|
+ public final boolean isInitialized() {
|
|
|
|
+ byte isInitialized = memoizedIsInitialized;
|
|
|
|
+ if (isInitialized != -1) return isInitialized == 1;
|
|
|
|
+
|
|
|
|
+ if (!hasRegistration()) {
|
|
|
|
+ memoizedIsInitialized = 0;
|
|
|
|
+ return false;
|
|
|
|
+ }
|
|
|
|
+ if (!hasBlockPoolId()) {
|
|
|
|
+ memoizedIsInitialized = 0;
|
|
|
|
+ return false;
|
|
|
|
+ }
|
|
|
|
+ if (!getRegistration().isInitialized()) {
|
|
|
|
+ memoizedIsInitialized = 0;
|
|
|
|
+ return false;
|
|
|
|
+ }
|
|
|
|
+ for (int i = 0; i < getBlocksCount(); i++) {
|
|
|
|
+ if (!getBlocks(i).isInitialized()) {
|
|
|
|
+ memoizedIsInitialized = 0;
|
|
|
|
+ return false;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ memoizedIsInitialized = 1;
|
|
|
|
+ return true;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ getSerializedSize();
|
|
|
|
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
|
+ output.writeMessage(1, registration_);
|
|
|
|
+ }
|
|
|
|
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
|
|
|
|
+ output.writeBytes(2, getBlockPoolIdBytes());
|
|
|
|
+ }
|
|
|
|
+ for (int i = 0; i < blocks_.size(); i++) {
|
|
|
|
+ output.writeMessage(3, blocks_.get(i));
|
|
|
|
+ }
|
|
|
|
+ getUnknownFields().writeTo(output);
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private int memoizedSerializedSize = -1;
|
|
|
|
+ public int getSerializedSize() {
|
|
|
|
+ int size = memoizedSerializedSize;
|
|
|
|
+ if (size != -1) return size;
|
|
|
|
+
|
|
|
|
+ size = 0;
|
|
|
|
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
|
+ size += com.google.protobuf.CodedOutputStream
|
|
|
|
+ .computeMessageSize(1, registration_);
|
|
|
|
+ }
|
|
|
|
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
|
|
|
|
+ size += com.google.protobuf.CodedOutputStream
|
|
|
|
+ .computeBytesSize(2, getBlockPoolIdBytes());
|
|
|
|
+ }
|
|
|
|
+ for (int i = 0; i < blocks_.size(); i++) {
|
|
|
|
+ size += com.google.protobuf.CodedOutputStream
|
|
|
|
+ .computeMessageSize(3, blocks_.get(i));
|
|
|
|
+ }
|
|
|
|
+ size += getUnknownFields().getSerializedSize();
|
|
|
|
+ memoizedSerializedSize = size;
|
|
|
|
+ return size;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private static final long serialVersionUID = 0L;
|
|
|
|
+ @java.lang.Override
|
|
|
|
+ protected java.lang.Object writeReplace()
|
|
|
|
+ throws java.io.ObjectStreamException {
|
|
|
|
+ return super.writeReplace();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ @java.lang.Override
|
|
|
|
+ public boolean equals(final java.lang.Object obj) {
|
|
|
|
+ if (obj == this) {
|
|
|
|
+ return true;
|
|
|
|
+ }
|
|
|
|
+ if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto)) {
|
|
|
|
+ return super.equals(obj);
|
|
|
|
+ }
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto) obj;
|
|
|
|
+
|
|
|
|
+ boolean result = true;
|
|
|
|
+ result = result && (hasRegistration() == other.hasRegistration());
|
|
|
|
+ if (hasRegistration()) {
|
|
|
|
+ result = result && getRegistration()
|
|
|
|
+ .equals(other.getRegistration());
|
|
|
|
+ }
|
|
|
|
+ result = result && (hasBlockPoolId() == other.hasBlockPoolId());
|
|
|
|
+ if (hasBlockPoolId()) {
|
|
|
|
+ result = result && getBlockPoolId()
|
|
|
|
+ .equals(other.getBlockPoolId());
|
|
|
|
+ }
|
|
|
|
+ result = result && getBlocksList()
|
|
|
|
+ .equals(other.getBlocksList());
|
|
|
|
+ result = result &&
|
|
|
|
+ getUnknownFields().equals(other.getUnknownFields());
|
|
|
|
+ return result;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ @java.lang.Override
|
|
|
|
+ public int hashCode() {
|
|
|
|
+ int hash = 41;
|
|
|
|
+ hash = (19 * hash) + getDescriptorForType().hashCode();
|
|
|
|
+ if (hasRegistration()) {
|
|
|
|
+ hash = (37 * hash) + REGISTRATION_FIELD_NUMBER;
|
|
|
|
+ hash = (53 * hash) + getRegistration().hashCode();
|
|
|
|
+ }
|
|
|
|
+ if (hasBlockPoolId()) {
|
|
|
|
+ hash = (37 * hash) + BLOCKPOOLID_FIELD_NUMBER;
|
|
|
|
+ hash = (53 * hash) + getBlockPoolId().hashCode();
|
|
|
|
+ }
|
|
|
|
+ if (getBlocksCount() > 0) {
|
|
|
|
+ hash = (37 * hash) + BLOCKS_FIELD_NUMBER;
|
|
|
|
+ hash = (53 * hash) + getBlocksList().hashCode();
|
|
|
|
+ }
|
|
|
|
+ hash = (29 * hash) + getUnknownFields().hashCode();
|
|
|
|
+ return hash;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto parseFrom(
|
|
|
|
+ com.google.protobuf.ByteString data)
|
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
|
+ return newBuilder().mergeFrom(data).buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto parseFrom(
|
|
|
|
+ com.google.protobuf.ByteString data,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
|
+ return newBuilder().mergeFrom(data, extensionRegistry)
|
|
|
|
+ .buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto parseFrom(byte[] data)
|
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
|
+ return newBuilder().mergeFrom(data).buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto parseFrom(
|
|
|
|
+ byte[] data,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
|
+ return newBuilder().mergeFrom(data, extensionRegistry)
|
|
|
|
+ .buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto parseFrom(java.io.InputStream input)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ return newBuilder().mergeFrom(input).buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto parseFrom(
|
|
|
|
+ java.io.InputStream input,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ return newBuilder().mergeFrom(input, extensionRegistry)
|
|
|
|
+ .buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto parseDelimitedFrom(java.io.InputStream input)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ Builder builder = newBuilder();
|
|
|
|
+ if (builder.mergeDelimitedFrom(input)) {
|
|
|
|
+ return builder.buildParsed();
|
|
|
|
+ } else {
|
|
|
|
+ return null;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto parseDelimitedFrom(
|
|
|
|
+ java.io.InputStream input,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ Builder builder = newBuilder();
|
|
|
|
+ if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
|
|
|
|
+ return builder.buildParsed();
|
|
|
|
+ } else {
|
|
|
|
+ return null;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto parseFrom(
|
|
|
|
+ com.google.protobuf.CodedInputStream input)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ return newBuilder().mergeFrom(input).buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto parseFrom(
|
|
|
|
+ com.google.protobuf.CodedInputStream input,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ return newBuilder().mergeFrom(input, extensionRegistry)
|
|
|
|
+ .buildParsed();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public static Builder newBuilder() { return Builder.create(); }
|
|
|
|
+ public Builder newBuilderForType() { return newBuilder(); }
|
|
|
|
+ public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto prototype) {
|
|
|
|
+ return newBuilder().mergeFrom(prototype);
|
|
|
|
+ }
|
|
|
|
+ public Builder toBuilder() { return newBuilder(this); }
|
|
|
|
+
|
|
|
|
+ @java.lang.Override
|
|
|
|
+ protected Builder newBuilderForType(
|
|
|
|
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
|
|
|
|
+ Builder builder = new Builder(parent);
|
|
|
|
+ return builder;
|
|
|
|
+ }
|
|
|
|
+ public static final class Builder extends
|
|
|
|
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
|
|
|
|
+ implements org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProtoOrBuilder {
|
|
|
|
+ public static final com.google.protobuf.Descriptors.Descriptor
|
|
|
|
+ getDescriptor() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_BlockReceivedAndDeletedRequestProto_descriptor;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
|
|
|
+ internalGetFieldAccessorTable() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_BlockReceivedAndDeletedRequestProto_fieldAccessorTable;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ // Construct using org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto.newBuilder()
|
|
|
|
+ private Builder() {
|
|
|
|
+ maybeForceBuilderInitialization();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private Builder(BuilderParent parent) {
|
|
|
|
+ super(parent);
|
|
|
|
+ maybeForceBuilderInitialization();
|
|
|
|
+ }
|
|
|
|
+ private void maybeForceBuilderInitialization() {
|
|
|
|
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
|
|
|
|
+ getRegistrationFieldBuilder();
|
|
|
|
+ getBlocksFieldBuilder();
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ private static Builder create() {
|
|
|
|
+ return new Builder();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public Builder clear() {
|
|
|
|
+ super.clear();
|
|
|
|
+ if (registrationBuilder_ == null) {
|
|
|
|
+ registration_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance();
|
|
|
|
+ } else {
|
|
|
|
+ registrationBuilder_.clear();
|
|
|
|
+ }
|
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000001);
|
|
|
|
+ blockPoolId_ = "";
|
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000002);
|
|
|
|
+ if (blocksBuilder_ == null) {
|
|
|
|
+ blocks_ = java.util.Collections.emptyList();
|
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000004);
|
|
|
|
+ } else {
|
|
|
|
+ blocksBuilder_.clear();
|
|
|
|
+ }
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public Builder clone() {
|
|
|
|
+ return create().mergeFrom(buildPartial());
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public com.google.protobuf.Descriptors.Descriptor
|
|
|
|
+ getDescriptorForType() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto.getDescriptor();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto getDefaultInstanceForType() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto.getDefaultInstance();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto build() {
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto result = buildPartial();
|
|
|
|
+ if (!result.isInitialized()) {
|
|
|
|
+ throw newUninitializedMessageException(result);
|
|
|
|
+ }
|
|
|
|
+ return result;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto buildParsed()
|
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto result = buildPartial();
|
|
|
|
+ if (!result.isInitialized()) {
|
|
|
|
+ throw newUninitializedMessageException(
|
|
|
|
+ result).asInvalidProtocolBufferException();
|
|
|
|
+ }
|
|
|
|
+ return result;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto buildPartial() {
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto(this);
|
|
|
|
+ int from_bitField0_ = bitField0_;
|
|
|
|
+ int to_bitField0_ = 0;
|
|
|
|
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
|
+ to_bitField0_ |= 0x00000001;
|
|
|
|
+ }
|
|
|
|
+ if (registrationBuilder_ == null) {
|
|
|
|
+ result.registration_ = registration_;
|
|
|
|
+ } else {
|
|
|
|
+ result.registration_ = registrationBuilder_.build();
|
|
|
|
+ }
|
|
|
|
+ if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
|
|
|
|
+ to_bitField0_ |= 0x00000002;
|
|
|
|
+ }
|
|
|
|
+ result.blockPoolId_ = blockPoolId_;
|
|
|
|
+ if (blocksBuilder_ == null) {
|
|
|
|
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
|
|
|
|
+ blocks_ = java.util.Collections.unmodifiableList(blocks_);
|
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000004);
|
|
|
|
+ }
|
|
|
|
+ result.blocks_ = blocks_;
|
|
|
|
+ } else {
|
|
|
|
+ result.blocks_ = blocksBuilder_.build();
|
|
|
|
+ }
|
|
|
|
+ result.bitField0_ = to_bitField0_;
|
|
|
|
+ onBuilt();
|
|
|
|
+ return result;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public Builder mergeFrom(com.google.protobuf.Message other) {
|
|
|
|
+ if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto) {
|
|
|
|
+ return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto)other);
|
|
|
|
+ } else {
|
|
|
|
+ super.mergeFrom(other);
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto other) {
|
|
|
|
+ if (other == org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto.getDefaultInstance()) return this;
|
|
|
|
+ if (other.hasRegistration()) {
|
|
|
|
+ mergeRegistration(other.getRegistration());
|
|
|
|
+ }
|
|
|
|
+ if (other.hasBlockPoolId()) {
|
|
|
|
+ setBlockPoolId(other.getBlockPoolId());
|
|
|
|
+ }
|
|
|
|
+ if (blocksBuilder_ == null) {
|
|
|
|
+ if (!other.blocks_.isEmpty()) {
|
|
|
|
+ if (blocks_.isEmpty()) {
|
|
|
|
+ blocks_ = other.blocks_;
|
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000004);
|
|
|
|
+ } else {
|
|
|
|
+ ensureBlocksIsMutable();
|
|
|
|
+ blocks_.addAll(other.blocks_);
|
|
|
|
+ }
|
|
|
|
+ onChanged();
|
|
|
|
+ }
|
|
|
|
+ } else {
|
|
|
|
+ if (!other.blocks_.isEmpty()) {
|
|
|
|
+ if (blocksBuilder_.isEmpty()) {
|
|
|
|
+ blocksBuilder_.dispose();
|
|
|
|
+ blocksBuilder_ = null;
|
|
|
|
+ blocks_ = other.blocks_;
|
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000004);
|
|
|
|
+ blocksBuilder_ =
|
|
|
|
+ com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
|
|
|
|
+ getBlocksFieldBuilder() : null;
|
|
|
|
+ } else {
|
|
|
|
+ blocksBuilder_.addAllMessages(other.blocks_);
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ this.mergeUnknownFields(other.getUnknownFields());
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public final boolean isInitialized() {
|
|
|
|
+ if (!hasRegistration()) {
|
|
|
|
+
|
|
|
|
+ return false;
|
|
|
|
+ }
|
|
|
|
+ if (!hasBlockPoolId()) {
|
|
|
|
+
|
|
|
|
+ return false;
|
|
|
|
+ }
|
|
|
|
+ if (!getRegistration().isInitialized()) {
|
|
|
|
+
|
|
|
|
+ return false;
|
|
|
|
+ }
|
|
|
|
+ for (int i = 0; i < getBlocksCount(); i++) {
|
|
|
|
+ if (!getBlocks(i).isInitialized()) {
|
|
|
|
+
|
|
|
|
+ return false;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ return true;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public Builder mergeFrom(
|
|
|
|
+ com.google.protobuf.CodedInputStream input,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
|
|
|
|
+ com.google.protobuf.UnknownFieldSet.newBuilder(
|
|
|
|
+ this.getUnknownFields());
|
|
|
|
+ while (true) {
|
|
|
|
+ int tag = input.readTag();
|
|
|
|
+ switch (tag) {
|
|
|
|
+ case 0:
|
|
|
|
+ this.setUnknownFields(unknownFields.build());
|
|
|
|
+ onChanged();
|
|
|
|
+ return this;
|
|
|
|
+ default: {
|
|
|
|
+ if (!parseUnknownField(input, unknownFields,
|
|
|
|
+ extensionRegistry, tag)) {
|
|
|
|
+ this.setUnknownFields(unknownFields.build());
|
|
|
|
+ onChanged();
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ break;
|
|
|
|
+ }
|
|
|
|
+ case 10: {
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.newBuilder();
|
|
|
|
+ if (hasRegistration()) {
|
|
|
|
+ subBuilder.mergeFrom(getRegistration());
|
|
|
|
+ }
|
|
|
|
+ input.readMessage(subBuilder, extensionRegistry);
|
|
|
|
+ setRegistration(subBuilder.buildPartial());
|
|
|
|
+ break;
|
|
|
|
+ }
|
|
|
|
+ case 18: {
|
|
|
|
+ bitField0_ |= 0x00000002;
|
|
|
|
+ blockPoolId_ = input.readBytes();
|
|
|
|
+ break;
|
|
|
|
+ }
|
|
|
|
+ case 26: {
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto.newBuilder();
|
|
|
|
+ input.readMessage(subBuilder, extensionRegistry);
|
|
|
|
+ addBlocks(subBuilder.buildPartial());
|
|
|
|
+ break;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private int bitField0_;
|
|
|
|
+
|
|
|
|
+ // required .DatanodeRegistrationProto registration = 1;
|
|
|
|
+ private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto registration_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance();
|
|
|
|
+ private com.google.protobuf.SingleFieldBuilder<
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder> registrationBuilder_;
|
|
|
|
+ public boolean hasRegistration() {
|
|
|
|
+ return ((bitField0_ & 0x00000001) == 0x00000001);
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto getRegistration() {
|
|
|
|
+ if (registrationBuilder_ == null) {
|
|
|
|
+ return registration_;
|
|
|
|
+ } else {
|
|
|
|
+ return registrationBuilder_.getMessage();
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ public Builder setRegistration(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto value) {
|
|
|
|
+ if (registrationBuilder_ == null) {
|
|
|
|
+ if (value == null) {
|
|
|
|
+ throw new NullPointerException();
|
|
|
|
+ }
|
|
|
|
+ registration_ = value;
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ registrationBuilder_.setMessage(value);
|
|
|
|
+ }
|
|
|
|
+ bitField0_ |= 0x00000001;
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder setRegistration(
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder builderForValue) {
|
|
|
|
+ if (registrationBuilder_ == null) {
|
|
|
|
+ registration_ = builderForValue.build();
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ registrationBuilder_.setMessage(builderForValue.build());
|
|
|
|
+ }
|
|
|
|
+ bitField0_ |= 0x00000001;
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder mergeRegistration(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto value) {
|
|
|
|
+ if (registrationBuilder_ == null) {
|
|
|
|
+ if (((bitField0_ & 0x00000001) == 0x00000001) &&
|
|
|
|
+ registration_ != org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance()) {
|
|
|
|
+ registration_ =
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.newBuilder(registration_).mergeFrom(value).buildPartial();
|
|
|
|
+ } else {
|
|
|
|
+ registration_ = value;
|
|
|
|
+ }
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ registrationBuilder_.mergeFrom(value);
|
|
|
|
+ }
|
|
|
|
+ bitField0_ |= 0x00000001;
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder clearRegistration() {
|
|
|
|
+ if (registrationBuilder_ == null) {
|
|
|
|
+ registration_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance();
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ registrationBuilder_.clear();
|
|
|
|
+ }
|
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000001);
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder getRegistrationBuilder() {
|
|
|
|
+ bitField0_ |= 0x00000001;
|
|
|
|
+ onChanged();
|
|
|
|
+ return getRegistrationFieldBuilder().getBuilder();
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder getRegistrationOrBuilder() {
|
|
|
|
+ if (registrationBuilder_ != null) {
|
|
|
|
+ return registrationBuilder_.getMessageOrBuilder();
|
|
|
|
+ } else {
|
|
|
|
+ return registration_;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ private com.google.protobuf.SingleFieldBuilder<
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder>
|
|
|
|
+ getRegistrationFieldBuilder() {
|
|
|
|
+ if (registrationBuilder_ == null) {
|
|
|
|
+ registrationBuilder_ = new com.google.protobuf.SingleFieldBuilder<
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder>(
|
|
|
|
+ registration_,
|
|
|
|
+ getParentForChildren(),
|
|
|
|
+ isClean());
|
|
|
|
+ registration_ = null;
|
|
|
|
+ }
|
|
|
|
+ return registrationBuilder_;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ // required string blockPoolId = 2;
|
|
|
|
+ private java.lang.Object blockPoolId_ = "";
|
|
|
|
+ public boolean hasBlockPoolId() {
|
|
|
|
+ return ((bitField0_ & 0x00000002) == 0x00000002);
|
|
|
|
+ }
|
|
|
|
+ public String getBlockPoolId() {
|
|
|
|
+ java.lang.Object ref = blockPoolId_;
|
|
|
|
+ if (!(ref instanceof String)) {
|
|
|
|
+ String s = ((com.google.protobuf.ByteString) ref).toStringUtf8();
|
|
|
|
+ blockPoolId_ = s;
|
|
|
|
+ return s;
|
|
|
|
+ } else {
|
|
|
|
+ return (String) ref;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ public Builder setBlockPoolId(String value) {
|
|
|
|
+ if (value == null) {
|
|
|
|
+ throw new NullPointerException();
|
|
|
|
+ }
|
|
|
|
+ bitField0_ |= 0x00000002;
|
|
|
|
+ blockPoolId_ = value;
|
|
|
|
+ onChanged();
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder clearBlockPoolId() {
|
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000002);
|
|
|
|
+ blockPoolId_ = getDefaultInstance().getBlockPoolId();
|
|
|
|
+ onChanged();
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ void setBlockPoolId(com.google.protobuf.ByteString value) {
|
|
|
|
+ bitField0_ |= 0x00000002;
|
|
|
|
+ blockPoolId_ = value;
|
|
|
|
+ onChanged();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ // repeated .ReceivedDeletedBlockInfoProto blocks = 3;
|
|
|
|
+ private java.util.List<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto> blocks_ =
|
|
|
|
+ java.util.Collections.emptyList();
|
|
|
|
+ private void ensureBlocksIsMutable() {
|
|
|
|
+ if (!((bitField0_ & 0x00000004) == 0x00000004)) {
|
|
|
|
+ blocks_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto>(blocks_);
|
|
|
|
+ bitField0_ |= 0x00000004;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private com.google.protobuf.RepeatedFieldBuilder<
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProtoOrBuilder> blocksBuilder_;
|
|
|
|
+
|
|
|
|
+ public java.util.List<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto> getBlocksList() {
|
|
|
|
+ if (blocksBuilder_ == null) {
|
|
|
|
+ return java.util.Collections.unmodifiableList(blocks_);
|
|
|
|
+ } else {
|
|
|
|
+ return blocksBuilder_.getMessageList();
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ public int getBlocksCount() {
|
|
|
|
+ if (blocksBuilder_ == null) {
|
|
|
|
+ return blocks_.size();
|
|
|
|
+ } else {
|
|
|
|
+ return blocksBuilder_.getCount();
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto getBlocks(int index) {
|
|
|
|
+ if (blocksBuilder_ == null) {
|
|
|
|
+ return blocks_.get(index);
|
|
|
|
+ } else {
|
|
|
|
+ return blocksBuilder_.getMessage(index);
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ public Builder setBlocks(
|
|
|
|
+ int index, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto value) {
|
|
|
|
+ if (blocksBuilder_ == null) {
|
|
|
|
+ if (value == null) {
|
|
|
|
+ throw new NullPointerException();
|
|
|
|
+ }
|
|
|
|
+ ensureBlocksIsMutable();
|
|
|
|
+ blocks_.set(index, value);
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ blocksBuilder_.setMessage(index, value);
|
|
|
|
+ }
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder setBlocks(
|
|
|
|
+ int index, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto.Builder builderForValue) {
|
|
|
|
+ if (blocksBuilder_ == null) {
|
|
|
|
+ ensureBlocksIsMutable();
|
|
|
|
+ blocks_.set(index, builderForValue.build());
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ blocksBuilder_.setMessage(index, builderForValue.build());
|
|
|
|
+ }
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder addBlocks(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto value) {
|
|
|
|
+ if (blocksBuilder_ == null) {
|
|
|
|
+ if (value == null) {
|
|
|
|
+ throw new NullPointerException();
|
|
|
|
+ }
|
|
|
|
+ ensureBlocksIsMutable();
|
|
|
|
+ blocks_.add(value);
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ blocksBuilder_.addMessage(value);
|
|
|
|
+ }
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder addBlocks(
|
|
|
|
+ int index, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto value) {
|
|
|
|
+ if (blocksBuilder_ == null) {
|
|
|
|
+ if (value == null) {
|
|
|
|
+ throw new NullPointerException();
|
|
|
|
+ }
|
|
|
|
+ ensureBlocksIsMutable();
|
|
|
|
+ blocks_.add(index, value);
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ blocksBuilder_.addMessage(index, value);
|
|
|
|
+ }
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder addBlocks(
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto.Builder builderForValue) {
|
|
|
|
+ if (blocksBuilder_ == null) {
|
|
|
|
+ ensureBlocksIsMutable();
|
|
|
|
+ blocks_.add(builderForValue.build());
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ blocksBuilder_.addMessage(builderForValue.build());
|
|
|
|
+ }
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder addBlocks(
|
|
|
|
+ int index, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto.Builder builderForValue) {
|
|
|
|
+ if (blocksBuilder_ == null) {
|
|
|
|
+ ensureBlocksIsMutable();
|
|
|
|
+ blocks_.add(index, builderForValue.build());
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ blocksBuilder_.addMessage(index, builderForValue.build());
|
|
|
|
+ }
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder addAllBlocks(
|
|
|
|
+ java.lang.Iterable<? extends org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto> values) {
|
|
|
|
+ if (blocksBuilder_ == null) {
|
|
|
|
+ ensureBlocksIsMutable();
|
|
|
|
+ super.addAll(values, blocks_);
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ blocksBuilder_.addAllMessages(values);
|
|
|
|
+ }
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder clearBlocks() {
|
|
|
|
+ if (blocksBuilder_ == null) {
|
|
|
|
+ blocks_ = java.util.Collections.emptyList();
|
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000004);
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ blocksBuilder_.clear();
|
|
|
|
+ }
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder removeBlocks(int index) {
|
|
|
|
+ if (blocksBuilder_ == null) {
|
|
|
|
+ ensureBlocksIsMutable();
|
|
|
|
+ blocks_.remove(index);
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ blocksBuilder_.remove(index);
|
|
|
|
+ }
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto.Builder getBlocksBuilder(
|
|
|
|
+ int index) {
|
|
|
|
+ return getBlocksFieldBuilder().getBuilder(index);
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProtoOrBuilder getBlocksOrBuilder(
|
|
|
|
+ int index) {
|
|
|
|
+ if (blocksBuilder_ == null) {
|
|
|
|
+ return blocks_.get(index); } else {
|
|
|
|
+ return blocksBuilder_.getMessageOrBuilder(index);
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProtoOrBuilder>
|
|
|
|
+ getBlocksOrBuilderList() {
|
|
|
|
+ if (blocksBuilder_ != null) {
|
|
|
|
+ return blocksBuilder_.getMessageOrBuilderList();
|
|
|
|
+ } else {
|
|
|
|
+ return java.util.Collections.unmodifiableList(blocks_);
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto.Builder addBlocksBuilder() {
|
|
|
|
+ return getBlocksFieldBuilder().addBuilder(
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto.getDefaultInstance());
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto.Builder addBlocksBuilder(
|
|
|
|
+ int index) {
|
|
|
|
+ return getBlocksFieldBuilder().addBuilder(
|
|
|
|
+ index, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto.getDefaultInstance());
|
|
|
|
+ }
|
|
|
|
+ public java.util.List<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto.Builder>
|
|
|
|
+ getBlocksBuilderList() {
|
|
|
|
+ return getBlocksFieldBuilder().getBuilderList();
|
|
|
|
+ }
|
|
|
|
+ private com.google.protobuf.RepeatedFieldBuilder<
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProtoOrBuilder>
|
|
|
|
+ getBlocksFieldBuilder() {
|
|
|
|
+ if (blocksBuilder_ == null) {
|
|
|
|
+ blocksBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProtoOrBuilder>(
|
|
|
|
+ blocks_,
|
|
|
|
+ ((bitField0_ & 0x00000004) == 0x00000004),
|
|
|
|
+ getParentForChildren(),
|
|
|
|
+ isClean());
|
|
|
|
+ blocks_ = null;
|
|
|
|
+ }
|
|
|
|
+ return blocksBuilder_;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ // @@protoc_insertion_point(builder_scope:BlockReceivedAndDeletedRequestProto)
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ static {
|
|
|
|
+ defaultInstance = new BlockReceivedAndDeletedRequestProto(true);
|
|
|
|
+ defaultInstance.initFields();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ // @@protoc_insertion_point(class_scope:BlockReceivedAndDeletedRequestProto)
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public interface BlockReceivedAndDeletedResponseProtoOrBuilder
|
|
|
|
+ extends com.google.protobuf.MessageOrBuilder {
|
|
|
|
+ }
|
|
|
|
+ public static final class BlockReceivedAndDeletedResponseProto extends
|
|
|
|
+ com.google.protobuf.GeneratedMessage
|
|
|
|
+ implements BlockReceivedAndDeletedResponseProtoOrBuilder {
|
|
|
|
+ // Use BlockReceivedAndDeletedResponseProto.newBuilder() to construct.
|
|
|
|
+ private BlockReceivedAndDeletedResponseProto(Builder builder) {
|
|
|
|
+ super(builder);
|
|
|
|
+ }
|
|
|
|
+ private BlockReceivedAndDeletedResponseProto(boolean noInit) {}
|
|
|
|
+
|
|
|
|
+ private static final BlockReceivedAndDeletedResponseProto defaultInstance;
|
|
|
|
+ public static BlockReceivedAndDeletedResponseProto getDefaultInstance() {
|
|
|
|
+ return defaultInstance;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public BlockReceivedAndDeletedResponseProto getDefaultInstanceForType() {
|
|
|
|
+ return defaultInstance;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public static final com.google.protobuf.Descriptors.Descriptor
|
|
|
|
+ getDescriptor() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_BlockReceivedAndDeletedResponseProto_descriptor;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
|
|
|
+ internalGetFieldAccessorTable() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_BlockReceivedAndDeletedResponseProto_fieldAccessorTable;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private void initFields() {
|
|
|
|
+ }
|
|
|
|
+ private byte memoizedIsInitialized = -1;
|
|
|
|
+ public final boolean isInitialized() {
|
|
|
|
+ byte isInitialized = memoizedIsInitialized;
|
|
|
|
+ if (isInitialized != -1) return isInitialized == 1;
|
|
|
|
+
|
|
|
|
+ memoizedIsInitialized = 1;
|
|
|
|
+ return true;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ getSerializedSize();
|
|
|
|
+ getUnknownFields().writeTo(output);
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private int memoizedSerializedSize = -1;
|
|
|
|
+ public int getSerializedSize() {
|
|
|
|
+ int size = memoizedSerializedSize;
|
|
|
|
+ if (size != -1) return size;
|
|
|
|
+
|
|
|
|
+ size = 0;
|
|
|
|
+ size += getUnknownFields().getSerializedSize();
|
|
|
|
+ memoizedSerializedSize = size;
|
|
|
|
+ return size;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private static final long serialVersionUID = 0L;
|
|
|
|
+ @java.lang.Override
|
|
|
|
+ protected java.lang.Object writeReplace()
|
|
|
|
+ throws java.io.ObjectStreamException {
|
|
|
|
+ return super.writeReplace();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ @java.lang.Override
|
|
|
|
+ public boolean equals(final java.lang.Object obj) {
|
|
|
|
+ if (obj == this) {
|
|
|
|
+ return true;
|
|
|
|
+ }
|
|
|
|
+ if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto)) {
|
|
|
|
+ return super.equals(obj);
|
|
|
|
+ }
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto) obj;
|
|
|
|
+
|
|
|
|
+ boolean result = true;
|
|
|
|
+ result = result &&
|
|
|
|
+ getUnknownFields().equals(other.getUnknownFields());
|
|
|
|
+ return result;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ @java.lang.Override
|
|
|
|
+ public int hashCode() {
|
|
|
|
+ int hash = 41;
|
|
|
|
+ hash = (19 * hash) + getDescriptorForType().hashCode();
|
|
|
|
+ hash = (29 * hash) + getUnknownFields().hashCode();
|
|
|
|
+ return hash;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto parseFrom(
|
|
|
|
+ com.google.protobuf.ByteString data)
|
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
|
+ return newBuilder().mergeFrom(data).buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto parseFrom(
|
|
|
|
+ com.google.protobuf.ByteString data,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
|
+ return newBuilder().mergeFrom(data, extensionRegistry)
|
|
|
|
+ .buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto parseFrom(byte[] data)
|
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
|
+ return newBuilder().mergeFrom(data).buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto parseFrom(
|
|
|
|
+ byte[] data,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
|
+ return newBuilder().mergeFrom(data, extensionRegistry)
|
|
|
|
+ .buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto parseFrom(java.io.InputStream input)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ return newBuilder().mergeFrom(input).buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto parseFrom(
|
|
|
|
+ java.io.InputStream input,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ return newBuilder().mergeFrom(input, extensionRegistry)
|
|
|
|
+ .buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto parseDelimitedFrom(java.io.InputStream input)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ Builder builder = newBuilder();
|
|
|
|
+ if (builder.mergeDelimitedFrom(input)) {
|
|
|
|
+ return builder.buildParsed();
|
|
|
|
+ } else {
|
|
|
|
+ return null;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto parseDelimitedFrom(
|
|
|
|
+ java.io.InputStream input,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ Builder builder = newBuilder();
|
|
|
|
+ if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
|
|
|
|
+ return builder.buildParsed();
|
|
|
|
+ } else {
|
|
|
|
+ return null;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto parseFrom(
|
|
|
|
+ com.google.protobuf.CodedInputStream input)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ return newBuilder().mergeFrom(input).buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto parseFrom(
|
|
|
|
+ com.google.protobuf.CodedInputStream input,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ return newBuilder().mergeFrom(input, extensionRegistry)
|
|
|
|
+ .buildParsed();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public static Builder newBuilder() { return Builder.create(); }
|
|
|
|
+ public Builder newBuilderForType() { return newBuilder(); }
|
|
|
|
+ public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto prototype) {
|
|
|
|
+ return newBuilder().mergeFrom(prototype);
|
|
|
|
+ }
|
|
|
|
+ public Builder toBuilder() { return newBuilder(this); }
|
|
|
|
+
|
|
|
|
+ @java.lang.Override
|
|
|
|
+ protected Builder newBuilderForType(
|
|
|
|
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
|
|
|
|
+ Builder builder = new Builder(parent);
|
|
|
|
+ return builder;
|
|
|
|
+ }
|
|
|
|
+ public static final class Builder extends
|
|
|
|
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
|
|
|
|
+ implements org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProtoOrBuilder {
|
|
|
|
+ public static final com.google.protobuf.Descriptors.Descriptor
|
|
|
|
+ getDescriptor() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_BlockReceivedAndDeletedResponseProto_descriptor;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
|
|
|
+ internalGetFieldAccessorTable() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_BlockReceivedAndDeletedResponseProto_fieldAccessorTable;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ // Construct using org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto.newBuilder()
|
|
|
|
+ private Builder() {
|
|
|
|
+ maybeForceBuilderInitialization();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private Builder(BuilderParent parent) {
|
|
|
|
+ super(parent);
|
|
|
|
+ maybeForceBuilderInitialization();
|
|
|
|
+ }
|
|
|
|
+ private void maybeForceBuilderInitialization() {
|
|
|
|
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ private static Builder create() {
|
|
|
|
+ return new Builder();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public Builder clear() {
|
|
|
|
+ super.clear();
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public Builder clone() {
|
|
|
|
+ return create().mergeFrom(buildPartial());
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public com.google.protobuf.Descriptors.Descriptor
|
|
|
|
+ getDescriptorForType() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto.getDescriptor();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto getDefaultInstanceForType() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto.getDefaultInstance();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto build() {
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto result = buildPartial();
|
|
|
|
+ if (!result.isInitialized()) {
|
|
|
|
+ throw newUninitializedMessageException(result);
|
|
|
|
+ }
|
|
|
|
+ return result;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto buildParsed()
|
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto result = buildPartial();
|
|
|
|
+ if (!result.isInitialized()) {
|
|
|
|
+ throw newUninitializedMessageException(
|
|
|
|
+ result).asInvalidProtocolBufferException();
|
|
|
|
+ }
|
|
|
|
+ return result;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto buildPartial() {
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto(this);
|
|
|
|
+ onBuilt();
|
|
|
|
+ return result;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public Builder mergeFrom(com.google.protobuf.Message other) {
|
|
|
|
+ if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto) {
|
|
|
|
+ return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto)other);
|
|
|
|
+ } else {
|
|
|
|
+ super.mergeFrom(other);
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto other) {
|
|
|
|
+ if (other == org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto.getDefaultInstance()) return this;
|
|
|
|
+ this.mergeUnknownFields(other.getUnknownFields());
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public final boolean isInitialized() {
|
|
|
|
+ return true;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public Builder mergeFrom(
|
|
|
|
+ com.google.protobuf.CodedInputStream input,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
|
|
|
|
+ com.google.protobuf.UnknownFieldSet.newBuilder(
|
|
|
|
+ this.getUnknownFields());
|
|
|
|
+ while (true) {
|
|
|
|
+ int tag = input.readTag();
|
|
|
|
+ switch (tag) {
|
|
|
|
+ case 0:
|
|
|
|
+ this.setUnknownFields(unknownFields.build());
|
|
|
|
+ onChanged();
|
|
|
|
+ return this;
|
|
|
|
+ default: {
|
|
|
|
+ if (!parseUnknownField(input, unknownFields,
|
|
|
|
+ extensionRegistry, tag)) {
|
|
|
|
+ this.setUnknownFields(unknownFields.build());
|
|
|
|
+ onChanged();
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ break;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+
|
|
|
|
+ // @@protoc_insertion_point(builder_scope:BlockReceivedAndDeletedResponseProto)
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ static {
|
|
|
|
+ defaultInstance = new BlockReceivedAndDeletedResponseProto(true);
|
|
|
|
+ defaultInstance.initFields();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ // @@protoc_insertion_point(class_scope:BlockReceivedAndDeletedResponseProto)
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public interface ErrorReportRequestProtoOrBuilder
|
|
|
|
+ extends com.google.protobuf.MessageOrBuilder {
|
|
|
|
+
|
|
|
|
+ // required .DatanodeRegistrationProto registartion = 1;
|
|
|
|
+ boolean hasRegistartion();
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto getRegistartion();
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder getRegistartionOrBuilder();
|
|
|
|
+
|
|
|
|
+ // required uint32 errorCode = 2;
|
|
|
|
+ boolean hasErrorCode();
|
|
|
|
+ int getErrorCode();
|
|
|
|
+
|
|
|
|
+ // required string msg = 3;
|
|
|
|
+ boolean hasMsg();
|
|
|
|
+ String getMsg();
|
|
|
|
+ }
|
|
|
|
+ public static final class ErrorReportRequestProto extends
|
|
|
|
+ com.google.protobuf.GeneratedMessage
|
|
|
|
+ implements ErrorReportRequestProtoOrBuilder {
|
|
|
|
+ // Use ErrorReportRequestProto.newBuilder() to construct.
|
|
|
|
+ private ErrorReportRequestProto(Builder builder) {
|
|
|
|
+ super(builder);
|
|
|
|
+ }
|
|
|
|
+ private ErrorReportRequestProto(boolean noInit) {}
|
|
|
|
+
|
|
|
|
+ private static final ErrorReportRequestProto defaultInstance;
|
|
|
|
+ public static ErrorReportRequestProto getDefaultInstance() {
|
|
|
|
+ return defaultInstance;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public ErrorReportRequestProto getDefaultInstanceForType() {
|
|
|
|
+ return defaultInstance;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public static final com.google.protobuf.Descriptors.Descriptor
|
|
|
|
+ getDescriptor() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_ErrorReportRequestProto_descriptor;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
|
|
|
+ internalGetFieldAccessorTable() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_ErrorReportRequestProto_fieldAccessorTable;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public enum ErrorCode
|
|
|
|
+ implements com.google.protobuf.ProtocolMessageEnum {
|
|
|
|
+ NOTIFY(0, 0),
|
|
|
|
+ DISK_ERROR(1, 1),
|
|
|
|
+ INVALID_BLOCK(2, 2),
|
|
|
|
+ FATAL_DISK_ERROR(3, 3),
|
|
|
|
+ ;
|
|
|
|
+
|
|
|
|
+ public static final int NOTIFY_VALUE = 0;
|
|
|
|
+ public static final int DISK_ERROR_VALUE = 1;
|
|
|
|
+ public static final int INVALID_BLOCK_VALUE = 2;
|
|
|
|
+ public static final int FATAL_DISK_ERROR_VALUE = 3;
|
|
|
|
+
|
|
|
|
+
|
|
|
|
+ public final int getNumber() { return value; }
|
|
|
|
+
|
|
|
|
+ public static ErrorCode valueOf(int value) {
|
|
|
|
+ switch (value) {
|
|
|
|
+ case 0: return NOTIFY;
|
|
|
|
+ case 1: return DISK_ERROR;
|
|
|
|
+ case 2: return INVALID_BLOCK;
|
|
|
|
+ case 3: return FATAL_DISK_ERROR;
|
|
|
|
+ default: return null;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public static com.google.protobuf.Internal.EnumLiteMap<ErrorCode>
|
|
|
|
+ internalGetValueMap() {
|
|
|
|
+ return internalValueMap;
|
|
|
|
+ }
|
|
|
|
+ private static com.google.protobuf.Internal.EnumLiteMap<ErrorCode>
|
|
|
|
+ internalValueMap =
|
|
|
|
+ new com.google.protobuf.Internal.EnumLiteMap<ErrorCode>() {
|
|
|
|
+ public ErrorCode findValueByNumber(int number) {
|
|
|
|
+ return ErrorCode.valueOf(number);
|
|
|
|
+ }
|
|
|
|
+ };
|
|
|
|
+
|
|
|
|
+ public final com.google.protobuf.Descriptors.EnumValueDescriptor
|
|
|
|
+ getValueDescriptor() {
|
|
|
|
+ return getDescriptor().getValues().get(index);
|
|
|
|
+ }
|
|
|
|
+ public final com.google.protobuf.Descriptors.EnumDescriptor
|
|
|
|
+ getDescriptorForType() {
|
|
|
|
+ return getDescriptor();
|
|
|
|
+ }
|
|
|
|
+ public static final com.google.protobuf.Descriptors.EnumDescriptor
|
|
|
|
+ getDescriptor() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto.getDescriptor().getEnumTypes().get(0);
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private static final ErrorCode[] VALUES = {
|
|
|
|
+ NOTIFY, DISK_ERROR, INVALID_BLOCK, FATAL_DISK_ERROR,
|
|
|
|
+ };
|
|
|
|
+
|
|
|
|
+ public static ErrorCode valueOf(
|
|
|
|
+ com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
|
|
|
|
+ if (desc.getType() != getDescriptor()) {
|
|
|
|
+ throw new java.lang.IllegalArgumentException(
|
|
|
|
+ "EnumValueDescriptor is not for this type.");
|
|
|
|
+ }
|
|
|
|
+ return VALUES[desc.getIndex()];
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private final int index;
|
|
|
|
+ private final int value;
|
|
|
|
+
|
|
|
|
+ private ErrorCode(int index, int value) {
|
|
|
|
+ this.index = index;
|
|
|
|
+ this.value = value;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ // @@protoc_insertion_point(enum_scope:ErrorReportRequestProto.ErrorCode)
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private int bitField0_;
|
|
|
|
+ // required .DatanodeRegistrationProto registartion = 1;
|
|
|
|
+ public static final int REGISTARTION_FIELD_NUMBER = 1;
|
|
|
|
+ private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto registartion_;
|
|
|
|
+ public boolean hasRegistartion() {
|
|
|
|
+ return ((bitField0_ & 0x00000001) == 0x00000001);
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto getRegistartion() {
|
|
|
|
+ return registartion_;
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder getRegistartionOrBuilder() {
|
|
|
|
+ return registartion_;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ // required uint32 errorCode = 2;
|
|
|
|
+ public static final int ERRORCODE_FIELD_NUMBER = 2;
|
|
|
|
+ private int errorCode_;
|
|
|
|
+ public boolean hasErrorCode() {
|
|
|
|
+ return ((bitField0_ & 0x00000002) == 0x00000002);
|
|
|
|
+ }
|
|
|
|
+ public int getErrorCode() {
|
|
|
|
+ return errorCode_;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ // required string msg = 3;
|
|
|
|
+ public static final int MSG_FIELD_NUMBER = 3;
|
|
|
|
+ private java.lang.Object msg_;
|
|
|
|
+ public boolean hasMsg() {
|
|
|
|
+ return ((bitField0_ & 0x00000004) == 0x00000004);
|
|
|
|
+ }
|
|
|
|
+ public String getMsg() {
|
|
|
|
+ java.lang.Object ref = msg_;
|
|
|
|
+ if (ref instanceof String) {
|
|
|
|
+ return (String) ref;
|
|
|
|
+ } else {
|
|
|
|
+ com.google.protobuf.ByteString bs =
|
|
|
|
+ (com.google.protobuf.ByteString) ref;
|
|
|
|
+ String s = bs.toStringUtf8();
|
|
|
|
+ if (com.google.protobuf.Internal.isValidUtf8(bs)) {
|
|
|
|
+ msg_ = s;
|
|
|
|
+ }
|
|
|
|
+ return s;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ private com.google.protobuf.ByteString getMsgBytes() {
|
|
|
|
+ java.lang.Object ref = msg_;
|
|
|
|
+ if (ref instanceof String) {
|
|
|
|
+ com.google.protobuf.ByteString b =
|
|
|
|
+ com.google.protobuf.ByteString.copyFromUtf8((String) ref);
|
|
|
|
+ msg_ = b;
|
|
|
|
+ return b;
|
|
|
|
+ } else {
|
|
|
|
+ return (com.google.protobuf.ByteString) ref;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private void initFields() {
|
|
|
|
+ registartion_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance();
|
|
|
|
+ errorCode_ = 0;
|
|
|
|
+ msg_ = "";
|
|
|
|
+ }
|
|
|
|
+ private byte memoizedIsInitialized = -1;
|
|
|
|
+ public final boolean isInitialized() {
|
|
|
|
+ byte isInitialized = memoizedIsInitialized;
|
|
|
|
+ if (isInitialized != -1) return isInitialized == 1;
|
|
|
|
+
|
|
|
|
+ if (!hasRegistartion()) {
|
|
|
|
+ memoizedIsInitialized = 0;
|
|
|
|
+ return false;
|
|
|
|
+ }
|
|
|
|
+ if (!hasErrorCode()) {
|
|
|
|
+ memoizedIsInitialized = 0;
|
|
|
|
+ return false;
|
|
|
|
+ }
|
|
|
|
+ if (!hasMsg()) {
|
|
|
|
+ memoizedIsInitialized = 0;
|
|
|
|
+ return false;
|
|
|
|
+ }
|
|
|
|
+ if (!getRegistartion().isInitialized()) {
|
|
|
|
+ memoizedIsInitialized = 0;
|
|
|
|
+ return false;
|
|
|
|
+ }
|
|
|
|
+ memoizedIsInitialized = 1;
|
|
|
|
+ return true;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ getSerializedSize();
|
|
|
|
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
|
+ output.writeMessage(1, registartion_);
|
|
|
|
+ }
|
|
|
|
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
|
|
|
|
+ output.writeUInt32(2, errorCode_);
|
|
|
|
+ }
|
|
|
|
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
|
|
|
|
+ output.writeBytes(3, getMsgBytes());
|
|
|
|
+ }
|
|
|
|
+ getUnknownFields().writeTo(output);
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private int memoizedSerializedSize = -1;
|
|
|
|
+ public int getSerializedSize() {
|
|
|
|
+ int size = memoizedSerializedSize;
|
|
|
|
+ if (size != -1) return size;
|
|
|
|
+
|
|
|
|
+ size = 0;
|
|
|
|
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
|
+ size += com.google.protobuf.CodedOutputStream
|
|
|
|
+ .computeMessageSize(1, registartion_);
|
|
|
|
+ }
|
|
|
|
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
|
|
|
|
+ size += com.google.protobuf.CodedOutputStream
|
|
|
|
+ .computeUInt32Size(2, errorCode_);
|
|
|
|
+ }
|
|
|
|
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
|
|
|
|
+ size += com.google.protobuf.CodedOutputStream
|
|
|
|
+ .computeBytesSize(3, getMsgBytes());
|
|
|
|
+ }
|
|
|
|
+ size += getUnknownFields().getSerializedSize();
|
|
|
|
+ memoizedSerializedSize = size;
|
|
|
|
+ return size;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private static final long serialVersionUID = 0L;
|
|
|
|
+ @java.lang.Override
|
|
|
|
+ protected java.lang.Object writeReplace()
|
|
|
|
+ throws java.io.ObjectStreamException {
|
|
|
|
+ return super.writeReplace();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ @java.lang.Override
|
|
|
|
+ public boolean equals(final java.lang.Object obj) {
|
|
|
|
+ if (obj == this) {
|
|
|
|
+ return true;
|
|
|
|
+ }
|
|
|
|
+ if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto)) {
|
|
|
|
+ return super.equals(obj);
|
|
|
|
+ }
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto) obj;
|
|
|
|
+
|
|
|
|
+ boolean result = true;
|
|
|
|
+ result = result && (hasRegistartion() == other.hasRegistartion());
|
|
|
|
+ if (hasRegistartion()) {
|
|
|
|
+ result = result && getRegistartion()
|
|
|
|
+ .equals(other.getRegistartion());
|
|
|
|
+ }
|
|
|
|
+ result = result && (hasErrorCode() == other.hasErrorCode());
|
|
|
|
+ if (hasErrorCode()) {
|
|
|
|
+ result = result && (getErrorCode()
|
|
|
|
+ == other.getErrorCode());
|
|
|
|
+ }
|
|
|
|
+ result = result && (hasMsg() == other.hasMsg());
|
|
|
|
+ if (hasMsg()) {
|
|
|
|
+ result = result && getMsg()
|
|
|
|
+ .equals(other.getMsg());
|
|
|
|
+ }
|
|
|
|
+ result = result &&
|
|
|
|
+ getUnknownFields().equals(other.getUnknownFields());
|
|
|
|
+ return result;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ @java.lang.Override
|
|
|
|
+ public int hashCode() {
|
|
|
|
+ int hash = 41;
|
|
|
|
+ hash = (19 * hash) + getDescriptorForType().hashCode();
|
|
|
|
+ if (hasRegistartion()) {
|
|
|
|
+ hash = (37 * hash) + REGISTARTION_FIELD_NUMBER;
|
|
|
|
+ hash = (53 * hash) + getRegistartion().hashCode();
|
|
|
|
+ }
|
|
|
|
+ if (hasErrorCode()) {
|
|
|
|
+ hash = (37 * hash) + ERRORCODE_FIELD_NUMBER;
|
|
|
|
+ hash = (53 * hash) + getErrorCode();
|
|
|
|
+ }
|
|
|
|
+ if (hasMsg()) {
|
|
|
|
+ hash = (37 * hash) + MSG_FIELD_NUMBER;
|
|
|
|
+ hash = (53 * hash) + getMsg().hashCode();
|
|
|
|
+ }
|
|
|
|
+ hash = (29 * hash) + getUnknownFields().hashCode();
|
|
|
|
+ return hash;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto parseFrom(
|
|
|
|
+ com.google.protobuf.ByteString data)
|
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
|
+ return newBuilder().mergeFrom(data).buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto parseFrom(
|
|
|
|
+ com.google.protobuf.ByteString data,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
|
+ return newBuilder().mergeFrom(data, extensionRegistry)
|
|
|
|
+ .buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto parseFrom(byte[] data)
|
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
|
+ return newBuilder().mergeFrom(data).buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto parseFrom(
|
|
|
|
+ byte[] data,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
|
+ return newBuilder().mergeFrom(data, extensionRegistry)
|
|
|
|
+ .buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto parseFrom(java.io.InputStream input)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ return newBuilder().mergeFrom(input).buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto parseFrom(
|
|
|
|
+ java.io.InputStream input,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ return newBuilder().mergeFrom(input, extensionRegistry)
|
|
|
|
+ .buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto parseDelimitedFrom(java.io.InputStream input)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ Builder builder = newBuilder();
|
|
|
|
+ if (builder.mergeDelimitedFrom(input)) {
|
|
|
|
+ return builder.buildParsed();
|
|
|
|
+ } else {
|
|
|
|
+ return null;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto parseDelimitedFrom(
|
|
|
|
+ java.io.InputStream input,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ Builder builder = newBuilder();
|
|
|
|
+ if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
|
|
|
|
+ return builder.buildParsed();
|
|
|
|
+ } else {
|
|
|
|
+ return null;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto parseFrom(
|
|
|
|
+ com.google.protobuf.CodedInputStream input)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ return newBuilder().mergeFrom(input).buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto parseFrom(
|
|
|
|
+ com.google.protobuf.CodedInputStream input,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ return newBuilder().mergeFrom(input, extensionRegistry)
|
|
|
|
+ .buildParsed();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public static Builder newBuilder() { return Builder.create(); }
|
|
|
|
+ public Builder newBuilderForType() { return newBuilder(); }
|
|
|
|
+ public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto prototype) {
|
|
|
|
+ return newBuilder().mergeFrom(prototype);
|
|
|
|
+ }
|
|
|
|
+ public Builder toBuilder() { return newBuilder(this); }
|
|
|
|
+
|
|
|
|
+ @java.lang.Override
|
|
|
|
+ protected Builder newBuilderForType(
|
|
|
|
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
|
|
|
|
+ Builder builder = new Builder(parent);
|
|
|
|
+ return builder;
|
|
|
|
+ }
|
|
|
|
+ public static final class Builder extends
|
|
|
|
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
|
|
|
|
+ implements org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProtoOrBuilder {
|
|
|
|
+ public static final com.google.protobuf.Descriptors.Descriptor
|
|
|
|
+ getDescriptor() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_ErrorReportRequestProto_descriptor;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
|
|
|
+ internalGetFieldAccessorTable() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_ErrorReportRequestProto_fieldAccessorTable;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ // Construct using org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto.newBuilder()
|
|
|
|
+ private Builder() {
|
|
|
|
+ maybeForceBuilderInitialization();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private Builder(BuilderParent parent) {
|
|
|
|
+ super(parent);
|
|
|
|
+ maybeForceBuilderInitialization();
|
|
|
|
+ }
|
|
|
|
+ private void maybeForceBuilderInitialization() {
|
|
|
|
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
|
|
|
|
+ getRegistartionFieldBuilder();
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ private static Builder create() {
|
|
|
|
+ return new Builder();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public Builder clear() {
|
|
|
|
+ super.clear();
|
|
|
|
+ if (registartionBuilder_ == null) {
|
|
|
|
+ registartion_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance();
|
|
|
|
+ } else {
|
|
|
|
+ registartionBuilder_.clear();
|
|
|
|
+ }
|
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000001);
|
|
|
|
+ errorCode_ = 0;
|
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000002);
|
|
|
|
+ msg_ = "";
|
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000004);
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public Builder clone() {
|
|
|
|
+ return create().mergeFrom(buildPartial());
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public com.google.protobuf.Descriptors.Descriptor
|
|
|
|
+ getDescriptorForType() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto.getDescriptor();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto getDefaultInstanceForType() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto.getDefaultInstance();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto build() {
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto result = buildPartial();
|
|
|
|
+ if (!result.isInitialized()) {
|
|
|
|
+ throw newUninitializedMessageException(result);
|
|
|
|
+ }
|
|
|
|
+ return result;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto buildParsed()
|
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto result = buildPartial();
|
|
|
|
+ if (!result.isInitialized()) {
|
|
|
|
+ throw newUninitializedMessageException(
|
|
|
|
+ result).asInvalidProtocolBufferException();
|
|
|
|
+ }
|
|
|
|
+ return result;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto buildPartial() {
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto(this);
|
|
|
|
+ int from_bitField0_ = bitField0_;
|
|
|
|
+ int to_bitField0_ = 0;
|
|
|
|
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
|
+ to_bitField0_ |= 0x00000001;
|
|
|
|
+ }
|
|
|
|
+ if (registartionBuilder_ == null) {
|
|
|
|
+ result.registartion_ = registartion_;
|
|
|
|
+ } else {
|
|
|
|
+ result.registartion_ = registartionBuilder_.build();
|
|
|
|
+ }
|
|
|
|
+ if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
|
|
|
|
+ to_bitField0_ |= 0x00000002;
|
|
|
|
+ }
|
|
|
|
+ result.errorCode_ = errorCode_;
|
|
|
|
+ if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
|
|
|
|
+ to_bitField0_ |= 0x00000004;
|
|
|
|
+ }
|
|
|
|
+ result.msg_ = msg_;
|
|
|
|
+ result.bitField0_ = to_bitField0_;
|
|
|
|
+ onBuilt();
|
|
|
|
+ return result;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public Builder mergeFrom(com.google.protobuf.Message other) {
|
|
|
|
+ if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto) {
|
|
|
|
+ return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto)other);
|
|
|
|
+ } else {
|
|
|
|
+ super.mergeFrom(other);
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto other) {
|
|
|
|
+ if (other == org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto.getDefaultInstance()) return this;
|
|
|
|
+ if (other.hasRegistartion()) {
|
|
|
|
+ mergeRegistartion(other.getRegistartion());
|
|
|
|
+ }
|
|
|
|
+ if (other.hasErrorCode()) {
|
|
|
|
+ setErrorCode(other.getErrorCode());
|
|
|
|
+ }
|
|
|
|
+ if (other.hasMsg()) {
|
|
|
|
+ setMsg(other.getMsg());
|
|
|
|
+ }
|
|
|
|
+ this.mergeUnknownFields(other.getUnknownFields());
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public final boolean isInitialized() {
|
|
|
|
+ if (!hasRegistartion()) {
|
|
|
|
+
|
|
|
|
+ return false;
|
|
|
|
+ }
|
|
|
|
+ if (!hasErrorCode()) {
|
|
|
|
+
|
|
|
|
+ return false;
|
|
|
|
+ }
|
|
|
|
+ if (!hasMsg()) {
|
|
|
|
+
|
|
|
|
+ return false;
|
|
|
|
+ }
|
|
|
|
+ if (!getRegistartion().isInitialized()) {
|
|
|
|
+
|
|
|
|
+ return false;
|
|
|
|
+ }
|
|
|
|
+ return true;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public Builder mergeFrom(
|
|
|
|
+ com.google.protobuf.CodedInputStream input,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
|
|
|
|
+ com.google.protobuf.UnknownFieldSet.newBuilder(
|
|
|
|
+ this.getUnknownFields());
|
|
|
|
+ while (true) {
|
|
|
|
+ int tag = input.readTag();
|
|
|
|
+ switch (tag) {
|
|
|
|
+ case 0:
|
|
|
|
+ this.setUnknownFields(unknownFields.build());
|
|
|
|
+ onChanged();
|
|
|
|
+ return this;
|
|
|
|
+ default: {
|
|
|
|
+ if (!parseUnknownField(input, unknownFields,
|
|
|
|
+ extensionRegistry, tag)) {
|
|
|
|
+ this.setUnknownFields(unknownFields.build());
|
|
|
|
+ onChanged();
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ break;
|
|
|
|
+ }
|
|
|
|
+ case 10: {
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.newBuilder();
|
|
|
|
+ if (hasRegistartion()) {
|
|
|
|
+ subBuilder.mergeFrom(getRegistartion());
|
|
|
|
+ }
|
|
|
|
+ input.readMessage(subBuilder, extensionRegistry);
|
|
|
|
+ setRegistartion(subBuilder.buildPartial());
|
|
|
|
+ break;
|
|
|
|
+ }
|
|
|
|
+ case 16: {
|
|
|
|
+ bitField0_ |= 0x00000002;
|
|
|
|
+ errorCode_ = input.readUInt32();
|
|
|
|
+ break;
|
|
|
|
+ }
|
|
|
|
+ case 26: {
|
|
|
|
+ bitField0_ |= 0x00000004;
|
|
|
|
+ msg_ = input.readBytes();
|
|
|
|
+ break;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private int bitField0_;
|
|
|
|
+
|
|
|
|
+ // required .DatanodeRegistrationProto registartion = 1;
|
|
|
|
+ private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto registartion_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance();
|
|
|
|
+ private com.google.protobuf.SingleFieldBuilder<
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder> registartionBuilder_;
|
|
|
|
+ public boolean hasRegistartion() {
|
|
|
|
+ return ((bitField0_ & 0x00000001) == 0x00000001);
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto getRegistartion() {
|
|
|
|
+ if (registartionBuilder_ == null) {
|
|
|
|
+ return registartion_;
|
|
|
|
+ } else {
|
|
|
|
+ return registartionBuilder_.getMessage();
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ public Builder setRegistartion(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto value) {
|
|
|
|
+ if (registartionBuilder_ == null) {
|
|
|
|
+ if (value == null) {
|
|
|
|
+ throw new NullPointerException();
|
|
|
|
+ }
|
|
|
|
+ registartion_ = value;
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ registartionBuilder_.setMessage(value);
|
|
|
|
+ }
|
|
|
|
+ bitField0_ |= 0x00000001;
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder setRegistartion(
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder builderForValue) {
|
|
|
|
+ if (registartionBuilder_ == null) {
|
|
|
|
+ registartion_ = builderForValue.build();
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ registartionBuilder_.setMessage(builderForValue.build());
|
|
|
|
+ }
|
|
|
|
+ bitField0_ |= 0x00000001;
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder mergeRegistartion(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto value) {
|
|
|
|
+ if (registartionBuilder_ == null) {
|
|
|
|
+ if (((bitField0_ & 0x00000001) == 0x00000001) &&
|
|
|
|
+ registartion_ != org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance()) {
|
|
|
|
+ registartion_ =
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.newBuilder(registartion_).mergeFrom(value).buildPartial();
|
|
|
|
+ } else {
|
|
|
|
+ registartion_ = value;
|
|
|
|
+ }
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ registartionBuilder_.mergeFrom(value);
|
|
|
|
+ }
|
|
|
|
+ bitField0_ |= 0x00000001;
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder clearRegistartion() {
|
|
|
|
+ if (registartionBuilder_ == null) {
|
|
|
|
+ registartion_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.getDefaultInstance();
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ registartionBuilder_.clear();
|
|
|
|
+ }
|
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000001);
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder getRegistartionBuilder() {
|
|
|
|
+ bitField0_ |= 0x00000001;
|
|
|
|
+ onChanged();
|
|
|
|
+ return getRegistartionFieldBuilder().getBuilder();
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder getRegistartionOrBuilder() {
|
|
|
|
+ if (registartionBuilder_ != null) {
|
|
|
|
+ return registartionBuilder_.getMessageOrBuilder();
|
|
|
|
+ } else {
|
|
|
|
+ return registartion_;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ private com.google.protobuf.SingleFieldBuilder<
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder>
|
|
|
|
+ getRegistartionFieldBuilder() {
|
|
|
|
+ if (registartionBuilder_ == null) {
|
|
|
|
+ registartionBuilder_ = new com.google.protobuf.SingleFieldBuilder<
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProtoOrBuilder>(
|
|
|
|
+ registartion_,
|
|
|
|
+ getParentForChildren(),
|
|
|
|
+ isClean());
|
|
|
|
+ registartion_ = null;
|
|
|
|
+ }
|
|
|
|
+ return registartionBuilder_;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ // required uint32 errorCode = 2;
|
|
|
|
+ private int errorCode_ ;
|
|
|
|
+ public boolean hasErrorCode() {
|
|
|
|
+ return ((bitField0_ & 0x00000002) == 0x00000002);
|
|
|
|
+ }
|
|
|
|
+ public int getErrorCode() {
|
|
|
|
+ return errorCode_;
|
|
|
|
+ }
|
|
|
|
+ public Builder setErrorCode(int value) {
|
|
|
|
+ bitField0_ |= 0x00000002;
|
|
|
|
+ errorCode_ = value;
|
|
|
|
+ onChanged();
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder clearErrorCode() {
|
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000002);
|
|
|
|
+ errorCode_ = 0;
|
|
|
|
+ onChanged();
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ // required string msg = 3;
|
|
|
|
+ private java.lang.Object msg_ = "";
|
|
|
|
+ public boolean hasMsg() {
|
|
|
|
+ return ((bitField0_ & 0x00000004) == 0x00000004);
|
|
|
|
+ }
|
|
|
|
+ public String getMsg() {
|
|
|
|
+ java.lang.Object ref = msg_;
|
|
|
|
+ if (!(ref instanceof String)) {
|
|
|
|
+ String s = ((com.google.protobuf.ByteString) ref).toStringUtf8();
|
|
|
|
+ msg_ = s;
|
|
|
|
+ return s;
|
|
|
|
+ } else {
|
|
|
|
+ return (String) ref;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ public Builder setMsg(String value) {
|
|
|
|
+ if (value == null) {
|
|
|
|
+ throw new NullPointerException();
|
|
|
|
+ }
|
|
|
|
+ bitField0_ |= 0x00000004;
|
|
|
|
+ msg_ = value;
|
|
|
|
+ onChanged();
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder clearMsg() {
|
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000004);
|
|
|
|
+ msg_ = getDefaultInstance().getMsg();
|
|
|
|
+ onChanged();
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ void setMsg(com.google.protobuf.ByteString value) {
|
|
|
|
+ bitField0_ |= 0x00000004;
|
|
|
|
+ msg_ = value;
|
|
|
|
+ onChanged();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ // @@protoc_insertion_point(builder_scope:ErrorReportRequestProto)
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ static {
|
|
|
|
+ defaultInstance = new ErrorReportRequestProto(true);
|
|
|
|
+ defaultInstance.initFields();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ // @@protoc_insertion_point(class_scope:ErrorReportRequestProto)
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public interface ErrorReportResponseProtoOrBuilder
|
|
|
|
+ extends com.google.protobuf.MessageOrBuilder {
|
|
|
|
+ }
|
|
|
|
+ public static final class ErrorReportResponseProto extends
|
|
|
|
+ com.google.protobuf.GeneratedMessage
|
|
|
|
+ implements ErrorReportResponseProtoOrBuilder {
|
|
|
|
+ // Use ErrorReportResponseProto.newBuilder() to construct.
|
|
|
|
+ private ErrorReportResponseProto(Builder builder) {
|
|
|
|
+ super(builder);
|
|
|
|
+ }
|
|
|
|
+ private ErrorReportResponseProto(boolean noInit) {}
|
|
|
|
+
|
|
|
|
+ private static final ErrorReportResponseProto defaultInstance;
|
|
|
|
+ public static ErrorReportResponseProto getDefaultInstance() {
|
|
|
|
+ return defaultInstance;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public ErrorReportResponseProto getDefaultInstanceForType() {
|
|
|
|
+ return defaultInstance;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public static final com.google.protobuf.Descriptors.Descriptor
|
|
|
|
+ getDescriptor() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_ErrorReportResponseProto_descriptor;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
|
|
|
+ internalGetFieldAccessorTable() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_ErrorReportResponseProto_fieldAccessorTable;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private void initFields() {
|
|
|
|
+ }
|
|
|
|
+ private byte memoizedIsInitialized = -1;
|
|
|
|
+ public final boolean isInitialized() {
|
|
|
|
+ byte isInitialized = memoizedIsInitialized;
|
|
|
|
+ if (isInitialized != -1) return isInitialized == 1;
|
|
|
|
+
|
|
|
|
+ memoizedIsInitialized = 1;
|
|
|
|
+ return true;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ getSerializedSize();
|
|
|
|
+ getUnknownFields().writeTo(output);
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private int memoizedSerializedSize = -1;
|
|
|
|
+ public int getSerializedSize() {
|
|
|
|
+ int size = memoizedSerializedSize;
|
|
|
|
+ if (size != -1) return size;
|
|
|
|
+
|
|
|
|
+ size = 0;
|
|
|
|
+ size += getUnknownFields().getSerializedSize();
|
|
|
|
+ memoizedSerializedSize = size;
|
|
|
|
+ return size;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private static final long serialVersionUID = 0L;
|
|
|
|
+ @java.lang.Override
|
|
|
|
+ protected java.lang.Object writeReplace()
|
|
|
|
+ throws java.io.ObjectStreamException {
|
|
|
|
+ return super.writeReplace();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ @java.lang.Override
|
|
|
|
+ public boolean equals(final java.lang.Object obj) {
|
|
|
|
+ if (obj == this) {
|
|
|
|
+ return true;
|
|
|
|
+ }
|
|
|
|
+ if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto)) {
|
|
|
|
+ return super.equals(obj);
|
|
|
|
+ }
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto) obj;
|
|
|
|
+
|
|
|
|
+ boolean result = true;
|
|
|
|
+ result = result &&
|
|
|
|
+ getUnknownFields().equals(other.getUnknownFields());
|
|
|
|
+ return result;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ @java.lang.Override
|
|
|
|
+ public int hashCode() {
|
|
|
|
+ int hash = 41;
|
|
|
|
+ hash = (19 * hash) + getDescriptorForType().hashCode();
|
|
|
|
+ hash = (29 * hash) + getUnknownFields().hashCode();
|
|
|
|
+ return hash;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto parseFrom(
|
|
|
|
+ com.google.protobuf.ByteString data)
|
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
|
+ return newBuilder().mergeFrom(data).buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto parseFrom(
|
|
|
|
+ com.google.protobuf.ByteString data,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
|
+ return newBuilder().mergeFrom(data, extensionRegistry)
|
|
|
|
+ .buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto parseFrom(byte[] data)
|
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
|
+ return newBuilder().mergeFrom(data).buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto parseFrom(
|
|
|
|
+ byte[] data,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
|
+ return newBuilder().mergeFrom(data, extensionRegistry)
|
|
|
|
+ .buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto parseFrom(java.io.InputStream input)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ return newBuilder().mergeFrom(input).buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto parseFrom(
|
|
|
|
+ java.io.InputStream input,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ return newBuilder().mergeFrom(input, extensionRegistry)
|
|
|
|
+ .buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto parseDelimitedFrom(java.io.InputStream input)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ Builder builder = newBuilder();
|
|
|
|
+ if (builder.mergeDelimitedFrom(input)) {
|
|
|
|
+ return builder.buildParsed();
|
|
|
|
+ } else {
|
|
|
|
+ return null;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto parseDelimitedFrom(
|
|
|
|
+ java.io.InputStream input,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ Builder builder = newBuilder();
|
|
|
|
+ if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
|
|
|
|
+ return builder.buildParsed();
|
|
|
|
+ } else {
|
|
|
|
+ return null;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto parseFrom(
|
|
|
|
+ com.google.protobuf.CodedInputStream input)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ return newBuilder().mergeFrom(input).buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto parseFrom(
|
|
|
|
+ com.google.protobuf.CodedInputStream input,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ return newBuilder().mergeFrom(input, extensionRegistry)
|
|
|
|
+ .buildParsed();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public static Builder newBuilder() { return Builder.create(); }
|
|
|
|
+ public Builder newBuilderForType() { return newBuilder(); }
|
|
|
|
+ public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto prototype) {
|
|
|
|
+ return newBuilder().mergeFrom(prototype);
|
|
|
|
+ }
|
|
|
|
+ public Builder toBuilder() { return newBuilder(this); }
|
|
|
|
+
|
|
|
|
+ @java.lang.Override
|
|
|
|
+ protected Builder newBuilderForType(
|
|
|
|
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
|
|
|
|
+ Builder builder = new Builder(parent);
|
|
|
|
+ return builder;
|
|
|
|
+ }
|
|
|
|
+ public static final class Builder extends
|
|
|
|
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
|
|
|
|
+ implements org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProtoOrBuilder {
|
|
|
|
+ public static final com.google.protobuf.Descriptors.Descriptor
|
|
|
|
+ getDescriptor() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_ErrorReportResponseProto_descriptor;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
|
|
|
+ internalGetFieldAccessorTable() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_ErrorReportResponseProto_fieldAccessorTable;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ // Construct using org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto.newBuilder()
|
|
|
|
+ private Builder() {
|
|
|
|
+ maybeForceBuilderInitialization();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private Builder(BuilderParent parent) {
|
|
|
|
+ super(parent);
|
|
|
|
+ maybeForceBuilderInitialization();
|
|
|
|
+ }
|
|
|
|
+ private void maybeForceBuilderInitialization() {
|
|
|
|
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ private static Builder create() {
|
|
|
|
+ return new Builder();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public Builder clear() {
|
|
|
|
+ super.clear();
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public Builder clone() {
|
|
|
|
+ return create().mergeFrom(buildPartial());
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public com.google.protobuf.Descriptors.Descriptor
|
|
|
|
+ getDescriptorForType() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto.getDescriptor();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto getDefaultInstanceForType() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto.getDefaultInstance();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto build() {
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto result = buildPartial();
|
|
|
|
+ if (!result.isInitialized()) {
|
|
|
|
+ throw newUninitializedMessageException(result);
|
|
|
|
+ }
|
|
|
|
+ return result;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto buildParsed()
|
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto result = buildPartial();
|
|
|
|
+ if (!result.isInitialized()) {
|
|
|
|
+ throw newUninitializedMessageException(
|
|
|
|
+ result).asInvalidProtocolBufferException();
|
|
|
|
+ }
|
|
|
|
+ return result;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto buildPartial() {
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto(this);
|
|
|
|
+ onBuilt();
|
|
|
|
+ return result;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public Builder mergeFrom(com.google.protobuf.Message other) {
|
|
|
|
+ if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto) {
|
|
|
|
+ return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto)other);
|
|
|
|
+ } else {
|
|
|
|
+ super.mergeFrom(other);
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto other) {
|
|
|
|
+ if (other == org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto.getDefaultInstance()) return this;
|
|
|
|
+ this.mergeUnknownFields(other.getUnknownFields());
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public final boolean isInitialized() {
|
|
|
|
+ return true;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public Builder mergeFrom(
|
|
|
|
+ com.google.protobuf.CodedInputStream input,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
|
|
|
|
+ com.google.protobuf.UnknownFieldSet.newBuilder(
|
|
|
|
+ this.getUnknownFields());
|
|
|
|
+ while (true) {
|
|
|
|
+ int tag = input.readTag();
|
|
|
|
+ switch (tag) {
|
|
|
|
+ case 0:
|
|
|
|
+ this.setUnknownFields(unknownFields.build());
|
|
|
|
+ onChanged();
|
|
|
|
+ return this;
|
|
|
|
+ default: {
|
|
|
|
+ if (!parseUnknownField(input, unknownFields,
|
|
|
|
+ extensionRegistry, tag)) {
|
|
|
|
+ this.setUnknownFields(unknownFields.build());
|
|
|
|
+ onChanged();
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ break;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+
|
|
|
|
+ // @@protoc_insertion_point(builder_scope:ErrorReportResponseProto)
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ static {
|
|
|
|
+ defaultInstance = new ErrorReportResponseProto(true);
|
|
|
|
+ defaultInstance.initFields();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ // @@protoc_insertion_point(class_scope:ErrorReportResponseProto)
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public interface ProcessUpgradeRequestProtoOrBuilder
|
|
|
|
+ extends com.google.protobuf.MessageOrBuilder {
|
|
|
|
+
|
|
|
|
+ // optional .UpgradeCommandProto cmd = 1;
|
|
|
|
+ boolean hasCmd();
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto getCmd();
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProtoOrBuilder getCmdOrBuilder();
|
|
|
|
+ }
|
|
|
|
+ public static final class ProcessUpgradeRequestProto extends
|
|
|
|
+ com.google.protobuf.GeneratedMessage
|
|
|
|
+ implements ProcessUpgradeRequestProtoOrBuilder {
|
|
|
|
+ // Use ProcessUpgradeRequestProto.newBuilder() to construct.
|
|
|
|
+ private ProcessUpgradeRequestProto(Builder builder) {
|
|
|
|
+ super(builder);
|
|
|
|
+ }
|
|
|
|
+ private ProcessUpgradeRequestProto(boolean noInit) {}
|
|
|
|
+
|
|
|
|
+ private static final ProcessUpgradeRequestProto defaultInstance;
|
|
|
|
+ public static ProcessUpgradeRequestProto getDefaultInstance() {
|
|
|
|
+ return defaultInstance;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public ProcessUpgradeRequestProto getDefaultInstanceForType() {
|
|
|
|
+ return defaultInstance;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public static final com.google.protobuf.Descriptors.Descriptor
|
|
|
|
+ getDescriptor() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_ProcessUpgradeRequestProto_descriptor;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
|
|
|
+ internalGetFieldAccessorTable() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_ProcessUpgradeRequestProto_fieldAccessorTable;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private int bitField0_;
|
|
|
|
+ // optional .UpgradeCommandProto cmd = 1;
|
|
|
|
+ public static final int CMD_FIELD_NUMBER = 1;
|
|
|
|
+ private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto cmd_;
|
|
|
|
+ public boolean hasCmd() {
|
|
|
|
+ return ((bitField0_ & 0x00000001) == 0x00000001);
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto getCmd() {
|
|
|
|
+ return cmd_;
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProtoOrBuilder getCmdOrBuilder() {
|
|
|
|
+ return cmd_;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private void initFields() {
|
|
|
|
+ cmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.getDefaultInstance();
|
|
|
|
+ }
|
|
|
|
+ private byte memoizedIsInitialized = -1;
|
|
|
|
+ public final boolean isInitialized() {
|
|
|
|
+ byte isInitialized = memoizedIsInitialized;
|
|
|
|
+ if (isInitialized != -1) return isInitialized == 1;
|
|
|
|
+
|
|
|
|
+ if (hasCmd()) {
|
|
|
|
+ if (!getCmd().isInitialized()) {
|
|
|
|
+ memoizedIsInitialized = 0;
|
|
|
|
+ return false;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ memoizedIsInitialized = 1;
|
|
|
|
+ return true;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ getSerializedSize();
|
|
|
|
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
|
+ output.writeMessage(1, cmd_);
|
|
|
|
+ }
|
|
|
|
+ getUnknownFields().writeTo(output);
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private int memoizedSerializedSize = -1;
|
|
|
|
+ public int getSerializedSize() {
|
|
|
|
+ int size = memoizedSerializedSize;
|
|
|
|
+ if (size != -1) return size;
|
|
|
|
+
|
|
|
|
+ size = 0;
|
|
|
|
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
|
+ size += com.google.protobuf.CodedOutputStream
|
|
|
|
+ .computeMessageSize(1, cmd_);
|
|
|
|
+ }
|
|
|
|
+ size += getUnknownFields().getSerializedSize();
|
|
|
|
+ memoizedSerializedSize = size;
|
|
|
|
+ return size;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private static final long serialVersionUID = 0L;
|
|
|
|
+ @java.lang.Override
|
|
|
|
+ protected java.lang.Object writeReplace()
|
|
|
|
+ throws java.io.ObjectStreamException {
|
|
|
|
+ return super.writeReplace();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ @java.lang.Override
|
|
|
|
+ public boolean equals(final java.lang.Object obj) {
|
|
|
|
+ if (obj == this) {
|
|
|
|
+ return true;
|
|
|
|
+ }
|
|
|
|
+ if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeRequestProto)) {
|
|
|
|
+ return super.equals(obj);
|
|
|
|
+ }
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeRequestProto) obj;
|
|
|
|
+
|
|
|
|
+ boolean result = true;
|
|
|
|
+ result = result && (hasCmd() == other.hasCmd());
|
|
|
|
+ if (hasCmd()) {
|
|
|
|
+ result = result && getCmd()
|
|
|
|
+ .equals(other.getCmd());
|
|
|
|
+ }
|
|
|
|
+ result = result &&
|
|
|
|
+ getUnknownFields().equals(other.getUnknownFields());
|
|
|
|
+ return result;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ @java.lang.Override
|
|
|
|
+ public int hashCode() {
|
|
|
|
+ int hash = 41;
|
|
|
|
+ hash = (19 * hash) + getDescriptorForType().hashCode();
|
|
|
|
+ if (hasCmd()) {
|
|
|
|
+ hash = (37 * hash) + CMD_FIELD_NUMBER;
|
|
|
|
+ hash = (53 * hash) + getCmd().hashCode();
|
|
|
|
+ }
|
|
|
|
+ hash = (29 * hash) + getUnknownFields().hashCode();
|
|
|
|
+ return hash;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeRequestProto parseFrom(
|
|
|
|
+ com.google.protobuf.ByteString data)
|
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
|
+ return newBuilder().mergeFrom(data).buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeRequestProto parseFrom(
|
|
|
|
+ com.google.protobuf.ByteString data,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
|
+ return newBuilder().mergeFrom(data, extensionRegistry)
|
|
|
|
+ .buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeRequestProto parseFrom(byte[] data)
|
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
|
+ return newBuilder().mergeFrom(data).buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeRequestProto parseFrom(
|
|
|
|
+ byte[] data,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
|
+ return newBuilder().mergeFrom(data, extensionRegistry)
|
|
|
|
+ .buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeRequestProto parseFrom(java.io.InputStream input)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ return newBuilder().mergeFrom(input).buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeRequestProto parseFrom(
|
|
|
|
+ java.io.InputStream input,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ return newBuilder().mergeFrom(input, extensionRegistry)
|
|
|
|
+ .buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeRequestProto parseDelimitedFrom(java.io.InputStream input)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ Builder builder = newBuilder();
|
|
|
|
+ if (builder.mergeDelimitedFrom(input)) {
|
|
|
|
+ return builder.buildParsed();
|
|
|
|
+ } else {
|
|
|
|
+ return null;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeRequestProto parseDelimitedFrom(
|
|
|
|
+ java.io.InputStream input,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ Builder builder = newBuilder();
|
|
|
|
+ if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
|
|
|
|
+ return builder.buildParsed();
|
|
|
|
+ } else {
|
|
|
|
+ return null;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeRequestProto parseFrom(
|
|
|
|
+ com.google.protobuf.CodedInputStream input)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ return newBuilder().mergeFrom(input).buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeRequestProto parseFrom(
|
|
|
|
+ com.google.protobuf.CodedInputStream input,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ return newBuilder().mergeFrom(input, extensionRegistry)
|
|
|
|
+ .buildParsed();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public static Builder newBuilder() { return Builder.create(); }
|
|
|
|
+ public Builder newBuilderForType() { return newBuilder(); }
|
|
|
|
+ public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeRequestProto prototype) {
|
|
|
|
+ return newBuilder().mergeFrom(prototype);
|
|
|
|
+ }
|
|
|
|
+ public Builder toBuilder() { return newBuilder(this); }
|
|
|
|
+
|
|
|
|
+ @java.lang.Override
|
|
|
|
+ protected Builder newBuilderForType(
|
|
|
|
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
|
|
|
|
+ Builder builder = new Builder(parent);
|
|
|
|
+ return builder;
|
|
|
|
+ }
|
|
|
|
+ public static final class Builder extends
|
|
|
|
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
|
|
|
|
+ implements org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeRequestProtoOrBuilder {
|
|
|
|
+ public static final com.google.protobuf.Descriptors.Descriptor
|
|
|
|
+ getDescriptor() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_ProcessUpgradeRequestProto_descriptor;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
|
|
|
+ internalGetFieldAccessorTable() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_ProcessUpgradeRequestProto_fieldAccessorTable;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ // Construct using org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeRequestProto.newBuilder()
|
|
|
|
+ private Builder() {
|
|
|
|
+ maybeForceBuilderInitialization();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private Builder(BuilderParent parent) {
|
|
|
|
+ super(parent);
|
|
|
|
+ maybeForceBuilderInitialization();
|
|
|
|
+ }
|
|
|
|
+ private void maybeForceBuilderInitialization() {
|
|
|
|
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
|
|
|
|
+ getCmdFieldBuilder();
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ private static Builder create() {
|
|
|
|
+ return new Builder();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public Builder clear() {
|
|
|
|
+ super.clear();
|
|
|
|
+ if (cmdBuilder_ == null) {
|
|
|
|
+ cmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.getDefaultInstance();
|
|
|
|
+ } else {
|
|
|
|
+ cmdBuilder_.clear();
|
|
|
|
+ }
|
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000001);
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public Builder clone() {
|
|
|
|
+ return create().mergeFrom(buildPartial());
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public com.google.protobuf.Descriptors.Descriptor
|
|
|
|
+ getDescriptorForType() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeRequestProto.getDescriptor();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeRequestProto getDefaultInstanceForType() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeRequestProto.getDefaultInstance();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeRequestProto build() {
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeRequestProto result = buildPartial();
|
|
|
|
+ if (!result.isInitialized()) {
|
|
|
|
+ throw newUninitializedMessageException(result);
|
|
|
|
+ }
|
|
|
|
+ return result;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeRequestProto buildParsed()
|
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeRequestProto result = buildPartial();
|
|
|
|
+ if (!result.isInitialized()) {
|
|
|
|
+ throw newUninitializedMessageException(
|
|
|
|
+ result).asInvalidProtocolBufferException();
|
|
|
|
+ }
|
|
|
|
+ return result;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeRequestProto buildPartial() {
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeRequestProto(this);
|
|
|
|
+ int from_bitField0_ = bitField0_;
|
|
|
|
+ int to_bitField0_ = 0;
|
|
|
|
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
|
+ to_bitField0_ |= 0x00000001;
|
|
|
|
+ }
|
|
|
|
+ if (cmdBuilder_ == null) {
|
|
|
|
+ result.cmd_ = cmd_;
|
|
|
|
+ } else {
|
|
|
|
+ result.cmd_ = cmdBuilder_.build();
|
|
|
|
+ }
|
|
|
|
+ result.bitField0_ = to_bitField0_;
|
|
|
|
+ onBuilt();
|
|
|
|
+ return result;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public Builder mergeFrom(com.google.protobuf.Message other) {
|
|
|
|
+ if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeRequestProto) {
|
|
|
|
+ return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeRequestProto)other);
|
|
|
|
+ } else {
|
|
|
|
+ super.mergeFrom(other);
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeRequestProto other) {
|
|
|
|
+ if (other == org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeRequestProto.getDefaultInstance()) return this;
|
|
|
|
+ if (other.hasCmd()) {
|
|
|
|
+ mergeCmd(other.getCmd());
|
|
|
|
+ }
|
|
|
|
+ this.mergeUnknownFields(other.getUnknownFields());
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public final boolean isInitialized() {
|
|
|
|
+ if (hasCmd()) {
|
|
|
|
+ if (!getCmd().isInitialized()) {
|
|
|
|
+
|
|
|
|
+ return false;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ return true;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public Builder mergeFrom(
|
|
|
|
+ com.google.protobuf.CodedInputStream input,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
|
|
|
|
+ com.google.protobuf.UnknownFieldSet.newBuilder(
|
|
|
|
+ this.getUnknownFields());
|
|
|
|
+ while (true) {
|
|
|
|
+ int tag = input.readTag();
|
|
|
|
+ switch (tag) {
|
|
|
|
+ case 0:
|
|
|
|
+ this.setUnknownFields(unknownFields.build());
|
|
|
|
+ onChanged();
|
|
|
|
+ return this;
|
|
|
|
+ default: {
|
|
|
|
+ if (!parseUnknownField(input, unknownFields,
|
|
|
|
+ extensionRegistry, tag)) {
|
|
|
|
+ this.setUnknownFields(unknownFields.build());
|
|
|
|
+ onChanged();
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ break;
|
|
|
|
+ }
|
|
|
|
+ case 10: {
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.newBuilder();
|
|
|
|
+ if (hasCmd()) {
|
|
|
|
+ subBuilder.mergeFrom(getCmd());
|
|
|
|
+ }
|
|
|
|
+ input.readMessage(subBuilder, extensionRegistry);
|
|
|
|
+ setCmd(subBuilder.buildPartial());
|
|
|
|
+ break;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private int bitField0_;
|
|
|
|
+
|
|
|
|
+ // optional .UpgradeCommandProto cmd = 1;
|
|
|
|
+ private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto cmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.getDefaultInstance();
|
|
|
|
+ private com.google.protobuf.SingleFieldBuilder<
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProtoOrBuilder> cmdBuilder_;
|
|
|
|
+ public boolean hasCmd() {
|
|
|
|
+ return ((bitField0_ & 0x00000001) == 0x00000001);
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto getCmd() {
|
|
|
|
+ if (cmdBuilder_ == null) {
|
|
|
|
+ return cmd_;
|
|
|
|
+ } else {
|
|
|
|
+ return cmdBuilder_.getMessage();
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ public Builder setCmd(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto value) {
|
|
|
|
+ if (cmdBuilder_ == null) {
|
|
|
|
+ if (value == null) {
|
|
|
|
+ throw new NullPointerException();
|
|
|
|
+ }
|
|
|
|
+ cmd_ = value;
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ cmdBuilder_.setMessage(value);
|
|
|
|
+ }
|
|
|
|
+ bitField0_ |= 0x00000001;
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder setCmd(
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.Builder builderForValue) {
|
|
|
|
+ if (cmdBuilder_ == null) {
|
|
|
|
+ cmd_ = builderForValue.build();
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ cmdBuilder_.setMessage(builderForValue.build());
|
|
|
|
+ }
|
|
|
|
+ bitField0_ |= 0x00000001;
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder mergeCmd(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto value) {
|
|
|
|
+ if (cmdBuilder_ == null) {
|
|
|
|
+ if (((bitField0_ & 0x00000001) == 0x00000001) &&
|
|
|
|
+ cmd_ != org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.getDefaultInstance()) {
|
|
|
|
+ cmd_ =
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.newBuilder(cmd_).mergeFrom(value).buildPartial();
|
|
|
|
+ } else {
|
|
|
|
+ cmd_ = value;
|
|
|
|
+ }
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ cmdBuilder_.mergeFrom(value);
|
|
|
|
+ }
|
|
|
|
+ bitField0_ |= 0x00000001;
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder clearCmd() {
|
|
|
|
+ if (cmdBuilder_ == null) {
|
|
|
|
+ cmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.getDefaultInstance();
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ cmdBuilder_.clear();
|
|
|
|
+ }
|
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000001);
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.Builder getCmdBuilder() {
|
|
|
|
+ bitField0_ |= 0x00000001;
|
|
|
|
+ onChanged();
|
|
|
|
+ return getCmdFieldBuilder().getBuilder();
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProtoOrBuilder getCmdOrBuilder() {
|
|
|
|
+ if (cmdBuilder_ != null) {
|
|
|
|
+ return cmdBuilder_.getMessageOrBuilder();
|
|
|
|
+ } else {
|
|
|
|
+ return cmd_;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ private com.google.protobuf.SingleFieldBuilder<
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProtoOrBuilder>
|
|
|
|
+ getCmdFieldBuilder() {
|
|
|
|
+ if (cmdBuilder_ == null) {
|
|
|
|
+ cmdBuilder_ = new com.google.protobuf.SingleFieldBuilder<
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProtoOrBuilder>(
|
|
|
|
+ cmd_,
|
|
|
|
+ getParentForChildren(),
|
|
|
|
+ isClean());
|
|
|
|
+ cmd_ = null;
|
|
|
|
+ }
|
|
|
|
+ return cmdBuilder_;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ // @@protoc_insertion_point(builder_scope:ProcessUpgradeRequestProto)
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ static {
|
|
|
|
+ defaultInstance = new ProcessUpgradeRequestProto(true);
|
|
|
|
+ defaultInstance.initFields();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ // @@protoc_insertion_point(class_scope:ProcessUpgradeRequestProto)
|
|
|
|
+ }
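+
+ // --- Hand-written illustrative sketch, not part of the generated file: building a
+ // ProcessUpgradeRequestProto. Its only field, `cmd`, is optional, so an empty message
+ // is valid; the helper name below is hypothetical. ---
+ private static ProcessUpgradeRequestProto exampleUpgradeRequest(UpgradeCommandProto cmd) {
+ ProcessUpgradeRequestProto.Builder b = ProcessUpgradeRequestProto.newBuilder();
+ if (cmd != null) {
+ b.setCmd(cmd); // leave unset to keep hasCmd() == false on the built message
+ }
+ return b.build();
+ }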
|
|
|
|
+
|
|
|
|
+ public interface ProcessUpgradeResponseProtoOrBuilder
|
|
|
|
+ extends com.google.protobuf.MessageOrBuilder {
|
|
|
|
+
|
|
|
|
+ // optional .UpgradeCommandProto cmd = 1;
|
|
|
|
+ boolean hasCmd();
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto getCmd();
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProtoOrBuilder getCmdOrBuilder();
|
|
|
|
+ }
|
|
|
|
+ public static final class ProcessUpgradeResponseProto extends
|
|
|
|
+ com.google.protobuf.GeneratedMessage
|
|
|
|
+ implements ProcessUpgradeResponseProtoOrBuilder {
|
|
|
|
+ // Use ProcessUpgradeResponseProto.newBuilder() to construct.
|
|
|
|
+ private ProcessUpgradeResponseProto(Builder builder) {
|
|
|
|
+ super(builder);
|
|
|
|
+ }
|
|
|
|
+ private ProcessUpgradeResponseProto(boolean noInit) {}
|
|
|
|
+
|
|
|
|
+ private static final ProcessUpgradeResponseProto defaultInstance;
|
|
|
|
+ public static ProcessUpgradeResponseProto getDefaultInstance() {
|
|
|
|
+ return defaultInstance;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public ProcessUpgradeResponseProto getDefaultInstanceForType() {
|
|
|
|
+ return defaultInstance;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public static final com.google.protobuf.Descriptors.Descriptor
|
|
|
|
+ getDescriptor() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_ProcessUpgradeResponseProto_descriptor;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
|
|
|
+ internalGetFieldAccessorTable() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_ProcessUpgradeResponseProto_fieldAccessorTable;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private int bitField0_;
|
|
|
|
+ // optional .UpgradeCommandProto cmd = 1;
|
|
|
|
+ public static final int CMD_FIELD_NUMBER = 1;
|
|
|
|
+ private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto cmd_;
|
|
|
|
+ public boolean hasCmd() {
|
|
|
|
+ return ((bitField0_ & 0x00000001) == 0x00000001);
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto getCmd() {
|
|
|
|
+ return cmd_;
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProtoOrBuilder getCmdOrBuilder() {
|
|
|
|
+ return cmd_;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private void initFields() {
|
|
|
|
+ cmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.getDefaultInstance();
|
|
|
|
+ }
|
|
|
|
+ private byte memoizedIsInitialized = -1;
|
|
|
|
+ public final boolean isInitialized() {
|
|
|
|
+ byte isInitialized = memoizedIsInitialized;
|
|
|
|
+ if (isInitialized != -1) return isInitialized == 1;
|
|
|
|
+
|
|
|
|
+ if (hasCmd()) {
|
|
|
|
+ if (!getCmd().isInitialized()) {
|
|
|
|
+ memoizedIsInitialized = 0;
|
|
|
|
+ return false;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ memoizedIsInitialized = 1;
|
|
|
|
+ return true;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ getSerializedSize();
|
|
|
|
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
|
+ output.writeMessage(1, cmd_);
|
|
|
|
+ }
|
|
|
|
+ getUnknownFields().writeTo(output);
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private int memoizedSerializedSize = -1;
|
|
|
|
+ public int getSerializedSize() {
|
|
|
|
+ int size = memoizedSerializedSize;
|
|
|
|
+ if (size != -1) return size;
|
|
|
|
+
|
|
|
|
+ size = 0;
|
|
|
|
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
|
+ size += com.google.protobuf.CodedOutputStream
|
|
|
|
+ .computeMessageSize(1, cmd_);
|
|
|
|
+ }
|
|
|
|
+ size += getUnknownFields().getSerializedSize();
|
|
|
|
+ memoizedSerializedSize = size;
|
|
|
|
+ return size;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private static final long serialVersionUID = 0L;
|
|
|
|
+ @java.lang.Override
|
|
|
|
+ protected java.lang.Object writeReplace()
|
|
|
|
+ throws java.io.ObjectStreamException {
|
|
|
|
+ return super.writeReplace();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ @java.lang.Override
|
|
|
|
+ public boolean equals(final java.lang.Object obj) {
|
|
|
|
+ if (obj == this) {
|
|
|
|
+ return true;
|
|
|
|
+ }
|
|
|
|
+ if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeResponseProto)) {
|
|
|
|
+ return super.equals(obj);
|
|
|
|
+ }
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeResponseProto) obj;
|
|
|
|
+
|
|
|
|
+ boolean result = true;
|
|
|
|
+ result = result && (hasCmd() == other.hasCmd());
|
|
|
|
+ if (hasCmd()) {
|
|
|
|
+ result = result && getCmd()
|
|
|
|
+ .equals(other.getCmd());
|
|
|
|
+ }
|
|
|
|
+ result = result &&
|
|
|
|
+ getUnknownFields().equals(other.getUnknownFields());
|
|
|
|
+ return result;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ @java.lang.Override
|
|
|
|
+ public int hashCode() {
|
|
|
|
+ int hash = 41;
|
|
|
|
+ hash = (19 * hash) + getDescriptorForType().hashCode();
|
|
|
|
+ if (hasCmd()) {
|
|
|
|
+ hash = (37 * hash) + CMD_FIELD_NUMBER;
|
|
|
|
+ hash = (53 * hash) + getCmd().hashCode();
|
|
|
|
+ }
|
|
|
|
+ hash = (29 * hash) + getUnknownFields().hashCode();
|
|
|
|
+ return hash;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeResponseProto parseFrom(
|
|
|
|
+ com.google.protobuf.ByteString data)
|
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
|
+ return newBuilder().mergeFrom(data).buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeResponseProto parseFrom(
|
|
|
|
+ com.google.protobuf.ByteString data,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
|
+ return newBuilder().mergeFrom(data, extensionRegistry)
|
|
|
|
+ .buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeResponseProto parseFrom(byte[] data)
|
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
|
+ return newBuilder().mergeFrom(data).buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeResponseProto parseFrom(
|
|
|
|
+ byte[] data,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
|
+ return newBuilder().mergeFrom(data, extensionRegistry)
|
|
|
|
+ .buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeResponseProto parseFrom(java.io.InputStream input)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ return newBuilder().mergeFrom(input).buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeResponseProto parseFrom(
|
|
|
|
+ java.io.InputStream input,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ return newBuilder().mergeFrom(input, extensionRegistry)
|
|
|
|
+ .buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeResponseProto parseDelimitedFrom(java.io.InputStream input)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ Builder builder = newBuilder();
|
|
|
|
+ if (builder.mergeDelimitedFrom(input)) {
|
|
|
|
+ return builder.buildParsed();
|
|
|
|
+ } else {
|
|
|
|
+ return null;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeResponseProto parseDelimitedFrom(
|
|
|
|
+ java.io.InputStream input,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ Builder builder = newBuilder();
|
|
|
|
+ if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
|
|
|
|
+ return builder.buildParsed();
|
|
|
|
+ } else {
|
|
|
|
+ return null;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeResponseProto parseFrom(
|
|
|
|
+ com.google.protobuf.CodedInputStream input)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ return newBuilder().mergeFrom(input).buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeResponseProto parseFrom(
|
|
|
|
+ com.google.protobuf.CodedInputStream input,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ return newBuilder().mergeFrom(input, extensionRegistry)
|
|
|
|
+ .buildParsed();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public static Builder newBuilder() { return Builder.create(); }
|
|
|
|
+ public Builder newBuilderForType() { return newBuilder(); }
|
|
|
|
+ public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeResponseProto prototype) {
|
|
|
|
+ return newBuilder().mergeFrom(prototype);
|
|
|
|
+ }
|
|
|
|
+ public Builder toBuilder() { return newBuilder(this); }
|
|
|
|
+
|
|
|
|
+ @java.lang.Override
|
|
|
|
+ protected Builder newBuilderForType(
|
|
|
|
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
|
|
|
|
+ Builder builder = new Builder(parent);
|
|
|
|
+ return builder;
|
|
|
|
+ }
|
|
|
|
+ public static final class Builder extends
|
|
|
|
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
|
|
|
|
+ implements org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeResponseProtoOrBuilder {
|
|
|
|
+ public static final com.google.protobuf.Descriptors.Descriptor
|
|
|
|
+ getDescriptor() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_ProcessUpgradeResponseProto_descriptor;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
|
|
|
+ internalGetFieldAccessorTable() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_ProcessUpgradeResponseProto_fieldAccessorTable;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ // Construct using org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeResponseProto.newBuilder()
|
|
|
|
+ private Builder() {
|
|
|
|
+ maybeForceBuilderInitialization();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private Builder(BuilderParent parent) {
|
|
|
|
+ super(parent);
|
|
|
|
+ maybeForceBuilderInitialization();
|
|
|
|
+ }
|
|
|
|
+ private void maybeForceBuilderInitialization() {
|
|
|
|
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
|
|
|
|
+ getCmdFieldBuilder();
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ private static Builder create() {
|
|
|
|
+ return new Builder();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public Builder clear() {
|
|
|
|
+ super.clear();
|
|
|
|
+ if (cmdBuilder_ == null) {
|
|
|
|
+ cmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.getDefaultInstance();
|
|
|
|
+ } else {
|
|
|
|
+ cmdBuilder_.clear();
|
|
|
|
+ }
|
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000001);
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public Builder clone() {
|
|
|
|
+ return create().mergeFrom(buildPartial());
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public com.google.protobuf.Descriptors.Descriptor
|
|
|
|
+ getDescriptorForType() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeResponseProto.getDescriptor();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeResponseProto getDefaultInstanceForType() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeResponseProto.getDefaultInstance();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeResponseProto build() {
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeResponseProto result = buildPartial();
|
|
|
|
+ if (!result.isInitialized()) {
|
|
|
|
+ throw newUninitializedMessageException(result);
|
|
|
|
+ }
|
|
|
|
+ return result;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeResponseProto buildParsed()
|
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeResponseProto result = buildPartial();
|
|
|
|
+ if (!result.isInitialized()) {
|
|
|
|
+ throw newUninitializedMessageException(
|
|
|
|
+ result).asInvalidProtocolBufferException();
|
|
|
|
+ }
|
|
|
|
+ return result;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeResponseProto buildPartial() {
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeResponseProto(this);
|
|
|
|
+ int from_bitField0_ = bitField0_;
|
|
|
|
+ int to_bitField0_ = 0;
|
|
|
|
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
|
+ to_bitField0_ |= 0x00000001;
|
|
|
|
+ }
|
|
|
|
+ if (cmdBuilder_ == null) {
|
|
|
|
+ result.cmd_ = cmd_;
|
|
|
|
+ } else {
|
|
|
|
+ result.cmd_ = cmdBuilder_.build();
|
|
|
|
+ }
|
|
|
|
+ result.bitField0_ = to_bitField0_;
|
|
|
|
+ onBuilt();
|
|
|
|
+ return result;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public Builder mergeFrom(com.google.protobuf.Message other) {
|
|
|
|
+ if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeResponseProto) {
|
|
|
|
+ return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeResponseProto)other);
|
|
|
|
+ } else {
|
|
|
|
+ super.mergeFrom(other);
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeResponseProto other) {
|
|
|
|
+ if (other == org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeResponseProto.getDefaultInstance()) return this;
|
|
|
|
+ if (other.hasCmd()) {
|
|
|
|
+ mergeCmd(other.getCmd());
|
|
|
|
+ }
|
|
|
|
+ this.mergeUnknownFields(other.getUnknownFields());
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public final boolean isInitialized() {
|
|
|
|
+ if (hasCmd()) {
|
|
|
|
+ if (!getCmd().isInitialized()) {
|
|
|
|
+
|
|
|
|
+ return false;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ return true;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public Builder mergeFrom(
|
|
|
|
+ com.google.protobuf.CodedInputStream input,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
|
|
|
|
+ com.google.protobuf.UnknownFieldSet.newBuilder(
|
|
|
|
+ this.getUnknownFields());
|
|
|
|
+ while (true) {
|
|
|
|
+ int tag = input.readTag();
|
|
|
|
+ switch (tag) {
|
|
|
|
+ case 0:
|
|
|
|
+ this.setUnknownFields(unknownFields.build());
|
|
|
|
+ onChanged();
|
|
|
|
+ return this;
|
|
|
|
+ default: {
|
|
|
|
+ if (!parseUnknownField(input, unknownFields,
|
|
|
|
+ extensionRegistry, tag)) {
|
|
|
|
+ this.setUnknownFields(unknownFields.build());
|
|
|
|
+ onChanged();
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ break;
|
|
|
|
+ }
|
|
|
|
+ case 10: {
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.newBuilder();
|
|
|
|
+ if (hasCmd()) {
|
|
|
|
+ subBuilder.mergeFrom(getCmd());
|
|
|
|
+ }
|
|
|
|
+ input.readMessage(subBuilder, extensionRegistry);
|
|
|
|
+ setCmd(subBuilder.buildPartial());
|
|
|
|
+ break;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private int bitField0_;
|
|
|
|
+
|
|
|
|
+ // optional .UpgradeCommandProto cmd = 1;
|
|
|
|
+ private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto cmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.getDefaultInstance();
|
|
|
|
+ private com.google.protobuf.SingleFieldBuilder<
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProtoOrBuilder> cmdBuilder_;
|
|
|
|
+ public boolean hasCmd() {
|
|
|
|
+ return ((bitField0_ & 0x00000001) == 0x00000001);
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto getCmd() {
|
|
|
|
+ if (cmdBuilder_ == null) {
|
|
|
|
+ return cmd_;
|
|
|
|
+ } else {
|
|
|
|
+ return cmdBuilder_.getMessage();
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ public Builder setCmd(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto value) {
|
|
|
|
+ if (cmdBuilder_ == null) {
|
|
|
|
+ if (value == null) {
|
|
|
|
+ throw new NullPointerException();
|
|
|
|
+ }
|
|
|
|
+ cmd_ = value;
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ cmdBuilder_.setMessage(value);
|
|
|
|
+ }
|
|
|
|
+ bitField0_ |= 0x00000001;
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder setCmd(
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.Builder builderForValue) {
|
|
|
|
+ if (cmdBuilder_ == null) {
|
|
|
|
+ cmd_ = builderForValue.build();
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ cmdBuilder_.setMessage(builderForValue.build());
|
|
|
|
+ }
|
|
|
|
+ bitField0_ |= 0x00000001;
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder mergeCmd(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto value) {
|
|
|
|
+ if (cmdBuilder_ == null) {
|
|
|
|
+ if (((bitField0_ & 0x00000001) == 0x00000001) &&
|
|
|
|
+ cmd_ != org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.getDefaultInstance()) {
|
|
|
|
+ cmd_ =
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.newBuilder(cmd_).mergeFrom(value).buildPartial();
|
|
|
|
+ } else {
|
|
|
|
+ cmd_ = value;
|
|
|
|
+ }
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ cmdBuilder_.mergeFrom(value);
|
|
|
|
+ }
|
|
|
|
+ bitField0_ |= 0x00000001;
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder clearCmd() {
|
|
|
|
+ if (cmdBuilder_ == null) {
|
|
|
|
+ cmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.getDefaultInstance();
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ cmdBuilder_.clear();
|
|
|
|
+ }
|
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000001);
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.Builder getCmdBuilder() {
|
|
|
|
+ bitField0_ |= 0x00000001;
|
|
|
|
+ onChanged();
|
|
|
|
+ return getCmdFieldBuilder().getBuilder();
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProtoOrBuilder getCmdOrBuilder() {
|
|
|
|
+ if (cmdBuilder_ != null) {
|
|
|
|
+ return cmdBuilder_.getMessageOrBuilder();
|
|
|
|
+ } else {
|
|
|
|
+ return cmd_;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ private com.google.protobuf.SingleFieldBuilder<
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProtoOrBuilder>
|
|
|
|
+ getCmdFieldBuilder() {
|
|
|
|
+ if (cmdBuilder_ == null) {
|
|
|
|
+ cmdBuilder_ = new com.google.protobuf.SingleFieldBuilder<
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProtoOrBuilder>(
|
|
|
|
+ cmd_,
|
|
|
|
+ getParentForChildren(),
|
|
|
|
+ isClean());
|
|
|
|
+ cmd_ = null;
|
|
|
|
+ }
|
|
|
|
+ return cmdBuilder_;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ // @@protoc_insertion_point(builder_scope:ProcessUpgradeResponseProto)
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ static {
|
|
|
|
+ defaultInstance = new ProcessUpgradeResponseProto(true);
|
|
|
|
+ defaultInstance.initFields();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ // @@protoc_insertion_point(class_scope:ProcessUpgradeResponseProto)
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public interface ReportBadBlocksRequestProtoOrBuilder
|
|
|
|
+ extends com.google.protobuf.MessageOrBuilder {
|
|
|
|
+
|
|
|
|
+ // repeated .LocatedBlockProto blocks = 1;
|
|
|
|
+ java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto>
|
|
|
|
+ getBlocksList();
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto getBlocks(int index);
|
|
|
|
+ int getBlocksCount();
|
|
|
|
+ java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder>
|
|
|
|
+ getBlocksOrBuilderList();
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder getBlocksOrBuilder(
|
|
|
|
+ int index);
|
|
|
|
+ }
|
|
|
|
+ public static final class ReportBadBlocksRequestProto extends
|
|
|
|
+ com.google.protobuf.GeneratedMessage
|
|
|
|
+ implements ReportBadBlocksRequestProtoOrBuilder {
|
|
|
|
+ // Use ReportBadBlocksRequestProto.newBuilder() to construct.
|
|
|
|
+ private ReportBadBlocksRequestProto(Builder builder) {
|
|
|
|
+ super(builder);
|
|
|
|
+ }
|
|
|
|
+ private ReportBadBlocksRequestProto(boolean noInit) {}
|
|
|
|
+
|
|
|
|
+ private static final ReportBadBlocksRequestProto defaultInstance;
|
|
|
|
+ public static ReportBadBlocksRequestProto getDefaultInstance() {
|
|
|
|
+ return defaultInstance;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public ReportBadBlocksRequestProto getDefaultInstanceForType() {
|
|
|
|
+ return defaultInstance;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public static final com.google.protobuf.Descriptors.Descriptor
|
|
|
|
+ getDescriptor() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_ReportBadBlocksRequestProto_descriptor;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
|
|
|
+ internalGetFieldAccessorTable() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_ReportBadBlocksRequestProto_fieldAccessorTable;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ // repeated .LocatedBlockProto blocks = 1;
|
|
|
|
+ public static final int BLOCKS_FIELD_NUMBER = 1;
|
|
|
|
+ private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto> blocks_;
|
|
|
|
+ public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto> getBlocksList() {
|
|
|
|
+ return blocks_;
|
|
|
|
+ }
|
|
|
|
+ public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder>
|
|
|
|
+ getBlocksOrBuilderList() {
|
|
|
|
+ return blocks_;
|
|
|
|
+ }
|
|
|
|
+ public int getBlocksCount() {
|
|
|
|
+ return blocks_.size();
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto getBlocks(int index) {
|
|
|
|
+ return blocks_.get(index);
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder getBlocksOrBuilder(
|
|
|
|
+ int index) {
|
|
|
|
+ return blocks_.get(index);
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private void initFields() {
|
|
|
|
+ blocks_ = java.util.Collections.emptyList();
|
|
|
|
+ }
|
|
|
|
+ private byte memoizedIsInitialized = -1;
|
|
|
|
+ public final boolean isInitialized() {
|
|
|
|
+ byte isInitialized = memoizedIsInitialized;
|
|
|
|
+ if (isInitialized != -1) return isInitialized == 1;
|
|
|
|
+
|
|
|
|
+ for (int i = 0; i < getBlocksCount(); i++) {
|
|
|
|
+ if (!getBlocks(i).isInitialized()) {
|
|
|
|
+ memoizedIsInitialized = 0;
|
|
|
|
+ return false;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ memoizedIsInitialized = 1;
|
|
|
|
+ return true;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ getSerializedSize();
|
|
|
|
+ for (int i = 0; i < blocks_.size(); i++) {
|
|
|
|
+ output.writeMessage(1, blocks_.get(i));
|
|
|
|
+ }
|
|
|
|
+ getUnknownFields().writeTo(output);
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private int memoizedSerializedSize = -1;
|
|
|
|
+ public int getSerializedSize() {
|
|
|
|
+ int size = memoizedSerializedSize;
|
|
|
|
+ if (size != -1) return size;
|
|
|
|
+
|
|
|
|
+ size = 0;
|
|
|
|
+ for (int i = 0; i < blocks_.size(); i++) {
|
|
|
|
+ size += com.google.protobuf.CodedOutputStream
|
|
|
|
+ .computeMessageSize(1, blocks_.get(i));
|
|
|
|
+ }
|
|
|
|
+ size += getUnknownFields().getSerializedSize();
|
|
|
|
+ memoizedSerializedSize = size;
|
|
|
|
+ return size;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private static final long serialVersionUID = 0L;
|
|
|
|
+ @java.lang.Override
|
|
|
|
+ protected java.lang.Object writeReplace()
|
|
|
|
+ throws java.io.ObjectStreamException {
|
|
|
|
+ return super.writeReplace();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ @java.lang.Override
|
|
|
|
+ public boolean equals(final java.lang.Object obj) {
|
|
|
|
+ if (obj == this) {
|
|
|
|
+ return true;
|
|
|
|
+ }
|
|
|
|
+ if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto)) {
|
|
|
|
+ return super.equals(obj);
|
|
|
|
+ }
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto) obj;
|
|
|
|
+
|
|
|
|
+ boolean result = true;
|
|
|
|
+ result = result && getBlocksList()
|
|
|
|
+ .equals(other.getBlocksList());
|
|
|
|
+ result = result &&
|
|
|
|
+ getUnknownFields().equals(other.getUnknownFields());
|
|
|
|
+ return result;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ @java.lang.Override
|
|
|
|
+ public int hashCode() {
|
|
|
|
+ int hash = 41;
|
|
|
|
+ hash = (19 * hash) + getDescriptorForType().hashCode();
|
|
|
|
+ if (getBlocksCount() > 0) {
|
|
|
|
+ hash = (37 * hash) + BLOCKS_FIELD_NUMBER;
|
|
|
|
+ hash = (53 * hash) + getBlocksList().hashCode();
|
|
|
|
+ }
|
|
|
|
+ hash = (29 * hash) + getUnknownFields().hashCode();
|
|
|
|
+ return hash;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto parseFrom(
|
|
|
|
+ com.google.protobuf.ByteString data)
|
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
|
+ return newBuilder().mergeFrom(data).buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto parseFrom(
|
|
|
|
+ com.google.protobuf.ByteString data,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
|
+ return newBuilder().mergeFrom(data, extensionRegistry)
|
|
|
|
+ .buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto parseFrom(byte[] data)
|
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
|
+ return newBuilder().mergeFrom(data).buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto parseFrom(
|
|
|
|
+ byte[] data,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
|
+ return newBuilder().mergeFrom(data, extensionRegistry)
|
|
|
|
+ .buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto parseFrom(java.io.InputStream input)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ return newBuilder().mergeFrom(input).buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto parseFrom(
|
|
|
|
+ java.io.InputStream input,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ return newBuilder().mergeFrom(input, extensionRegistry)
|
|
|
|
+ .buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto parseDelimitedFrom(java.io.InputStream input)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ Builder builder = newBuilder();
|
|
|
|
+ if (builder.mergeDelimitedFrom(input)) {
|
|
|
|
+ return builder.buildParsed();
|
|
|
|
+ } else {
|
|
|
|
+ return null;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto parseDelimitedFrom(
|
|
|
|
+ java.io.InputStream input,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ Builder builder = newBuilder();
|
|
|
|
+ if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
|
|
|
|
+ return builder.buildParsed();
|
|
|
|
+ } else {
|
|
|
|
+ return null;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto parseFrom(
|
|
|
|
+ com.google.protobuf.CodedInputStream input)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ return newBuilder().mergeFrom(input).buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto parseFrom(
|
|
|
|
+ com.google.protobuf.CodedInputStream input,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ return newBuilder().mergeFrom(input, extensionRegistry)
|
|
|
|
+ .buildParsed();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public static Builder newBuilder() { return Builder.create(); }
|
|
|
|
+ public Builder newBuilderForType() { return newBuilder(); }
|
|
|
|
+ public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto prototype) {
|
|
|
|
+ return newBuilder().mergeFrom(prototype);
|
|
|
|
+ }
|
|
|
|
+ public Builder toBuilder() { return newBuilder(this); }
|
|
|
|
+
|
|
|
|
+ @java.lang.Override
|
|
|
|
+ protected Builder newBuilderForType(
|
|
|
|
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
|
|
|
|
+ Builder builder = new Builder(parent);
|
|
|
|
+ return builder;
|
|
|
|
+ }
|
|
|
|
+ public static final class Builder extends
|
|
|
|
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
|
|
|
|
+ implements org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProtoOrBuilder {
|
|
|
|
+ public static final com.google.protobuf.Descriptors.Descriptor
|
|
|
|
+ getDescriptor() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_ReportBadBlocksRequestProto_descriptor;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
|
|
|
+ internalGetFieldAccessorTable() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_ReportBadBlocksRequestProto_fieldAccessorTable;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ // Construct using org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto.newBuilder()
|
|
|
|
+ private Builder() {
|
|
|
|
+ maybeForceBuilderInitialization();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private Builder(BuilderParent parent) {
|
|
|
|
+ super(parent);
|
|
|
|
+ maybeForceBuilderInitialization();
|
|
|
|
+ }
|
|
|
|
+ private void maybeForceBuilderInitialization() {
|
|
|
|
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
|
|
|
|
+ getBlocksFieldBuilder();
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ private static Builder create() {
|
|
|
|
+ return new Builder();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public Builder clear() {
|
|
|
|
+ super.clear();
|
|
|
|
+ if (blocksBuilder_ == null) {
|
|
|
|
+ blocks_ = java.util.Collections.emptyList();
|
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000001);
|
|
|
|
+ } else {
|
|
|
|
+ blocksBuilder_.clear();
|
|
|
|
+ }
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public Builder clone() {
|
|
|
|
+ return create().mergeFrom(buildPartial());
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public com.google.protobuf.Descriptors.Descriptor
|
|
|
|
+ getDescriptorForType() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto.getDescriptor();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto getDefaultInstanceForType() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto.getDefaultInstance();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto build() {
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto result = buildPartial();
|
|
|
|
+ if (!result.isInitialized()) {
|
|
|
|
+ throw newUninitializedMessageException(result);
|
|
|
|
+ }
|
|
|
|
+ return result;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto buildParsed()
|
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto result = buildPartial();
|
|
|
|
+ if (!result.isInitialized()) {
|
|
|
|
+ throw newUninitializedMessageException(
|
|
|
|
+ result).asInvalidProtocolBufferException();
|
|
|
|
+ }
|
|
|
|
+ return result;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto buildPartial() {
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto(this);
|
|
|
|
+ int from_bitField0_ = bitField0_;
|
|
|
|
+ if (blocksBuilder_ == null) {
|
|
|
|
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
|
+ blocks_ = java.util.Collections.unmodifiableList(blocks_);
|
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000001);
|
|
|
|
+ }
|
|
|
|
+ result.blocks_ = blocks_;
|
|
|
|
+ } else {
|
|
|
|
+ result.blocks_ = blocksBuilder_.build();
|
|
|
|
+ }
|
|
|
|
+ onBuilt();
|
|
|
|
+ return result;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public Builder mergeFrom(com.google.protobuf.Message other) {
|
|
|
|
+ if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto) {
|
|
|
|
+ return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto)other);
|
|
|
|
+ } else {
|
|
|
|
+ super.mergeFrom(other);
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto other) {
|
|
|
|
+ if (other == org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto.getDefaultInstance()) return this;
|
|
|
|
+ if (blocksBuilder_ == null) {
|
|
|
|
+ if (!other.blocks_.isEmpty()) {
|
|
|
|
+ if (blocks_.isEmpty()) {
|
|
|
|
+ blocks_ = other.blocks_;
|
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000001);
|
|
|
|
+ } else {
|
|
|
|
+ ensureBlocksIsMutable();
|
|
|
|
+ blocks_.addAll(other.blocks_);
|
|
|
|
+ }
|
|
|
|
+ onChanged();
|
|
|
|
+ }
|
|
|
|
+ } else {
|
|
|
|
+ if (!other.blocks_.isEmpty()) {
|
|
|
|
+ if (blocksBuilder_.isEmpty()) {
|
|
|
|
+ blocksBuilder_.dispose();
|
|
|
|
+ blocksBuilder_ = null;
|
|
|
|
+ blocks_ = other.blocks_;
|
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000001);
|
|
|
|
+ blocksBuilder_ =
|
|
|
|
+ com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
|
|
|
|
+ getBlocksFieldBuilder() : null;
|
|
|
|
+ } else {
|
|
|
|
+ blocksBuilder_.addAllMessages(other.blocks_);
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ this.mergeUnknownFields(other.getUnknownFields());
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public final boolean isInitialized() {
|
|
|
|
+ for (int i = 0; i < getBlocksCount(); i++) {
|
|
|
|
+ if (!getBlocks(i).isInitialized()) {
|
|
|
|
+
|
|
|
|
+ return false;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ return true;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public Builder mergeFrom(
|
|
|
|
+ com.google.protobuf.CodedInputStream input,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
|
|
|
|
+ com.google.protobuf.UnknownFieldSet.newBuilder(
|
|
|
|
+ this.getUnknownFields());
|
|
|
|
+ while (true) {
|
|
|
|
+ int tag = input.readTag();
|
|
|
|
+ switch (tag) {
|
|
|
|
+ case 0:
|
|
|
|
+ this.setUnknownFields(unknownFields.build());
|
|
|
|
+ onChanged();
|
|
|
|
+ return this;
|
|
|
|
+ default: {
|
|
|
|
+ if (!parseUnknownField(input, unknownFields,
|
|
|
|
+ extensionRegistry, tag)) {
|
|
|
|
+ this.setUnknownFields(unknownFields.build());
|
|
|
|
+ onChanged();
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ break;
|
|
|
|
+ }
|
|
|
|
+ case 10: {
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.newBuilder();
|
|
|
|
+ input.readMessage(subBuilder, extensionRegistry);
|
|
|
|
+ addBlocks(subBuilder.buildPartial());
|
|
|
|
+ break;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private int bitField0_;
|
|
|
|
+
|
|
|
|
+ // repeated .LocatedBlockProto blocks = 1;
|
|
|
|
+ private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto> blocks_ =
|
|
|
|
+ java.util.Collections.emptyList();
|
|
|
|
+ private void ensureBlocksIsMutable() {
|
|
|
|
+ if (!((bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
|
+ blocks_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto>(blocks_);
|
|
|
|
+ bitField0_ |= 0x00000001;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private com.google.protobuf.RepeatedFieldBuilder<
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder> blocksBuilder_;
|
|
|
|
+
|
|
|
|
+ public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto> getBlocksList() {
|
|
|
|
+ if (blocksBuilder_ == null) {
|
|
|
|
+ return java.util.Collections.unmodifiableList(blocks_);
|
|
|
|
+ } else {
|
|
|
|
+ return blocksBuilder_.getMessageList();
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ public int getBlocksCount() {
|
|
|
|
+ if (blocksBuilder_ == null) {
|
|
|
|
+ return blocks_.size();
|
|
|
|
+ } else {
|
|
|
|
+ return blocksBuilder_.getCount();
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto getBlocks(int index) {
|
|
|
|
+ if (blocksBuilder_ == null) {
|
|
|
|
+ return blocks_.get(index);
|
|
|
|
+ } else {
|
|
|
|
+ return blocksBuilder_.getMessage(index);
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ public Builder setBlocks(
|
|
|
|
+ int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto value) {
|
|
|
|
+ if (blocksBuilder_ == null) {
|
|
|
|
+ if (value == null) {
|
|
|
|
+ throw new NullPointerException();
|
|
|
|
+ }
|
|
|
|
+ ensureBlocksIsMutable();
|
|
|
|
+ blocks_.set(index, value);
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ blocksBuilder_.setMessage(index, value);
|
|
|
|
+ }
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder setBlocks(
|
|
|
|
+ int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder builderForValue) {
|
|
|
|
+ if (blocksBuilder_ == null) {
|
|
|
|
+ ensureBlocksIsMutable();
|
|
|
|
+ blocks_.set(index, builderForValue.build());
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ blocksBuilder_.setMessage(index, builderForValue.build());
|
|
|
|
+ }
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder addBlocks(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto value) {
|
|
|
|
+ if (blocksBuilder_ == null) {
|
|
|
|
+ if (value == null) {
|
|
|
|
+ throw new NullPointerException();
|
|
|
|
+ }
|
|
|
|
+ ensureBlocksIsMutable();
|
|
|
|
+ blocks_.add(value);
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ blocksBuilder_.addMessage(value);
|
|
|
|
+ }
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder addBlocks(
|
|
|
|
+ int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto value) {
|
|
|
|
+ if (blocksBuilder_ == null) {
|
|
|
|
+ if (value == null) {
|
|
|
|
+ throw new NullPointerException();
|
|
|
|
+ }
|
|
|
|
+ ensureBlocksIsMutable();
|
|
|
|
+ blocks_.add(index, value);
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ blocksBuilder_.addMessage(index, value);
|
|
|
|
+ }
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder addBlocks(
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder builderForValue) {
|
|
|
|
+ if (blocksBuilder_ == null) {
|
|
|
|
+ ensureBlocksIsMutable();
|
|
|
|
+ blocks_.add(builderForValue.build());
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ blocksBuilder_.addMessage(builderForValue.build());
|
|
|
|
+ }
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder addBlocks(
|
|
|
|
+ int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder builderForValue) {
|
|
|
|
+ if (blocksBuilder_ == null) {
|
|
|
|
+ ensureBlocksIsMutable();
|
|
|
|
+ blocks_.add(index, builderForValue.build());
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ blocksBuilder_.addMessage(index, builderForValue.build());
|
|
|
|
+ }
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder addAllBlocks(
|
|
|
|
+ java.lang.Iterable<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto> values) {
|
|
|
|
+ if (blocksBuilder_ == null) {
|
|
|
|
+ ensureBlocksIsMutable();
|
|
|
|
+ super.addAll(values, blocks_);
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ blocksBuilder_.addAllMessages(values);
|
|
|
|
+ }
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder clearBlocks() {
|
|
|
|
+ if (blocksBuilder_ == null) {
|
|
|
|
+ blocks_ = java.util.Collections.emptyList();
|
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000001);
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ blocksBuilder_.clear();
|
|
|
|
+ }
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder removeBlocks(int index) {
|
|
|
|
+ if (blocksBuilder_ == null) {
|
|
|
|
+ ensureBlocksIsMutable();
|
|
|
|
+ blocks_.remove(index);
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ blocksBuilder_.remove(index);
|
|
|
|
+ }
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder getBlocksBuilder(
|
|
|
|
+ int index) {
|
|
|
|
+ return getBlocksFieldBuilder().getBuilder(index);
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder getBlocksOrBuilder(
|
|
|
|
+ int index) {
|
|
|
|
+ if (blocksBuilder_ == null) {
|
|
|
|
+ return blocks_.get(index);
+ } else {
|
|
|
|
+ return blocksBuilder_.getMessageOrBuilder(index);
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder>
|
|
|
|
+ getBlocksOrBuilderList() {
|
|
|
|
+ if (blocksBuilder_ != null) {
|
|
|
|
+ return blocksBuilder_.getMessageOrBuilderList();
|
|
|
|
+ } else {
|
|
|
|
+ return java.util.Collections.unmodifiableList(blocks_);
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder addBlocksBuilder() {
|
|
|
|
+ return getBlocksFieldBuilder().addBuilder(
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.getDefaultInstance());
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder addBlocksBuilder(
|
|
|
|
+ int index) {
|
|
|
|
+ return getBlocksFieldBuilder().addBuilder(
|
|
|
|
+ index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.getDefaultInstance());
|
|
|
|
+ }
|
|
|
|
+ public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder>
|
|
|
|
+ getBlocksBuilderList() {
|
|
|
|
+ return getBlocksFieldBuilder().getBuilderList();
|
|
|
|
+ }
|
|
|
|
+ private com.google.protobuf.RepeatedFieldBuilder<
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder>
|
|
|
|
+ getBlocksFieldBuilder() {
|
|
|
|
+ if (blocksBuilder_ == null) {
|
|
|
|
+ blocksBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder>(
|
|
|
|
+ blocks_,
|
|
|
|
+ ((bitField0_ & 0x00000001) == 0x00000001),
|
|
|
|
+ getParentForChildren(),
|
|
|
|
+ isClean());
|
|
|
|
+ blocks_ = null;
|
|
|
|
+ }
|
|
|
|
+ return blocksBuilder_;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ // @@protoc_insertion_point(builder_scope:ReportBadBlocksRequestProto)
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ static {
|
|
|
|
+ defaultInstance = new ReportBadBlocksRequestProto(true);
|
|
|
|
+ defaultInstance.initFields();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ // @@protoc_insertion_point(class_scope:ReportBadBlocksRequestProto)
|
|
|
|
+ }
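+
+ // --- Hand-written illustrative sketch, not part of the generated file: populating the
+ // repeated `blocks` field of ReportBadBlocksRequestProto. The `badBlocks` argument is
+ // assumed to be assembled elsewhere from HdfsProtos.LocatedBlockProto messages. ---
+ private static ReportBadBlocksRequestProto exampleBadBlocksReport(
+ java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto> badBlocks) {
+ return ReportBadBlocksRequestProto.newBuilder()
+ .addAllBlocks(badBlocks) // repeated field: addBlocks()/addAllBlocks() append entries
+ .build();
+ }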
|
|
|
|
+
|
|
|
|
+ public interface ReportBadBlocksResponseProtoOrBuilder
|
|
|
|
+ extends com.google.protobuf.MessageOrBuilder {
|
|
|
|
+ }
|
|
|
|
+ public static final class ReportBadBlocksResponseProto extends
|
|
|
|
+ com.google.protobuf.GeneratedMessage
|
|
|
|
+ implements ReportBadBlocksResponseProtoOrBuilder {
|
|
|
|
+ // Use ReportBadBlocksResponseProto.newBuilder() to construct.
|
|
|
|
+ private ReportBadBlocksResponseProto(Builder builder) {
|
|
|
|
+ super(builder);
|
|
|
|
+ }
|
|
|
|
+ private ReportBadBlocksResponseProto(boolean noInit) {}
|
|
|
|
+
|
|
|
|
+ private static final ReportBadBlocksResponseProto defaultInstance;
|
|
|
|
+ public static ReportBadBlocksResponseProto getDefaultInstance() {
|
|
|
|
+ return defaultInstance;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public ReportBadBlocksResponseProto getDefaultInstanceForType() {
|
|
|
|
+ return defaultInstance;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public static final com.google.protobuf.Descriptors.Descriptor
|
|
|
|
+ getDescriptor() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_ReportBadBlocksResponseProto_descriptor;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
|
|
|
+ internalGetFieldAccessorTable() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_ReportBadBlocksResponseProto_fieldAccessorTable;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private void initFields() {
|
|
|
|
+ }
|
|
|
|
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
|
|
|
|
+ throws java.io.ObjectStreamException {
|
|
|
|
+ return super.writeReplace();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ @java.lang.Override
|
|
|
|
+ public boolean equals(final java.lang.Object obj) {
|
|
|
|
+ if (obj == this) {
|
|
|
|
+ return true;
|
|
|
|
+ }
|
|
|
|
+ if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto)) {
|
|
|
|
+ return super.equals(obj);
|
|
|
|
+ }
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto) obj;
|
|
|
|
+
|
|
|
|
+ boolean result = true;
|
|
|
|
+ result = result &&
|
|
|
|
+ getUnknownFields().equals(other.getUnknownFields());
|
|
|
|
+ return result;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ @java.lang.Override
|
|
|
|
+ public int hashCode() {
|
|
|
|
+ int hash = 41;
|
|
|
|
+ hash = (19 * hash) + getDescriptorForType().hashCode();
|
|
|
|
+ hash = (29 * hash) + getUnknownFields().hashCode();
|
|
|
|
+ return hash;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto parseFrom(
|
|
|
|
+ com.google.protobuf.ByteString data)
|
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
|
+ return newBuilder().mergeFrom(data).buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto parseFrom(
|
|
|
|
+ com.google.protobuf.ByteString data,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
|
+ return newBuilder().mergeFrom(data, extensionRegistry)
|
|
|
|
+ .buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto parseFrom(byte[] data)
|
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
|
+ return newBuilder().mergeFrom(data).buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto parseFrom(
|
|
|
|
+ byte[] data,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
|
+ return newBuilder().mergeFrom(data, extensionRegistry)
|
|
|
|
+ .buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto parseFrom(java.io.InputStream input)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ return newBuilder().mergeFrom(input).buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto parseFrom(
|
|
|
|
+ java.io.InputStream input,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ return newBuilder().mergeFrom(input, extensionRegistry)
|
|
|
|
+ .buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto parseDelimitedFrom(java.io.InputStream input)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ Builder builder = newBuilder();
|
|
|
|
+ if (builder.mergeDelimitedFrom(input)) {
|
|
|
|
+ return builder.buildParsed();
|
|
|
|
+ } else {
|
|
|
|
+ return null;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto parseDelimitedFrom(
|
|
|
|
+ java.io.InputStream input,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ Builder builder = newBuilder();
|
|
|
|
+ if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
|
|
|
|
+ return builder.buildParsed();
|
|
|
|
+ } else {
|
|
|
|
+ return null;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto parseFrom(
|
|
|
|
+ com.google.protobuf.CodedInputStream input)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ return newBuilder().mergeFrom(input).buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto parseFrom(
|
|
|
|
+ com.google.protobuf.CodedInputStream input,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ return newBuilder().mergeFrom(input, extensionRegistry)
|
|
|
|
+ .buildParsed();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public static Builder newBuilder() { return Builder.create(); }
|
|
|
|
+ public Builder newBuilderForType() { return newBuilder(); }
|
|
|
|
+ public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto prototype) {
|
|
|
|
+ return newBuilder().mergeFrom(prototype);
|
|
|
|
+ }
|
|
|
|
+ public Builder toBuilder() { return newBuilder(this); }
|
|
|
|
+
|
|
|
|
+ @java.lang.Override
|
|
|
|
+ protected Builder newBuilderForType(
|
|
|
|
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
|
|
|
|
+ Builder builder = new Builder(parent);
|
|
|
|
+ return builder;
|
|
|
|
+ }
|
|
|
|
+ public static final class Builder extends
|
|
|
|
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
|
|
|
|
+ implements org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProtoOrBuilder {
|
|
|
|
+ public static final com.google.protobuf.Descriptors.Descriptor
|
|
|
|
+ getDescriptor() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_ReportBadBlocksResponseProto_descriptor;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
|
|
|
+ internalGetFieldAccessorTable() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_ReportBadBlocksResponseProto_fieldAccessorTable;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ // Construct using org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto.newBuilder()
|
|
|
|
+ private Builder() {
|
|
|
|
+ maybeForceBuilderInitialization();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private Builder(BuilderParent parent) {
|
|
|
|
+ super(parent);
|
|
|
|
+ maybeForceBuilderInitialization();
|
|
|
|
+ }
|
|
|
|
+ private void maybeForceBuilderInitialization() {
|
|
|
|
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ private static Builder create() {
|
|
|
|
+ return new Builder();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public Builder clear() {
|
|
|
|
+ super.clear();
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public Builder clone() {
|
|
|
|
+ return create().mergeFrom(buildPartial());
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public com.google.protobuf.Descriptors.Descriptor
|
|
|
|
+ getDescriptorForType() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto.getDescriptor();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto getDefaultInstanceForType() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto.getDefaultInstance();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto build() {
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto result = buildPartial();
|
|
|
|
+ if (!result.isInitialized()) {
|
|
|
|
+ throw newUninitializedMessageException(result);
|
|
|
|
+ }
|
|
|
|
+ return result;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto buildParsed()
|
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto result = buildPartial();
|
|
|
|
+ if (!result.isInitialized()) {
|
|
|
|
+ throw newUninitializedMessageException(
|
|
|
|
+ result).asInvalidProtocolBufferException();
|
|
|
|
+ }
|
|
|
|
+ return result;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto buildPartial() {
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto(this);
|
|
|
|
+ onBuilt();
|
|
|
|
+ return result;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public Builder mergeFrom(com.google.protobuf.Message other) {
|
|
|
|
+ if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto) {
|
|
|
|
+ return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto)other);
|
|
|
|
+ } else {
|
|
|
|
+ super.mergeFrom(other);
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto other) {
|
|
|
|
+ if (other == org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto.getDefaultInstance()) return this;
|
|
|
|
+ this.mergeUnknownFields(other.getUnknownFields());
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public final boolean isInitialized() {
|
|
|
|
+ return true;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public Builder mergeFrom(
|
|
|
|
+ com.google.protobuf.CodedInputStream input,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
|
|
|
|
+ com.google.protobuf.UnknownFieldSet.newBuilder(
|
|
|
|
+ this.getUnknownFields());
|
|
|
|
+ while (true) {
|
|
|
|
+ int tag = input.readTag();
|
|
|
|
+ switch (tag) {
|
|
|
|
+ case 0:
|
|
|
|
+ this.setUnknownFields(unknownFields.build());
|
|
|
|
+ onChanged();
|
|
|
|
+ return this;
|
|
|
|
+ default: {
|
|
|
|
+ if (!parseUnknownField(input, unknownFields,
|
|
|
|
+ extensionRegistry, tag)) {
|
|
|
|
+ this.setUnknownFields(unknownFields.build());
|
|
|
|
+ onChanged();
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ break;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+
|
|
|
|
+ // @@protoc_insertion_point(builder_scope:ReportBadBlocksResponseProto)
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ static {
|
|
|
|
+ defaultInstance = new ReportBadBlocksResponseProto(true);
|
|
|
|
+ defaultInstance.initFields();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ // @@protoc_insertion_point(class_scope:ReportBadBlocksResponseProto)
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public interface CommitBlockSynchronizationRequestProtoOrBuilder
|
|
|
|
+ extends com.google.protobuf.MessageOrBuilder {
|
|
|
|
+
|
|
|
|
+ // required .ExtendedBlockProto block = 1;
|
|
|
|
+ boolean hasBlock();
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getBlock();
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder getBlockOrBuilder();
|
|
|
|
+
|
|
|
|
+ // required uint64 newGenStamp = 2;
|
|
|
|
+ boolean hasNewGenStamp();
|
|
|
|
+ long getNewGenStamp();
|
|
|
|
+
|
|
|
|
+ // required uint64 newLength = 3;
|
|
|
|
+ boolean hasNewLength();
|
|
|
|
+ long getNewLength();
|
|
|
|
+
|
|
|
|
+ // required bool closeFile = 4;
|
|
|
|
+ boolean hasCloseFile();
|
|
|
|
+ boolean getCloseFile();
|
|
|
|
+
|
|
|
|
+ // required bool deleteBlock = 5;
|
|
|
|
+ boolean hasDeleteBlock();
|
|
|
|
+ boolean getDeleteBlock();
|
|
|
|
+
|
|
|
|
+ // repeated .DatanodeIDProto newTaragets = 6;
|
|
|
|
+ java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto>
|
|
|
|
+ getNewTaragetsList();
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto getNewTaragets(int index);
|
|
|
|
+ int getNewTaragetsCount();
|
|
|
|
+ java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder>
|
|
|
|
+ getNewTaragetsOrBuilderList();
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder getNewTaragetsOrBuilder(
|
|
|
|
+ int index);
|
|
|
|
+ }
|
|
|
|
+ public static final class CommitBlockSynchronizationRequestProto extends
|
|
|
|
+ com.google.protobuf.GeneratedMessage
|
|
|
|
+ implements CommitBlockSynchronizationRequestProtoOrBuilder {
|
|
|
|
+ // Use CommitBlockSynchronizationRequestProto.newBuilder() to construct.
|
|
|
|
+ private CommitBlockSynchronizationRequestProto(Builder builder) {
|
|
|
|
+ super(builder);
|
|
|
|
+ }
|
|
|
|
+ private CommitBlockSynchronizationRequestProto(boolean noInit) {}
|
|
|
|
+
|
|
|
|
+ private static final CommitBlockSynchronizationRequestProto defaultInstance;
|
|
|
|
+ public static CommitBlockSynchronizationRequestProto getDefaultInstance() {
|
|
|
|
+ return defaultInstance;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public CommitBlockSynchronizationRequestProto getDefaultInstanceForType() {
|
|
|
|
+ return defaultInstance;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public static final com.google.protobuf.Descriptors.Descriptor
|
|
|
|
+ getDescriptor() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_CommitBlockSynchronizationRequestProto_descriptor;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
|
|
|
+ internalGetFieldAccessorTable() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_CommitBlockSynchronizationRequestProto_fieldAccessorTable;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private int bitField0_;
|
|
|
|
+ // required .ExtendedBlockProto block = 1;
|
|
|
|
+ public static final int BLOCK_FIELD_NUMBER = 1;
|
|
|
|
+ private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto block_;
|
|
|
|
+ public boolean hasBlock() {
|
|
|
|
+ return ((bitField0_ & 0x00000001) == 0x00000001);
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getBlock() {
|
|
|
|
+ return block_;
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder getBlockOrBuilder() {
|
|
|
|
+ return block_;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ // required uint64 newGenStamp = 2;
|
|
|
|
+ public static final int NEWGENSTAMP_FIELD_NUMBER = 2;
|
|
|
|
+ private long newGenStamp_;
|
|
|
|
+ public boolean hasNewGenStamp() {
|
|
|
|
+ return ((bitField0_ & 0x00000002) == 0x00000002);
|
|
|
|
+ }
|
|
|
|
+ public long getNewGenStamp() {
|
|
|
|
+ return newGenStamp_;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ // required uint64 newLength = 3;
|
|
|
|
+ public static final int NEWLENGTH_FIELD_NUMBER = 3;
|
|
|
|
+ private long newLength_;
|
|
|
|
+ public boolean hasNewLength() {
|
|
|
|
+ return ((bitField0_ & 0x00000004) == 0x00000004);
|
|
|
|
+ }
|
|
|
|
+ public long getNewLength() {
|
|
|
|
+ return newLength_;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ // required bool closeFile = 4;
|
|
|
|
+ public static final int CLOSEFILE_FIELD_NUMBER = 4;
|
|
|
|
+ private boolean closeFile_;
|
|
|
|
+ public boolean hasCloseFile() {
|
|
|
|
+ return ((bitField0_ & 0x00000008) == 0x00000008);
|
|
|
|
+ }
|
|
|
|
+ public boolean getCloseFile() {
|
|
|
|
+ return closeFile_;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ // required bool deleteBlock = 5;
|
|
|
|
+ public static final int DELETEBLOCK_FIELD_NUMBER = 5;
|
|
|
|
+ private boolean deleteBlock_;
|
|
|
|
+ public boolean hasDeleteBlock() {
|
|
|
|
+ return ((bitField0_ & 0x00000010) == 0x00000010);
|
|
|
|
+ }
|
|
|
|
+ public boolean getDeleteBlock() {
|
|
|
|
+ return deleteBlock_;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ // repeated .DatanodeIDProto newTaragets = 6;
|
|
|
|
+ public static final int NEWTARAGETS_FIELD_NUMBER = 6;
|
|
|
|
+ private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto> newTaragets_;
|
|
|
|
+ public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto> getNewTaragetsList() {
|
|
|
|
+ return newTaragets_;
|
|
|
|
+ }
|
|
|
|
+ public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder>
|
|
|
|
+ getNewTaragetsOrBuilderList() {
|
|
|
|
+ return newTaragets_;
|
|
|
|
+ }
|
|
|
|
+ public int getNewTaragetsCount() {
|
|
|
|
+ return newTaragets_.size();
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto getNewTaragets(int index) {
|
|
|
|
+ return newTaragets_.get(index);
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder getNewTaragetsOrBuilder(
|
|
|
|
+ int index) {
|
|
|
|
+ return newTaragets_.get(index);
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private void initFields() {
|
|
|
|
+ block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance();
|
|
|
|
+ newGenStamp_ = 0L;
|
|
|
|
+ newLength_ = 0L;
|
|
|
|
+ closeFile_ = false;
|
|
|
|
+ deleteBlock_ = false;
|
|
|
|
+ newTaragets_ = java.util.Collections.emptyList();
|
|
|
|
+ }
|
|
|
|
+ private byte memoizedIsInitialized = -1;
|
|
|
|
+ public final boolean isInitialized() {
|
|
|
|
+ byte isInitialized = memoizedIsInitialized;
|
|
|
|
+ if (isInitialized != -1) return isInitialized == 1;
|
|
|
|
+
|
|
|
|
+ if (!hasBlock()) {
|
|
|
|
+ memoizedIsInitialized = 0;
|
|
|
|
+ return false;
|
|
|
|
+ }
|
|
|
|
+ if (!hasNewGenStamp()) {
|
|
|
|
+ memoizedIsInitialized = 0;
|
|
|
|
+ return false;
|
|
|
|
+ }
|
|
|
|
+ if (!hasNewLength()) {
|
|
|
|
+ memoizedIsInitialized = 0;
|
|
|
|
+ return false;
|
|
|
|
+ }
|
|
|
|
+ if (!hasCloseFile()) {
|
|
|
|
+ memoizedIsInitialized = 0;
|
|
|
|
+ return false;
|
|
|
|
+ }
|
|
|
|
+ if (!hasDeleteBlock()) {
|
|
|
|
+ memoizedIsInitialized = 0;
|
|
|
|
+ return false;
|
|
|
|
+ }
|
|
|
|
+ if (!getBlock().isInitialized()) {
|
|
|
|
+ memoizedIsInitialized = 0;
|
|
|
|
+ return false;
|
|
|
|
+ }
|
|
|
|
+ for (int i = 0; i < getNewTaragetsCount(); i++) {
|
|
|
|
+ if (!getNewTaragets(i).isInitialized()) {
|
|
|
|
+ memoizedIsInitialized = 0;
|
|
|
|
+ return false;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ memoizedIsInitialized = 1;
|
|
|
|
+ return true;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ getSerializedSize();
|
|
|
|
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
|
+ output.writeMessage(1, block_);
|
|
|
|
+ }
|
|
|
|
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
|
|
|
|
+ output.writeUInt64(2, newGenStamp_);
|
|
|
|
+ }
|
|
|
|
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
|
|
|
|
+ output.writeUInt64(3, newLength_);
|
|
|
|
+ }
|
|
|
|
+ if (((bitField0_ & 0x00000008) == 0x00000008)) {
|
|
|
|
+ output.writeBool(4, closeFile_);
|
|
|
|
+ }
|
|
|
|
+ if (((bitField0_ & 0x00000010) == 0x00000010)) {
|
|
|
|
+ output.writeBool(5, deleteBlock_);
|
|
|
|
+ }
|
|
|
|
+ for (int i = 0; i < newTaragets_.size(); i++) {
|
|
|
|
+ output.writeMessage(6, newTaragets_.get(i));
|
|
|
|
+ }
|
|
|
|
+ getUnknownFields().writeTo(output);
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private int memoizedSerializedSize = -1;
|
|
|
|
+ public int getSerializedSize() {
|
|
|
|
+ int size = memoizedSerializedSize;
|
|
|
|
+ if (size != -1) return size;
|
|
|
|
+
|
|
|
|
+ size = 0;
|
|
|
|
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
|
+ size += com.google.protobuf.CodedOutputStream
|
|
|
|
+ .computeMessageSize(1, block_);
|
|
|
|
+ }
|
|
|
|
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
|
|
|
|
+ size += com.google.protobuf.CodedOutputStream
|
|
|
|
+ .computeUInt64Size(2, newGenStamp_);
|
|
|
|
+ }
|
|
|
|
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
|
|
|
|
+ size += com.google.protobuf.CodedOutputStream
|
|
|
|
+ .computeUInt64Size(3, newLength_);
|
|
|
|
+ }
|
|
|
|
+ if (((bitField0_ & 0x00000008) == 0x00000008)) {
|
|
|
|
+ size += com.google.protobuf.CodedOutputStream
|
|
|
|
+ .computeBoolSize(4, closeFile_);
|
|
|
|
+ }
|
|
|
|
+ if (((bitField0_ & 0x00000010) == 0x00000010)) {
|
|
|
|
+ size += com.google.protobuf.CodedOutputStream
|
|
|
|
+ .computeBoolSize(5, deleteBlock_);
|
|
|
|
+ }
|
|
|
|
+ for (int i = 0; i < newTaragets_.size(); i++) {
|
|
|
|
+ size += com.google.protobuf.CodedOutputStream
|
|
|
|
+ .computeMessageSize(6, newTaragets_.get(i));
|
|
|
|
+ }
|
|
|
|
+ size += getUnknownFields().getSerializedSize();
|
|
|
|
+ memoizedSerializedSize = size;
|
|
|
|
+ return size;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private static final long serialVersionUID = 0L;
|
|
|
|
+ @java.lang.Override
|
|
|
|
+ protected java.lang.Object writeReplace()
|
|
|
|
+ throws java.io.ObjectStreamException {
|
|
|
|
+ return super.writeReplace();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ @java.lang.Override
|
|
|
|
+ public boolean equals(final java.lang.Object obj) {
|
|
|
|
+ if (obj == this) {
|
|
|
|
+ return true;
|
|
|
|
+ }
|
|
|
|
+ if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto)) {
|
|
|
|
+ return super.equals(obj);
|
|
|
|
+ }
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto) obj;
|
|
|
|
+
|
|
|
|
+ boolean result = true;
|
|
|
|
+ result = result && (hasBlock() == other.hasBlock());
|
|
|
|
+ if (hasBlock()) {
|
|
|
|
+ result = result && getBlock()
|
|
|
|
+ .equals(other.getBlock());
|
|
|
|
+ }
|
|
|
|
+ result = result && (hasNewGenStamp() == other.hasNewGenStamp());
|
|
|
|
+ if (hasNewGenStamp()) {
|
|
|
|
+ result = result && (getNewGenStamp()
|
|
|
|
+ == other.getNewGenStamp());
|
|
|
|
+ }
|
|
|
|
+ result = result && (hasNewLength() == other.hasNewLength());
|
|
|
|
+ if (hasNewLength()) {
|
|
|
|
+ result = result && (getNewLength()
|
|
|
|
+ == other.getNewLength());
|
|
|
|
+ }
|
|
|
|
+ result = result && (hasCloseFile() == other.hasCloseFile());
|
|
|
|
+ if (hasCloseFile()) {
|
|
|
|
+ result = result && (getCloseFile()
|
|
|
|
+ == other.getCloseFile());
|
|
|
|
+ }
|
|
|
|
+ result = result && (hasDeleteBlock() == other.hasDeleteBlock());
|
|
|
|
+ if (hasDeleteBlock()) {
|
|
|
|
+ result = result && (getDeleteBlock()
|
|
|
|
+ == other.getDeleteBlock());
|
|
|
|
+ }
|
|
|
|
+ result = result && getNewTaragetsList()
|
|
|
|
+ .equals(other.getNewTaragetsList());
|
|
|
|
+ result = result &&
|
|
|
|
+ getUnknownFields().equals(other.getUnknownFields());
|
|
|
|
+ return result;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ @java.lang.Override
|
|
|
|
+ public int hashCode() {
|
|
|
|
+ int hash = 41;
|
|
|
|
+ hash = (19 * hash) + getDescriptorForType().hashCode();
|
|
|
|
+ if (hasBlock()) {
|
|
|
|
+ hash = (37 * hash) + BLOCK_FIELD_NUMBER;
|
|
|
|
+ hash = (53 * hash) + getBlock().hashCode();
|
|
|
|
+ }
|
|
|
|
+ if (hasNewGenStamp()) {
|
|
|
|
+ hash = (37 * hash) + NEWGENSTAMP_FIELD_NUMBER;
|
|
|
|
+ hash = (53 * hash) + hashLong(getNewGenStamp());
|
|
|
|
+ }
|
|
|
|
+ if (hasNewLength()) {
|
|
|
|
+ hash = (37 * hash) + NEWLENGTH_FIELD_NUMBER;
|
|
|
|
+ hash = (53 * hash) + hashLong(getNewLength());
|
|
|
|
+ }
|
|
|
|
+ if (hasCloseFile()) {
|
|
|
|
+ hash = (37 * hash) + CLOSEFILE_FIELD_NUMBER;
|
|
|
|
+ hash = (53 * hash) + hashBoolean(getCloseFile());
|
|
|
|
+ }
|
|
|
|
+ if (hasDeleteBlock()) {
|
|
|
|
+ hash = (37 * hash) + DELETEBLOCK_FIELD_NUMBER;
|
|
|
|
+ hash = (53 * hash) + hashBoolean(getDeleteBlock());
|
|
|
|
+ }
|
|
|
|
+ if (getNewTaragetsCount() > 0) {
|
|
|
|
+ hash = (37 * hash) + NEWTARAGETS_FIELD_NUMBER;
|
|
|
|
+ hash = (53 * hash) + getNewTaragetsList().hashCode();
|
|
|
|
+ }
|
|
|
|
+ hash = (29 * hash) + getUnknownFields().hashCode();
|
|
|
|
+ return hash;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto parseFrom(
|
|
|
|
+ com.google.protobuf.ByteString data)
|
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
|
+ return newBuilder().mergeFrom(data).buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto parseFrom(
|
|
|
|
+ com.google.protobuf.ByteString data,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
|
+ return newBuilder().mergeFrom(data, extensionRegistry)
|
|
|
|
+ .buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto parseFrom(byte[] data)
|
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
|
+ return newBuilder().mergeFrom(data).buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto parseFrom(
|
|
|
|
+ byte[] data,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
|
+ return newBuilder().mergeFrom(data, extensionRegistry)
|
|
|
|
+ .buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto parseFrom(java.io.InputStream input)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ return newBuilder().mergeFrom(input).buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto parseFrom(
|
|
|
|
+ java.io.InputStream input,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ return newBuilder().mergeFrom(input, extensionRegistry)
|
|
|
|
+ .buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto parseDelimitedFrom(java.io.InputStream input)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ Builder builder = newBuilder();
|
|
|
|
+ if (builder.mergeDelimitedFrom(input)) {
|
|
|
|
+ return builder.buildParsed();
|
|
|
|
+ } else {
|
|
|
|
+ return null;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto parseDelimitedFrom(
|
|
|
|
+ java.io.InputStream input,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ Builder builder = newBuilder();
|
|
|
|
+ if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
|
|
|
|
+ return builder.buildParsed();
|
|
|
|
+ } else {
|
|
|
|
+ return null;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto parseFrom(
|
|
|
|
+ com.google.protobuf.CodedInputStream input)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ return newBuilder().mergeFrom(input).buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto parseFrom(
|
|
|
|
+ com.google.protobuf.CodedInputStream input,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ return newBuilder().mergeFrom(input, extensionRegistry)
|
|
|
|
+ .buildParsed();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public static Builder newBuilder() { return Builder.create(); }
|
|
|
|
+ public Builder newBuilderForType() { return newBuilder(); }
|
|
|
|
+ public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto prototype) {
|
|
|
|
+ return newBuilder().mergeFrom(prototype);
|
|
|
|
+ }
|
|
|
|
+ public Builder toBuilder() { return newBuilder(this); }
|
|
|
|
+
|
|
|
|
+ @java.lang.Override
|
|
|
|
+ protected Builder newBuilderForType(
|
|
|
|
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
|
|
|
|
+ Builder builder = new Builder(parent);
|
|
|
|
+ return builder;
|
|
|
|
+ }
|
|
|
|
+ public static final class Builder extends
|
|
|
|
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
|
|
|
|
+ implements org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProtoOrBuilder {
|
|
|
|
+ public static final com.google.protobuf.Descriptors.Descriptor
|
|
|
|
+ getDescriptor() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_CommitBlockSynchronizationRequestProto_descriptor;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
|
|
|
+ internalGetFieldAccessorTable() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_CommitBlockSynchronizationRequestProto_fieldAccessorTable;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ // Construct using org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto.newBuilder()
|
|
|
|
+ private Builder() {
|
|
|
|
+ maybeForceBuilderInitialization();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private Builder(BuilderParent parent) {
|
|
|
|
+ super(parent);
|
|
|
|
+ maybeForceBuilderInitialization();
|
|
|
|
+ }
|
|
|
|
+ private void maybeForceBuilderInitialization() {
|
|
|
|
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
|
|
|
|
+ getBlockFieldBuilder();
|
|
|
|
+ getNewTaragetsFieldBuilder();
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ private static Builder create() {
|
|
|
|
+ return new Builder();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public Builder clear() {
|
|
|
|
+ super.clear();
|
|
|
|
+ if (blockBuilder_ == null) {
|
|
|
|
+ block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance();
|
|
|
|
+ } else {
|
|
|
|
+ blockBuilder_.clear();
|
|
|
|
+ }
|
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000001);
|
|
|
|
+ newGenStamp_ = 0L;
|
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000002);
|
|
|
|
+ newLength_ = 0L;
|
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000004);
|
|
|
|
+ closeFile_ = false;
|
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000008);
|
|
|
|
+ deleteBlock_ = false;
|
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000010);
|
|
|
|
+ if (newTaragetsBuilder_ == null) {
|
|
|
|
+ newTaragets_ = java.util.Collections.emptyList();
|
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000020);
|
|
|
|
+ } else {
|
|
|
|
+ newTaragetsBuilder_.clear();
|
|
|
|
+ }
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public Builder clone() {
|
|
|
|
+ return create().mergeFrom(buildPartial());
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public com.google.protobuf.Descriptors.Descriptor
|
|
|
|
+ getDescriptorForType() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto.getDescriptor();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto getDefaultInstanceForType() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto.getDefaultInstance();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto build() {
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto result = buildPartial();
|
|
|
|
+ if (!result.isInitialized()) {
|
|
|
|
+ throw newUninitializedMessageException(result);
|
|
|
|
+ }
|
|
|
|
+ return result;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto buildParsed()
|
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto result = buildPartial();
|
|
|
|
+ if (!result.isInitialized()) {
|
|
|
|
+ throw newUninitializedMessageException(
|
|
|
|
+ result).asInvalidProtocolBufferException();
|
|
|
|
+ }
|
|
|
|
+ return result;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto buildPartial() {
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto(this);
|
|
|
|
+ int from_bitField0_ = bitField0_;
|
|
|
|
+ int to_bitField0_ = 0;
|
|
|
|
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
|
+ to_bitField0_ |= 0x00000001;
|
|
|
|
+ }
|
|
|
|
+ if (blockBuilder_ == null) {
|
|
|
|
+ result.block_ = block_;
|
|
|
|
+ } else {
|
|
|
|
+ result.block_ = blockBuilder_.build();
|
|
|
|
+ }
|
|
|
|
+ if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
|
|
|
|
+ to_bitField0_ |= 0x00000002;
|
|
|
|
+ }
|
|
|
|
+ result.newGenStamp_ = newGenStamp_;
|
|
|
|
+ if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
|
|
|
|
+ to_bitField0_ |= 0x00000004;
|
|
|
|
+ }
|
|
|
|
+ result.newLength_ = newLength_;
|
|
|
|
+ if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
|
|
|
|
+ to_bitField0_ |= 0x00000008;
|
|
|
|
+ }
|
|
|
|
+ result.closeFile_ = closeFile_;
|
|
|
|
+ if (((from_bitField0_ & 0x00000010) == 0x00000010)) {
|
|
|
|
+ to_bitField0_ |= 0x00000010;
|
|
|
|
+ }
|
|
|
|
+ result.deleteBlock_ = deleteBlock_;
|
|
|
|
+ if (newTaragetsBuilder_ == null) {
|
|
|
|
+ if (((bitField0_ & 0x00000020) == 0x00000020)) {
|
|
|
|
+ newTaragets_ = java.util.Collections.unmodifiableList(newTaragets_);
|
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000020);
|
|
|
|
+ }
|
|
|
|
+ result.newTaragets_ = newTaragets_;
|
|
|
|
+ } else {
|
|
|
|
+ result.newTaragets_ = newTaragetsBuilder_.build();
|
|
|
|
+ }
|
|
|
|
+ result.bitField0_ = to_bitField0_;
|
|
|
|
+ onBuilt();
|
|
|
|
+ return result;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public Builder mergeFrom(com.google.protobuf.Message other) {
|
|
|
|
+ if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto) {
|
|
|
|
+ return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto)other);
|
|
|
|
+ } else {
|
|
|
|
+ super.mergeFrom(other);
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto other) {
|
|
|
|
+ if (other == org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto.getDefaultInstance()) return this;
|
|
|
|
+ if (other.hasBlock()) {
|
|
|
|
+ mergeBlock(other.getBlock());
|
|
|
|
+ }
|
|
|
|
+ if (other.hasNewGenStamp()) {
|
|
|
|
+ setNewGenStamp(other.getNewGenStamp());
|
|
|
|
+ }
|
|
|
|
+ if (other.hasNewLength()) {
|
|
|
|
+ setNewLength(other.getNewLength());
|
|
|
|
+ }
|
|
|
|
+ if (other.hasCloseFile()) {
|
|
|
|
+ setCloseFile(other.getCloseFile());
|
|
|
|
+ }
|
|
|
|
+ if (other.hasDeleteBlock()) {
|
|
|
|
+ setDeleteBlock(other.getDeleteBlock());
|
|
|
|
+ }
|
|
|
|
+ if (newTaragetsBuilder_ == null) {
|
|
|
|
+ if (!other.newTaragets_.isEmpty()) {
|
|
|
|
+ if (newTaragets_.isEmpty()) {
|
|
|
|
+ newTaragets_ = other.newTaragets_;
|
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000020);
|
|
|
|
+ } else {
|
|
|
|
+ ensureNewTaragetsIsMutable();
|
|
|
|
+ newTaragets_.addAll(other.newTaragets_);
|
|
|
|
+ }
|
|
|
|
+ onChanged();
|
|
|
|
+ }
|
|
|
|
+ } else {
|
|
|
|
+ if (!other.newTaragets_.isEmpty()) {
|
|
|
|
+ if (newTaragetsBuilder_.isEmpty()) {
|
|
|
|
+ newTaragetsBuilder_.dispose();
|
|
|
|
+ newTaragetsBuilder_ = null;
|
|
|
|
+ newTaragets_ = other.newTaragets_;
|
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000020);
|
|
|
|
+ newTaragetsBuilder_ =
|
|
|
|
+ com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
|
|
|
|
+ getNewTaragetsFieldBuilder() : null;
|
|
|
|
+ } else {
|
|
|
|
+ newTaragetsBuilder_.addAllMessages(other.newTaragets_);
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ this.mergeUnknownFields(other.getUnknownFields());
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public final boolean isInitialized() {
|
|
|
|
+ if (!hasBlock()) {
|
|
|
|
+
|
|
|
|
+ return false;
|
|
|
|
+ }
|
|
|
|
+ if (!hasNewGenStamp()) {
|
|
|
|
+
|
|
|
|
+ return false;
|
|
|
|
+ }
|
|
|
|
+ if (!hasNewLength()) {
|
|
|
|
+
|
|
|
|
+ return false;
|
|
|
|
+ }
|
|
|
|
+ if (!hasCloseFile()) {
|
|
|
|
+
|
|
|
|
+ return false;
|
|
|
|
+ }
|
|
|
|
+ if (!hasDeleteBlock()) {
|
|
|
|
+
|
|
|
|
+ return false;
|
|
|
|
+ }
|
|
|
|
+ if (!getBlock().isInitialized()) {
|
|
|
|
+
|
|
|
|
+ return false;
|
|
|
|
+ }
|
|
|
|
+ for (int i = 0; i < getNewTaragetsCount(); i++) {
|
|
|
|
+ if (!getNewTaragets(i).isInitialized()) {
|
|
|
|
+
|
|
|
|
+ return false;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ return true;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public Builder mergeFrom(
|
|
|
|
+ com.google.protobuf.CodedInputStream input,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
|
|
|
|
+ com.google.protobuf.UnknownFieldSet.newBuilder(
|
|
|
|
+ this.getUnknownFields());
|
|
|
|
+ while (true) {
|
|
|
|
+ int tag = input.readTag();
|
|
|
|
+ switch (tag) {
|
|
|
|
+ case 0:
|
|
|
|
+ this.setUnknownFields(unknownFields.build());
|
|
|
|
+ onChanged();
|
|
|
|
+ return this;
|
|
|
|
+ default: {
|
|
|
|
+ if (!parseUnknownField(input, unknownFields,
|
|
|
|
+ extensionRegistry, tag)) {
|
|
|
|
+ this.setUnknownFields(unknownFields.build());
|
|
|
|
+ onChanged();
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ break;
|
|
|
|
+ }
|
|
|
|
+ case 10: {
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.newBuilder();
|
|
|
|
+ if (hasBlock()) {
|
|
|
|
+ subBuilder.mergeFrom(getBlock());
|
|
|
|
+ }
|
|
|
|
+ input.readMessage(subBuilder, extensionRegistry);
|
|
|
|
+ setBlock(subBuilder.buildPartial());
|
|
|
|
+ break;
|
|
|
|
+ }
|
|
|
|
+ case 16: {
|
|
|
|
+ bitField0_ |= 0x00000002;
|
|
|
|
+ newGenStamp_ = input.readUInt64();
|
|
|
|
+ break;
|
|
|
|
+ }
|
|
|
|
+ case 24: {
|
|
|
|
+ bitField0_ |= 0x00000004;
|
|
|
|
+ newLength_ = input.readUInt64();
|
|
|
|
+ break;
|
|
|
|
+ }
|
|
|
|
+ case 32: {
|
|
|
|
+ bitField0_ |= 0x00000008;
|
|
|
|
+ closeFile_ = input.readBool();
|
|
|
|
+ break;
|
|
|
|
+ }
|
|
|
|
+ case 40: {
|
|
|
|
+ bitField0_ |= 0x00000010;
|
|
|
|
+ deleteBlock_ = input.readBool();
|
|
|
|
+ break;
|
|
|
|
+ }
|
|
|
|
+ case 50: {
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.newBuilder();
|
|
|
|
+ input.readMessage(subBuilder, extensionRegistry);
|
|
|
|
+ addNewTaragets(subBuilder.buildPartial());
|
|
|
|
+ break;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private int bitField0_;
|
|
|
|
+
|
|
|
|
+ // required .ExtendedBlockProto block = 1;
|
|
|
|
+ private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance();
|
|
|
|
+ private com.google.protobuf.SingleFieldBuilder<
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder> blockBuilder_;
|
|
|
|
+ public boolean hasBlock() {
|
|
|
|
+ return ((bitField0_ & 0x00000001) == 0x00000001);
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getBlock() {
|
|
|
|
+ if (blockBuilder_ == null) {
|
|
|
|
+ return block_;
|
|
|
|
+ } else {
|
|
|
|
+ return blockBuilder_.getMessage();
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ public Builder setBlock(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto value) {
|
|
|
|
+ if (blockBuilder_ == null) {
|
|
|
|
+ if (value == null) {
|
|
|
|
+ throw new NullPointerException();
|
|
|
|
+ }
|
|
|
|
+ block_ = value;
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ blockBuilder_.setMessage(value);
|
|
|
|
+ }
|
|
|
|
+ bitField0_ |= 0x00000001;
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder setBlock(
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder builderForValue) {
|
|
|
|
+ if (blockBuilder_ == null) {
|
|
|
|
+ block_ = builderForValue.build();
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ blockBuilder_.setMessage(builderForValue.build());
|
|
|
|
+ }
|
|
|
|
+ bitField0_ |= 0x00000001;
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder mergeBlock(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto value) {
|
|
|
|
+ if (blockBuilder_ == null) {
|
|
|
|
+ if (((bitField0_ & 0x00000001) == 0x00000001) &&
|
|
|
|
+ block_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance()) {
|
|
|
|
+ block_ =
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.newBuilder(block_).mergeFrom(value).buildPartial();
|
|
|
|
+ } else {
|
|
|
|
+ block_ = value;
|
|
|
|
+ }
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ blockBuilder_.mergeFrom(value);
|
|
|
|
+ }
|
|
|
|
+ bitField0_ |= 0x00000001;
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder clearBlock() {
|
|
|
|
+ if (blockBuilder_ == null) {
|
|
|
|
+ block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance();
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ blockBuilder_.clear();
|
|
|
|
+ }
|
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000001);
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder getBlockBuilder() {
|
|
|
|
+ bitField0_ |= 0x00000001;
|
|
|
|
+ onChanged();
|
|
|
|
+ return getBlockFieldBuilder().getBuilder();
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder getBlockOrBuilder() {
|
|
|
|
+ if (blockBuilder_ != null) {
|
|
|
|
+ return blockBuilder_.getMessageOrBuilder();
|
|
|
|
+ } else {
|
|
|
|
+ return block_;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ private com.google.protobuf.SingleFieldBuilder<
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder>
|
|
|
|
+ getBlockFieldBuilder() {
|
|
|
|
+ if (blockBuilder_ == null) {
|
|
|
|
+ blockBuilder_ = new com.google.protobuf.SingleFieldBuilder<
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder>(
|
|
|
|
+ block_,
|
|
|
|
+ getParentForChildren(),
|
|
|
|
+ isClean());
|
|
|
|
+ block_ = null;
|
|
|
|
+ }
|
|
|
|
+ return blockBuilder_;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ // required uint64 newGenStamp = 2;
|
|
|
|
+ private long newGenStamp_ ;
|
|
|
|
+ public boolean hasNewGenStamp() {
|
|
|
|
+ return ((bitField0_ & 0x00000002) == 0x00000002);
|
|
|
|
+ }
|
|
|
|
+ public long getNewGenStamp() {
|
|
|
|
+ return newGenStamp_;
|
|
|
|
+ }
|
|
|
|
+ public Builder setNewGenStamp(long value) {
|
|
|
|
+ bitField0_ |= 0x00000002;
|
|
|
|
+ newGenStamp_ = value;
|
|
|
|
+ onChanged();
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder clearNewGenStamp() {
|
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000002);
|
|
|
|
+ newGenStamp_ = 0L;
|
|
|
|
+ onChanged();
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ // required uint64 newLength = 3;
|
|
|
|
+ private long newLength_ ;
|
|
|
|
+ public boolean hasNewLength() {
|
|
|
|
+ return ((bitField0_ & 0x00000004) == 0x00000004);
|
|
|
|
+ }
|
|
|
|
+ public long getNewLength() {
|
|
|
|
+ return newLength_;
|
|
|
|
+ }
|
|
|
|
+ public Builder setNewLength(long value) {
|
|
|
|
+ bitField0_ |= 0x00000004;
|
|
|
|
+ newLength_ = value;
|
|
|
|
+ onChanged();
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder clearNewLength() {
|
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000004);
|
|
|
|
+ newLength_ = 0L;
|
|
|
|
+ onChanged();
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ // required bool closeFile = 4;
|
|
|
|
+ private boolean closeFile_ ;
|
|
|
|
+ public boolean hasCloseFile() {
|
|
|
|
+ return ((bitField0_ & 0x00000008) == 0x00000008);
|
|
|
|
+ }
|
|
|
|
+ public boolean getCloseFile() {
|
|
|
|
+ return closeFile_;
|
|
|
|
+ }
|
|
|
|
+ public Builder setCloseFile(boolean value) {
|
|
|
|
+ bitField0_ |= 0x00000008;
|
|
|
|
+ closeFile_ = value;
|
|
|
|
+ onChanged();
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder clearCloseFile() {
|
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000008);
|
|
|
|
+ closeFile_ = false;
|
|
|
|
+ onChanged();
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ // required bool deleteBlock = 5;
|
|
|
|
+ private boolean deleteBlock_ ;
|
|
|
|
+ public boolean hasDeleteBlock() {
|
|
|
|
+ return ((bitField0_ & 0x00000010) == 0x00000010);
|
|
|
|
+ }
|
|
|
|
+ public boolean getDeleteBlock() {
|
|
|
|
+ return deleteBlock_;
|
|
|
|
+ }
|
|
|
|
+ public Builder setDeleteBlock(boolean value) {
|
|
|
|
+ bitField0_ |= 0x00000010;
|
|
|
|
+ deleteBlock_ = value;
|
|
|
|
+ onChanged();
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder clearDeleteBlock() {
|
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000010);
|
|
|
|
+ deleteBlock_ = false;
|
|
|
|
+ onChanged();
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ // repeated .DatanodeIDProto newTaragets = 6;
|
|
|
|
+ private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto> newTaragets_ =
|
|
|
|
+ java.util.Collections.emptyList();
|
|
|
|
+ private void ensureNewTaragetsIsMutable() {
|
|
|
|
+ if (!((bitField0_ & 0x00000020) == 0x00000020)) {
|
|
|
|
+ newTaragets_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto>(newTaragets_);
|
|
|
|
+ bitField0_ |= 0x00000020;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private com.google.protobuf.RepeatedFieldBuilder<
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder> newTaragetsBuilder_;
|
|
|
|
+
|
|
|
|
+ public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto> getNewTaragetsList() {
|
|
|
|
+ if (newTaragetsBuilder_ == null) {
|
|
|
|
+ return java.util.Collections.unmodifiableList(newTaragets_);
|
|
|
|
+ } else {
|
|
|
|
+ return newTaragetsBuilder_.getMessageList();
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ public int getNewTaragetsCount() {
|
|
|
|
+ if (newTaragetsBuilder_ == null) {
|
|
|
|
+ return newTaragets_.size();
|
|
|
|
+ } else {
|
|
|
|
+ return newTaragetsBuilder_.getCount();
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto getNewTaragets(int index) {
|
|
|
|
+ if (newTaragetsBuilder_ == null) {
|
|
|
|
+ return newTaragets_.get(index);
|
|
|
|
+ } else {
|
|
|
|
+ return newTaragetsBuilder_.getMessage(index);
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ public Builder setNewTaragets(
|
|
|
|
+ int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto value) {
|
|
|
|
+ if (newTaragetsBuilder_ == null) {
|
|
|
|
+ if (value == null) {
|
|
|
|
+ throw new NullPointerException();
|
|
|
|
+ }
|
|
|
|
+ ensureNewTaragetsIsMutable();
|
|
|
|
+ newTaragets_.set(index, value);
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ newTaragetsBuilder_.setMessage(index, value);
|
|
|
|
+ }
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder setNewTaragets(
|
|
|
|
+ int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder builderForValue) {
|
|
|
|
+ if (newTaragetsBuilder_ == null) {
|
|
|
|
+ ensureNewTaragetsIsMutable();
|
|
|
|
+ newTaragets_.set(index, builderForValue.build());
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ newTaragetsBuilder_.setMessage(index, builderForValue.build());
|
|
|
|
+ }
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder addNewTaragets(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto value) {
|
|
|
|
+ if (newTaragetsBuilder_ == null) {
|
|
|
|
+ if (value == null) {
|
|
|
|
+ throw new NullPointerException();
|
|
|
|
+ }
|
|
|
|
+ ensureNewTaragetsIsMutable();
|
|
|
|
+ newTaragets_.add(value);
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ newTaragetsBuilder_.addMessage(value);
|
|
|
|
+ }
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder addNewTaragets(
|
|
|
|
+ int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto value) {
|
|
|
|
+ if (newTaragetsBuilder_ == null) {
|
|
|
|
+ if (value == null) {
|
|
|
|
+ throw new NullPointerException();
|
|
|
|
+ }
|
|
|
|
+ ensureNewTaragetsIsMutable();
|
|
|
|
+ newTaragets_.add(index, value);
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ newTaragetsBuilder_.addMessage(index, value);
|
|
|
|
+ }
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder addNewTaragets(
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder builderForValue) {
|
|
|
|
+ if (newTaragetsBuilder_ == null) {
|
|
|
|
+ ensureNewTaragetsIsMutable();
|
|
|
|
+ newTaragets_.add(builderForValue.build());
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ newTaragetsBuilder_.addMessage(builderForValue.build());
|
|
|
|
+ }
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder addNewTaragets(
|
|
|
|
+ int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder builderForValue) {
|
|
|
|
+ if (newTaragetsBuilder_ == null) {
|
|
|
|
+ ensureNewTaragetsIsMutable();
|
|
|
|
+ newTaragets_.add(index, builderForValue.build());
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ newTaragetsBuilder_.addMessage(index, builderForValue.build());
|
|
|
|
+ }
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder addAllNewTaragets(
|
|
|
|
+ java.lang.Iterable<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto> values) {
|
|
|
|
+ if (newTaragetsBuilder_ == null) {
|
|
|
|
+ ensureNewTaragetsIsMutable();
|
|
|
|
+ super.addAll(values, newTaragets_);
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ newTaragetsBuilder_.addAllMessages(values);
|
|
|
|
+ }
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder clearNewTaragets() {
|
|
|
|
+ if (newTaragetsBuilder_ == null) {
|
|
|
|
+ newTaragets_ = java.util.Collections.emptyList();
|
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000020);
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ newTaragetsBuilder_.clear();
|
|
|
|
+ }
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public Builder removeNewTaragets(int index) {
|
|
|
|
+ if (newTaragetsBuilder_ == null) {
|
|
|
|
+ ensureNewTaragetsIsMutable();
|
|
|
|
+ newTaragets_.remove(index);
|
|
|
|
+ onChanged();
|
|
|
|
+ } else {
|
|
|
|
+ newTaragetsBuilder_.remove(index);
|
|
|
|
+ }
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder getNewTaragetsBuilder(
|
|
|
|
+ int index) {
|
|
|
|
+ return getNewTaragetsFieldBuilder().getBuilder(index);
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder getNewTaragetsOrBuilder(
|
|
|
|
+ int index) {
|
|
|
|
+ if (newTaragetsBuilder_ == null) {
|
|
|
|
+ return newTaragets_.get(index); } else {
|
|
|
|
+ return newTaragetsBuilder_.getMessageOrBuilder(index);
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder>
|
|
|
|
+ getNewTaragetsOrBuilderList() {
|
|
|
|
+ if (newTaragetsBuilder_ != null) {
|
|
|
|
+ return newTaragetsBuilder_.getMessageOrBuilderList();
|
|
|
|
+ } else {
|
|
|
|
+ return java.util.Collections.unmodifiableList(newTaragets_);
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder addNewTaragetsBuilder() {
|
|
|
|
+ return getNewTaragetsFieldBuilder().addBuilder(
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.getDefaultInstance());
|
|
|
|
+ }
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder addNewTaragetsBuilder(
|
|
|
|
+ int index) {
|
|
|
|
+ return getNewTaragetsFieldBuilder().addBuilder(
|
|
|
|
+ index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.getDefaultInstance());
|
|
|
|
+ }
|
|
|
|
+ public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder>
|
|
|
|
+ getNewTaragetsBuilderList() {
|
|
|
|
+ return getNewTaragetsFieldBuilder().getBuilderList();
|
|
|
|
+ }
|
|
|
|
+ private com.google.protobuf.RepeatedFieldBuilder<
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder>
|
|
|
|
+ getNewTaragetsFieldBuilder() {
|
|
|
|
+ if (newTaragetsBuilder_ == null) {
|
|
|
|
+ newTaragetsBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder>(
|
|
|
|
+ newTaragets_,
|
|
|
|
+ ((bitField0_ & 0x00000020) == 0x00000020),
|
|
|
|
+ getParentForChildren(),
|
|
|
|
+ isClean());
|
|
|
|
+ newTaragets_ = null;
|
|
|
|
+ }
|
|
|
|
+ return newTaragetsBuilder_;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ // @@protoc_insertion_point(builder_scope:CommitBlockSynchronizationRequestProto)
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ static {
|
|
|
|
+ defaultInstance = new CommitBlockSynchronizationRequestProto(true);
|
|
|
|
+ defaultInstance.initFields();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ // @@protoc_insertion_point(class_scope:CommitBlockSynchronizationRequestProto)
|
|
|
|
+ }
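+
+  /*
+   * Illustrative usage sketch (added for clarity; not part of the generated
+   * protobuf output). It shows how a caller might assemble a
+   * CommitBlockSynchronizationRequestProto with the generated Builder above.
+   * The 'block' and 'target' values are assumed to be fully initialized
+   * ExtendedBlockProto / DatanodeIDProto messages obtained elsewhere;
+   * build() throws an uninitialized-message exception if any required field
+   * (block, newGenStamp, newLength, closeFile, deleteBlock) is left unset.
+   *
+   *   CommitBlockSynchronizationRequestProto req =
+   *       CommitBlockSynchronizationRequestProto.newBuilder()
+   *           .setBlock(block)           // required .ExtendedBlockProto
+   *           .setNewGenStamp(1001L)     // required uint64
+   *           .setNewLength(134217728L)  // required uint64
+   *           .setCloseFile(true)        // required bool
+   *           .setDeleteBlock(false)     // required bool
+   *           .addNewTaragets(target)    // repeated .DatanodeIDProto
+   *           .build();
+   */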
+
+ public interface CommitBlockSynchronizationResponseProtoOrBuilder
|
|
|
|
+ extends com.google.protobuf.MessageOrBuilder {
|
|
|
|
+ }
|
|
|
|
+ public static final class CommitBlockSynchronizationResponseProto extends
|
|
|
|
+ com.google.protobuf.GeneratedMessage
|
|
|
|
+ implements CommitBlockSynchronizationResponseProtoOrBuilder {
|
|
|
|
+ // Use CommitBlockSynchronizationResponseProto.newBuilder() to construct.
|
|
|
|
+ private CommitBlockSynchronizationResponseProto(Builder builder) {
|
|
|
|
+ super(builder);
|
|
|
|
+ }
|
|
|
|
+ private CommitBlockSynchronizationResponseProto(boolean noInit) {}
|
|
|
|
+
|
|
|
|
+ private static final CommitBlockSynchronizationResponseProto defaultInstance;
|
|
|
|
+ public static CommitBlockSynchronizationResponseProto getDefaultInstance() {
|
|
|
|
+ return defaultInstance;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public CommitBlockSynchronizationResponseProto getDefaultInstanceForType() {
|
|
|
|
+ return defaultInstance;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public static final com.google.protobuf.Descriptors.Descriptor
|
|
|
|
+ getDescriptor() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_CommitBlockSynchronizationResponseProto_descriptor;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
|
|
|
+ internalGetFieldAccessorTable() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_CommitBlockSynchronizationResponseProto_fieldAccessorTable;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private void initFields() {
|
|
|
|
+ }
|
|
|
|
+ private byte memoizedIsInitialized = -1;
|
|
|
|
+ public final boolean isInitialized() {
|
|
|
|
+ byte isInitialized = memoizedIsInitialized;
|
|
|
|
+ if (isInitialized != -1) return isInitialized == 1;
|
|
|
|
+
|
|
|
|
+ memoizedIsInitialized = 1;
|
|
|
|
+ return true;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ getSerializedSize();
|
|
|
|
+ getUnknownFields().writeTo(output);
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private int memoizedSerializedSize = -1;
|
|
|
|
+ public int getSerializedSize() {
|
|
|
|
+ int size = memoizedSerializedSize;
|
|
|
|
+ if (size != -1) return size;
|
|
|
|
+
|
|
|
|
+ size = 0;
|
|
|
|
+ size += getUnknownFields().getSerializedSize();
|
|
|
|
+ memoizedSerializedSize = size;
|
|
|
|
+ return size;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private static final long serialVersionUID = 0L;
|
|
|
|
+ @java.lang.Override
|
|
|
|
+ protected java.lang.Object writeReplace()
|
|
|
|
+ throws java.io.ObjectStreamException {
|
|
|
|
+ return super.writeReplace();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ @java.lang.Override
|
|
|
|
+ public boolean equals(final java.lang.Object obj) {
|
|
|
|
+ if (obj == this) {
|
|
|
|
+ return true;
|
|
|
|
+ }
|
|
|
|
+ if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto)) {
|
|
|
|
+ return super.equals(obj);
|
|
|
|
+ }
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto) obj;
|
|
|
|
+
|
|
|
|
+ boolean result = true;
|
|
|
|
+ result = result &&
|
|
|
|
+ getUnknownFields().equals(other.getUnknownFields());
|
|
|
|
+ return result;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ @java.lang.Override
|
|
|
|
+ public int hashCode() {
|
|
|
|
+ int hash = 41;
|
|
|
|
+ hash = (19 * hash) + getDescriptorForType().hashCode();
|
|
|
|
+ hash = (29 * hash) + getUnknownFields().hashCode();
|
|
|
|
+ return hash;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto parseFrom(
|
|
|
|
+ com.google.protobuf.ByteString data)
|
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
|
+ return newBuilder().mergeFrom(data).buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto parseFrom(
|
|
|
|
+ com.google.protobuf.ByteString data,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
|
+ return newBuilder().mergeFrom(data, extensionRegistry)
|
|
|
|
+ .buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto parseFrom(byte[] data)
|
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
|
+ return newBuilder().mergeFrom(data).buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto parseFrom(
|
|
|
|
+ byte[] data,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
|
+ return newBuilder().mergeFrom(data, extensionRegistry)
|
|
|
|
+ .buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto parseFrom(java.io.InputStream input)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ return newBuilder().mergeFrom(input).buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto parseFrom(
|
|
|
|
+ java.io.InputStream input,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ return newBuilder().mergeFrom(input, extensionRegistry)
|
|
|
|
+ .buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto parseDelimitedFrom(java.io.InputStream input)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ Builder builder = newBuilder();
|
|
|
|
+ if (builder.mergeDelimitedFrom(input)) {
|
|
|
|
+ return builder.buildParsed();
|
|
|
|
+ } else {
|
|
|
|
+ return null;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto parseDelimitedFrom(
|
|
|
|
+ java.io.InputStream input,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ Builder builder = newBuilder();
|
|
|
|
+ if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
|
|
|
|
+ return builder.buildParsed();
|
|
|
|
+ } else {
|
|
|
|
+ return null;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto parseFrom(
|
|
|
|
+ com.google.protobuf.CodedInputStream input)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ return newBuilder().mergeFrom(input).buildParsed();
|
|
|
|
+ }
|
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto parseFrom(
|
|
|
|
+ com.google.protobuf.CodedInputStream input,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ return newBuilder().mergeFrom(input, extensionRegistry)
|
|
|
|
+ .buildParsed();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public static Builder newBuilder() { return Builder.create(); }
|
|
|
|
+ public Builder newBuilderForType() { return newBuilder(); }
|
|
|
|
+ public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto prototype) {
|
|
|
|
+ return newBuilder().mergeFrom(prototype);
|
|
|
|
+ }
|
|
|
|
+ public Builder toBuilder() { return newBuilder(this); }
|
|
|
|
+
|
|
|
|
+ @java.lang.Override
|
|
|
|
+ protected Builder newBuilderForType(
|
|
|
|
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
|
|
|
|
+ Builder builder = new Builder(parent);
|
|
|
|
+ return builder;
|
|
|
|
+ }
|
|
|
|
+ public static final class Builder extends
|
|
|
|
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
|
|
|
|
+ implements org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProtoOrBuilder {
|
|
|
|
+ public static final com.google.protobuf.Descriptors.Descriptor
|
|
|
|
+ getDescriptor() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_CommitBlockSynchronizationResponseProto_descriptor;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
|
|
|
+ internalGetFieldAccessorTable() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.internal_static_CommitBlockSynchronizationResponseProto_fieldAccessorTable;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ // Construct using org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto.newBuilder()
|
|
|
|
+ private Builder() {
|
|
|
|
+ maybeForceBuilderInitialization();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private Builder(BuilderParent parent) {
|
|
|
|
+ super(parent);
|
|
|
|
+ maybeForceBuilderInitialization();
|
|
|
|
+ }
|
|
|
|
+ private void maybeForceBuilderInitialization() {
|
|
|
|
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ private static Builder create() {
|
|
|
|
+ return new Builder();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public Builder clear() {
|
|
|
|
+ super.clear();
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public Builder clone() {
|
|
|
|
+ return create().mergeFrom(buildPartial());
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public com.google.protobuf.Descriptors.Descriptor
|
|
|
|
+ getDescriptorForType() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto.getDescriptor();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto getDefaultInstanceForType() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto.getDefaultInstance();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto build() {
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto result = buildPartial();
|
|
|
|
+ if (!result.isInitialized()) {
|
|
|
|
+ throw newUninitializedMessageException(result);
|
|
|
|
+ }
|
|
|
|
+ return result;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ private org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto buildParsed()
|
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto result = buildPartial();
|
|
|
|
+ if (!result.isInitialized()) {
|
|
|
|
+ throw newUninitializedMessageException(
|
|
|
|
+ result).asInvalidProtocolBufferException();
|
|
|
|
+ }
|
|
|
|
+ return result;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto buildPartial() {
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto(this);
|
|
|
|
+ onBuilt();
|
|
|
|
+ return result;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public Builder mergeFrom(com.google.protobuf.Message other) {
|
|
|
|
+ if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto) {
|
|
|
|
+ return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto)other);
|
|
|
|
+ } else {
|
|
|
|
+ super.mergeFrom(other);
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto other) {
|
|
|
|
+ if (other == org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto.getDefaultInstance()) return this;
|
|
|
|
+ this.mergeUnknownFields(other.getUnknownFields());
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public final boolean isInitialized() {
|
|
|
|
+ return true;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public Builder mergeFrom(
|
|
|
|
+ com.google.protobuf.CodedInputStream input,
|
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
|
+ throws java.io.IOException {
|
|
|
|
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
|
|
|
|
+ com.google.protobuf.UnknownFieldSet.newBuilder(
|
|
|
|
+ this.getUnknownFields());
|
|
|
|
+ while (true) {
|
|
|
|
+ int tag = input.readTag();
|
|
|
|
+ switch (tag) {
|
|
|
|
+ case 0:
|
|
|
|
+ this.setUnknownFields(unknownFields.build());
|
|
|
|
+ onChanged();
|
|
|
|
+ return this;
|
|
|
|
+ default: {
|
|
|
|
+ if (!parseUnknownField(input, unknownFields,
|
|
|
|
+ extensionRegistry, tag)) {
|
|
|
|
+ this.setUnknownFields(unknownFields.build());
|
|
|
|
+ onChanged();
|
|
|
|
+ return this;
|
|
|
|
+ }
|
|
|
|
+ break;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ }
+
+
+ // @@protoc_insertion_point(builder_scope:CommitBlockSynchronizationResponseProto)
+ }
+
+ static {
+ defaultInstance = new CommitBlockSynchronizationResponseProto(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:CommitBlockSynchronizationResponseProto)
+ }
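+ // DatanodeProtocolService is the generated protobuf service wrapper for the
+ // datanode-to-namenode RPC interface declared in DatanodeProtocol.proto. It exposes
+ // the same eight methods (registerDatanode, sendHeartbeat, blockReport,
+ // blockReceivedAndDeleted, errorReport, processUpgrade, reportBadBlocks,
+ // commitBlockSynchronization) through several views: an asynchronous callback-based
+ // Interface, reflective adapters that turn an implementation into a Service or
+ // BlockingService for the RPC server side, and client-side Stub / BlockingStub classes.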
+
+ public static abstract class DatanodeProtocolService
+ implements com.google.protobuf.Service {
+ protected DatanodeProtocolService() {}
+
+ public interface Interface {
+ public abstract void registerDatanode(
|
|
|
|
+ com.google.protobuf.RpcController controller,
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto request,
|
|
|
|
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto> done);
|
|
|
|
+
|
|
|
|
+ public abstract void sendHeartbeat(
|
|
|
|
+ com.google.protobuf.RpcController controller,
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto request,
|
|
|
|
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto> done);
|
|
|
|
+
|
|
|
|
+ public abstract void blockReport(
|
|
|
|
+ com.google.protobuf.RpcController controller,
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto request,
|
|
|
|
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto> done);
|
|
|
|
+
|
|
|
|
+ public abstract void blockReceivedAndDeleted(
|
|
|
|
+ com.google.protobuf.RpcController controller,
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto request,
|
|
|
|
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto> done);
|
|
|
|
+
|
|
|
|
+ public abstract void errorReport(
|
|
|
|
+ com.google.protobuf.RpcController controller,
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto request,
|
|
|
|
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto> done);
|
|
|
|
+
|
|
|
|
+ public abstract void processUpgrade(
|
|
|
|
+ com.google.protobuf.RpcController controller,
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeRequestProto request,
|
|
|
|
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeResponseProto> done);
|
|
|
|
+
|
|
|
|
+ public abstract void reportBadBlocks(
|
|
|
|
+ com.google.protobuf.RpcController controller,
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto request,
|
|
|
|
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto> done);
|
|
|
|
+
|
|
|
|
+ public abstract void commitBlockSynchronization(
|
|
|
|
+ com.google.protobuf.RpcController controller,
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto request,
|
|
|
|
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto> done);
+
+ }
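+ // A server-side implementation can be written against the asynchronous Interface
+ // above and wrapped with newReflectiveService(impl) to obtain a
+ // com.google.protobuf.Service. A minimal sketch (illustrative only; the variable
+ // names below are not part of this file):
+ //
+ //   DatanodeProtocolService.Interface impl = new DatanodeProtocolService.Interface() {
+ //     public void sendHeartbeat(RpcController controller,
+ //         HeartbeatRequestProto request,
+ //         RpcCallback<HeartbeatResponseProto> done) {
+ //       // build a HeartbeatResponseProto and hand it to done.run(...)
+ //     }
+ //     // ... the remaining seven methods are implemented the same way ...
+ //   };
+ //   com.google.protobuf.Service service = DatanodeProtocolService.newReflectiveService(impl);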
+
+ public static com.google.protobuf.Service newReflectiveService(
+ final Interface impl) {
+ return new DatanodeProtocolService() {
+ @java.lang.Override
|
|
|
|
+ public void registerDatanode(
|
|
|
|
+ com.google.protobuf.RpcController controller,
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto request,
|
|
|
|
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto> done) {
|
|
|
|
+ impl.registerDatanode(controller, request, done);
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ @java.lang.Override
|
|
|
|
+ public void sendHeartbeat(
|
|
|
|
+ com.google.protobuf.RpcController controller,
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto request,
|
|
|
|
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto> done) {
|
|
|
|
+ impl.sendHeartbeat(controller, request, done);
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ @java.lang.Override
|
|
|
|
+ public void blockReport(
|
|
|
|
+ com.google.protobuf.RpcController controller,
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto request,
|
|
|
|
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto> done) {
|
|
|
|
+ impl.blockReport(controller, request, done);
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ @java.lang.Override
|
|
|
|
+ public void blockReceivedAndDeleted(
|
|
|
|
+ com.google.protobuf.RpcController controller,
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto request,
|
|
|
|
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto> done) {
|
|
|
|
+ impl.blockReceivedAndDeleted(controller, request, done);
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ @java.lang.Override
|
|
|
|
+ public void errorReport(
|
|
|
|
+ com.google.protobuf.RpcController controller,
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto request,
|
|
|
|
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto> done) {
|
|
|
|
+ impl.errorReport(controller, request, done);
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ @java.lang.Override
|
|
|
|
+ public void processUpgrade(
|
|
|
|
+ com.google.protobuf.RpcController controller,
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeRequestProto request,
|
|
|
|
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeResponseProto> done) {
|
|
|
|
+ impl.processUpgrade(controller, request, done);
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ @java.lang.Override
|
|
|
|
+ public void reportBadBlocks(
|
|
|
|
+ com.google.protobuf.RpcController controller,
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto request,
|
|
|
|
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto> done) {
|
|
|
|
+ impl.reportBadBlocks(controller, request, done);
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ @java.lang.Override
|
|
|
|
+ public void commitBlockSynchronization(
|
|
|
|
+ com.google.protobuf.RpcController controller,
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto request,
|
|
|
|
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto> done) {
|
|
|
|
+ impl.commitBlockSynchronization(controller, request, done);
|
|
|
|
+ }
+
+ };
+ }
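+ // newReflectiveBlockingService performs the same adaptation for synchronous
+ // implementations: it wraps a BlockingInterface in a BlockingService whose
+ // callBlockingMethod dispatches on the method descriptor index (0..7, in the order
+ // the rpcs are declared in DatanodeProtocol.proto) and whose request/response
+ // prototypes are the matching generated default instances.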
+
+ public static com.google.protobuf.BlockingService
+ newReflectiveBlockingService(final BlockingInterface impl) {
+ return new com.google.protobuf.BlockingService() {
+ public final com.google.protobuf.Descriptors.ServiceDescriptor
|
|
|
|
+ getDescriptorForType() {
|
|
|
|
+ return getDescriptor();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public final com.google.protobuf.Message callBlockingMethod(
|
|
|
|
+ com.google.protobuf.Descriptors.MethodDescriptor method,
|
|
|
|
+ com.google.protobuf.RpcController controller,
|
|
|
|
+ com.google.protobuf.Message request)
|
|
|
|
+ throws com.google.protobuf.ServiceException {
|
|
|
|
+ if (method.getService() != getDescriptor()) {
|
|
|
|
+ throw new java.lang.IllegalArgumentException(
|
|
|
|
+ "Service.callBlockingMethod() given method descriptor for " +
|
|
|
|
+ "wrong service type.");
|
|
|
|
+ }
|
|
|
|
+ switch(method.getIndex()) {
|
|
|
|
+ case 0:
|
|
|
|
+ return impl.registerDatanode(controller, (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto)request);
|
|
|
|
+ case 1:
|
|
|
|
+ return impl.sendHeartbeat(controller, (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto)request);
|
|
|
|
+ case 2:
|
|
|
|
+ return impl.blockReport(controller, (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto)request);
|
|
|
|
+ case 3:
|
|
|
|
+ return impl.blockReceivedAndDeleted(controller, (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto)request);
|
|
|
|
+ case 4:
|
|
|
|
+ return impl.errorReport(controller, (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto)request);
|
|
|
|
+ case 5:
|
|
|
|
+ return impl.processUpgrade(controller, (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeRequestProto)request);
|
|
|
|
+ case 6:
|
|
|
|
+ return impl.reportBadBlocks(controller, (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto)request);
|
|
|
|
+ case 7:
|
|
|
|
+ return impl.commitBlockSynchronization(controller, (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto)request);
|
|
|
|
+ default:
|
|
|
|
+ throw new java.lang.AssertionError("Can't get here.");
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public final com.google.protobuf.Message
|
|
|
|
+ getRequestPrototype(
|
|
|
|
+ com.google.protobuf.Descriptors.MethodDescriptor method) {
|
|
|
|
+ if (method.getService() != getDescriptor()) {
|
|
|
|
+ throw new java.lang.IllegalArgumentException(
|
|
|
|
+ "Service.getRequestPrototype() given method " +
|
|
|
|
+ "descriptor for wrong service type.");
|
|
|
|
+ }
|
|
|
|
+ switch(method.getIndex()) {
|
|
|
|
+ case 0:
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto.getDefaultInstance();
|
|
|
|
+ case 1:
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto.getDefaultInstance();
|
|
|
|
+ case 2:
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto.getDefaultInstance();
|
|
|
|
+ case 3:
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto.getDefaultInstance();
|
|
|
|
+ case 4:
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto.getDefaultInstance();
|
|
|
|
+ case 5:
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeRequestProto.getDefaultInstance();
|
|
|
|
+ case 6:
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto.getDefaultInstance();
|
|
|
|
+ case 7:
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto.getDefaultInstance();
|
|
|
|
+ default:
|
|
|
|
+ throw new java.lang.AssertionError("Can't get here.");
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public final com.google.protobuf.Message
|
|
|
|
+ getResponsePrototype(
|
|
|
|
+ com.google.protobuf.Descriptors.MethodDescriptor method) {
|
|
|
|
+ if (method.getService() != getDescriptor()) {
|
|
|
|
+ throw new java.lang.IllegalArgumentException(
|
|
|
|
+ "Service.getResponsePrototype() given method " +
|
|
|
|
+ "descriptor for wrong service type.");
|
|
|
|
+ }
|
|
|
|
+ switch(method.getIndex()) {
|
|
|
|
+ case 0:
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto.getDefaultInstance();
|
|
|
|
+ case 1:
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto.getDefaultInstance();
|
|
|
|
+ case 2:
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto.getDefaultInstance();
|
|
|
|
+ case 3:
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto.getDefaultInstance();
|
|
|
|
+ case 4:
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto.getDefaultInstance();
|
|
|
|
+ case 5:
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeResponseProto.getDefaultInstance();
|
|
|
|
+ case 6:
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto.getDefaultInstance();
|
|
|
|
+ case 7:
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto.getDefaultInstance();
|
|
|
|
+ default:
|
|
|
|
+ throw new java.lang.AssertionError("Can't get here.");
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ };
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public abstract void registerDatanode(
|
|
|
|
+ com.google.protobuf.RpcController controller,
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto request,
|
|
|
|
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto> done);
|
|
|
|
+
|
|
|
|
+ public abstract void sendHeartbeat(
|
|
|
|
+ com.google.protobuf.RpcController controller,
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto request,
|
|
|
|
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto> done);
|
|
|
|
+
|
|
|
|
+ public abstract void blockReport(
|
|
|
|
+ com.google.protobuf.RpcController controller,
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto request,
|
|
|
|
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto> done);
|
|
|
|
+
|
|
|
|
+ public abstract void blockReceivedAndDeleted(
|
|
|
|
+ com.google.protobuf.RpcController controller,
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto request,
|
|
|
|
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto> done);
|
|
|
|
+
|
|
|
|
+ public abstract void errorReport(
|
|
|
|
+ com.google.protobuf.RpcController controller,
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto request,
|
|
|
|
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto> done);
|
|
|
|
+
|
|
|
|
+ public abstract void processUpgrade(
|
|
|
|
+ com.google.protobuf.RpcController controller,
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeRequestProto request,
|
|
|
|
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeResponseProto> done);
|
|
|
|
+
|
|
|
|
+ public abstract void reportBadBlocks(
|
|
|
|
+ com.google.protobuf.RpcController controller,
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto request,
|
|
|
|
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto> done);
|
|
|
|
+
|
|
|
|
+ public abstract void commitBlockSynchronization(
|
|
|
|
+ com.google.protobuf.RpcController controller,
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto request,
|
|
|
|
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto> done);
|
|
|
|
+
|
|
|
|
+ public static final
|
|
|
|
+ com.google.protobuf.Descriptors.ServiceDescriptor
|
|
|
|
+ getDescriptor() {
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.getDescriptor().getServices().get(0);
|
|
|
|
+ }
|
|
|
|
+ public final com.google.protobuf.Descriptors.ServiceDescriptor
|
|
|
|
+ getDescriptorForType() {
|
|
|
|
+ return getDescriptor();
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public final void callMethod(
|
|
|
|
+ com.google.protobuf.Descriptors.MethodDescriptor method,
|
|
|
|
+ com.google.protobuf.RpcController controller,
|
|
|
|
+ com.google.protobuf.Message request,
|
|
|
|
+ com.google.protobuf.RpcCallback<
|
|
|
|
+ com.google.protobuf.Message> done) {
|
|
|
|
+ if (method.getService() != getDescriptor()) {
|
|
|
|
+ throw new java.lang.IllegalArgumentException(
|
|
|
|
+ "Service.callMethod() given method descriptor for wrong " +
|
|
|
|
+ "service type.");
|
|
|
|
+ }
|
|
|
|
+ switch(method.getIndex()) {
|
|
|
|
+ case 0:
|
|
|
|
+ this.registerDatanode(controller, (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto)request,
|
|
|
|
+ com.google.protobuf.RpcUtil.<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto>specializeCallback(
|
|
|
|
+ done));
|
|
|
|
+ return;
|
|
|
|
+ case 1:
|
|
|
|
+ this.sendHeartbeat(controller, (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto)request,
|
|
|
|
+ com.google.protobuf.RpcUtil.<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto>specializeCallback(
|
|
|
|
+ done));
|
|
|
|
+ return;
|
|
|
|
+ case 2:
|
|
|
|
+ this.blockReport(controller, (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto)request,
|
|
|
|
+ com.google.protobuf.RpcUtil.<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto>specializeCallback(
|
|
|
|
+ done));
|
|
|
|
+ return;
|
|
|
|
+ case 3:
|
|
|
|
+ this.blockReceivedAndDeleted(controller, (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto)request,
|
|
|
|
+ com.google.protobuf.RpcUtil.<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto>specializeCallback(
|
|
|
|
+ done));
|
|
|
|
+ return;
|
|
|
|
+ case 4:
|
|
|
|
+ this.errorReport(controller, (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto)request,
|
|
|
|
+ com.google.protobuf.RpcUtil.<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto>specializeCallback(
|
|
|
|
+ done));
|
|
|
|
+ return;
|
|
|
|
+ case 5:
|
|
|
|
+ this.processUpgrade(controller, (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeRequestProto)request,
|
|
|
|
+ com.google.protobuf.RpcUtil.<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeResponseProto>specializeCallback(
|
|
|
|
+ done));
|
|
|
|
+ return;
|
|
|
|
+ case 6:
|
|
|
|
+ this.reportBadBlocks(controller, (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto)request,
|
|
|
|
+ com.google.protobuf.RpcUtil.<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto>specializeCallback(
|
|
|
|
+ done));
|
|
|
|
+ return;
|
|
|
|
+ case 7:
|
|
|
|
+ this.commitBlockSynchronization(controller, (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto)request,
|
|
|
|
+ com.google.protobuf.RpcUtil.<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto>specializeCallback(
|
|
|
|
+ done));
|
|
|
|
+ return;
|
|
|
|
+ default:
|
|
|
|
+ throw new java.lang.AssertionError("Can't get here.");
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public final com.google.protobuf.Message
|
|
|
|
+ getRequestPrototype(
|
|
|
|
+ com.google.protobuf.Descriptors.MethodDescriptor method) {
|
|
|
|
+ if (method.getService() != getDescriptor()) {
|
|
|
|
+ throw new java.lang.IllegalArgumentException(
|
|
|
|
+ "Service.getRequestPrototype() given method " +
|
|
|
|
+ "descriptor for wrong service type.");
|
|
|
|
+ }
|
|
|
|
+ switch(method.getIndex()) {
|
|
|
|
+ case 0:
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto.getDefaultInstance();
|
|
|
|
+ case 1:
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto.getDefaultInstance();
|
|
|
|
+ case 2:
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto.getDefaultInstance();
|
|
|
|
+ case 3:
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto.getDefaultInstance();
|
|
|
|
+ case 4:
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto.getDefaultInstance();
|
|
|
|
+ case 5:
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeRequestProto.getDefaultInstance();
|
|
|
|
+ case 6:
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto.getDefaultInstance();
|
|
|
|
+ case 7:
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto.getDefaultInstance();
|
|
|
|
+ default:
|
|
|
|
+ throw new java.lang.AssertionError("Can't get here.");
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public final com.google.protobuf.Message
|
|
|
|
+ getResponsePrototype(
|
|
|
|
+ com.google.protobuf.Descriptors.MethodDescriptor method) {
|
|
|
|
+ if (method.getService() != getDescriptor()) {
|
|
|
|
+ throw new java.lang.IllegalArgumentException(
|
|
|
|
+ "Service.getResponsePrototype() given method " +
|
|
|
|
+ "descriptor for wrong service type.");
|
|
|
|
+ }
|
|
|
|
+ switch(method.getIndex()) {
|
|
|
|
+ case 0:
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto.getDefaultInstance();
|
|
|
|
+ case 1:
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto.getDefaultInstance();
|
|
|
|
+ case 2:
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto.getDefaultInstance();
|
|
|
|
+ case 3:
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto.getDefaultInstance();
|
|
|
|
+ case 4:
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto.getDefaultInstance();
|
|
|
|
+ case 5:
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeResponseProto.getDefaultInstance();
|
|
|
|
+ case 6:
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto.getDefaultInstance();
|
|
|
|
+ case 7:
|
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto.getDefaultInstance();
|
|
|
|
+ default:
|
|
|
|
+ throw new java.lang.AssertionError("Can't get here.");
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+
+ public static Stub newStub(
+ com.google.protobuf.RpcChannel channel) {
+ return new Stub(channel);
+ }
+
+ public static final class Stub extends org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeProtocolService implements Interface {
+ private Stub(com.google.protobuf.RpcChannel channel) {
+ this.channel = channel;
+ }
+
+ private final com.google.protobuf.RpcChannel channel;
+
|
|
|
|
+ public com.google.protobuf.RpcChannel getChannel() {
|
|
|
|
+ return channel;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public void registerDatanode(
|
|
|
|
+ com.google.protobuf.RpcController controller,
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto request,
|
|
|
|
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto> done) {
|
|
|
|
+ channel.callMethod(
|
|
|
|
+ getDescriptor().getMethods().get(0),
|
|
|
|
+ controller,
|
|
|
|
+ request,
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto.getDefaultInstance(),
|
|
|
|
+ com.google.protobuf.RpcUtil.generalizeCallback(
|
|
|
|
+ done,
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto.class,
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto.getDefaultInstance()));
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public void sendHeartbeat(
|
|
|
|
+ com.google.protobuf.RpcController controller,
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto request,
|
|
|
|
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto> done) {
|
|
|
|
+ channel.callMethod(
|
|
|
|
+ getDescriptor().getMethods().get(1),
|
|
|
|
+ controller,
|
|
|
|
+ request,
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto.getDefaultInstance(),
|
|
|
|
+ com.google.protobuf.RpcUtil.generalizeCallback(
|
|
|
|
+ done,
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto.class,
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto.getDefaultInstance()));
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public void blockReport(
|
|
|
|
+ com.google.protobuf.RpcController controller,
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto request,
|
|
|
|
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto> done) {
|
|
|
|
+ channel.callMethod(
|
|
|
|
+ getDescriptor().getMethods().get(2),
|
|
|
|
+ controller,
|
|
|
|
+ request,
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto.getDefaultInstance(),
|
|
|
|
+ com.google.protobuf.RpcUtil.generalizeCallback(
|
|
|
|
+ done,
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto.class,
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto.getDefaultInstance()));
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public void blockReceivedAndDeleted(
|
|
|
|
+ com.google.protobuf.RpcController controller,
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto request,
|
|
|
|
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto> done) {
|
|
|
|
+ channel.callMethod(
|
|
|
|
+ getDescriptor().getMethods().get(3),
|
|
|
|
+ controller,
|
|
|
|
+ request,
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto.getDefaultInstance(),
|
|
|
|
+ com.google.protobuf.RpcUtil.generalizeCallback(
|
|
|
|
+ done,
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto.class,
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto.getDefaultInstance()));
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public void errorReport(
|
|
|
|
+ com.google.protobuf.RpcController controller,
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto request,
|
|
|
|
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto> done) {
|
|
|
|
+ channel.callMethod(
|
|
|
|
+ getDescriptor().getMethods().get(4),
|
|
|
|
+ controller,
|
|
|
|
+ request,
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto.getDefaultInstance(),
|
|
|
|
+ com.google.protobuf.RpcUtil.generalizeCallback(
|
|
|
|
+ done,
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto.class,
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto.getDefaultInstance()));
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public void processUpgrade(
|
|
|
|
+ com.google.protobuf.RpcController controller,
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeRequestProto request,
|
|
|
|
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeResponseProto> done) {
|
|
|
|
+ channel.callMethod(
|
|
|
|
+ getDescriptor().getMethods().get(5),
|
|
|
|
+ controller,
|
|
|
|
+ request,
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeResponseProto.getDefaultInstance(),
|
|
|
|
+ com.google.protobuf.RpcUtil.generalizeCallback(
|
|
|
|
+ done,
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeResponseProto.class,
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeResponseProto.getDefaultInstance()));
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public void reportBadBlocks(
|
|
|
|
+ com.google.protobuf.RpcController controller,
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto request,
|
|
|
|
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto> done) {
|
|
|
|
+ channel.callMethod(
|
|
|
|
+ getDescriptor().getMethods().get(6),
|
|
|
|
+ controller,
|
|
|
|
+ request,
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto.getDefaultInstance(),
|
|
|
|
+ com.google.protobuf.RpcUtil.generalizeCallback(
|
|
|
|
+ done,
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto.class,
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto.getDefaultInstance()));
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ public void commitBlockSynchronization(
|
|
|
|
+ com.google.protobuf.RpcController controller,
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto request,
|
|
|
|
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto> done) {
|
|
|
|
+ channel.callMethod(
|
|
|
|
+ getDescriptor().getMethods().get(7),
|
|
|
|
+ controller,
|
|
|
|
+ request,
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto.getDefaultInstance(),
|
|
|
|
+ com.google.protobuf.RpcUtil.generalizeCallback(
|
|
|
|
+ done,
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto.class,
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto.getDefaultInstance()));
|
|
|
|
+ }
+ }
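+ // The Stub above is the asynchronous client: each call goes through
+ // channel.callMethod with an RpcCallback. newBlockingStub below is its synchronous
+ // counterpart: given a BlockingRpcChannel it returns a BlockingInterface whose
+ // methods block until the namenode replies (or throw ServiceException). A rough
+ // sketch of client usage, assuming an RPC engine has already produced the channel
+ // and controller (these variable names are illustrative, not part of this file):
+ //
+ //   com.google.protobuf.BlockingRpcChannel channel = ...; // from the RPC layer
+ //   DatanodeProtocolService.BlockingInterface proxy =
+ //       DatanodeProtocolService.newBlockingStub(channel);
+ //   HeartbeatResponseProto resp = proxy.sendHeartbeat(controller, heartbeatRequest);
+ //
+ // In HDFS this wiring typically lives in the protocolPB translator layer rather
+ // than in user code.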
+
+ public static BlockingInterface newBlockingStub(
+ com.google.protobuf.BlockingRpcChannel channel) {
+ return new BlockingStub(channel);
+ }
+
+ public interface BlockingInterface {
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto registerDatanode(
|
|
|
|
+ com.google.protobuf.RpcController controller,
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto request)
|
|
|
|
+ throws com.google.protobuf.ServiceException;
|
|
|
|
+
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto sendHeartbeat(
|
|
|
|
+ com.google.protobuf.RpcController controller,
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto request)
|
|
|
|
+ throws com.google.protobuf.ServiceException;
|
|
|
|
+
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto blockReport(
|
|
|
|
+ com.google.protobuf.RpcController controller,
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto request)
|
|
|
|
+ throws com.google.protobuf.ServiceException;
|
|
|
|
+
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto blockReceivedAndDeleted(
|
|
|
|
+ com.google.protobuf.RpcController controller,
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto request)
|
|
|
|
+ throws com.google.protobuf.ServiceException;
|
|
|
|
+
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto errorReport(
|
|
|
|
+ com.google.protobuf.RpcController controller,
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto request)
|
|
|
|
+ throws com.google.protobuf.ServiceException;
|
|
|
|
+
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeResponseProto processUpgrade(
|
|
|
|
+ com.google.protobuf.RpcController controller,
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeRequestProto request)
|
|
|
|
+ throws com.google.protobuf.ServiceException;
|
|
|
|
+
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto reportBadBlocks(
|
|
|
|
+ com.google.protobuf.RpcController controller,
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto request)
|
|
|
|
+ throws com.google.protobuf.ServiceException;
|
|
|
|
+
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto commitBlockSynchronization(
|
|
|
|
+ com.google.protobuf.RpcController controller,
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto request)
|
|
|
|
+ throws com.google.protobuf.ServiceException;
+ }
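+ // BlockingStub is the private implementation returned by newBlockingStub: each
+ // method forwards to channel.callBlockingMethod with the corresponding method
+ // descriptor (getMethods().get(0..7)), the request, and the generated response
+ // default instance, then casts the returned Message to the concrete response type.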
+
+ private static final class BlockingStub implements BlockingInterface {
+ private BlockingStub(com.google.protobuf.BlockingRpcChannel channel) {
+ this.channel = channel;
+ }
+
+ private final com.google.protobuf.BlockingRpcChannel channel;
+
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto registerDatanode(
|
|
|
|
+ com.google.protobuf.RpcController controller,
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto request)
|
|
|
|
+ throws com.google.protobuf.ServiceException {
|
|
|
|
+ return (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto) channel.callBlockingMethod(
|
|
|
|
+ getDescriptor().getMethods().get(0),
|
|
|
|
+ controller,
|
|
|
|
+ request,
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto.getDefaultInstance());
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto sendHeartbeat(
|
|
|
|
+ com.google.protobuf.RpcController controller,
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto request)
|
|
|
|
+ throws com.google.protobuf.ServiceException {
|
|
|
|
+ return (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto) channel.callBlockingMethod(
|
|
|
|
+ getDescriptor().getMethods().get(1),
|
|
|
|
+ controller,
|
|
|
|
+ request,
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto.getDefaultInstance());
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto blockReport(
|
|
|
|
+ com.google.protobuf.RpcController controller,
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto request)
|
|
|
|
+ throws com.google.protobuf.ServiceException {
|
|
|
|
+ return (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto) channel.callBlockingMethod(
|
|
|
|
+ getDescriptor().getMethods().get(2),
|
|
|
|
+ controller,
|
|
|
|
+ request,
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto.getDefaultInstance());
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto blockReceivedAndDeleted(
|
|
|
|
+ com.google.protobuf.RpcController controller,
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto request)
|
|
|
|
+ throws com.google.protobuf.ServiceException {
|
|
|
|
+ return (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto) channel.callBlockingMethod(
|
|
|
|
+ getDescriptor().getMethods().get(3),
|
|
|
|
+ controller,
|
|
|
|
+ request,
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto.getDefaultInstance());
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto errorReport(
|
|
|
|
+ com.google.protobuf.RpcController controller,
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto request)
|
|
|
|
+ throws com.google.protobuf.ServiceException {
|
|
|
|
+ return (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto) channel.callBlockingMethod(
|
|
|
|
+ getDescriptor().getMethods().get(4),
|
|
|
|
+ controller,
|
|
|
|
+ request,
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto.getDefaultInstance());
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeResponseProto processUpgrade(
|
|
|
|
+ com.google.protobuf.RpcController controller,
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeRequestProto request)
|
|
|
|
+ throws com.google.protobuf.ServiceException {
|
|
|
|
+ return (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeResponseProto) channel.callBlockingMethod(
|
|
|
|
+ getDescriptor().getMethods().get(5),
|
|
|
|
+ controller,
|
|
|
|
+ request,
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeResponseProto.getDefaultInstance());
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto reportBadBlocks(
|
|
|
|
+ com.google.protobuf.RpcController controller,
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto request)
|
|
|
|
+ throws com.google.protobuf.ServiceException {
|
|
|
|
+ return (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto) channel.callBlockingMethod(
|
|
|
|
+ getDescriptor().getMethods().get(6),
|
|
|
|
+ controller,
|
|
|
|
+ request,
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto.getDefaultInstance());
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+
|
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto commitBlockSynchronization(
|
|
|
|
+ com.google.protobuf.RpcController controller,
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto request)
|
|
|
|
+ throws com.google.protobuf.ServiceException {
|
|
|
|
+ return (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto) channel.callBlockingMethod(
|
|
|
|
+ getDescriptor().getMethods().get(7),
|
|
|
|
+ controller,
|
|
|
|
+ request,
|
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto.getDefaultInstance());
|
|
|
|
+ }
+
+ }
+ }
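+ // The remaining fields cache the Descriptor and FieldAccessorTable for every message
+ // in this file; presumably they are assigned when the file's FileDescriptor is built
+ // (in generated code of this vintage that happens in a static initializer, outside
+ // the portion shown here, which parses the serialized DatanodeProtocol.proto
+ // descriptor and then fills in each internal_static_* field).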
+
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_DatanodeRegistrationProto_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_DatanodeRegistrationProto_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
|
|
|
|
+ internal_static_DatanodeCommandProto_descriptor;
|
|
|
|
+ private static
|
|
|
|
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
|
|
|
+ internal_static_DatanodeCommandProto_fieldAccessorTable;
|
|
|
|
+ private static com.google.protobuf.Descriptors.Descriptor
|
|
|
|
+ internal_static_BalancerBandwidthCommandProto_descriptor;
|
|
|
|
+ private static
|
|
|
|
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
|
|
|
+ internal_static_BalancerBandwidthCommandProto_fieldAccessorTable;
|
|
|
|
+ private static com.google.protobuf.Descriptors.Descriptor
|
|
|
|
+ internal_static_BlockCommandProto_descriptor;
|
|
|
|
+ private static
|
|
|
|
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
|
|
|
+ internal_static_BlockCommandProto_fieldAccessorTable;
|
|
|
|
+ private static com.google.protobuf.Descriptors.Descriptor
|
|
|
|
+ internal_static_BlockRecoveryCommndProto_descriptor;
|
|
|
|
+ private static
|
|
|
|
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
|
|
|
+ internal_static_BlockRecoveryCommndProto_fieldAccessorTable;
|
|
|
|
+ private static com.google.protobuf.Descriptors.Descriptor
|
|
|
|
+ internal_static_FinalizeCommandProto_descriptor;
|
|
|
|
+ private static
|
|
|
|
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
|
|
|
+ internal_static_FinalizeCommandProto_fieldAccessorTable;
|
|
|
|
+ private static com.google.protobuf.Descriptors.Descriptor
|
|
|
|
+ internal_static_KeyUpdateCommandProto_descriptor;
|
|
|
|
+ private static
|
|
|
|
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
|
|
|
+ internal_static_KeyUpdateCommandProto_fieldAccessorTable;
|
|
|
|
+ private static com.google.protobuf.Descriptors.Descriptor
|
|
|
|
+ internal_static_RegisterCommandProto_descriptor;
|
|
|
|
+ private static
|
|
|
|
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
|
|
|
+ internal_static_RegisterCommandProto_fieldAccessorTable;
|
|
|
|
+ private static com.google.protobuf.Descriptors.Descriptor
|
|
|
|
+ internal_static_UpgradeCommandProto_descriptor;
|
|
|
|
+ private static
|
|
|
|
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
|
|
|
+ internal_static_UpgradeCommandProto_fieldAccessorTable;
|
|
|
|
+ private static com.google.protobuf.Descriptors.Descriptor
|
|
|
|
+ internal_static_RegisterDatanodeRequestProto_descriptor;
|
|
|
|
+ private static
|
|
|
|
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
|
|
|
+ internal_static_RegisterDatanodeRequestProto_fieldAccessorTable;
|
|
|
|
+ private static com.google.protobuf.Descriptors.Descriptor
|
|
|
|
+ internal_static_RegisterDatanodeResponseProto_descriptor;
|
|
|
|
+ private static
|
|
|
|
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
|
|
|
+ internal_static_RegisterDatanodeResponseProto_fieldAccessorTable;
|
|
|
|
+ private static com.google.protobuf.Descriptors.Descriptor
|
|
|
|
+ internal_static_HeartbeatRequestProto_descriptor;
|
|
|
|
+ private static
|
|
|
|
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
|
|
|
+ internal_static_HeartbeatRequestProto_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_HeartbeatResponseProto_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_HeartbeatResponseProto_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_BlockReportRequestProto_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_BlockReportRequestProto_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_BlockReportResponseProto_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_BlockReportResponseProto_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_ReceivedDeletedBlockInfoProto_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_ReceivedDeletedBlockInfoProto_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_BlockReceivedAndDeletedRequestProto_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_BlockReceivedAndDeletedRequestProto_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_BlockReceivedAndDeletedResponseProto_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_BlockReceivedAndDeletedResponseProto_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_ErrorReportRequestProto_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_ErrorReportRequestProto_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_ErrorReportResponseProto_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_ErrorReportResponseProto_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_ProcessUpgradeRequestProto_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_ProcessUpgradeRequestProto_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_ProcessUpgradeResponseProto_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_ProcessUpgradeResponseProto_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_ReportBadBlocksRequestProto_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_ReportBadBlocksRequestProto_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_ReportBadBlocksResponseProto_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_ReportBadBlocksResponseProto_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_CommitBlockSynchronizationRequestProto_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_CommitBlockSynchronizationRequestProto_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_CommitBlockSynchronizationResponseProto_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_CommitBlockSynchronizationResponseProto_fieldAccessorTable;
+
+ public static com.google.protobuf.Descriptors.FileDescriptor
+ getDescriptor() {
+ return descriptor;
+ }
+ private static com.google.protobuf.Descriptors.FileDescriptor
+ descriptor;
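+  // Static initializer: parses the serialized DatanodeProtocol.proto descriptor data below
+  // (with hdfs.proto as a dependency) and populates the descriptor and field accessor
+  // table holders declared above.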
+ static {
+ java.lang.String[] descriptorData = {
+ "\n\026DatanodeProtocol.proto\032\nhdfs.proto\"\220\001\n" +
+ "\031DatanodeRegistrationProto\022$\n\ndatanodeID" +
+ "\030\001 \002(\0132\020.DatanodeIDProto\022&\n\013storateInfo\030" +
+ "\002 \002(\0132\021.StorageInfoProto\022%\n\004keys\030\003 \002(\0132\027" +
+ ".ExportedBlockKeysProto\"\243\004\n\024DatanodeComm" +
+ "andProto\022+\n\007cmdType\030\001 \002(\0162\032.DatanodeComm" +
+ "andProto.Type\0223\n\013balancerCmd\030\002 \001(\0132\036.Bal" +
+ "ancerBandwidthCommandProto\022\"\n\006blkCmd\030\003 \001" +
+ "(\0132\022.BlockCommandProto\022.\n\013recoveryCmd\030\004 " +
+ "\001(\0132\031.BlockRecoveryCommndProto\022*\n\013finali",
+ "zeCmd\030\005 \001(\0132\025.FinalizeCommandProto\022,\n\014ke" +
+ "yUpdateCmd\030\006 \001(\0132\026.KeyUpdateCommandProto" +
+ "\022*\n\013registerCmd\030\007 \001(\0132\025.RegisterCommandP" +
+ "roto\022(\n\nupgradeCmd\030\010 \001(\0132\024.UpgradeComman" +
+ "dProto\"\244\001\n\004Type\022\034\n\030BalancerBandwidthComm" +
+ "and\020\000\022\020\n\014BlockCommand\020\001\022\030\n\024BlockRecovery" +
+ "Command\020\002\022\023\n\017FinalizeCommand\020\003\022\024\n\020KeyUpd" +
+ "ateCommand\020\004\022\023\n\017RegisterCommand\020\005\022\022\n\016Upg" +
+ "radeCommand\020\006\"2\n\035BalancerBandwidthComman" +
+ "dProto\022\021\n\tbandwidth\030\001 \002(\004\"\274\001\n\021BlockComma",
+ "ndProto\022\016\n\006action\030\001 \002(\r\022\023\n\013blockPoolId\030\002" +
+ " \002(\t\022\033\n\006blocks\030\003 \003(\0132\013.BlockProto\022\"\n\007tar" +
+ "gets\030\004 \003(\0132\021.DatanodeIDsProto\"A\n\006Action\022" +
+ "\013\n\007UNKNOWN\020\000\022\014\n\010TRANSFER\020\001\022\016\n\nINVALIDATE" +
+ "\020\002\022\014\n\010SHUTDOWN\020\003\"A\n\030BlockRecoveryCommndP" +
+ "roto\022%\n\006blocks\030\001 \003(\0132\025.RecoveringBlockPr" +
+ "oto\"+\n\024FinalizeCommandProto\022\023\n\013blockPool" +
+ "Id\030\001 \002(\t\">\n\025KeyUpdateCommandProto\022%\n\004key" +
+ "s\030\001 \002(\0132\027.ExportedBlockKeysProto\"\026\n\024Regi" +
+ "sterCommandProto\"\212\001\n\023UpgradeCommandProto",
+ "\022\016\n\006action\030\001 \002(\r\022\017\n\007version\030\002 \002(\r\022\025\n\rupg" +
+ "radeStatus\030\003 \002(\r\";\n\006Action\022\013\n\007UNKNOWN\020\000\022" +
+ "\021\n\rREPORT_STATUS\020d\022\021\n\rSTART_UPGRADE\020e\"P\n" +
+ "\034RegisterDatanodeRequestProto\0220\n\014registr" +
+ "ation\030\001 \002(\0132\032.DatanodeRegistrationProto\"" +
+ "Q\n\035RegisterDatanodeResponseProto\0220\n\014regi" +
+ "stration\030\001 \002(\0132\032.DatanodeRegistrationPro" +
+ "to\"\334\001\n\025HeartbeatRequestProto\0220\n\014registra" +
+ "tion\030\001 \002(\0132\032.DatanodeRegistrationProto\022\020" +
+ "\n\010capacity\030\002 \002(\004\022\017\n\007dfsUsed\030\003 \002(\004\022\021\n\trem",
+ "aining\030\004 \002(\004\022\025\n\rblockPoolUsed\030\005 \002(\004\022\027\n\017x" +
+ "mitsInProgress\030\006 \002(\r\022\024\n\014xceiverCount\030\007 \002" +
+ "(\r\022\025\n\rfailedVolumes\030\010 \002(\r\"=\n\026HeartbeatRe" +
+ "sponseProto\022#\n\004cmds\030\001 \003(\0132\025.DatanodeComm" +
+ "andProto\"t\n\027BlockReportRequestProto\0220\n\014r" +
+ "egistration\030\001 \002(\0132\032.DatanodeRegistration" +
+ "Proto\022\023\n\013blockPoolId\030\002 \002(\t\022\022\n\006blocks\030\003 \003" +
+ "(\004B\002\020\001\">\n\030BlockReportResponseProto\022\"\n\003cm" +
+ "d\030\001 \002(\0132\025.DatanodeCommandProto\"O\n\035Receiv" +
+ "edDeletedBlockInfoProto\022\032\n\005block\030\001 \002(\0132\013",
+ ".BlockProto\022\022\n\ndeleteHint\030\002 \001(\t\"\234\001\n#Bloc" +
+ "kReceivedAndDeletedRequestProto\0220\n\014regis" +
+ "tration\030\001 \002(\0132\032.DatanodeRegistrationProt" +
+ "o\022\023\n\013blockPoolId\030\002 \002(\t\022.\n\006blocks\030\003 \003(\0132\036" +
+ ".ReceivedDeletedBlockInfoProto\"&\n$BlockR" +
+ "eceivedAndDeletedResponseProto\"\275\001\n\027Error" +
+ "ReportRequestProto\0220\n\014registartion\030\001 \002(\013" +
+ "2\032.DatanodeRegistrationProto\022\021\n\terrorCod" +
+ "e\030\002 \002(\r\022\013\n\003msg\030\003 \002(\t\"P\n\tErrorCode\022\n\n\006NOT" +
+ "IFY\020\000\022\016\n\nDISK_ERROR\020\001\022\021\n\rINVALID_BLOCK\020\002",
+ "\022\024\n\020FATAL_DISK_ERROR\020\003\"\032\n\030ErrorReportRes" +
+ "ponseProto\"?\n\032ProcessUpgradeRequestProto" +
+ "\022!\n\003cmd\030\001 \001(\0132\024.UpgradeCommandProto\"@\n\033P" +
+ "rocessUpgradeResponseProto\022!\n\003cmd\030\001 \001(\0132" +
+ "\024.UpgradeCommandProto\"A\n\033ReportBadBlocks" +
+ "RequestProto\022\"\n\006blocks\030\001 \003(\0132\022.LocatedBl" +
+ "ockProto\"\036\n\034ReportBadBlocksResponseProto" +
+ "\"\303\001\n&CommitBlockSynchronizationRequestPr" +
+ "oto\022\"\n\005block\030\001 \002(\0132\023.ExtendedBlockProto\022" +
+ "\023\n\013newGenStamp\030\002 \002(\004\022\021\n\tnewLength\030\003 \002(\004\022",
+ "\021\n\tcloseFile\030\004 \002(\010\022\023\n\013deleteBlock\030\005 \002(\010\022" +
+ "%\n\013newTaragets\030\006 \003(\0132\020.DatanodeIDProto\")" +
+ "\n\'CommitBlockSynchronizationResponseProt" +
+ "o2\254\005\n\027DatanodeProtocolService\022Q\n\020registe" +
+ "rDatanode\022\035.RegisterDatanodeRequestProto" +
+ "\032\036.RegisterDatanodeResponseProto\022@\n\rsend" +
+ "Heartbeat\022\026.HeartbeatRequestProto\032\027.Hear" +
+ "tbeatResponseProto\022B\n\013blockReport\022\030.Bloc" +
+ "kReportRequestProto\032\031.BlockReportRespons" +
+ "eProto\022f\n\027blockReceivedAndDeleted\022$.Bloc",
+ "kReceivedAndDeletedRequestProto\032%.BlockR" +
+ "eceivedAndDeletedResponseProto\022B\n\013errorR" +
+ "eport\022\030.ErrorReportRequestProto\032\031.ErrorR" +
+ "eportResponseProto\022K\n\016processUpgrade\022\033.P" +
+ "rocessUpgradeRequestProto\032\034.ProcessUpgra" +
+ "deResponseProto\022N\n\017reportBadBlocks\022\034.Rep" +
+ "ortBadBlocksRequestProto\032\035.ReportBadBloc" +
+ "ksResponseProto\022o\n\032commitBlockSynchroniz" +
+ "ation\022\'.CommitBlockSynchronizationReques" +
+ "tProto\032(.CommitBlockSynchronizationRespo",
+ "nseProtoBE\n%org.apache.hadoop.hdfs.proto" +
+ "col.protoB\026DatanodeProtocolProtos\210\001\001\240\001\001"
+ };
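+    // Assigner callback invoked once the FileDescriptor has been built; it stores the root
+    // descriptor and wires up each message's descriptor and field accessor table.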
+ com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
+ new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
+ public com.google.protobuf.ExtensionRegistry assignDescriptors(
+ com.google.protobuf.Descriptors.FileDescriptor root) {
+ descriptor = root;
+ internal_static_DatanodeRegistrationProto_descriptor =
+ getDescriptor().getMessageTypes().get(0);
+ internal_static_DatanodeRegistrationProto_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_DatanodeRegistrationProto_descriptor,
+ new java.lang.String[] { "DatanodeID", "StorateInfo", "Keys", },
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.class,
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto.Builder.class);
+ internal_static_DatanodeCommandProto_descriptor =
+ getDescriptor().getMessageTypes().get(1);
+ internal_static_DatanodeCommandProto_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_DatanodeCommandProto_descriptor,
+ new java.lang.String[] { "CmdType", "BalancerCmd", "BlkCmd", "RecoveryCmd", "FinalizeCmd", "KeyUpdateCmd", "RegisterCmd", "UpgradeCmd", },
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.class,
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Builder.class);
+ internal_static_BalancerBandwidthCommandProto_descriptor =
+ getDescriptor().getMessageTypes().get(2);
+ internal_static_BalancerBandwidthCommandProto_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_BalancerBandwidthCommandProto_descriptor,
+ new java.lang.String[] { "Bandwidth", },
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto.class,
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto.Builder.class);
+ internal_static_BlockCommandProto_descriptor =
+ getDescriptor().getMessageTypes().get(3);
+ internal_static_BlockCommandProto_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_BlockCommandProto_descriptor,
+ new java.lang.String[] { "Action", "BlockPoolId", "Blocks", "Targets", },
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.class,
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.Builder.class);
+ internal_static_BlockRecoveryCommndProto_descriptor =
+ getDescriptor().getMessageTypes().get(4);
+ internal_static_BlockRecoveryCommndProto_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_BlockRecoveryCommndProto_descriptor,
+ new java.lang.String[] { "Blocks", },
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommndProto.class,
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommndProto.Builder.class);
+ internal_static_FinalizeCommandProto_descriptor =
+ getDescriptor().getMessageTypes().get(5);
+ internal_static_FinalizeCommandProto_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_FinalizeCommandProto_descriptor,
+ new java.lang.String[] { "BlockPoolId", },
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto.class,
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto.Builder.class);
+ internal_static_KeyUpdateCommandProto_descriptor =
+ getDescriptor().getMessageTypes().get(6);
+ internal_static_KeyUpdateCommandProto_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_KeyUpdateCommandProto_descriptor,
+ new java.lang.String[] { "Keys", },
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto.class,
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto.Builder.class);
+ internal_static_RegisterCommandProto_descriptor =
+ getDescriptor().getMessageTypes().get(7);
+ internal_static_RegisterCommandProto_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_RegisterCommandProto_descriptor,
+ new java.lang.String[] { },
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto.class,
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto.Builder.class);
+ internal_static_UpgradeCommandProto_descriptor =
+ getDescriptor().getMessageTypes().get(8);
+ internal_static_UpgradeCommandProto_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_UpgradeCommandProto_descriptor,
+ new java.lang.String[] { "Action", "Version", "UpgradeStatus", },
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.class,
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto.Builder.class);
+ internal_static_RegisterDatanodeRequestProto_descriptor =
+ getDescriptor().getMessageTypes().get(9);
+ internal_static_RegisterDatanodeRequestProto_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_RegisterDatanodeRequestProto_descriptor,
+ new java.lang.String[] { "Registration", },
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto.class,
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto.Builder.class);
+ internal_static_RegisterDatanodeResponseProto_descriptor =
+ getDescriptor().getMessageTypes().get(10);
+ internal_static_RegisterDatanodeResponseProto_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_RegisterDatanodeResponseProto_descriptor,
+ new java.lang.String[] { "Registration", },
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto.class,
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto.Builder.class);
+ internal_static_HeartbeatRequestProto_descriptor =
+ getDescriptor().getMessageTypes().get(11);
+ internal_static_HeartbeatRequestProto_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_HeartbeatRequestProto_descriptor,
+ new java.lang.String[] { "Registration", "Capacity", "DfsUsed", "Remaining", "BlockPoolUsed", "XmitsInProgress", "XceiverCount", "FailedVolumes", },
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto.class,
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto.Builder.class);
+ internal_static_HeartbeatResponseProto_descriptor =
+ getDescriptor().getMessageTypes().get(12);
+ internal_static_HeartbeatResponseProto_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_HeartbeatResponseProto_descriptor,
+ new java.lang.String[] { "Cmds", },
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto.class,
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto.Builder.class);
+ internal_static_BlockReportRequestProto_descriptor =
+ getDescriptor().getMessageTypes().get(13);
+ internal_static_BlockReportRequestProto_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_BlockReportRequestProto_descriptor,
+ new java.lang.String[] { "Registration", "BlockPoolId", "Blocks", },
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto.class,
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto.Builder.class);
+ internal_static_BlockReportResponseProto_descriptor =
+ getDescriptor().getMessageTypes().get(14);
+ internal_static_BlockReportResponseProto_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_BlockReportResponseProto_descriptor,
+ new java.lang.String[] { "Cmd", },
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto.class,
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto.Builder.class);
+ internal_static_ReceivedDeletedBlockInfoProto_descriptor =
+ getDescriptor().getMessageTypes().get(15);
+ internal_static_ReceivedDeletedBlockInfoProto_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_ReceivedDeletedBlockInfoProto_descriptor,
+ new java.lang.String[] { "Block", "DeleteHint", },
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto.class,
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto.Builder.class);
+ internal_static_BlockReceivedAndDeletedRequestProto_descriptor =
+ getDescriptor().getMessageTypes().get(16);
+ internal_static_BlockReceivedAndDeletedRequestProto_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_BlockReceivedAndDeletedRequestProto_descriptor,
+ new java.lang.String[] { "Registration", "BlockPoolId", "Blocks", },
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto.class,
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto.Builder.class);
+ internal_static_BlockReceivedAndDeletedResponseProto_descriptor =
+ getDescriptor().getMessageTypes().get(17);
+ internal_static_BlockReceivedAndDeletedResponseProto_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_BlockReceivedAndDeletedResponseProto_descriptor,
+ new java.lang.String[] { },
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto.class,
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto.Builder.class);
+ internal_static_ErrorReportRequestProto_descriptor =
+ getDescriptor().getMessageTypes().get(18);
+ internal_static_ErrorReportRequestProto_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_ErrorReportRequestProto_descriptor,
+ new java.lang.String[] { "Registartion", "ErrorCode", "Msg", },
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto.class,
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto.Builder.class);
+ internal_static_ErrorReportResponseProto_descriptor =
+ getDescriptor().getMessageTypes().get(19);
+ internal_static_ErrorReportResponseProto_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_ErrorReportResponseProto_descriptor,
+ new java.lang.String[] { },
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto.class,
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto.Builder.class);
+ internal_static_ProcessUpgradeRequestProto_descriptor =
+ getDescriptor().getMessageTypes().get(20);
+ internal_static_ProcessUpgradeRequestProto_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_ProcessUpgradeRequestProto_descriptor,
+ new java.lang.String[] { "Cmd", },
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeRequestProto.class,
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeRequestProto.Builder.class);
+ internal_static_ProcessUpgradeResponseProto_descriptor =
+ getDescriptor().getMessageTypes().get(21);
+ internal_static_ProcessUpgradeResponseProto_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_ProcessUpgradeResponseProto_descriptor,
+ new java.lang.String[] { "Cmd", },
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeResponseProto.class,
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeResponseProto.Builder.class);
+ internal_static_ReportBadBlocksRequestProto_descriptor =
+ getDescriptor().getMessageTypes().get(22);
+ internal_static_ReportBadBlocksRequestProto_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_ReportBadBlocksRequestProto_descriptor,
+ new java.lang.String[] { "Blocks", },
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto.class,
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto.Builder.class);
+ internal_static_ReportBadBlocksResponseProto_descriptor =
+ getDescriptor().getMessageTypes().get(23);
+ internal_static_ReportBadBlocksResponseProto_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_ReportBadBlocksResponseProto_descriptor,
+ new java.lang.String[] { },
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto.class,
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto.Builder.class);
+ internal_static_CommitBlockSynchronizationRequestProto_descriptor =
+ getDescriptor().getMessageTypes().get(24);
+ internal_static_CommitBlockSynchronizationRequestProto_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_CommitBlockSynchronizationRequestProto_descriptor,
+ new java.lang.String[] { "Block", "NewGenStamp", "NewLength", "CloseFile", "DeleteBlock", "NewTaragets", },
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto.class,
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto.Builder.class);
+ internal_static_CommitBlockSynchronizationResponseProto_descriptor =
+ getDescriptor().getMessageTypes().get(25);
+ internal_static_CommitBlockSynchronizationResponseProto_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_CommitBlockSynchronizationResponseProto_descriptor,
+ new java.lang.String[] { },
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto.class,
+ org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto.Builder.class);
+ return null;
+ }
+ };
+ com.google.protobuf.Descriptors.FileDescriptor
+ .internalBuildGeneratedFileFrom(descriptorData,
+ new com.google.protobuf.Descriptors.FileDescriptor[] {
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.getDescriptor(),
+ }, assigner);
+ }
+
+ // @@protoc_insertion_point(outer_class_scope)
+}