@@ -0,0 +1,2234 @@
+// Generated by the protocol buffer compiler. DO NOT EDIT!
+// source: JournalProtocol.proto
+
+package org.apache.hadoop.hdfs.protocol.proto;
+
+public final class JournalProtocolProtos {
+ private JournalProtocolProtos() {}
+ public static void registerAllExtensions(
+ com.google.protobuf.ExtensionRegistry registry) {
+ }
+ public interface JournalRequestProtoOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // required .NamenodeRegistrationProto registration = 1;
+ boolean hasRegistration();
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto getRegistration();
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProtoOrBuilder getRegistrationOrBuilder();
+
+ // required uint64 firstTxnId = 2;
+ boolean hasFirstTxnId();
+ long getFirstTxnId();
+
+ // required uint32 numTxns = 3;
+ boolean hasNumTxns();
+ int getNumTxns();
+
+ // required bytes records = 4;
+ boolean hasRecords();
+ com.google.protobuf.ByteString getRecords();
+ }
+ public static final class JournalRequestProto extends
+ com.google.protobuf.GeneratedMessage
+ implements JournalRequestProtoOrBuilder {
+ // Use JournalRequestProto.newBuilder() to construct.
+ private JournalRequestProto(Builder builder) {
+ super(builder);
+ }
+ private JournalRequestProto(boolean noInit) {}
+
+ private static final JournalRequestProto defaultInstance;
+ public static JournalRequestProto getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public JournalRequestProto getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.internal_static_JournalRequestProto_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.internal_static_JournalRequestProto_fieldAccessorTable;
+ }
+
+ private int bitField0_;
+ // required .NamenodeRegistrationProto registration = 1;
+ public static final int REGISTRATION_FIELD_NUMBER = 1;
+ private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto registration_;
+ public boolean hasRegistration() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto getRegistration() {
+ return registration_;
+ }
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProtoOrBuilder getRegistrationOrBuilder() {
+ return registration_;
+ }
+
+ // required uint64 firstTxnId = 2;
+ public static final int FIRSTTXNID_FIELD_NUMBER = 2;
+ private long firstTxnId_;
+ public boolean hasFirstTxnId() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ public long getFirstTxnId() {
+ return firstTxnId_;
+ }
+
+ // required uint32 numTxns = 3;
+ public static final int NUMTXNS_FIELD_NUMBER = 3;
+ private int numTxns_;
+ public boolean hasNumTxns() {
+ return ((bitField0_ & 0x00000004) == 0x00000004);
+ }
+ public int getNumTxns() {
+ return numTxns_;
+ }
+
+ // required bytes records = 4;
+ public static final int RECORDS_FIELD_NUMBER = 4;
+ private com.google.protobuf.ByteString records_;
+ public boolean hasRecords() {
+ return ((bitField0_ & 0x00000008) == 0x00000008);
+ }
+ public com.google.protobuf.ByteString getRecords() {
+ return records_;
+ }
+
+ private void initFields() {
+ registration_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.getDefaultInstance();
+ firstTxnId_ = 0L;
+ numTxns_ = 0;
+ records_ = com.google.protobuf.ByteString.EMPTY;
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ if (!hasRegistration()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!hasFirstTxnId()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!hasNumTxns()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!hasRecords()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!getRegistration().isInitialized()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeMessage(1, registration_);
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ output.writeUInt64(2, firstTxnId_);
+ }
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ output.writeUInt32(3, numTxns_);
+ }
+ if (((bitField0_ & 0x00000008) == 0x00000008)) {
+ output.writeBytes(4, records_);
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(1, registration_);
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeUInt64Size(2, firstTxnId_);
+ }
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeUInt32Size(3, numTxns_);
+ }
+ if (((bitField0_ & 0x00000008) == 0x00000008)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(4, records_);
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto)) {
+ return super.equals(obj);
+ }
+ org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto) obj;
+
|
|
|
+ boolean result = true;
|
|
|
+ result = result && (hasRegistration() == other.hasRegistration());
|
|
|
+ if (hasRegistration()) {
|
|
|
+ result = result && getRegistration()
|
|
|
+ .equals(other.getRegistration());
|
|
|
+ }
|
|
|
+ result = result && (hasFirstTxnId() == other.hasFirstTxnId());
|
|
|
+ if (hasFirstTxnId()) {
|
|
|
+ result = result && (getFirstTxnId()
|
|
|
+ == other.getFirstTxnId());
|
|
|
+ }
|
|
|
+ result = result && (hasNumTxns() == other.hasNumTxns());
|
|
|
+ if (hasNumTxns()) {
|
|
|
+ result = result && (getNumTxns()
|
|
|
+ == other.getNumTxns());
|
|
|
+ }
|
|
|
+ result = result && (hasRecords() == other.hasRecords());
|
|
|
+ if (hasRecords()) {
|
|
|
+ result = result && getRecords()
|
|
|
+ .equals(other.getRecords());
|
|
|
+ }
|
|
|
+ result = result &&
|
|
|
+ getUnknownFields().equals(other.getUnknownFields());
|
|
|
+ return result;
|
|
|
+ }
|
|
|
+
|
|
|
+ @java.lang.Override
|
|
|
+ public int hashCode() {
|
|
|
+ int hash = 41;
|
|
|
+ hash = (19 * hash) + getDescriptorForType().hashCode();
|
|
|
+ if (hasRegistration()) {
|
|
|
+ hash = (37 * hash) + REGISTRATION_FIELD_NUMBER;
|
|
|
+ hash = (53 * hash) + getRegistration().hashCode();
|
|
|
+ }
|
|
|
+ if (hasFirstTxnId()) {
|
|
|
+ hash = (37 * hash) + FIRSTTXNID_FIELD_NUMBER;
|
|
|
+ hash = (53 * hash) + hashLong(getFirstTxnId());
|
|
|
+ }
|
|
|
+ if (hasNumTxns()) {
|
|
|
+ hash = (37 * hash) + NUMTXNS_FIELD_NUMBER;
|
|
|
+ hash = (53 * hash) + getNumTxns();
|
|
|
+ }
|
|
|
+ if (hasRecords()) {
|
|
|
+ hash = (37 * hash) + RECORDS_FIELD_NUMBER;
|
|
|
+ hash = (53 * hash) + getRecords().hashCode();
|
|
|
+ }
|
|
|
+ hash = (29 * hash) + getUnknownFields().hashCode();
|
|
|
+ return hash;
|
|
|
+ }
|
|
|
+
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto parseFrom(
|
|
|
+ com.google.protobuf.ByteString data)
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
+ return newBuilder().mergeFrom(data).buildParsed();
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto parseFrom(
|
|
|
+ com.google.protobuf.ByteString data,
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
+ return newBuilder().mergeFrom(data, extensionRegistry)
|
|
|
+ .buildParsed();
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto parseFrom(byte[] data)
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
+ return newBuilder().mergeFrom(data).buildParsed();
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto parseFrom(
|
|
|
+ byte[] data,
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
+ return newBuilder().mergeFrom(data, extensionRegistry)
|
|
|
+ .buildParsed();
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto parseFrom(java.io.InputStream input)
|
|
|
+ throws java.io.IOException {
|
|
|
+ return newBuilder().mergeFrom(input).buildParsed();
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto parseFrom(
|
|
|
+ java.io.InputStream input,
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
+ throws java.io.IOException {
|
|
|
+ return newBuilder().mergeFrom(input, extensionRegistry)
|
|
|
+ .buildParsed();
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto parseDelimitedFrom(java.io.InputStream input)
|
|
|
+ throws java.io.IOException {
|
|
|
+ Builder builder = newBuilder();
|
|
|
+ if (builder.mergeDelimitedFrom(input)) {
|
|
|
+ return builder.buildParsed();
|
|
|
+ } else {
|
|
|
+ return null;
|
|
|
+ }
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto parseDelimitedFrom(
|
|
|
+ java.io.InputStream input,
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
+ throws java.io.IOException {
|
|
|
+ Builder builder = newBuilder();
|
|
|
+ if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
|
|
|
+ return builder.buildParsed();
|
|
|
+ } else {
|
|
|
+ return null;
|
|
|
+ }
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto parseFrom(
|
|
|
+ com.google.protobuf.CodedInputStream input)
|
|
|
+ throws java.io.IOException {
|
|
|
+ return newBuilder().mergeFrom(input).buildParsed();
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto parseFrom(
|
|
|
+ com.google.protobuf.CodedInputStream input,
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
+ throws java.io.IOException {
|
|
|
+ return newBuilder().mergeFrom(input, extensionRegistry)
|
|
|
+ .buildParsed();
|
|
|
+ }
|
|
|
+
|
|
|
+ public static Builder newBuilder() { return Builder.create(); }
|
|
|
+ public Builder newBuilderForType() { return newBuilder(); }
|
|
|
+ public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto prototype) {
|
|
|
+ return newBuilder().mergeFrom(prototype);
|
|
|
+ }
|
|
|
+ public Builder toBuilder() { return newBuilder(this); }
|
|
|
+
|
|
|
+ @java.lang.Override
|
|
|
+ protected Builder newBuilderForType(
|
|
|
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
|
|
|
+ Builder builder = new Builder(parent);
|
|
|
+ return builder;
|
|
|
+ }
|
|
|
+ public static final class Builder extends
|
|
|
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
|
|
|
+ implements org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProtoOrBuilder {
|
|
|
+ public static final com.google.protobuf.Descriptors.Descriptor
|
|
|
+ getDescriptor() {
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.internal_static_JournalRequestProto_descriptor;
|
|
|
+ }
|
|
|
+
|
|
|
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
|
|
+ internalGetFieldAccessorTable() {
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.internal_static_JournalRequestProto_fieldAccessorTable;
|
|
|
+ }
|
|
|
+
|
|
|
+ // Construct using org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto.newBuilder()
|
|
|
+ private Builder() {
|
|
|
+ maybeForceBuilderInitialization();
|
|
|
+ }
|
|
|
+
|
|
|
+ private Builder(BuilderParent parent) {
|
|
|
+ super(parent);
|
|
|
+ maybeForceBuilderInitialization();
|
|
|
+ }
|
|
|
+ private void maybeForceBuilderInitialization() {
|
|
|
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
|
|
|
+ getRegistrationFieldBuilder();
|
|
|
+ }
|
|
|
+ }
|
|
|
+ private static Builder create() {
|
|
|
+ return new Builder();
|
|
|
+ }
|
|
|
+
|
|
|
+ public Builder clear() {
|
|
|
+ super.clear();
|
|
|
+ if (registrationBuilder_ == null) {
|
|
|
+ registration_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.getDefaultInstance();
|
|
|
+ } else {
|
|
|
+ registrationBuilder_.clear();
|
|
|
+ }
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000001);
|
|
|
+ firstTxnId_ = 0L;
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000002);
|
|
|
+ numTxns_ = 0;
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000004);
|
|
|
+ records_ = com.google.protobuf.ByteString.EMPTY;
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000008);
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+
|
|
|
+ public Builder clone() {
|
|
|
+ return create().mergeFrom(buildPartial());
|
|
|
+ }
|
|
|
+
|
|
|
+ public com.google.protobuf.Descriptors.Descriptor
|
|
|
+ getDescriptorForType() {
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto.getDescriptor();
|
|
|
+ }
|
|
|
+
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto getDefaultInstanceForType() {
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto.getDefaultInstance();
|
|
|
+ }
|
|
|
+
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto build() {
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto result = buildPartial();
|
|
|
+ if (!result.isInitialized()) {
|
|
|
+ throw newUninitializedMessageException(result);
|
|
|
+ }
|
|
|
+ return result;
|
|
|
+ }
|
|
|
+
|
|
|
+ private org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto buildParsed()
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto result = buildPartial();
|
|
|
+ if (!result.isInitialized()) {
|
|
|
+ throw newUninitializedMessageException(
|
|
|
+ result).asInvalidProtocolBufferException();
|
|
|
+ }
|
|
|
+ return result;
|
|
|
+ }
|
|
|
+
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto buildPartial() {
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto(this);
|
|
|
+ int from_bitField0_ = bitField0_;
|
|
|
+ int to_bitField0_ = 0;
|
|
|
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
+ to_bitField0_ |= 0x00000001;
|
|
|
+ }
|
|
|
+ if (registrationBuilder_ == null) {
|
|
|
+ result.registration_ = registration_;
|
|
|
+ } else {
|
|
|
+ result.registration_ = registrationBuilder_.build();
|
|
|
+ }
|
|
|
+ if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
|
|
|
+ to_bitField0_ |= 0x00000002;
|
|
|
+ }
|
|
|
+ result.firstTxnId_ = firstTxnId_;
|
|
|
+ if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
|
|
|
+ to_bitField0_ |= 0x00000004;
|
|
|
+ }
|
|
|
+ result.numTxns_ = numTxns_;
|
|
|
+ if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
|
|
|
+ to_bitField0_ |= 0x00000008;
|
|
|
+ }
|
|
|
+ result.records_ = records_;
|
|
|
+ result.bitField0_ = to_bitField0_;
|
|
|
+ onBuilt();
|
|
|
+ return result;
|
|
|
+ }
|
|
|
+
|
|
|
+ public Builder mergeFrom(com.google.protobuf.Message other) {
|
|
|
+ if (other instanceof org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto) {
|
|
|
+ return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto)other);
|
|
|
+ } else {
|
|
|
+ super.mergeFrom(other);
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+ }
|
|
|
+
|
|
|
+ public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto other) {
|
|
|
+ if (other == org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto.getDefaultInstance()) return this;
|
|
|
+ if (other.hasRegistration()) {
|
|
|
+ mergeRegistration(other.getRegistration());
|
|
|
+ }
|
|
|
+ if (other.hasFirstTxnId()) {
|
|
|
+ setFirstTxnId(other.getFirstTxnId());
|
|
|
+ }
|
|
|
+ if (other.hasNumTxns()) {
|
|
|
+ setNumTxns(other.getNumTxns());
|
|
|
+ }
|
|
|
+ if (other.hasRecords()) {
|
|
|
+ setRecords(other.getRecords());
|
|
|
+ }
|
|
|
+ this.mergeUnknownFields(other.getUnknownFields());
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+
|
|
|
+ public final boolean isInitialized() {
|
|
|
+ if (!hasRegistration()) {
|
|
|
+
|
|
|
+ return false;
|
|
|
+ }
|
|
|
+ if (!hasFirstTxnId()) {
|
|
|
+
|
|
|
+ return false;
|
|
|
+ }
|
|
|
+ if (!hasNumTxns()) {
|
|
|
+
|
|
|
+ return false;
|
|
|
+ }
|
|
|
+ if (!hasRecords()) {
|
|
|
+
|
|
|
+ return false;
|
|
|
+ }
|
|
|
+ if (!getRegistration().isInitialized()) {
|
|
|
+
|
|
|
+ return false;
|
|
|
+ }
|
|
|
+ return true;
|
|
|
+ }
|
|
|
+
|
|
|
+ public Builder mergeFrom(
|
|
|
+ com.google.protobuf.CodedInputStream input,
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
+ throws java.io.IOException {
|
|
|
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
|
|
|
+ com.google.protobuf.UnknownFieldSet.newBuilder(
|
|
|
+ this.getUnknownFields());
|
|
|
+ while (true) {
|
|
|
+ int tag = input.readTag();
|
|
|
+ switch (tag) {
|
|
|
+ case 0:
|
|
|
+ this.setUnknownFields(unknownFields.build());
|
|
|
+ onChanged();
|
|
|
+ return this;
|
|
|
+ default: {
|
|
|
+ if (!parseUnknownField(input, unknownFields,
|
|
|
+ extensionRegistry, tag)) {
|
|
|
+ this.setUnknownFields(unknownFields.build());
|
|
|
+ onChanged();
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+ break;
|
|
|
+ }
|
|
|
+ case 10: {
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.newBuilder();
|
|
|
+ if (hasRegistration()) {
|
|
|
+ subBuilder.mergeFrom(getRegistration());
|
|
|
+ }
|
|
|
+ input.readMessage(subBuilder, extensionRegistry);
|
|
|
+ setRegistration(subBuilder.buildPartial());
|
|
|
+ break;
|
|
|
+ }
|
|
|
+ case 16: {
|
|
|
+ bitField0_ |= 0x00000002;
|
|
|
+ firstTxnId_ = input.readUInt64();
|
|
|
+ break;
|
|
|
+ }
|
|
|
+ case 24: {
|
|
|
+ bitField0_ |= 0x00000004;
|
|
|
+ numTxns_ = input.readUInt32();
|
|
|
+ break;
|
|
|
+ }
|
|
|
+ case 34: {
|
|
|
+ bitField0_ |= 0x00000008;
|
|
|
+ records_ = input.readBytes();
|
|
|
+ break;
|
|
|
+ }
|
|
|
+ }
|
|
|
+ }
|
|
|
+ }
|
|
|
+
|
|
|
+ private int bitField0_;
|
|
|
+
|
|
|
+ // required .NamenodeRegistrationProto registration = 1;
|
|
|
+ private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto registration_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.getDefaultInstance();
|
|
|
+ private com.google.protobuf.SingleFieldBuilder<
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProtoOrBuilder> registrationBuilder_;
|
|
|
+ public boolean hasRegistration() {
|
|
|
+ return ((bitField0_ & 0x00000001) == 0x00000001);
|
|
|
+ }
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto getRegistration() {
|
|
|
+ if (registrationBuilder_ == null) {
|
|
|
+ return registration_;
|
|
|
+ } else {
|
|
|
+ return registrationBuilder_.getMessage();
|
|
|
+ }
|
|
|
+ }
|
|
|
+ public Builder setRegistration(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto value) {
|
|
|
+ if (registrationBuilder_ == null) {
|
|
|
+ if (value == null) {
|
|
|
+ throw new NullPointerException();
|
|
|
+ }
|
|
|
+ registration_ = value;
|
|
|
+ onChanged();
|
|
|
+ } else {
|
|
|
+ registrationBuilder_.setMessage(value);
|
|
|
+ }
|
|
|
+ bitField0_ |= 0x00000001;
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+ public Builder setRegistration(
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.Builder builderForValue) {
|
|
|
+ if (registrationBuilder_ == null) {
|
|
|
+ registration_ = builderForValue.build();
|
|
|
+ onChanged();
|
|
|
+ } else {
|
|
|
+ registrationBuilder_.setMessage(builderForValue.build());
|
|
|
+ }
|
|
|
+ bitField0_ |= 0x00000001;
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+ public Builder mergeRegistration(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto value) {
|
|
|
+ if (registrationBuilder_ == null) {
|
|
|
+ if (((bitField0_ & 0x00000001) == 0x00000001) &&
|
|
|
+ registration_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.getDefaultInstance()) {
|
|
|
+ registration_ =
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.newBuilder(registration_).mergeFrom(value).buildPartial();
|
|
|
+ } else {
|
|
|
+ registration_ = value;
|
|
|
+ }
|
|
|
+ onChanged();
|
|
|
+ } else {
|
|
|
+ registrationBuilder_.mergeFrom(value);
|
|
|
+ }
|
|
|
+ bitField0_ |= 0x00000001;
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+ public Builder clearRegistration() {
|
|
|
+ if (registrationBuilder_ == null) {
|
|
|
+ registration_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.getDefaultInstance();
|
|
|
+ onChanged();
|
|
|
+ } else {
|
|
|
+ registrationBuilder_.clear();
|
|
|
+ }
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000001);
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.Builder getRegistrationBuilder() {
|
|
|
+ bitField0_ |= 0x00000001;
|
|
|
+ onChanged();
|
|
|
+ return getRegistrationFieldBuilder().getBuilder();
|
|
|
+ }
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProtoOrBuilder getRegistrationOrBuilder() {
|
|
|
+ if (registrationBuilder_ != null) {
|
|
|
+ return registrationBuilder_.getMessageOrBuilder();
|
|
|
+ } else {
|
|
|
+ return registration_;
|
|
|
+ }
|
|
|
+ }
|
|
|
+ private com.google.protobuf.SingleFieldBuilder<
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProtoOrBuilder>
|
|
|
+ getRegistrationFieldBuilder() {
|
|
|
+ if (registrationBuilder_ == null) {
|
|
|
+ registrationBuilder_ = new com.google.protobuf.SingleFieldBuilder<
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProtoOrBuilder>(
|
|
|
+ registration_,
|
|
|
+ getParentForChildren(),
|
|
|
+ isClean());
|
|
|
+ registration_ = null;
|
|
|
+ }
|
|
|
+ return registrationBuilder_;
|
|
|
+ }
|
|
|
+
|
|
|
+ // required uint64 firstTxnId = 2;
|
|
|
+ private long firstTxnId_ ;
|
|
|
+ public boolean hasFirstTxnId() {
|
|
|
+ return ((bitField0_ & 0x00000002) == 0x00000002);
|
|
|
+ }
|
|
|
+ public long getFirstTxnId() {
|
|
|
+ return firstTxnId_;
|
|
|
+ }
|
|
|
+ public Builder setFirstTxnId(long value) {
|
|
|
+ bitField0_ |= 0x00000002;
|
|
|
+ firstTxnId_ = value;
|
|
|
+ onChanged();
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+ public Builder clearFirstTxnId() {
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000002);
|
|
|
+ firstTxnId_ = 0L;
|
|
|
+ onChanged();
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+
|
|
|
+ // required uint32 numTxns = 3;
|
|
|
+ private int numTxns_ ;
|
|
|
+ public boolean hasNumTxns() {
|
|
|
+ return ((bitField0_ & 0x00000004) == 0x00000004);
|
|
|
+ }
|
|
|
+ public int getNumTxns() {
|
|
|
+ return numTxns_;
|
|
|
+ }
|
|
|
+ public Builder setNumTxns(int value) {
|
|
|
+ bitField0_ |= 0x00000004;
|
|
|
+ numTxns_ = value;
|
|
|
+ onChanged();
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+ public Builder clearNumTxns() {
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000004);
|
|
|
+ numTxns_ = 0;
|
|
|
+ onChanged();
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+
|
|
|
+ // required bytes records = 4;
|
|
|
+ private com.google.protobuf.ByteString records_ = com.google.protobuf.ByteString.EMPTY;
|
|
|
+ public boolean hasRecords() {
|
|
|
+ return ((bitField0_ & 0x00000008) == 0x00000008);
|
|
|
+ }
|
|
|
+ public com.google.protobuf.ByteString getRecords() {
|
|
|
+ return records_;
|
|
|
+ }
|
|
|
+ public Builder setRecords(com.google.protobuf.ByteString value) {
|
|
|
+ if (value == null) {
|
|
|
+ throw new NullPointerException();
|
|
|
+ }
|
|
|
+ bitField0_ |= 0x00000008;
|
|
|
+ records_ = value;
|
|
|
+ onChanged();
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+ public Builder clearRecords() {
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000008);
|
|
|
+ records_ = getDefaultInstance().getRecords();
|
|
|
+ onChanged();
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+
|
|
|
+ // @@protoc_insertion_point(builder_scope:JournalRequestProto)
|
|
|
+ }
|
|
|
+
|
|
|
+ static {
|
|
|
+ defaultInstance = new JournalRequestProto(true);
|
|
|
+ defaultInstance.initFields();
|
|
|
+ }
|
|
|
+
|
|
|
+ // @@protoc_insertion_point(class_scope:JournalRequestProto)
|
|
|
+ }
|
|
|
+
|
|
|
+ public interface JournalResponseProtoOrBuilder
|
|
|
+ extends com.google.protobuf.MessageOrBuilder {
|
|
|
+ }
|
|
|
+ public static final class JournalResponseProto extends
|
|
|
+ com.google.protobuf.GeneratedMessage
|
|
|
+ implements JournalResponseProtoOrBuilder {
|
|
|
+ // Use JournalResponseProto.newBuilder() to construct.
|
|
|
+ private JournalResponseProto(Builder builder) {
|
|
|
+ super(builder);
|
|
|
+ }
|
|
|
+ private JournalResponseProto(boolean noInit) {}
|
|
|
+
|
|
|
+ private static final JournalResponseProto defaultInstance;
|
|
|
+ public static JournalResponseProto getDefaultInstance() {
|
|
|
+ return defaultInstance;
|
|
|
+ }
|
|
|
+
|
|
|
+ public JournalResponseProto getDefaultInstanceForType() {
|
|
|
+ return defaultInstance;
|
|
|
+ }
|
|
|
+
|
|
|
+ public static final com.google.protobuf.Descriptors.Descriptor
|
|
|
+ getDescriptor() {
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.internal_static_JournalResponseProto_descriptor;
|
|
|
+ }
|
|
|
+
|
|
|
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
|
|
+ internalGetFieldAccessorTable() {
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.internal_static_JournalResponseProto_fieldAccessorTable;
|
|
|
+ }
|
|
|
+
|
|
|
+ private void initFields() {
|
|
|
+ }
|
|
|
+ private byte memoizedIsInitialized = -1;
|
|
|
+ public final boolean isInitialized() {
|
|
|
+ byte isInitialized = memoizedIsInitialized;
|
|
|
+ if (isInitialized != -1) return isInitialized == 1;
|
|
|
+
|
|
|
+ memoizedIsInitialized = 1;
|
|
|
+ return true;
|
|
|
+ }
|
|
|
+
|
|
|
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
|
|
|
+ throws java.io.IOException {
|
|
|
+ getSerializedSize();
|
|
|
+ getUnknownFields().writeTo(output);
|
|
|
+ }
|
|
|
+
|
|
|
+ private int memoizedSerializedSize = -1;
|
|
|
+ public int getSerializedSize() {
|
|
|
+ int size = memoizedSerializedSize;
|
|
|
+ if (size != -1) return size;
|
|
|
+
|
|
|
+ size = 0;
|
|
|
+ size += getUnknownFields().getSerializedSize();
|
|
|
+ memoizedSerializedSize = size;
|
|
|
+ return size;
|
|
|
+ }
|
|
|
+
|
|
|
+ private static final long serialVersionUID = 0L;
|
|
|
+ @java.lang.Override
|
|
|
+ protected java.lang.Object writeReplace()
|
|
|
+ throws java.io.ObjectStreamException {
|
|
|
+ return super.writeReplace();
|
|
|
+ }
|
|
|
+
|
|
|
+ @java.lang.Override
|
|
|
+ public boolean equals(final java.lang.Object obj) {
|
|
|
+ if (obj == this) {
|
|
|
+ return true;
|
|
|
+ }
|
|
|
+ if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto)) {
|
|
|
+ return super.equals(obj);
|
|
|
+ }
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto) obj;
|
|
|
+
|
|
|
+ boolean result = true;
|
|
|
+ result = result &&
|
|
|
+ getUnknownFields().equals(other.getUnknownFields());
|
|
|
+ return result;
|
|
|
+ }
|
|
|
+
|
|
|
+ @java.lang.Override
|
|
|
+ public int hashCode() {
|
|
|
+ int hash = 41;
|
|
|
+ hash = (19 * hash) + getDescriptorForType().hashCode();
|
|
|
+ hash = (29 * hash) + getUnknownFields().hashCode();
|
|
|
+ return hash;
|
|
|
+ }
|
|
|
+
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto parseFrom(
|
|
|
+ com.google.protobuf.ByteString data)
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
+ return newBuilder().mergeFrom(data).buildParsed();
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto parseFrom(
|
|
|
+ com.google.protobuf.ByteString data,
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
+ return newBuilder().mergeFrom(data, extensionRegistry)
|
|
|
+ .buildParsed();
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto parseFrom(byte[] data)
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
+ return newBuilder().mergeFrom(data).buildParsed();
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto parseFrom(
|
|
|
+ byte[] data,
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
+ return newBuilder().mergeFrom(data, extensionRegistry)
|
|
|
+ .buildParsed();
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto parseFrom(java.io.InputStream input)
|
|
|
+ throws java.io.IOException {
|
|
|
+ return newBuilder().mergeFrom(input).buildParsed();
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto parseFrom(
|
|
|
+ java.io.InputStream input,
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
+ throws java.io.IOException {
|
|
|
+ return newBuilder().mergeFrom(input, extensionRegistry)
|
|
|
+ .buildParsed();
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto parseDelimitedFrom(java.io.InputStream input)
|
|
|
+ throws java.io.IOException {
|
|
|
+ Builder builder = newBuilder();
|
|
|
+ if (builder.mergeDelimitedFrom(input)) {
|
|
|
+ return builder.buildParsed();
|
|
|
+ } else {
|
|
|
+ return null;
|
|
|
+ }
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto parseDelimitedFrom(
|
|
|
+ java.io.InputStream input,
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
+ throws java.io.IOException {
|
|
|
+ Builder builder = newBuilder();
|
|
|
+ if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
|
|
|
+ return builder.buildParsed();
|
|
|
+ } else {
|
|
|
+ return null;
|
|
|
+ }
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto parseFrom(
|
|
|
+ com.google.protobuf.CodedInputStream input)
|
|
|
+ throws java.io.IOException {
|
|
|
+ return newBuilder().mergeFrom(input).buildParsed();
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto parseFrom(
|
|
|
+ com.google.protobuf.CodedInputStream input,
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
+ throws java.io.IOException {
|
|
|
+ return newBuilder().mergeFrom(input, extensionRegistry)
|
|
|
+ .buildParsed();
|
|
|
+ }
|
|
|
+
|
|
|
+ public static Builder newBuilder() { return Builder.create(); }
|
|
|
+ public Builder newBuilderForType() { return newBuilder(); }
|
|
|
+ public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto prototype) {
|
|
|
+ return newBuilder().mergeFrom(prototype);
|
|
|
+ }
|
|
|
+ public Builder toBuilder() { return newBuilder(this); }
|
|
|
+
|
|
|
+ @java.lang.Override
|
|
|
+ protected Builder newBuilderForType(
|
|
|
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
|
|
|
+ Builder builder = new Builder(parent);
|
|
|
+ return builder;
|
|
|
+ }
|
|
|
+ public static final class Builder extends
|
|
|
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
|
|
|
+ implements org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProtoOrBuilder {
|
|
|
+ public static final com.google.protobuf.Descriptors.Descriptor
|
|
|
+ getDescriptor() {
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.internal_static_JournalResponseProto_descriptor;
|
|
|
+ }
|
|
|
+
|
|
|
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
|
|
+ internalGetFieldAccessorTable() {
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.internal_static_JournalResponseProto_fieldAccessorTable;
|
|
|
+ }
|
|
|
+
|
|
|
+ // Construct using org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto.newBuilder()
|
|
|
+ private Builder() {
|
|
|
+ maybeForceBuilderInitialization();
|
|
|
+ }
|
|
|
+
|
|
|
+ private Builder(BuilderParent parent) {
|
|
|
+ super(parent);
|
|
|
+ maybeForceBuilderInitialization();
|
|
|
+ }
|
|
|
+ private void maybeForceBuilderInitialization() {
|
|
|
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
|
|
|
+ }
|
|
|
+ }
|
|
|
+ private static Builder create() {
|
|
|
+ return new Builder();
|
|
|
+ }
|
|
|
+
|
|
|
+ public Builder clear() {
|
|
|
+ super.clear();
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+
|
|
|
+ public Builder clone() {
|
|
|
+ return create().mergeFrom(buildPartial());
|
|
|
+ }
|
|
|
+
|
|
|
+ public com.google.protobuf.Descriptors.Descriptor
|
|
|
+ getDescriptorForType() {
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto.getDescriptor();
|
|
|
+ }
|
|
|
+
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto getDefaultInstanceForType() {
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto.getDefaultInstance();
|
|
|
+ }
|
|
|
+
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto build() {
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto result = buildPartial();
|
|
|
+ if (!result.isInitialized()) {
|
|
|
+ throw newUninitializedMessageException(result);
|
|
|
+ }
|
|
|
+ return result;
|
|
|
+ }
|
|
|
+
|
|
|
+ private org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto buildParsed()
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto result = buildPartial();
|
|
|
+ if (!result.isInitialized()) {
|
|
|
+ throw newUninitializedMessageException(
|
|
|
+ result).asInvalidProtocolBufferException();
|
|
|
+ }
|
|
|
+ return result;
|
|
|
+ }
|
|
|
+
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto buildPartial() {
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto(this);
|
|
|
+ onBuilt();
|
|
|
+ return result;
|
|
|
+ }
|
|
|
+
|
|
|
+ public Builder mergeFrom(com.google.protobuf.Message other) {
|
|
|
+ if (other instanceof org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto) {
|
|
|
+ return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto)other);
|
|
|
+ } else {
|
|
|
+ super.mergeFrom(other);
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+ }
|
|
|
+
|
|
|
+ public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto other) {
|
|
|
+ if (other == org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto.getDefaultInstance()) return this;
|
|
|
+ this.mergeUnknownFields(other.getUnknownFields());
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+
|
|
|
+ public final boolean isInitialized() {
|
|
|
+ return true;
|
|
|
+ }
|
|
|
+
|
|
|
+ public Builder mergeFrom(
|
|
|
+ com.google.protobuf.CodedInputStream input,
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
+ throws java.io.IOException {
|
|
|
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
|
|
|
+ com.google.protobuf.UnknownFieldSet.newBuilder(
|
|
|
+ this.getUnknownFields());
|
|
|
+ while (true) {
|
|
|
+ int tag = input.readTag();
|
|
|
+ switch (tag) {
|
|
|
+ case 0:
|
|
|
+ this.setUnknownFields(unknownFields.build());
|
|
|
+ onChanged();
|
|
|
+ return this;
|
|
|
+ default: {
|
|
|
+ if (!parseUnknownField(input, unknownFields,
|
|
|
+ extensionRegistry, tag)) {
|
|
|
+ this.setUnknownFields(unknownFields.build());
|
|
|
+ onChanged();
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+ break;
|
|
|
+ }
|
|
|
+ }
|
|
|
+ }
|
|
|
+ }
|
|
|
+
|
|
|
+
|
|
|
+ // @@protoc_insertion_point(builder_scope:JournalResponseProto)
|
|
|
+ }
|
|
|
+
|
|
|
+ static {
|
|
|
+ defaultInstance = new JournalResponseProto(true);
|
|
|
+ defaultInstance.initFields();
|
|
|
+ }
|
|
|
+
|
|
|
+ // @@protoc_insertion_point(class_scope:JournalResponseProto)
|
|
|
+ }
|
|
|
+
|
|
|
+ public interface StartLogSegmentRequestProtoOrBuilder
|
|
|
+ extends com.google.protobuf.MessageOrBuilder {
|
|
|
+
|
|
|
+ // required .NamenodeRegistrationProto registration = 1;
|
|
|
+ boolean hasRegistration();
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto getRegistration();
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProtoOrBuilder getRegistrationOrBuilder();
|
|
|
+
|
|
|
+ // required uint64 txid = 2;
|
|
|
+ boolean hasTxid();
|
|
|
+ long getTxid();
|
|
|
+ }
|
|
|
+ public static final class StartLogSegmentRequestProto extends
|
|
|
+ com.google.protobuf.GeneratedMessage
|
|
|
+ implements StartLogSegmentRequestProtoOrBuilder {
|
|
|
+ // Use StartLogSegmentRequestProto.newBuilder() to construct.
|
|
|
+ private StartLogSegmentRequestProto(Builder builder) {
|
|
|
+ super(builder);
|
|
|
+ }
|
|
|
+ private StartLogSegmentRequestProto(boolean noInit) {}
|
|
|
+
|
|
|
+ private static final StartLogSegmentRequestProto defaultInstance;
|
|
|
+ public static StartLogSegmentRequestProto getDefaultInstance() {
|
|
|
+ return defaultInstance;
|
|
|
+ }
|
|
|
+
|
|
|
+ public StartLogSegmentRequestProto getDefaultInstanceForType() {
|
|
|
+ return defaultInstance;
|
|
|
+ }
|
|
|
+
|
|
|
+ public static final com.google.protobuf.Descriptors.Descriptor
|
|
|
+ getDescriptor() {
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.internal_static_StartLogSegmentRequestProto_descriptor;
|
|
|
+ }
|
|
|
+
|
|
|
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
|
|
+ internalGetFieldAccessorTable() {
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.internal_static_StartLogSegmentRequestProto_fieldAccessorTable;
|
|
|
+ }
|
|
|
+
|
|
|
+ private int bitField0_;
|
|
|
+ // required .NamenodeRegistrationProto registration = 1;
|
|
|
+ public static final int REGISTRATION_FIELD_NUMBER = 1;
|
|
|
+ private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto registration_;
|
|
|
+ public boolean hasRegistration() {
|
|
|
+ return ((bitField0_ & 0x00000001) == 0x00000001);
|
|
|
+ }
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto getRegistration() {
|
|
|
+ return registration_;
|
|
|
+ }
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProtoOrBuilder getRegistrationOrBuilder() {
|
|
|
+ return registration_;
|
|
|
+ }
|
|
|
+
|
|
|
+ // required uint64 txid = 2;
|
|
|
+ public static final int TXID_FIELD_NUMBER = 2;
|
|
|
+ private long txid_;
|
|
|
+ public boolean hasTxid() {
|
|
|
+ return ((bitField0_ & 0x00000002) == 0x00000002);
|
|
|
+ }
|
|
|
+ public long getTxid() {
|
|
|
+ return txid_;
|
|
|
+ }
|
|
|
+
|
|
|
+ private void initFields() {
|
|
|
+ registration_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.getDefaultInstance();
|
|
|
+ txid_ = 0L;
|
|
|
+ }
|
|
|
+ private byte memoizedIsInitialized = -1;
|
|
|
+ public final boolean isInitialized() {
|
|
|
+ byte isInitialized = memoizedIsInitialized;
|
|
|
+ if (isInitialized != -1) return isInitialized == 1;
|
|
|
+
|
|
|
+ if (!hasRegistration()) {
|
|
|
+ memoizedIsInitialized = 0;
|
|
|
+ return false;
|
|
|
+ }
|
|
|
+ if (!hasTxid()) {
|
|
|
+ memoizedIsInitialized = 0;
|
|
|
+ return false;
|
|
|
+ }
|
|
|
+ if (!getRegistration().isInitialized()) {
|
|
|
+ memoizedIsInitialized = 0;
|
|
|
+ return false;
|
|
|
+ }
|
|
|
+ memoizedIsInitialized = 1;
|
|
|
+ return true;
|
|
|
+ }
|
|
|
+
|
|
|
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
|
|
|
+ throws java.io.IOException {
|
|
|
+ getSerializedSize();
|
|
|
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
+ output.writeMessage(1, registration_);
|
|
|
+ }
|
|
|
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
|
|
|
+ output.writeUInt64(2, txid_);
|
|
|
+ }
|
|
|
+ getUnknownFields().writeTo(output);
|
|
|
+ }
|
|
|
+
|
|
|
+ private int memoizedSerializedSize = -1;
|
|
|
+ public int getSerializedSize() {
|
|
|
+ int size = memoizedSerializedSize;
|
|
|
+ if (size != -1) return size;
|
|
|
+
|
|
|
+ size = 0;
|
|
|
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
+ size += com.google.protobuf.CodedOutputStream
|
|
|
+ .computeMessageSize(1, registration_);
|
|
|
+ }
|
|
|
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
|
|
|
+ size += com.google.protobuf.CodedOutputStream
|
|
|
+ .computeUInt64Size(2, txid_);
|
|
|
+ }
|
|
|
+ size += getUnknownFields().getSerializedSize();
|
|
|
+ memoizedSerializedSize = size;
|
|
|
+ return size;
|
|
|
+ }
|
|
|
+
|
|
|
+ private static final long serialVersionUID = 0L;
|
|
|
+ @java.lang.Override
|
|
|
+ protected java.lang.Object writeReplace()
|
|
|
+ throws java.io.ObjectStreamException {
|
|
|
+ return super.writeReplace();
|
|
|
+ }
|
|
|
+
|
|
|
+ @java.lang.Override
|
|
|
+ public boolean equals(final java.lang.Object obj) {
|
|
|
+ if (obj == this) {
|
|
|
+ return true;
|
|
|
+ }
|
|
|
+ if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto)) {
|
|
|
+ return super.equals(obj);
|
|
|
+ }
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto) obj;
|
|
|
+
|
|
|
+ boolean result = true;
|
|
|
+ result = result && (hasRegistration() == other.hasRegistration());
|
|
|
+ if (hasRegistration()) {
|
|
|
+ result = result && getRegistration()
|
|
|
+ .equals(other.getRegistration());
|
|
|
+ }
|
|
|
+ result = result && (hasTxid() == other.hasTxid());
|
|
|
+ if (hasTxid()) {
|
|
|
+ result = result && (getTxid()
|
|
|
+ == other.getTxid());
|
|
|
+ }
|
|
|
+ result = result &&
|
|
|
+ getUnknownFields().equals(other.getUnknownFields());
|
|
|
+ return result;
|
|
|
+ }
|
|
|
+
|
|
|
+ @java.lang.Override
|
|
|
+ public int hashCode() {
|
|
|
+ int hash = 41;
|
|
|
+ hash = (19 * hash) + getDescriptorForType().hashCode();
|
|
|
+ if (hasRegistration()) {
|
|
|
+ hash = (37 * hash) + REGISTRATION_FIELD_NUMBER;
|
|
|
+ hash = (53 * hash) + getRegistration().hashCode();
|
|
|
+ }
|
|
|
+ if (hasTxid()) {
|
|
|
+ hash = (37 * hash) + TXID_FIELD_NUMBER;
|
|
|
+ hash = (53 * hash) + hashLong(getTxid());
|
|
|
+ }
|
|
|
+ hash = (29 * hash) + getUnknownFields().hashCode();
|
|
|
+ return hash;
|
|
|
+ }
|
|
|
+
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto parseFrom(
|
|
|
+ com.google.protobuf.ByteString data)
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
+ return newBuilder().mergeFrom(data).buildParsed();
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto parseFrom(
|
|
|
+ com.google.protobuf.ByteString data,
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
+ return newBuilder().mergeFrom(data, extensionRegistry)
|
|
|
+ .buildParsed();
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto parseFrom(byte[] data)
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
+ return newBuilder().mergeFrom(data).buildParsed();
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto parseFrom(
|
|
|
+ byte[] data,
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
+ return newBuilder().mergeFrom(data, extensionRegistry)
|
|
|
+ .buildParsed();
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto parseFrom(java.io.InputStream input)
|
|
|
+ throws java.io.IOException {
|
|
|
+ return newBuilder().mergeFrom(input).buildParsed();
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto parseFrom(
|
|
|
+ java.io.InputStream input,
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
+ throws java.io.IOException {
|
|
|
+ return newBuilder().mergeFrom(input, extensionRegistry)
|
|
|
+ .buildParsed();
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto parseDelimitedFrom(java.io.InputStream input)
|
|
|
+ throws java.io.IOException {
|
|
|
+ Builder builder = newBuilder();
|
|
|
+ if (builder.mergeDelimitedFrom(input)) {
|
|
|
+ return builder.buildParsed();
|
|
|
+ } else {
|
|
|
+ return null;
|
|
|
+ }
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto parseDelimitedFrom(
|
|
|
+ java.io.InputStream input,
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
+ throws java.io.IOException {
|
|
|
+ Builder builder = newBuilder();
|
|
|
+ if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
|
|
|
+ return builder.buildParsed();
|
|
|
+ } else {
|
|
|
+ return null;
|
|
|
+ }
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto parseFrom(
|
|
|
+ com.google.protobuf.CodedInputStream input)
|
|
|
+ throws java.io.IOException {
|
|
|
+ return newBuilder().mergeFrom(input).buildParsed();
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto parseFrom(
|
|
|
+ com.google.protobuf.CodedInputStream input,
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
+ throws java.io.IOException {
|
|
|
+ return newBuilder().mergeFrom(input, extensionRegistry)
|
|
|
+ .buildParsed();
|
|
|
+ }
|
|
|
+
|
|
|
+ public static Builder newBuilder() { return Builder.create(); }
|
|
|
+ public Builder newBuilderForType() { return newBuilder(); }
|
|
|
+ public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto prototype) {
|
|
|
+ return newBuilder().mergeFrom(prototype);
|
|
|
+ }
|
|
|
+ public Builder toBuilder() { return newBuilder(this); }
|
|
|
+
|
|
|
+ @java.lang.Override
|
|
|
+ protected Builder newBuilderForType(
|
|
|
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
|
|
|
+ Builder builder = new Builder(parent);
|
|
|
+ return builder;
|
|
|
+ }
|
|
|
+ public static final class Builder extends
|
|
|
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
|
|
|
+ implements org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProtoOrBuilder {
|
|
|
+ public static final com.google.protobuf.Descriptors.Descriptor
|
|
|
+ getDescriptor() {
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.internal_static_StartLogSegmentRequestProto_descriptor;
|
|
|
+ }
|
|
|
+
|
|
|
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
|
|
+ internalGetFieldAccessorTable() {
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.internal_static_StartLogSegmentRequestProto_fieldAccessorTable;
|
|
|
+ }
|
|
|
+
|
|
|
+ // Construct using org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto.newBuilder()
|
|
|
+ private Builder() {
|
|
|
+ maybeForceBuilderInitialization();
|
|
|
+ }
|
|
|
+
|
|
|
+ private Builder(BuilderParent parent) {
|
|
|
+ super(parent);
|
|
|
+ maybeForceBuilderInitialization();
|
|
|
+ }
|
|
|
+ private void maybeForceBuilderInitialization() {
|
|
|
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
|
|
|
+ getRegistrationFieldBuilder();
|
|
|
+ }
|
|
|
+ }
|
|
|
+ private static Builder create() {
|
|
|
+ return new Builder();
|
|
|
+ }
|
|
|
+
|
|
|
+ public Builder clear() {
|
|
|
+ super.clear();
|
|
|
+ if (registrationBuilder_ == null) {
|
|
|
+ registration_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.getDefaultInstance();
|
|
|
+ } else {
|
|
|
+ registrationBuilder_.clear();
|
|
|
+ }
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000001);
|
|
|
+ txid_ = 0L;
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000002);
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+
|
|
|
+ public Builder clone() {
|
|
|
+ return create().mergeFrom(buildPartial());
|
|
|
+ }
|
|
|
+
|
|
|
+ public com.google.protobuf.Descriptors.Descriptor
|
|
|
+ getDescriptorForType() {
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto.getDescriptor();
|
|
|
+ }
|
|
|
+
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto getDefaultInstanceForType() {
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto.getDefaultInstance();
|
|
|
+ }
|
|
|
+
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto build() {
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto result = buildPartial();
|
|
|
+ if (!result.isInitialized()) {
|
|
|
+ throw newUninitializedMessageException(result);
|
|
|
+ }
|
|
|
+ return result;
|
|
|
+ }
|
|
|
+
|
|
|
+ private org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto buildParsed()
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto result = buildPartial();
|
|
|
+ if (!result.isInitialized()) {
|
|
|
+ throw newUninitializedMessageException(
|
|
|
+ result).asInvalidProtocolBufferException();
|
|
|
+ }
|
|
|
+ return result;
|
|
|
+ }
|
|
|
+
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto buildPartial() {
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto(this);
|
|
|
+ int from_bitField0_ = bitField0_;
|
|
|
+ int to_bitField0_ = 0;
|
|
|
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
+ to_bitField0_ |= 0x00000001;
|
|
|
+ }
|
|
|
+ if (registrationBuilder_ == null) {
|
|
|
+ result.registration_ = registration_;
|
|
|
+ } else {
|
|
|
+ result.registration_ = registrationBuilder_.build();
|
|
|
+ }
|
|
|
+ if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
|
|
|
+ to_bitField0_ |= 0x00000002;
|
|
|
+ }
|
|
|
+ result.txid_ = txid_;
|
|
|
+ result.bitField0_ = to_bitField0_;
|
|
|
+ onBuilt();
|
|
|
+ return result;
|
|
|
+ }
|
|
|
+
|
|
|
+ public Builder mergeFrom(com.google.protobuf.Message other) {
|
|
|
+ if (other instanceof org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto) {
|
|
|
+ return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto)other);
|
|
|
+ } else {
|
|
|
+ super.mergeFrom(other);
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+ }
|
|
|
+
|
|
|
+ public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto other) {
|
|
|
+ if (other == org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto.getDefaultInstance()) return this;
|
|
|
+ if (other.hasRegistration()) {
|
|
|
+ mergeRegistration(other.getRegistration());
|
|
|
+ }
|
|
|
+ if (other.hasTxid()) {
|
|
|
+ setTxid(other.getTxid());
|
|
|
+ }
|
|
|
+ this.mergeUnknownFields(other.getUnknownFields());
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+
|
|
|
+ public final boolean isInitialized() {
|
|
|
+ if (!hasRegistration()) {
|
|
|
+
|
|
|
+ return false;
|
|
|
+ }
|
|
|
+ if (!hasTxid()) {
|
|
|
+
|
|
|
+ return false;
|
|
|
+ }
|
|
|
+ if (!getRegistration().isInitialized()) {
|
|
|
+
|
|
|
+ return false;
|
|
|
+ }
|
|
|
+ return true;
|
|
|
+ }
|
|
|
+
|
|
|
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder(
+ this.getUnknownFields());
+ while (true) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ this.setUnknownFields(unknownFields.build());
+ onChanged();
+ return this;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ this.setUnknownFields(unknownFields.build());
+ onChanged();
+ return this;
+ }
+ break;
+ }
+ case 10: {
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.newBuilder();
+ if (hasRegistration()) {
+ subBuilder.mergeFrom(getRegistration());
+ }
+ input.readMessage(subBuilder, extensionRegistry);
+ setRegistration(subBuilder.buildPartial());
+ break;
+ }
+ case 16: {
+ bitField0_ |= 0x00000002;
+ txid_ = input.readUInt64();
+ break;
+ }
+ }
+ }
+ }
+
+ private int bitField0_;
+
+ // required .NamenodeRegistrationProto registration = 1;
+ private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto registration_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.getDefaultInstance();
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProtoOrBuilder> registrationBuilder_;
+ public boolean hasRegistration() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto getRegistration() {
+ if (registrationBuilder_ == null) {
+ return registration_;
+ } else {
+ return registrationBuilder_.getMessage();
+ }
+ }
+ public Builder setRegistration(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto value) {
+ if (registrationBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ registration_ = value;
+ onChanged();
+ } else {
+ registrationBuilder_.setMessage(value);
+ }
+ bitField0_ |= 0x00000001;
+ return this;
+ }
+ public Builder setRegistration(
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.Builder builderForValue) {
+ if (registrationBuilder_ == null) {
+ registration_ = builderForValue.build();
+ onChanged();
+ } else {
+ registrationBuilder_.setMessage(builderForValue.build());
+ }
+ bitField0_ |= 0x00000001;
+ return this;
+ }
+ public Builder mergeRegistration(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto value) {
+ if (registrationBuilder_ == null) {
+ if (((bitField0_ & 0x00000001) == 0x00000001) &&
+ registration_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.getDefaultInstance()) {
+ registration_ =
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.newBuilder(registration_).mergeFrom(value).buildPartial();
+ } else {
+ registration_ = value;
+ }
+ onChanged();
+ } else {
+ registrationBuilder_.mergeFrom(value);
+ }
+ bitField0_ |= 0x00000001;
+ return this;
+ }
+ public Builder clearRegistration() {
+ if (registrationBuilder_ == null) {
+ registration_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.getDefaultInstance();
+ onChanged();
+ } else {
+ registrationBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000001);
+ return this;
+ }
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.Builder getRegistrationBuilder() {
+ bitField0_ |= 0x00000001;
+ onChanged();
+ return getRegistrationFieldBuilder().getBuilder();
+ }
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProtoOrBuilder getRegistrationOrBuilder() {
+ if (registrationBuilder_ != null) {
+ return registrationBuilder_.getMessageOrBuilder();
+ } else {
+ return registration_;
+ }
+ }
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProtoOrBuilder>
+ getRegistrationFieldBuilder() {
+ if (registrationBuilder_ == null) {
+ registrationBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProtoOrBuilder>(
+ registration_,
+ getParentForChildren(),
+ isClean());
+ registration_ = null;
+ }
+ return registrationBuilder_;
+ }
+
+ // required uint64 txid = 2;
+ private long txid_ ;
+ public boolean hasTxid() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ public long getTxid() {
+ return txid_;
+ }
+ public Builder setTxid(long value) {
+ bitField0_ |= 0x00000002;
+ txid_ = value;
+ onChanged();
+ return this;
+ }
+ public Builder clearTxid() {
+ bitField0_ = (bitField0_ & ~0x00000002);
+ txid_ = 0L;
+ onChanged();
+ return this;
+ }
+
+ // @@protoc_insertion_point(builder_scope:StartLogSegmentRequestProto)
+ }
+
+ static {
+ defaultInstance = new StartLogSegmentRequestProto(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:StartLogSegmentRequestProto)
+ }
+
+ public interface StartLogSegmentResponseProtoOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+ }
+ public static final class StartLogSegmentResponseProto extends
+ com.google.protobuf.GeneratedMessage
+ implements StartLogSegmentResponseProtoOrBuilder {
+ // Use StartLogSegmentResponseProto.newBuilder() to construct.
+ private StartLogSegmentResponseProto(Builder builder) {
+ super(builder);
+ }
+ private StartLogSegmentResponseProto(boolean noInit) {}
+
+ private static final StartLogSegmentResponseProto defaultInstance;
+ public static StartLogSegmentResponseProto getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public StartLogSegmentResponseProto getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.internal_static_StartLogSegmentResponseProto_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.internal_static_StartLogSegmentResponseProto_fieldAccessorTable;
+ }
+
+ private void initFields() {
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto)) {
+ return super.equals(obj);
+ }
+ org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto) obj;
+
+ boolean result = true;
+ result = result &&
+ getUnknownFields().equals(other.getUnknownFields());
+ return result;
+ }
+
+ @java.lang.Override
+ public int hashCode() {
+ int hash = 41;
+ hash = (19 * hash) + getDescriptorForType().hashCode();
+ hash = (29 * hash) + getUnknownFields().hashCode();
+ return hash;
+ }
+
+ public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data).buildParsed();
+ }
+ public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data).buildParsed();
+ }
+ public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ Builder builder = newBuilder();
+ if (builder.mergeDelimitedFrom(input)) {
+ return builder.buildParsed();
+ } else {
+ return null;
+ }
+ }
+ public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ Builder builder = newBuilder();
+ if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
+ return builder.buildParsed();
+ } else {
+ return null;
+ }
+ }
+ public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
+ implements org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProtoOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.internal_static_StartLogSegmentResponseProto_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.internal_static_StartLogSegmentResponseProto_fieldAccessorTable;
+ }
+
+ // Construct using org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto.getDescriptor();
+ }
+
+ public org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto getDefaultInstanceForType() {
+ return org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto build() {
+ org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ private org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto buildParsed()
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(
+ result).asInvalidProtocolBufferException();
+ }
+ return result;
+ }
+
+ public org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto buildPartial() {
+ org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto(this);
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto) {
+ return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto other) {
+ if (other == org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto.getDefaultInstance()) return this;
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder(
+ this.getUnknownFields());
+ while (true) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ this.setUnknownFields(unknownFields.build());
+ onChanged();
+ return this;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ this.setUnknownFields(unknownFields.build());
+ onChanged();
+ return this;
+ }
+ break;
+ }
+ }
+ }
+ }
+
+
+ // @@protoc_insertion_point(builder_scope:StartLogSegmentResponseProto)
+ }
+
+ static {
+ defaultInstance = new StartLogSegmentResponseProto(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:StartLogSegmentResponseProto)
+ }
+
+ public static abstract class JournalProtocolService
+ implements com.google.protobuf.Service {
+ protected JournalProtocolService() {}
+
+ public interface Interface {
+ public abstract void journal(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto request,
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto> done);
+
+ public abstract void startLogSegment(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto request,
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto> done);
+
+ }
+
+ public static com.google.protobuf.Service newReflectiveService(
+ final Interface impl) {
+ return new JournalProtocolService() {
+ @java.lang.Override
+ public void journal(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto request,
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto> done) {
+ impl.journal(controller, request, done);
+ }
+
+ @java.lang.Override
+ public void startLogSegment(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto request,
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto> done) {
+ impl.startLogSegment(controller, request, done);
+ }
+
+ };
+ }
+
+ public static com.google.protobuf.BlockingService
+ newReflectiveBlockingService(final BlockingInterface impl) {
+ return new com.google.protobuf.BlockingService() {
+ public final com.google.protobuf.Descriptors.ServiceDescriptor
+ getDescriptorForType() {
+ return getDescriptor();
+ }
+
+ public final com.google.protobuf.Message callBlockingMethod(
+ com.google.protobuf.Descriptors.MethodDescriptor method,
+ com.google.protobuf.RpcController controller,
+ com.google.protobuf.Message request)
+ throws com.google.protobuf.ServiceException {
+ if (method.getService() != getDescriptor()) {
+ throw new java.lang.IllegalArgumentException(
+ "Service.callBlockingMethod() given method descriptor for " +
+ "wrong service type.");
+ }
+ switch(method.getIndex()) {
+ case 0:
+ return impl.journal(controller, (org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto)request);
+ case 1:
+ return impl.startLogSegment(controller, (org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto)request);
+ default:
+ throw new java.lang.AssertionError("Can't get here.");
+ }
+ }
+
+ public final com.google.protobuf.Message
+ getRequestPrototype(
+ com.google.protobuf.Descriptors.MethodDescriptor method) {
+ if (method.getService() != getDescriptor()) {
+ throw new java.lang.IllegalArgumentException(
+ "Service.getRequestPrototype() given method " +
+ "descriptor for wrong service type.");
+ }
+ switch(method.getIndex()) {
+ case 0:
+ return org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto.getDefaultInstance();
+ case 1:
+ return org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto.getDefaultInstance();
+ default:
+ throw new java.lang.AssertionError("Can't get here.");
+ }
+ }
+
+ public final com.google.protobuf.Message
+ getResponsePrototype(
+ com.google.protobuf.Descriptors.MethodDescriptor method) {
+ if (method.getService() != getDescriptor()) {
+ throw new java.lang.IllegalArgumentException(
+ "Service.getResponsePrototype() given method " +
+ "descriptor for wrong service type.");
+ }
+ switch(method.getIndex()) {
+ case 0:
+ return org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto.getDefaultInstance();
+ case 1:
+ return org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto.getDefaultInstance();
+ default:
+ throw new java.lang.AssertionError("Can't get here.");
+ }
+ }
+
+ };
+ }
+
+ public abstract void journal(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto request,
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto> done);
+
+ public abstract void startLogSegment(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto request,
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto> done);
+
+ public static final
+ com.google.protobuf.Descriptors.ServiceDescriptor
+ getDescriptor() {
+ return org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.getDescriptor().getServices().get(0);
+ }
+ public final com.google.protobuf.Descriptors.ServiceDescriptor
+ getDescriptorForType() {
+ return getDescriptor();
+ }
+
+ public final void callMethod(
+ com.google.protobuf.Descriptors.MethodDescriptor method,
+ com.google.protobuf.RpcController controller,
+ com.google.protobuf.Message request,
+ com.google.protobuf.RpcCallback<
+ com.google.protobuf.Message> done) {
+ if (method.getService() != getDescriptor()) {
+ throw new java.lang.IllegalArgumentException(
+ "Service.callMethod() given method descriptor for wrong " +
+ "service type.");
+ }
+ switch(method.getIndex()) {
+ case 0:
+ this.journal(controller, (org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto)request,
+ com.google.protobuf.RpcUtil.<org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto>specializeCallback(
+ done));
+ return;
+ case 1:
+ this.startLogSegment(controller, (org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto)request,
+ com.google.protobuf.RpcUtil.<org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto>specializeCallback(
+ done));
+ return;
+ default:
+ throw new java.lang.AssertionError("Can't get here.");
+ }
+ }
+
+ public final com.google.protobuf.Message
+ getRequestPrototype(
+ com.google.protobuf.Descriptors.MethodDescriptor method) {
+ if (method.getService() != getDescriptor()) {
+ throw new java.lang.IllegalArgumentException(
+ "Service.getRequestPrototype() given method " +
+ "descriptor for wrong service type.");
+ }
+ switch(method.getIndex()) {
+ case 0:
+ return org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto.getDefaultInstance();
+ case 1:
+ return org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto.getDefaultInstance();
+ default:
+ throw new java.lang.AssertionError("Can't get here.");
+ }
+ }
+
+ public final com.google.protobuf.Message
+ getResponsePrototype(
+ com.google.protobuf.Descriptors.MethodDescriptor method) {
+ if (method.getService() != getDescriptor()) {
+ throw new java.lang.IllegalArgumentException(
+ "Service.getResponsePrototype() given method " +
+ "descriptor for wrong service type.");
+ }
+ switch(method.getIndex()) {
+ case 0:
+ return org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto.getDefaultInstance();
+ case 1:
+ return org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto.getDefaultInstance();
+ default:
+ throw new java.lang.AssertionError("Can't get here.");
+ }
+ }
+
+ public static Stub newStub(
+ com.google.protobuf.RpcChannel channel) {
+ return new Stub(channel);
+ }
+
+ public static final class Stub extends org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalProtocolService implements Interface {
+ private Stub(com.google.protobuf.RpcChannel channel) {
+ this.channel = channel;
+ }
+
+ private final com.google.protobuf.RpcChannel channel;
+
+ public com.google.protobuf.RpcChannel getChannel() {
+ return channel;
+ }
+
+ public void journal(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto request,
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto> done) {
+ channel.callMethod(
+ getDescriptor().getMethods().get(0),
+ controller,
+ request,
+ org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto.getDefaultInstance(),
+ com.google.protobuf.RpcUtil.generalizeCallback(
+ done,
+ org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto.class,
+ org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto.getDefaultInstance()));
+ }
+
+ public void startLogSegment(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto request,
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto> done) {
+ channel.callMethod(
+ getDescriptor().getMethods().get(1),
+ controller,
+ request,
+ org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto.getDefaultInstance(),
+ com.google.protobuf.RpcUtil.generalizeCallback(
+ done,
+ org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto.class,
+ org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto.getDefaultInstance()));
+ }
+ }
+
+ public static BlockingInterface newBlockingStub(
+ com.google.protobuf.BlockingRpcChannel channel) {
+ return new BlockingStub(channel);
+ }
+
+ public interface BlockingInterface {
+ public org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto journal(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto request)
+ throws com.google.protobuf.ServiceException;
+
+ public org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto startLogSegment(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto request)
+ throws com.google.protobuf.ServiceException;
+ }
+
+ private static final class BlockingStub implements BlockingInterface {
+ private BlockingStub(com.google.protobuf.BlockingRpcChannel channel) {
+ this.channel = channel;
+ }
+
+ private final com.google.protobuf.BlockingRpcChannel channel;
+
+ public org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto journal(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto request)
+ throws com.google.protobuf.ServiceException {
+ return (org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto) channel.callBlockingMethod(
+ getDescriptor().getMethods().get(0),
+ controller,
+ request,
+ org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto.getDefaultInstance());
+ }
+
+
+ public org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto startLogSegment(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto request)
+ throws com.google.protobuf.ServiceException {
+ return (org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto) channel.callBlockingMethod(
+ getDescriptor().getMethods().get(1),
+ controller,
+ request,
+ org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto.getDefaultInstance());
+ }
+
+ }
+ }
+
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_JournalRequestProto_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_JournalRequestProto_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_JournalResponseProto_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_JournalResponseProto_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_StartLogSegmentRequestProto_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_StartLogSegmentRequestProto_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_StartLogSegmentResponseProto_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_StartLogSegmentResponseProto_fieldAccessorTable;
+
+ public static com.google.protobuf.Descriptors.FileDescriptor
+ getDescriptor() {
+ return descriptor;
+ }
+ private static com.google.protobuf.Descriptors.FileDescriptor
+ descriptor;
+ static {
+ java.lang.String[] descriptorData = {
+ "\n\025JournalProtocol.proto\032\nhdfs.proto\"}\n\023J" +
+ "ournalRequestProto\0220\n\014registration\030\001 \002(\013" +
+ "2\032.NamenodeRegistrationProto\022\022\n\nfirstTxn" +
+ "Id\030\002 \002(\004\022\017\n\007numTxns\030\003 \002(\r\022\017\n\007records\030\004 \002" +
+ "(\014\"\026\n\024JournalResponseProto\"]\n\033StartLogSe" +
+ "gmentRequestProto\0220\n\014registration\030\001 \002(\0132" +
+ "\032.NamenodeRegistrationProto\022\014\n\004txid\030\002 \002(" +
+ "\004\"\036\n\034StartLogSegmentResponseProto2\240\001\n\026Jo" +
+ "urnalProtocolService\0226\n\007journal\022\024.Journa" +
+ "lRequestProto\032\025.JournalResponseProto\022N\n\017",
+ "startLogSegment\022\034.StartLogSegmentRequest" +
+ "Proto\032\035.StartLogSegmentResponseProtoBD\n%" +
+ "org.apache.hadoop.hdfs.protocol.protoB\025J" +
+ "ournalProtocolProtos\210\001\001\240\001\001"
+ };
+ com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
+ new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
+ public com.google.protobuf.ExtensionRegistry assignDescriptors(
+ com.google.protobuf.Descriptors.FileDescriptor root) {
+ descriptor = root;
+ internal_static_JournalRequestProto_descriptor =
+ getDescriptor().getMessageTypes().get(0);
+ internal_static_JournalRequestProto_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_JournalRequestProto_descriptor,
+ new java.lang.String[] { "Registration", "FirstTxnId", "NumTxns", "Records", },
+ org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto.class,
+ org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto.Builder.class);
+ internal_static_JournalResponseProto_descriptor =
+ getDescriptor().getMessageTypes().get(1);
+ internal_static_JournalResponseProto_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_JournalResponseProto_descriptor,
+ new java.lang.String[] { },
+ org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto.class,
+ org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto.Builder.class);
+ internal_static_StartLogSegmentRequestProto_descriptor =
+ getDescriptor().getMessageTypes().get(2);
+ internal_static_StartLogSegmentRequestProto_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_StartLogSegmentRequestProto_descriptor,
+ new java.lang.String[] { "Registration", "Txid", },
+ org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto.class,
+ org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto.Builder.class);
+ internal_static_StartLogSegmentResponseProto_descriptor =
+ getDescriptor().getMessageTypes().get(3);
+ internal_static_StartLogSegmentResponseProto_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_StartLogSegmentResponseProto_descriptor,
+ new java.lang.String[] { },
+ org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto.class,
+ org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto.Builder.class);
+ return null;
+ }
+ };
+ com.google.protobuf.Descriptors.FileDescriptor
+ .internalBuildGeneratedFileFrom(descriptorData,
+ new com.google.protobuf.Descriptors.FileDescriptor[] {
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.getDescriptor(),
+ }, assigner);
+ }
+
+ // @@protoc_insertion_point(outer_class_scope)
+}