@@ -15,7 +15,6 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-
 // Generated by the protocol buffer compiler. DO NOT EDIT!
 // source: hdfs.proto
@@ -37,13 +36,13 @@ public final class HdfsProtos {
     boolean hasBlockId();
     long getBlockId();

-    // required uint64 numBytes = 3;
-    boolean hasNumBytes();
-    long getNumBytes();
-
-    // required uint64 generationStamp = 4;
+    // required uint64 generationStamp = 3;
     boolean hasGenerationStamp();
     long getGenerationStamp();
+
+    // optional uint64 numBytes = 4;
+    boolean hasNumBytes();
+    long getNumBytes();
   }
   public static final class ExtendedBlockProto extends
       com.google.protobuf.GeneratedMessage
@@ -116,31 +115,31 @@ public final class HdfsProtos {
       return blockId_;
     }

-    // required uint64 numBytes = 3;
-    public static final int NUMBYTES_FIELD_NUMBER = 3;
-    private long numBytes_;
-    public boolean hasNumBytes() {
+    // required uint64 generationStamp = 3;
+    public static final int GENERATIONSTAMP_FIELD_NUMBER = 3;
+    private long generationStamp_;
+    public boolean hasGenerationStamp() {
       return ((bitField0_ & 0x00000004) == 0x00000004);
     }
-    public long getNumBytes() {
-      return numBytes_;
+    public long getGenerationStamp() {
+      return generationStamp_;
     }

-    // required uint64 generationStamp = 4;
-    public static final int GENERATIONSTAMP_FIELD_NUMBER = 4;
-    private long generationStamp_;
-    public boolean hasGenerationStamp() {
+    // optional uint64 numBytes = 4;
+    public static final int NUMBYTES_FIELD_NUMBER = 4;
+    private long numBytes_;
+    public boolean hasNumBytes() {
       return ((bitField0_ & 0x00000008) == 0x00000008);
     }
-    public long getGenerationStamp() {
-      return generationStamp_;
+    public long getNumBytes() {
+      return numBytes_;
     }

     private void initFields() {
       poolId_ = "";
       blockId_ = 0L;
-      numBytes_ = 0L;
       generationStamp_ = 0L;
+      numBytes_ = 0L;
     }
     private byte memoizedIsInitialized = -1;
     public final boolean isInitialized() {
@@ -155,10 +154,6 @@ public final class HdfsProtos {
         memoizedIsInitialized = 0;
         return false;
       }
-      if (!hasNumBytes()) {
-        memoizedIsInitialized = 0;
-        return false;
-      }
       if (!hasGenerationStamp()) {
         memoizedIsInitialized = 0;
         return false;
@@ -177,10 +172,10 @@ public final class HdfsProtos {
         output.writeUInt64(2, blockId_);
       }
       if (((bitField0_ & 0x00000004) == 0x00000004)) {
-        output.writeUInt64(3, numBytes_);
+        output.writeUInt64(3, generationStamp_);
       }
       if (((bitField0_ & 0x00000008) == 0x00000008)) {
-        output.writeUInt64(4, generationStamp_);
+        output.writeUInt64(4, numBytes_);
       }
       getUnknownFields().writeTo(output);
     }
@@ -201,11 +196,11 @@ public final class HdfsProtos {
       }
       if (((bitField0_ & 0x00000004) == 0x00000004)) {
         size += com.google.protobuf.CodedOutputStream
-          .computeUInt64Size(3, numBytes_);
+          .computeUInt64Size(3, generationStamp_);
       }
       if (((bitField0_ & 0x00000008) == 0x00000008)) {
         size += com.google.protobuf.CodedOutputStream
-          .computeUInt64Size(4, generationStamp_);
+          .computeUInt64Size(4, numBytes_);
       }
       size += getUnknownFields().getSerializedSize();
       memoizedSerializedSize = size;
@@ -240,16 +235,16 @@ public final class HdfsProtos {
         result = result && (getBlockId()
             == other.getBlockId());
       }
-      result = result && (hasNumBytes() == other.hasNumBytes());
-      if (hasNumBytes()) {
-        result = result && (getNumBytes()
-            == other.getNumBytes());
-      }
       result = result && (hasGenerationStamp() == other.hasGenerationStamp());
       if (hasGenerationStamp()) {
         result = result && (getGenerationStamp()
             == other.getGenerationStamp());
       }
+      result = result && (hasNumBytes() == other.hasNumBytes());
+      if (hasNumBytes()) {
+        result = result && (getNumBytes()
+            == other.getNumBytes());
+      }
       result = result &&
           getUnknownFields().equals(other.getUnknownFields());
       return result;
@@ -267,14 +262,14 @@ public final class HdfsProtos {
         hash = (37 * hash) + BLOCKID_FIELD_NUMBER;
         hash = (53 * hash) + hashLong(getBlockId());
       }
-      if (hasNumBytes()) {
-        hash = (37 * hash) + NUMBYTES_FIELD_NUMBER;
-        hash = (53 * hash) + hashLong(getNumBytes());
-      }
       if (hasGenerationStamp()) {
         hash = (37 * hash) + GENERATIONSTAMP_FIELD_NUMBER;
         hash = (53 * hash) + hashLong(getGenerationStamp());
       }
+      if (hasNumBytes()) {
+        hash = (37 * hash) + NUMBYTES_FIELD_NUMBER;
+        hash = (53 * hash) + hashLong(getNumBytes());
+      }
       hash = (29 * hash) + getUnknownFields().hashCode();
       return hash;
     }
@@ -395,9 +390,9 @@ public final class HdfsProtos {
         bitField0_ = (bitField0_ & ~0x00000001);
         blockId_ = 0L;
         bitField0_ = (bitField0_ & ~0x00000002);
-        numBytes_ = 0L;
-        bitField0_ = (bitField0_ & ~0x00000004);
         generationStamp_ = 0L;
+        bitField0_ = (bitField0_ & ~0x00000004);
+        numBytes_ = 0L;
         bitField0_ = (bitField0_ & ~0x00000008);
         return this;
       }
@@ -448,11 +443,11 @@ public final class HdfsProtos {
         if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
           to_bitField0_ |= 0x00000004;
         }
-        result.numBytes_ = numBytes_;
+        result.generationStamp_ = generationStamp_;
         if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
           to_bitField0_ |= 0x00000008;
         }
-        result.generationStamp_ = generationStamp_;
+        result.numBytes_ = numBytes_;
         result.bitField0_ = to_bitField0_;
         onBuilt();
         return result;
@@ -475,12 +470,12 @@ public final class HdfsProtos {
         if (other.hasBlockId()) {
           setBlockId(other.getBlockId());
         }
-        if (other.hasNumBytes()) {
-          setNumBytes(other.getNumBytes());
-        }
         if (other.hasGenerationStamp()) {
           setGenerationStamp(other.getGenerationStamp());
         }
+        if (other.hasNumBytes()) {
+          setNumBytes(other.getNumBytes());
+        }
         this.mergeUnknownFields(other.getUnknownFields());
         return this;
       }
@@ -494,10 +489,6 @@ public final class HdfsProtos {

           return false;
         }
-        if (!hasNumBytes()) {
-
-          return false;
-        }
         if (!hasGenerationStamp()) {

           return false;
@@ -540,12 +531,12 @@ public final class HdfsProtos {
             }
             case 24: {
               bitField0_ |= 0x00000004;
-              numBytes_ = input.readUInt64();
+              generationStamp_ = input.readUInt64();
               break;
             }
             case 32: {
               bitField0_ |= 0x00000008;
-              generationStamp_ = input.readUInt64();
+              numBytes_ = input.readUInt64();
               break;
             }
           }
@@ -611,44 +602,44 @@ public final class HdfsProtos {
         return this;
       }

-      // required uint64 numBytes = 3;
-      private long numBytes_ ;
-      public boolean hasNumBytes() {
+      // required uint64 generationStamp = 3;
+      private long generationStamp_ ;
+      public boolean hasGenerationStamp() {
         return ((bitField0_ & 0x00000004) == 0x00000004);
       }
-      public long getNumBytes() {
-        return numBytes_;
+      public long getGenerationStamp() {
+        return generationStamp_;
       }
-      public Builder setNumBytes(long value) {
+      public Builder setGenerationStamp(long value) {
         bitField0_ |= 0x00000004;
-        numBytes_ = value;
+        generationStamp_ = value;
         onChanged();
         return this;
       }
-      public Builder clearNumBytes() {
+      public Builder clearGenerationStamp() {
         bitField0_ = (bitField0_ & ~0x00000004);
-        numBytes_ = 0L;
+        generationStamp_ = 0L;
         onChanged();
         return this;
       }

-      // required uint64 generationStamp = 4;
-      private long generationStamp_ ;
-      public boolean hasGenerationStamp() {
+      // optional uint64 numBytes = 4;
+      private long numBytes_ ;
+      public boolean hasNumBytes() {
         return ((bitField0_ & 0x00000008) == 0x00000008);
       }
-      public long getGenerationStamp() {
-        return generationStamp_;
+      public long getNumBytes() {
+        return numBytes_;
       }
-      public Builder setGenerationStamp(long value) {
+      public Builder setNumBytes(long value) {
         bitField0_ |= 0x00000008;
-        generationStamp_ = value;
+        numBytes_ = value;
         onChanged();
         return this;
       }
-      public Builder clearGenerationStamp() {
+      public Builder clearNumBytes() {
         bitField0_ = (bitField0_ & ~0x00000008);
-        generationStamp_ = 0L;
+        numBytes_ = 0L;
         onChanged();
         return this;
       }
@@ -1359,6 +1350,10 @@ public final class HdfsProtos {
     // required uint32 infoPort = 3;
     boolean hasInfoPort();
     int getInfoPort();
+
+    // required uint32 ipcPort = 4;
+    boolean hasIpcPort();
+    int getIpcPort();
   }
   public static final class DatanodeIDProto extends
       com.google.protobuf.GeneratedMessage
@@ -1463,10 +1458,21 @@ public final class HdfsProtos {
       return infoPort_;
     }

+    // required uint32 ipcPort = 4;
+    public static final int IPCPORT_FIELD_NUMBER = 4;
+    private int ipcPort_;
+    public boolean hasIpcPort() {
+      return ((bitField0_ & 0x00000008) == 0x00000008);
+    }
+    public int getIpcPort() {
+      return ipcPort_;
+    }
+
     private void initFields() {
       name_ = "";
       storageID_ = "";
       infoPort_ = 0;
+      ipcPort_ = 0;
     }
     private byte memoizedIsInitialized = -1;
     public final boolean isInitialized() {
@@ -1485,6 +1491,10 @@ public final class HdfsProtos {
         memoizedIsInitialized = 0;
         return false;
       }
+      if (!hasIpcPort()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
       memoizedIsInitialized = 1;
       return true;
     }
@@ -1501,6 +1511,9 @@ public final class HdfsProtos {
       if (((bitField0_ & 0x00000004) == 0x00000004)) {
         output.writeUInt32(3, infoPort_);
       }
+      if (((bitField0_ & 0x00000008) == 0x00000008)) {
+        output.writeUInt32(4, ipcPort_);
+      }
       getUnknownFields().writeTo(output);
     }

@@ -1522,6 +1535,10 @@ public final class HdfsProtos {
         size += com.google.protobuf.CodedOutputStream
           .computeUInt32Size(3, infoPort_);
       }
+      if (((bitField0_ & 0x00000008) == 0x00000008)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeUInt32Size(4, ipcPort_);
+      }
       size += getUnknownFields().getSerializedSize();
       memoizedSerializedSize = size;
       return size;
@@ -1560,6 +1577,11 @@ public final class HdfsProtos {
         result = result && (getInfoPort()
             == other.getInfoPort());
       }
+      result = result && (hasIpcPort() == other.hasIpcPort());
+      if (hasIpcPort()) {
+        result = result && (getIpcPort()
+            == other.getIpcPort());
+      }
       result = result &&
           getUnknownFields().equals(other.getUnknownFields());
       return result;
@@ -1581,6 +1603,10 @@ public final class HdfsProtos {
         hash = (37 * hash) + INFOPORT_FIELD_NUMBER;
         hash = (53 * hash) + getInfoPort();
       }
+      if (hasIpcPort()) {
+        hash = (37 * hash) + IPCPORT_FIELD_NUMBER;
+        hash = (53 * hash) + getIpcPort();
+      }
       hash = (29 * hash) + getUnknownFields().hashCode();
       return hash;
     }
@@ -1703,6 +1729,8 @@ public final class HdfsProtos {
         bitField0_ = (bitField0_ & ~0x00000002);
         infoPort_ = 0;
         bitField0_ = (bitField0_ & ~0x00000004);
+        ipcPort_ = 0;
+        bitField0_ = (bitField0_ & ~0x00000008);
         return this;
       }

@@ -1753,6 +1781,10 @@ public final class HdfsProtos {
           to_bitField0_ |= 0x00000004;
         }
         result.infoPort_ = infoPort_;
+        if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
+          to_bitField0_ |= 0x00000008;
+        }
+        result.ipcPort_ = ipcPort_;
         result.bitField0_ = to_bitField0_;
         onBuilt();
         return result;
@@ -1778,6 +1810,9 @@ public final class HdfsProtos {
         if (other.hasInfoPort()) {
           setInfoPort(other.getInfoPort());
         }
+        if (other.hasIpcPort()) {
+          setIpcPort(other.getIpcPort());
+        }
         this.mergeUnknownFields(other.getUnknownFields());
         return this;
       }
@@ -1795,6 +1830,10 @@ public final class HdfsProtos {

           return false;
         }
+        if (!hasIpcPort()) {
+
+          return false;
+        }
         return true;
       }

@@ -1836,6 +1875,11 @@ public final class HdfsProtos {
               infoPort_ = input.readUInt32();
               break;
             }
+            case 32: {
+              bitField0_ |= 0x00000008;
+              ipcPort_ = input.readUInt32();
+              break;
+            }
           }
         }
       }
@@ -1935,6 +1979,27 @@ public final class HdfsProtos {
         return this;
       }

+      // required uint32 ipcPort = 4;
+      private int ipcPort_ ;
+      public boolean hasIpcPort() {
+        return ((bitField0_ & 0x00000008) == 0x00000008);
+      }
+      public int getIpcPort() {
+        return ipcPort_;
+      }
+      public Builder setIpcPort(int value) {
+        bitField0_ |= 0x00000008;
+        ipcPort_ = value;
+        onChanged();
+        return this;
+      }
+      public Builder clearIpcPort() {
+        bitField0_ = (bitField0_ & ~0x00000008);
+        ipcPort_ = 0;
+        onChanged();
+        return this;
+      }
+
       // @@protoc_insertion_point(builder_scope:DatanodeIDProto)
     }

@@ -3168,90 +3233,7274 @@ public final class HdfsProtos {
     // @@protoc_insertion_point(class_scope:DatanodeInfoProto)
   }

-  private static com.google.protobuf.Descriptors.Descriptor
-    internal_static_ExtendedBlockProto_descriptor;
-  private static
-    com.google.protobuf.GeneratedMessage.FieldAccessorTable
-      internal_static_ExtendedBlockProto_fieldAccessorTable;
-  private static com.google.protobuf.Descriptors.Descriptor
-    internal_static_BlockTokenIdentifierProto_descriptor;
-  private static
-    com.google.protobuf.GeneratedMessage.FieldAccessorTable
-      internal_static_BlockTokenIdentifierProto_fieldAccessorTable;
-  private static com.google.protobuf.Descriptors.Descriptor
-    internal_static_DatanodeIDProto_descriptor;
-  private static
-    com.google.protobuf.GeneratedMessage.FieldAccessorTable
-      internal_static_DatanodeIDProto_fieldAccessorTable;
-  private static com.google.protobuf.Descriptors.Descriptor
-    internal_static_DatanodeInfoProto_descriptor;
-  private static
-    com.google.protobuf.GeneratedMessage.FieldAccessorTable
-      internal_static_DatanodeInfoProto_fieldAccessorTable;
-
-  public static com.google.protobuf.Descriptors.FileDescriptor
-      getDescriptor() {
-    return descriptor;
+  public interface ContentSummaryProtoOrBuilder
+      extends com.google.protobuf.MessageOrBuilder {
+
+    // required uint64 length = 1;
+    boolean hasLength();
+    long getLength();
+
+    // required uint64 fileCount = 2;
+    boolean hasFileCount();
+    long getFileCount();
+
+    // required uint64 directoryCount = 3;
+    boolean hasDirectoryCount();
+    long getDirectoryCount();
+
+    // required uint64 quota = 4;
+    boolean hasQuota();
+    long getQuota();
+
+    // required uint64 spaceConsumed = 5;
+    boolean hasSpaceConsumed();
+    long getSpaceConsumed();
+
+    // required uint64 spaceQuota = 6;
+    boolean hasSpaceQuota();
+    long getSpaceQuota();
   }
-  private static com.google.protobuf.Descriptors.FileDescriptor
-      descriptor;
-  static {
-    java.lang.String[] descriptorData = {
-      "\n\nhdfs.proto\"`\n\022ExtendedBlockProto\022\016\n\006po" +
-      "olId\030\001 \002(\t\022\017\n\007blockId\030\002 \002(\004\022\020\n\010numBytes\030" +
-      "\003 \002(\004\022\027\n\017generationStamp\030\004 \002(\004\"`\n\031BlockT" +
-      "okenIdentifierProto\022\022\n\nidentifier\030\001 \002(\014\022" +
-      "\020\n\010password\030\002 \002(\014\022\014\n\004kind\030\003 \002(\t\022\017\n\007servi" +
-      "ce\030\004 \002(\t\"D\n\017DatanodeIDProto\022\014\n\004name\030\001 \002(" +
-      "\t\022\021\n\tstorageID\030\002 \002(\t\022\020\n\010infoPort\030\003 \002(\r\"\312" +
-      "\002\n\021DatanodeInfoProto\022\034\n\002id\030\001 \002(\0132\020.Datan" +
-      "odeIDProto\022\020\n\010capacity\030\002 \001(\004\022\017\n\007dfsUsed\030" +
-      "\003 \001(\004\022\021\n\tremaining\030\004 \001(\004\022\025\n\rblockPoolUse",
-      "d\030\005 \001(\004\022\022\n\nlastUpdate\030\006 \001(\004\022\024\n\014xceiverCo" +
-      "unt\030\007 \001(\r\022\020\n\010location\030\010 \001(\t\022\020\n\010hostName\030" +
-      "\t \001(\t\0221\n\nadminState\030\n \001(\0162\035.DatanodeInfo" +
-      "Proto.AdminState\"I\n\nAdminState\022\n\n\006NORMAL" +
-      "\020\000\022\033\n\027DECOMMISSION_INPROGRESS\020\001\022\022\n\016DECOM" +
-      "MISSIONED\020\002B6\n%org.apache.hadoop.hdfs.pr" +
-      "otocol.protoB\nHdfsProtos\240\001\001"
-    };
-    com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
-      new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
-        public com.google.protobuf.ExtensionRegistry assignDescriptors(
-            com.google.protobuf.Descriptors.FileDescriptor root) {
-          descriptor = root;
-          internal_static_ExtendedBlockProto_descriptor =
-            getDescriptor().getMessageTypes().get(0);
-          internal_static_ExtendedBlockProto_fieldAccessorTable = new
-            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
-              internal_static_ExtendedBlockProto_descriptor,
-              new java.lang.String[] { "PoolId", "BlockId", "NumBytes", "GenerationStamp", },
-              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.class,
-              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder.class);
-          internal_static_BlockTokenIdentifierProto_descriptor =
-            getDescriptor().getMessageTypes().get(1);
-          internal_static_BlockTokenIdentifierProto_fieldAccessorTable = new
-            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
-              internal_static_BlockTokenIdentifierProto_descriptor,
-              new java.lang.String[] { "Identifier", "Password", "Kind", "Service", },
-              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto.class,
-              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto.Builder.class);
-          internal_static_DatanodeIDProto_descriptor =
-            getDescriptor().getMessageTypes().get(2);
-          internal_static_DatanodeIDProto_fieldAccessorTable = new
-            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
-              internal_static_DatanodeIDProto_descriptor,
-              new java.lang.String[] { "Name", "StorageID", "InfoPort", },
-              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.class,
-              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder.class);
-          internal_static_DatanodeInfoProto_descriptor =
-            getDescriptor().getMessageTypes().get(3);
-          internal_static_DatanodeInfoProto_fieldAccessorTable = new
-            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
-              internal_static_DatanodeInfoProto_descriptor,
-              new java.lang.String[] { "Id", "Capacity", "DfsUsed", "Remaining", "BlockPoolUsed", "LastUpdate", "XceiverCount", "Location", "HostName", "AdminState", },
-              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.class,
-              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder.class);
+ public static final class ContentSummaryProto extends
|
|
|
+ com.google.protobuf.GeneratedMessage
|
|
|
+ implements ContentSummaryProtoOrBuilder {
|
|
|
+ // Use ContentSummaryProto.newBuilder() to construct.
|
|
|
+ private ContentSummaryProto(Builder builder) {
|
|
|
+ super(builder);
|
|
|
+ }
|
|
|
+ private ContentSummaryProto(boolean noInit) {}
|
|
|
+
|
|
|
+ private static final ContentSummaryProto defaultInstance;
|
|
|
+ public static ContentSummaryProto getDefaultInstance() {
|
|
|
+ return defaultInstance;
|
|
|
+ }
|
|
|
+
|
|
|
+ public ContentSummaryProto getDefaultInstanceForType() {
|
|
|
+ return defaultInstance;
|
|
|
+ }
|
|
|
+
|
|
|
+ public static final com.google.protobuf.Descriptors.Descriptor
|
|
|
+ getDescriptor() {
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_ContentSummaryProto_descriptor;
|
|
|
+ }
|
|
|
+
|
|
|
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
|
|
+ internalGetFieldAccessorTable() {
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_ContentSummaryProto_fieldAccessorTable;
|
|
|
+ }
|
|
|
+
|
|
|
+ private int bitField0_;
|
|
|
+ // required uint64 length = 1;
|
|
|
+ public static final int LENGTH_FIELD_NUMBER = 1;
|
|
|
+ private long length_;
|
|
|
+ public boolean hasLength() {
|
|
|
+ return ((bitField0_ & 0x00000001) == 0x00000001);
|
|
|
+ }
|
|
|
+ public long getLength() {
|
|
|
+ return length_;
|
|
|
+ }
|
|
|
+
|
|
|
+ // required uint64 fileCount = 2;
|
|
|
+ public static final int FILECOUNT_FIELD_NUMBER = 2;
|
|
|
+ private long fileCount_;
|
|
|
+ public boolean hasFileCount() {
|
|
|
+ return ((bitField0_ & 0x00000002) == 0x00000002);
|
|
|
+ }
|
|
|
+ public long getFileCount() {
|
|
|
+ return fileCount_;
|
|
|
+ }
|
|
|
+
|
|
|
+ // required uint64 directoryCount = 3;
|
|
|
+ public static final int DIRECTORYCOUNT_FIELD_NUMBER = 3;
|
|
|
+ private long directoryCount_;
|
|
|
+ public boolean hasDirectoryCount() {
|
|
|
+ return ((bitField0_ & 0x00000004) == 0x00000004);
|
|
|
+ }
|
|
|
+ public long getDirectoryCount() {
|
|
|
+ return directoryCount_;
|
|
|
+ }
|
|
|
+
|
|
|
+ // required uint64 quota = 4;
|
|
|
+ public static final int QUOTA_FIELD_NUMBER = 4;
|
|
|
+ private long quota_;
|
|
|
+ public boolean hasQuota() {
|
|
|
+ return ((bitField0_ & 0x00000008) == 0x00000008);
|
|
|
+ }
|
|
|
+ public long getQuota() {
|
|
|
+ return quota_;
|
|
|
+ }
|
|
|
+
|
|
|
+ // required uint64 spaceConsumed = 5;
|
|
|
+ public static final int SPACECONSUMED_FIELD_NUMBER = 5;
|
|
|
+ private long spaceConsumed_;
|
|
|
+ public boolean hasSpaceConsumed() {
|
|
|
+ return ((bitField0_ & 0x00000010) == 0x00000010);
|
|
|
+ }
|
|
|
+ public long getSpaceConsumed() {
|
|
|
+ return spaceConsumed_;
|
|
|
+ }
|
|
|
+
|
|
|
+ // required uint64 spaceQuota = 6;
|
|
|
+ public static final int SPACEQUOTA_FIELD_NUMBER = 6;
|
|
|
+ private long spaceQuota_;
|
|
|
+ public boolean hasSpaceQuota() {
|
|
|
+ return ((bitField0_ & 0x00000020) == 0x00000020);
|
|
|
+ }
|
|
|
+ public long getSpaceQuota() {
|
|
|
+ return spaceQuota_;
|
|
|
+ }
|
|
|
+
|
|
|
+ private void initFields() {
|
|
|
+ length_ = 0L;
|
|
|
+ fileCount_ = 0L;
|
|
|
+ directoryCount_ = 0L;
|
|
|
+ quota_ = 0L;
|
|
|
+ spaceConsumed_ = 0L;
|
|
|
+ spaceQuota_ = 0L;
|
|
|
+ }
|
|
|
+ private byte memoizedIsInitialized = -1;
|
|
|
+ public final boolean isInitialized() {
|
|
|
+ byte isInitialized = memoizedIsInitialized;
|
|
|
+ if (isInitialized != -1) return isInitialized == 1;
|
|
|
+
|
|
|
+ if (!hasLength()) {
|
|
|
+ memoizedIsInitialized = 0;
|
|
|
+ return false;
|
|
|
+ }
|
|
|
+ if (!hasFileCount()) {
|
|
|
+ memoizedIsInitialized = 0;
|
|
|
+ return false;
|
|
|
+ }
|
|
|
+ if (!hasDirectoryCount()) {
|
|
|
+ memoizedIsInitialized = 0;
|
|
|
+ return false;
|
|
|
+ }
|
|
|
+ if (!hasQuota()) {
|
|
|
+ memoizedIsInitialized = 0;
|
|
|
+ return false;
|
|
|
+ }
|
|
|
+ if (!hasSpaceConsumed()) {
|
|
|
+ memoizedIsInitialized = 0;
|
|
|
+ return false;
|
|
|
+ }
|
|
|
+ if (!hasSpaceQuota()) {
|
|
|
+ memoizedIsInitialized = 0;
|
|
|
+ return false;
|
|
|
+ }
|
|
|
+ memoizedIsInitialized = 1;
|
|
|
+ return true;
|
|
|
+ }
|
|
|
+
|
|
|
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
|
|
|
+ throws java.io.IOException {
|
|
|
+ getSerializedSize();
|
|
|
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
+ output.writeUInt64(1, length_);
|
|
|
+ }
|
|
|
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
|
|
|
+ output.writeUInt64(2, fileCount_);
|
|
|
+ }
|
|
|
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
|
|
|
+ output.writeUInt64(3, directoryCount_);
|
|
|
+ }
|
|
|
+ if (((bitField0_ & 0x00000008) == 0x00000008)) {
|
|
|
+ output.writeUInt64(4, quota_);
|
|
|
+ }
|
|
|
+ if (((bitField0_ & 0x00000010) == 0x00000010)) {
|
|
|
+ output.writeUInt64(5, spaceConsumed_);
|
|
|
+ }
|
|
|
+ if (((bitField0_ & 0x00000020) == 0x00000020)) {
|
|
|
+ output.writeUInt64(6, spaceQuota_);
|
|
|
+ }
|
|
|
+ getUnknownFields().writeTo(output);
|
|
|
+ }
|
|
|
+
|
|
|
+ private int memoizedSerializedSize = -1;
|
|
|
+ public int getSerializedSize() {
|
|
|
+ int size = memoizedSerializedSize;
|
|
|
+ if (size != -1) return size;
|
|
|
+
|
|
|
+ size = 0;
|
|
|
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
+ size += com.google.protobuf.CodedOutputStream
|
|
|
+ .computeUInt64Size(1, length_);
|
|
|
+ }
|
|
|
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
|
|
|
+ size += com.google.protobuf.CodedOutputStream
|
|
|
+ .computeUInt64Size(2, fileCount_);
|
|
|
+ }
|
|
|
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
|
|
|
+ size += com.google.protobuf.CodedOutputStream
|
|
|
+ .computeUInt64Size(3, directoryCount_);
|
|
|
+ }
|
|
|
+ if (((bitField0_ & 0x00000008) == 0x00000008)) {
|
|
|
+ size += com.google.protobuf.CodedOutputStream
|
|
|
+ .computeUInt64Size(4, quota_);
|
|
|
+ }
|
|
|
+ if (((bitField0_ & 0x00000010) == 0x00000010)) {
|
|
|
+ size += com.google.protobuf.CodedOutputStream
|
|
|
+ .computeUInt64Size(5, spaceConsumed_);
|
|
|
+ }
|
|
|
+ if (((bitField0_ & 0x00000020) == 0x00000020)) {
|
|
|
+ size += com.google.protobuf.CodedOutputStream
|
|
|
+ .computeUInt64Size(6, spaceQuota_);
|
|
|
+ }
|
|
|
+ size += getUnknownFields().getSerializedSize();
|
|
|
+ memoizedSerializedSize = size;
|
|
|
+ return size;
|
|
|
+ }
|
|
|
+
|
|
|
+ private static final long serialVersionUID = 0L;
|
|
|
+ @java.lang.Override
|
|
|
+ protected java.lang.Object writeReplace()
|
|
|
+ throws java.io.ObjectStreamException {
|
|
|
+ return super.writeReplace();
|
|
|
+ }
|
|
|
+
|
|
|
+ @java.lang.Override
|
|
|
+ public boolean equals(final java.lang.Object obj) {
|
|
|
+ if (obj == this) {
|
|
|
+ return true;
|
|
|
+ }
|
|
|
+ if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto)) {
|
|
|
+ return super.equals(obj);
|
|
|
+ }
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto) obj;
|
|
|
+
|
|
|
+ boolean result = true;
|
|
|
+ result = result && (hasLength() == other.hasLength());
|
|
|
+ if (hasLength()) {
|
|
|
+ result = result && (getLength()
|
|
|
+ == other.getLength());
|
|
|
+ }
|
|
|
+ result = result && (hasFileCount() == other.hasFileCount());
|
|
|
+ if (hasFileCount()) {
|
|
|
+ result = result && (getFileCount()
|
|
|
+ == other.getFileCount());
|
|
|
+ }
|
|
|
+ result = result && (hasDirectoryCount() == other.hasDirectoryCount());
|
|
|
+ if (hasDirectoryCount()) {
|
|
|
+ result = result && (getDirectoryCount()
|
|
|
+ == other.getDirectoryCount());
|
|
|
+ }
|
|
|
+ result = result && (hasQuota() == other.hasQuota());
|
|
|
+ if (hasQuota()) {
|
|
|
+ result = result && (getQuota()
|
|
|
+ == other.getQuota());
|
|
|
+ }
|
|
|
+ result = result && (hasSpaceConsumed() == other.hasSpaceConsumed());
|
|
|
+ if (hasSpaceConsumed()) {
|
|
|
+ result = result && (getSpaceConsumed()
|
|
|
+ == other.getSpaceConsumed());
|
|
|
+ }
|
|
|
+ result = result && (hasSpaceQuota() == other.hasSpaceQuota());
|
|
|
+ if (hasSpaceQuota()) {
|
|
|
+ result = result && (getSpaceQuota()
|
|
|
+ == other.getSpaceQuota());
|
|
|
+ }
|
|
|
+ result = result &&
|
|
|
+ getUnknownFields().equals(other.getUnknownFields());
|
|
|
+ return result;
|
|
|
+ }
|
|
|
+
|
|
|
+ @java.lang.Override
|
|
|
+ public int hashCode() {
|
|
|
+ int hash = 41;
|
|
|
+ hash = (19 * hash) + getDescriptorForType().hashCode();
|
|
|
+ if (hasLength()) {
|
|
|
+ hash = (37 * hash) + LENGTH_FIELD_NUMBER;
|
|
|
+ hash = (53 * hash) + hashLong(getLength());
|
|
|
+ }
|
|
|
+ if (hasFileCount()) {
|
|
|
+ hash = (37 * hash) + FILECOUNT_FIELD_NUMBER;
|
|
|
+ hash = (53 * hash) + hashLong(getFileCount());
|
|
|
+ }
|
|
|
+ if (hasDirectoryCount()) {
|
|
|
+ hash = (37 * hash) + DIRECTORYCOUNT_FIELD_NUMBER;
|
|
|
+ hash = (53 * hash) + hashLong(getDirectoryCount());
|
|
|
+ }
|
|
|
+ if (hasQuota()) {
|
|
|
+ hash = (37 * hash) + QUOTA_FIELD_NUMBER;
|
|
|
+ hash = (53 * hash) + hashLong(getQuota());
|
|
|
+ }
|
|
|
+ if (hasSpaceConsumed()) {
|
|
|
+ hash = (37 * hash) + SPACECONSUMED_FIELD_NUMBER;
|
|
|
+ hash = (53 * hash) + hashLong(getSpaceConsumed());
|
|
|
+ }
|
|
|
+ if (hasSpaceQuota()) {
|
|
|
+ hash = (37 * hash) + SPACEQUOTA_FIELD_NUMBER;
|
|
|
+ hash = (53 * hash) + hashLong(getSpaceQuota());
|
|
|
+ }
|
|
|
+ hash = (29 * hash) + getUnknownFields().hashCode();
|
|
|
+ return hash;
|
|
|
+ }
|
|
|
+
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto parseFrom(
|
|
|
+ com.google.protobuf.ByteString data)
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
+ return newBuilder().mergeFrom(data).buildParsed();
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto parseFrom(
|
|
|
+ com.google.protobuf.ByteString data,
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
+ return newBuilder().mergeFrom(data, extensionRegistry)
|
|
|
+ .buildParsed();
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto parseFrom(byte[] data)
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
+ return newBuilder().mergeFrom(data).buildParsed();
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto parseFrom(
|
|
|
+ byte[] data,
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
+ return newBuilder().mergeFrom(data, extensionRegistry)
|
|
|
+ .buildParsed();
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto parseFrom(java.io.InputStream input)
|
|
|
+ throws java.io.IOException {
|
|
|
+ return newBuilder().mergeFrom(input).buildParsed();
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto parseFrom(
|
|
|
+ java.io.InputStream input,
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
+ throws java.io.IOException {
|
|
|
+ return newBuilder().mergeFrom(input, extensionRegistry)
|
|
|
+ .buildParsed();
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto parseDelimitedFrom(java.io.InputStream input)
|
|
|
+ throws java.io.IOException {
|
|
|
+ Builder builder = newBuilder();
|
|
|
+ if (builder.mergeDelimitedFrom(input)) {
|
|
|
+ return builder.buildParsed();
|
|
|
+ } else {
|
|
|
+ return null;
|
|
|
+ }
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto parseDelimitedFrom(
|
|
|
+ java.io.InputStream input,
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
+ throws java.io.IOException {
|
|
|
+ Builder builder = newBuilder();
|
|
|
+ if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
|
|
|
+ return builder.buildParsed();
|
|
|
+ } else {
|
|
|
+ return null;
|
|
|
+ }
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto parseFrom(
|
|
|
+ com.google.protobuf.CodedInputStream input)
|
|
|
+ throws java.io.IOException {
|
|
|
+ return newBuilder().mergeFrom(input).buildParsed();
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto parseFrom(
|
|
|
+ com.google.protobuf.CodedInputStream input,
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
+ throws java.io.IOException {
|
|
|
+ return newBuilder().mergeFrom(input, extensionRegistry)
|
|
|
+ .buildParsed();
|
|
|
+ }
|
|
|
+
|
|
|
+ public static Builder newBuilder() { return Builder.create(); }
|
|
|
+ public Builder newBuilderForType() { return newBuilder(); }
|
|
|
+ public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto prototype) {
|
|
|
+ return newBuilder().mergeFrom(prototype);
|
|
|
+ }
|
|
|
+ public Builder toBuilder() { return newBuilder(this); }
|
|
|
+
|
|
|
+ @java.lang.Override
|
|
|
+ protected Builder newBuilderForType(
|
|
|
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
|
|
|
+ Builder builder = new Builder(parent);
|
|
|
+ return builder;
|
|
|
+ }
|
|
|
+ public static final class Builder extends
|
|
|
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
|
|
|
+ implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProtoOrBuilder {
|
|
|
+ public static final com.google.protobuf.Descriptors.Descriptor
|
|
|
+ getDescriptor() {
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_ContentSummaryProto_descriptor;
|
|
|
+ }
|
|
|
+
|
|
|
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
|
|
+ internalGetFieldAccessorTable() {
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_ContentSummaryProto_fieldAccessorTable;
|
|
|
+ }
|
|
|
+
|
|
|
+ // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto.newBuilder()
|
|
|
+ private Builder() {
|
|
|
+ maybeForceBuilderInitialization();
|
|
|
+ }
|
|
|
+
|
|
|
+ private Builder(BuilderParent parent) {
|
|
|
+ super(parent);
|
|
|
+ maybeForceBuilderInitialization();
|
|
|
+ }
|
|
|
+ private void maybeForceBuilderInitialization() {
|
|
|
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
|
|
|
+ }
|
|
|
+ }
|
|
|
+ private static Builder create() {
|
|
|
+ return new Builder();
|
|
|
+ }
|
|
|
+
|
|
|
+ public Builder clear() {
|
|
|
+ super.clear();
|
|
|
+ length_ = 0L;
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000001);
|
|
|
+ fileCount_ = 0L;
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000002);
|
|
|
+ directoryCount_ = 0L;
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000004);
|
|
|
+ quota_ = 0L;
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000008);
|
|
|
+ spaceConsumed_ = 0L;
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000010);
|
|
|
+ spaceQuota_ = 0L;
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000020);
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+
|
|
|
+ public Builder clone() {
|
|
|
+ return create().mergeFrom(buildPartial());
|
|
|
+ }
|
|
|
+
|
|
|
+ public com.google.protobuf.Descriptors.Descriptor
|
|
|
+ getDescriptorForType() {
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto.getDescriptor();
|
|
|
+ }
|
|
|
+
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto getDefaultInstanceForType() {
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto.getDefaultInstance();
|
|
|
+ }
|
|
|
+
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto build() {
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto result = buildPartial();
|
|
|
+ if (!result.isInitialized()) {
|
|
|
+ throw newUninitializedMessageException(result);
|
|
|
+ }
|
|
|
+ return result;
|
|
|
+ }
|
|
|
+
|
|
|
+ private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto buildParsed()
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto result = buildPartial();
|
|
|
+ if (!result.isInitialized()) {
|
|
|
+ throw newUninitializedMessageException(
|
|
|
+ result).asInvalidProtocolBufferException();
|
|
|
+ }
|
|
|
+ return result;
|
|
|
+ }
|
|
|
+
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto buildPartial() {
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto(this);
|
|
|
+ int from_bitField0_ = bitField0_;
|
|
|
+ int to_bitField0_ = 0;
|
|
|
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
+ to_bitField0_ |= 0x00000001;
|
|
|
+ }
|
|
|
+ result.length_ = length_;
|
|
|
+ if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
|
|
|
+ to_bitField0_ |= 0x00000002;
|
|
|
+ }
|
|
|
+ result.fileCount_ = fileCount_;
|
|
|
+ if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
|
|
|
+ to_bitField0_ |= 0x00000004;
|
|
|
+ }
|
|
|
+ result.directoryCount_ = directoryCount_;
|
|
|
+ if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
|
|
|
+ to_bitField0_ |= 0x00000008;
|
|
|
+ }
|
|
|
+ result.quota_ = quota_;
|
|
|
+ if (((from_bitField0_ & 0x00000010) == 0x00000010)) {
|
|
|
+ to_bitField0_ |= 0x00000010;
|
|
|
+ }
|
|
|
+ result.spaceConsumed_ = spaceConsumed_;
|
|
|
+ if (((from_bitField0_ & 0x00000020) == 0x00000020)) {
|
|
|
+ to_bitField0_ |= 0x00000020;
|
|
|
+ }
|
|
|
+ result.spaceQuota_ = spaceQuota_;
|
|
|
+ result.bitField0_ = to_bitField0_;
|
|
|
+ onBuilt();
|
|
|
+ return result;
|
|
|
+ }
|
|
|
+
|
|
|
+ public Builder mergeFrom(com.google.protobuf.Message other) {
|
|
|
+ if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto) {
|
|
|
+ return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto)other);
|
|
|
+ } else {
|
|
|
+ super.mergeFrom(other);
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+ }
|
|
|
+
|
|
|
+ public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto other) {
|
|
|
+ if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto.getDefaultInstance()) return this;
|
|
|
+ if (other.hasLength()) {
|
|
|
+ setLength(other.getLength());
|
|
|
+ }
|
|
|
+ if (other.hasFileCount()) {
|
|
|
+ setFileCount(other.getFileCount());
|
|
|
+ }
|
|
|
+ if (other.hasDirectoryCount()) {
|
|
|
+ setDirectoryCount(other.getDirectoryCount());
|
|
|
+ }
|
|
|
+ if (other.hasQuota()) {
|
|
|
+ setQuota(other.getQuota());
|
|
|
+ }
|
|
|
+ if (other.hasSpaceConsumed()) {
|
|
|
+ setSpaceConsumed(other.getSpaceConsumed());
|
|
|
+ }
|
|
|
+ if (other.hasSpaceQuota()) {
|
|
|
+ setSpaceQuota(other.getSpaceQuota());
|
|
|
+ }
|
|
|
+ this.mergeUnknownFields(other.getUnknownFields());
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+
|
|
|
+ public final boolean isInitialized() {
|
|
|
+ if (!hasLength()) {
|
|
|
+
|
|
|
+ return false;
|
|
|
+ }
|
|
|
+ if (!hasFileCount()) {
|
|
|
+
|
|
|
+ return false;
|
|
|
+ }
|
|
|
+ if (!hasDirectoryCount()) {
|
|
|
+
|
|
|
+ return false;
|
|
|
+ }
|
|
|
+ if (!hasQuota()) {
|
|
|
+
|
|
|
+ return false;
|
|
|
+ }
|
|
|
+ if (!hasSpaceConsumed()) {
|
|
|
+
|
|
|
+ return false;
|
|
|
+ }
|
|
|
+ if (!hasSpaceQuota()) {
|
|
|
+
|
|
|
+ return false;
|
|
|
+ }
|
|
|
+ return true;
|
|
|
+ }
|
|
|
+
|
|
|
+ public Builder mergeFrom(
|
|
|
+ com.google.protobuf.CodedInputStream input,
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
+ throws java.io.IOException {
|
|
|
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
|
|
|
+ com.google.protobuf.UnknownFieldSet.newBuilder(
|
|
|
+ this.getUnknownFields());
|
|
|
+ while (true) {
|
|
|
+ int tag = input.readTag();
|
|
|
+ switch (tag) {
|
|
|
+ case 0:
|
|
|
+ this.setUnknownFields(unknownFields.build());
|
|
|
+ onChanged();
|
|
|
+ return this;
|
|
|
+ default: {
|
|
|
+ if (!parseUnknownField(input, unknownFields,
|
|
|
+ extensionRegistry, tag)) {
|
|
|
+ this.setUnknownFields(unknownFields.build());
|
|
|
+ onChanged();
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+ break;
|
|
|
+ }
|
|
|
+ case 8: {
|
|
|
+ bitField0_ |= 0x00000001;
|
|
|
+ length_ = input.readUInt64();
|
|
|
+ break;
|
|
|
+ }
|
|
|
+ case 16: {
|
|
|
+ bitField0_ |= 0x00000002;
|
|
|
+ fileCount_ = input.readUInt64();
|
|
|
+ break;
|
|
|
+ }
|
|
|
+ case 24: {
|
|
|
+ bitField0_ |= 0x00000004;
|
|
|
+ directoryCount_ = input.readUInt64();
|
|
|
+ break;
|
|
|
+ }
|
|
|
+ case 32: {
|
|
|
+ bitField0_ |= 0x00000008;
|
|
|
+ quota_ = input.readUInt64();
|
|
|
+ break;
|
|
|
+ }
|
|
|
+ case 40: {
|
|
|
+ bitField0_ |= 0x00000010;
|
|
|
+ spaceConsumed_ = input.readUInt64();
|
|
|
+ break;
|
|
|
+ }
|
|
|
+ case 48: {
|
|
|
+ bitField0_ |= 0x00000020;
|
|
|
+ spaceQuota_ = input.readUInt64();
|
|
|
+ break;
|
|
|
+ }
|
|
|
+ }
|
|
|
+ }
|
|
|
+ }
|
|
|
+
|
|
|
+ private int bitField0_;
|
|
|
+
|
|
|
+ // required uint64 length = 1;
|
|
|
+ private long length_ ;
|
|
|
+ public boolean hasLength() {
|
|
|
+ return ((bitField0_ & 0x00000001) == 0x00000001);
|
|
|
+ }
|
|
|
+ public long getLength() {
|
|
|
+ return length_;
|
|
|
+ }
|
|
|
+ public Builder setLength(long value) {
|
|
|
+ bitField0_ |= 0x00000001;
|
|
|
+ length_ = value;
|
|
|
+ onChanged();
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+ public Builder clearLength() {
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000001);
|
|
|
+ length_ = 0L;
|
|
|
+ onChanged();
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+
|
|
|
+ // required uint64 fileCount = 2;
|
|
|
+ private long fileCount_ ;
|
|
|
+ public boolean hasFileCount() {
|
|
|
+ return ((bitField0_ & 0x00000002) == 0x00000002);
|
|
|
+ }
|
|
|
+ public long getFileCount() {
|
|
|
+ return fileCount_;
|
|
|
+ }
|
|
|
+ public Builder setFileCount(long value) {
|
|
|
+ bitField0_ |= 0x00000002;
|
|
|
+ fileCount_ = value;
|
|
|
+ onChanged();
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+ public Builder clearFileCount() {
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000002);
|
|
|
+ fileCount_ = 0L;
|
|
|
+ onChanged();
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+
|
|
|
+ // required uint64 directoryCount = 3;
|
|
|
+ private long directoryCount_ ;
|
|
|
+ public boolean hasDirectoryCount() {
|
|
|
+ return ((bitField0_ & 0x00000004) == 0x00000004);
|
|
|
+ }
|
|
|
+ public long getDirectoryCount() {
|
|
|
+ return directoryCount_;
|
|
|
+ }
|
|
|
+ public Builder setDirectoryCount(long value) {
|
|
|
+ bitField0_ |= 0x00000004;
|
|
|
+ directoryCount_ = value;
|
|
|
+ onChanged();
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+ public Builder clearDirectoryCount() {
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000004);
|
|
|
+ directoryCount_ = 0L;
|
|
|
+ onChanged();
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+
|
|
|
+ // required uint64 quota = 4;
|
|
|
+ private long quota_ ;
|
|
|
+ public boolean hasQuota() {
|
|
|
+ return ((bitField0_ & 0x00000008) == 0x00000008);
|
|
|
+ }
|
|
|
+ public long getQuota() {
|
|
|
+ return quota_;
|
|
|
+ }
|
|
|
+ public Builder setQuota(long value) {
|
|
|
+ bitField0_ |= 0x00000008;
|
|
|
+ quota_ = value;
|
|
|
+ onChanged();
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+ public Builder clearQuota() {
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000008);
|
|
|
+ quota_ = 0L;
|
|
|
+ onChanged();
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+
|
|
|
+ // required uint64 spaceConsumed = 5;
|
|
|
+ private long spaceConsumed_ ;
|
|
|
+ public boolean hasSpaceConsumed() {
|
|
|
+ return ((bitField0_ & 0x00000010) == 0x00000010);
|
|
|
+ }
|
|
|
+ public long getSpaceConsumed() {
|
|
|
+ return spaceConsumed_;
|
|
|
+ }
|
|
|
+ public Builder setSpaceConsumed(long value) {
|
|
|
+ bitField0_ |= 0x00000010;
|
|
|
+ spaceConsumed_ = value;
|
|
|
+ onChanged();
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+ public Builder clearSpaceConsumed() {
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000010);
|
|
|
+ spaceConsumed_ = 0L;
|
|
|
+ onChanged();
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+
|
|
|
+ // required uint64 spaceQuota = 6;
|
|
|
+ private long spaceQuota_ ;
|
|
|
+ public boolean hasSpaceQuota() {
|
|
|
+ return ((bitField0_ & 0x00000020) == 0x00000020);
|
|
|
+ }
|
|
|
+ public long getSpaceQuota() {
|
|
|
+ return spaceQuota_;
|
|
|
+ }
|
|
|
+ public Builder setSpaceQuota(long value) {
|
|
|
+ bitField0_ |= 0x00000020;
|
|
|
+ spaceQuota_ = value;
|
|
|
+ onChanged();
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+ public Builder clearSpaceQuota() {
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000020);
|
|
|
+ spaceQuota_ = 0L;
|
|
|
+ onChanged();
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+
|
|
|
+ // @@protoc_insertion_point(builder_scope:ContentSummaryProto)
|
|
|
+ }
|
|
|
+
|
|
|
+ static {
|
|
|
+ defaultInstance = new ContentSummaryProto(true);
|
|
|
+ defaultInstance.initFields();
|
|
|
+ }
|
|
|
+
|
|
|
+ // @@protoc_insertion_point(class_scope:ContentSummaryProto)
|
|
|
+ }
|
|
|
+
|
|
|
+ public interface CorruptFileBlocksProtoOrBuilder
|
|
|
+ extends com.google.protobuf.MessageOrBuilder {
|
|
|
+
|
|
|
+ // repeated string files = 1;
|
|
|
+ java.util.List<String> getFilesList();
|
|
|
+ int getFilesCount();
|
|
|
+ String getFiles(int index);
|
|
|
+
|
|
|
+ // required string cookie = 2;
|
|
|
+ boolean hasCookie();
|
|
|
+ String getCookie();
|
|
|
+ }
|
|
|
+ public static final class CorruptFileBlocksProto extends
|
|
|
+ com.google.protobuf.GeneratedMessage
|
|
|
+ implements CorruptFileBlocksProtoOrBuilder {
|
|
|
+ // Use CorruptFileBlocksProto.newBuilder() to construct.
|
|
|
+ private CorruptFileBlocksProto(Builder builder) {
|
|
|
+ super(builder);
|
|
|
+ }
|
|
|
+ private CorruptFileBlocksProto(boolean noInit) {}
|
|
|
+
|
|
|
+ private static final CorruptFileBlocksProto defaultInstance;
|
|
|
+ public static CorruptFileBlocksProto getDefaultInstance() {
|
|
|
+ return defaultInstance;
|
|
|
+ }
|
|
|
+
|
|
|
+ public CorruptFileBlocksProto getDefaultInstanceForType() {
|
|
|
+ return defaultInstance;
|
|
|
+ }
|
|
|
+
|
|
|
+ public static final com.google.protobuf.Descriptors.Descriptor
|
|
|
+ getDescriptor() {
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_CorruptFileBlocksProto_descriptor;
|
|
|
+ }
|
|
|
+
|
|
|
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
|
|
+ internalGetFieldAccessorTable() {
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_CorruptFileBlocksProto_fieldAccessorTable;
|
|
|
+ }
|
|
|
+
|
|
|
+ private int bitField0_;
|
|
|
+ // repeated string files = 1;
|
|
|
+ public static final int FILES_FIELD_NUMBER = 1;
|
|
|
+ private com.google.protobuf.LazyStringList files_;
|
|
|
+ public java.util.List<String>
|
|
|
+ getFilesList() {
|
|
|
+ return files_;
|
|
|
+ }
|
|
|
+ public int getFilesCount() {
|
|
|
+ return files_.size();
|
|
|
+ }
|
|
|
+ public String getFiles(int index) {
|
|
|
+ return files_.get(index);
|
|
|
+ }
|
|
|
+
|
|
|
+ // required string cookie = 2;
|
|
|
+ public static final int COOKIE_FIELD_NUMBER = 2;
|
|
|
+ private java.lang.Object cookie_;
|
|
|
+ public boolean hasCookie() {
|
|
|
+ return ((bitField0_ & 0x00000001) == 0x00000001);
|
|
|
+ }
|
|
|
+ public String getCookie() {
|
|
|
+ java.lang.Object ref = cookie_;
|
|
|
+ if (ref instanceof String) {
|
|
|
+ return (String) ref;
|
|
|
+ } else {
|
|
|
+ com.google.protobuf.ByteString bs =
|
|
|
+ (com.google.protobuf.ByteString) ref;
|
|
|
+ String s = bs.toStringUtf8();
|
|
|
+ if (com.google.protobuf.Internal.isValidUtf8(bs)) {
|
|
|
+ cookie_ = s;
|
|
|
+ }
|
|
|
+ return s;
|
|
|
+ }
|
|
|
+ }
|
|
|
+ private com.google.protobuf.ByteString getCookieBytes() {
|
|
|
+ java.lang.Object ref = cookie_;
|
|
|
+ if (ref instanceof String) {
|
|
|
+ com.google.protobuf.ByteString b =
|
|
|
+ com.google.protobuf.ByteString.copyFromUtf8((String) ref);
|
|
|
+ cookie_ = b;
|
|
|
+ return b;
|
|
|
+ } else {
|
|
|
+ return (com.google.protobuf.ByteString) ref;
|
|
|
+ }
|
|
|
+ }
|
|
|
+
|
|
|
+ private void initFields() {
|
|
|
+ files_ = com.google.protobuf.LazyStringArrayList.EMPTY;
|
|
|
+ cookie_ = "";
|
|
|
+ }
|
|
|
+ private byte memoizedIsInitialized = -1;
|
|
|
+ public final boolean isInitialized() {
|
|
|
+ byte isInitialized = memoizedIsInitialized;
|
|
|
+ if (isInitialized != -1) return isInitialized == 1;
|
|
|
+
|
|
|
+ if (!hasCookie()) {
|
|
|
+ memoizedIsInitialized = 0;
|
|
|
+ return false;
|
|
|
+ }
|
|
|
+ memoizedIsInitialized = 1;
|
|
|
+ return true;
|
|
|
+ }
|
|
|
+
|
|
|
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
|
|
|
+ throws java.io.IOException {
|
|
|
+ getSerializedSize();
|
|
|
+ for (int i = 0; i < files_.size(); i++) {
|
|
|
+ output.writeBytes(1, files_.getByteString(i));
|
|
|
+ }
|
|
|
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
+ output.writeBytes(2, getCookieBytes());
|
|
|
+ }
|
|
|
+ getUnknownFields().writeTo(output);
|
|
|
+ }
|
|
|
+
|
|
|
+ private int memoizedSerializedSize = -1;
|
|
|
+ public int getSerializedSize() {
|
|
|
+ int size = memoizedSerializedSize;
|
|
|
+ if (size != -1) return size;
|
|
|
+
|
|
|
+ size = 0;
|
|
|
+ {
|
|
|
+ int dataSize = 0;
|
|
|
+ for (int i = 0; i < files_.size(); i++) {
|
|
|
+ dataSize += com.google.protobuf.CodedOutputStream
|
|
|
+ .computeBytesSizeNoTag(files_.getByteString(i));
|
|
|
+ }
|
|
|
+ size += dataSize;
|
|
|
+ size += 1 * getFilesList().size();
|
|
|
+ }
|
|
|
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
+ size += com.google.protobuf.CodedOutputStream
|
|
|
+ .computeBytesSize(2, getCookieBytes());
|
|
|
+ }
|
|
|
+ size += getUnknownFields().getSerializedSize();
|
|
|
+ memoizedSerializedSize = size;
|
|
|
+ return size;
|
|
|
+ }
|
|
|
+
|
|
|
+ private static final long serialVersionUID = 0L;
|
|
|
+ @java.lang.Override
|
|
|
+ protected java.lang.Object writeReplace()
|
|
|
+ throws java.io.ObjectStreamException {
|
|
|
+ return super.writeReplace();
|
|
|
+ }
|
|
|
+
|
|
|
+ @java.lang.Override
|
|
|
+ public boolean equals(final java.lang.Object obj) {
|
|
|
+ if (obj == this) {
|
|
|
+ return true;
|
|
|
+ }
|
|
|
+ if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto)) {
|
|
|
+ return super.equals(obj);
|
|
|
+ }
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto) obj;
|
|
|
+
|
|
|
+ boolean result = true;
|
|
|
+ result = result && getFilesList()
|
|
|
+ .equals(other.getFilesList());
|
|
|
+ result = result && (hasCookie() == other.hasCookie());
|
|
|
+ if (hasCookie()) {
|
|
|
+ result = result && getCookie()
|
|
|
+ .equals(other.getCookie());
|
|
|
+ }
|
|
|
+ result = result &&
|
|
|
+ getUnknownFields().equals(other.getUnknownFields());
|
|
|
+ return result;
|
|
|
+ }
|
|
|
+
|
|
|
+ @java.lang.Override
|
|
|
+ public int hashCode() {
|
|
|
+ int hash = 41;
|
|
|
+ hash = (19 * hash) + getDescriptorForType().hashCode();
|
|
|
+ if (getFilesCount() > 0) {
|
|
|
+ hash = (37 * hash) + FILES_FIELD_NUMBER;
|
|
|
+ hash = (53 * hash) + getFilesList().hashCode();
|
|
|
+ }
|
|
|
+ if (hasCookie()) {
|
|
|
+ hash = (37 * hash) + COOKIE_FIELD_NUMBER;
|
|
|
+ hash = (53 * hash) + getCookie().hashCode();
|
|
|
+ }
|
|
|
+ hash = (29 * hash) + getUnknownFields().hashCode();
|
|
|
+ return hash;
|
|
|
+ }
|
|
|
+
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto parseFrom(
|
|
|
+ com.google.protobuf.ByteString data)
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
+ return newBuilder().mergeFrom(data).buildParsed();
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto parseFrom(
|
|
|
+ com.google.protobuf.ByteString data,
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
+ return newBuilder().mergeFrom(data, extensionRegistry)
|
|
|
+ .buildParsed();
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto parseFrom(byte[] data)
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
+ return newBuilder().mergeFrom(data).buildParsed();
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto parseFrom(
|
|
|
+ byte[] data,
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
+ return newBuilder().mergeFrom(data, extensionRegistry)
|
|
|
+ .buildParsed();
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto parseFrom(java.io.InputStream input)
|
|
|
+ throws java.io.IOException {
|
|
|
+ return newBuilder().mergeFrom(input).buildParsed();
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto parseFrom(
|
|
|
+ java.io.InputStream input,
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
+ throws java.io.IOException {
|
|
|
+ return newBuilder().mergeFrom(input, extensionRegistry)
|
|
|
+ .buildParsed();
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto parseDelimitedFrom(java.io.InputStream input)
|
|
|
+ throws java.io.IOException {
|
|
|
+ Builder builder = newBuilder();
|
|
|
+ if (builder.mergeDelimitedFrom(input)) {
|
|
|
+ return builder.buildParsed();
|
|
|
+ } else {
|
|
|
+ return null;
|
|
|
+ }
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto parseDelimitedFrom(
|
|
|
+ java.io.InputStream input,
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
+ throws java.io.IOException {
|
|
|
+ Builder builder = newBuilder();
|
|
|
+ if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
|
|
|
+ return builder.buildParsed();
|
|
|
+ } else {
|
|
|
+ return null;
|
|
|
+ }
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto parseFrom(
|
|
|
+ com.google.protobuf.CodedInputStream input)
|
|
|
+ throws java.io.IOException {
|
|
|
+ return newBuilder().mergeFrom(input).buildParsed();
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto parseFrom(
|
|
|
+ com.google.protobuf.CodedInputStream input,
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
+ throws java.io.IOException {
|
|
|
+ return newBuilder().mergeFrom(input, extensionRegistry)
|
|
|
+ .buildParsed();
|
|
|
+ }
|
|
|
+
|
|
|
+ public static Builder newBuilder() { return Builder.create(); }
|
|
|
+ public Builder newBuilderForType() { return newBuilder(); }
|
|
|
+ public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto prototype) {
|
|
|
+ return newBuilder().mergeFrom(prototype);
|
|
|
+ }
|
|
|
+ public Builder toBuilder() { return newBuilder(this); }
|
|
|
+
|
|
|
+ @java.lang.Override
|
|
|
+ protected Builder newBuilderForType(
|
|
|
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
|
|
|
+ Builder builder = new Builder(parent);
|
|
|
+ return builder;
|
|
|
+ }
|
|
|
+ public static final class Builder extends
|
|
|
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
|
|
|
+ implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProtoOrBuilder {
|
|
|
+ public static final com.google.protobuf.Descriptors.Descriptor
|
|
|
+ getDescriptor() {
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_CorruptFileBlocksProto_descriptor;
|
|
|
+ }
|
|
|
+
|
|
|
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
|
|
+ internalGetFieldAccessorTable() {
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_CorruptFileBlocksProto_fieldAccessorTable;
|
|
|
+ }
|
|
|
+
|
|
|
+ // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ files_ = com.google.protobuf.LazyStringArrayList.EMPTY;
+ bitField0_ = (bitField0_ & ~0x00000001);
+ cookie_ = "";
+ bitField0_ = (bitField0_ & ~0x00000002);
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
|
|
|
+ }
|
|
|
+
|
|
|
+ public com.google.protobuf.Descriptors.Descriptor
|
|
|
+ getDescriptorForType() {
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto.getDescriptor();
|
|
|
+ }
|
|
|
+
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto getDefaultInstanceForType() {
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto.getDefaultInstance();
|
|
|
+ }
|
|
|
+
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto build() {
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto result = buildPartial();
|
|
|
+ if (!result.isInitialized()) {
|
|
|
+ throw newUninitializedMessageException(result);
|
|
|
+ }
|
|
|
+ return result;
|
|
|
+ }
|
|
|
+
|
|
|
+ private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto buildParsed()
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto result = buildPartial();
|
|
|
+ if (!result.isInitialized()) {
|
|
|
+ throw newUninitializedMessageException(
|
|
|
+ result).asInvalidProtocolBufferException();
|
|
|
+ }
|
|
|
+ return result;
|
|
|
+ }
|
|
|
+
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto buildPartial() {
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto(this);
|
|
|
+ int from_bitField0_ = bitField0_;
|
|
|
+ int to_bitField0_ = 0;
|
|
|
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
+ files_ = new com.google.protobuf.UnmodifiableLazyStringList(
|
|
|
+ files_);
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000001);
|
|
|
+ }
|
|
|
+ result.files_ = files_;
|
|
|
+ if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
|
|
|
+ to_bitField0_ |= 0x00000001;
|
|
|
+ }
|
|
|
+ result.cookie_ = cookie_;
|
|
|
+ result.bitField0_ = to_bitField0_;
|
|
|
+ onBuilt();
|
|
|
+ return result;
|
|
|
+ }
|
|
|
+
|
|
|
+ public Builder mergeFrom(com.google.protobuf.Message other) {
|
|
|
+ if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto) {
|
|
|
+ return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto)other);
|
|
|
+ } else {
|
|
|
+ super.mergeFrom(other);
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+ }
|
|
|
+
|
|
|
+ public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto other) {
|
|
|
+ if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto.getDefaultInstance()) return this;
|
|
|
+ if (!other.files_.isEmpty()) {
|
|
|
+ if (files_.isEmpty()) {
|
|
|
+ files_ = other.files_;
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000001);
|
|
|
+ } else {
|
|
|
+ ensureFilesIsMutable();
|
|
|
+ files_.addAll(other.files_);
|
|
|
+ }
|
|
|
+ onChanged();
|
|
|
+ }
|
|
|
+ if (other.hasCookie()) {
|
|
|
+ setCookie(other.getCookie());
|
|
|
+ }
|
|
|
+ this.mergeUnknownFields(other.getUnknownFields());
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+
|
|
|
+ public final boolean isInitialized() {
|
|
|
+ if (!hasCookie()) {
|
|
|
+
|
|
|
+ return false;
|
|
|
+ }
|
|
|
+ return true;
|
|
|
+ }
|
|
|
+
|
|
|
+ public Builder mergeFrom(
|
|
|
+ com.google.protobuf.CodedInputStream input,
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
+ throws java.io.IOException {
|
|
|
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
|
|
|
+ com.google.protobuf.UnknownFieldSet.newBuilder(
|
|
|
+ this.getUnknownFields());
|
|
|
+ while (true) {
|
|
|
+ int tag = input.readTag();
|
|
|
+ switch (tag) {
|
|
|
+ case 0:
|
|
|
+ this.setUnknownFields(unknownFields.build());
|
|
|
+ onChanged();
|
|
|
+ return this;
|
|
|
+ default: {
|
|
|
+ if (!parseUnknownField(input, unknownFields,
|
|
|
+ extensionRegistry, tag)) {
|
|
|
+ this.setUnknownFields(unknownFields.build());
|
|
|
+ onChanged();
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+ break;
|
|
|
+ }
|
|
|
+ case 10: {
|
|
|
+ ensureFilesIsMutable();
|
|
|
+ files_.add(input.readBytes());
|
|
|
+ break;
|
|
|
+ }
|
|
|
+ case 18: {
|
|
|
+ bitField0_ |= 0x00000002;
|
|
|
+ cookie_ = input.readBytes();
|
|
|
+ break;
|
|
|
+ }
|
|
|
+ }
|
|
|
+ }
|
|
|
+ }
|
|
|
+
|
|
|
+ private int bitField0_;
|
|
|
+
|
|
|
+ // repeated string files = 1;
|
|
|
+ private com.google.protobuf.LazyStringList files_ = com.google.protobuf.LazyStringArrayList.EMPTY;
|
|
|
+ private void ensureFilesIsMutable() {
|
|
|
+ if (!((bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
+ files_ = new com.google.protobuf.LazyStringArrayList(files_);
|
|
|
+ bitField0_ |= 0x00000001;
|
|
|
+ }
|
|
|
+ }
|
|
|
+ public java.util.List<String>
|
|
|
+ getFilesList() {
|
|
|
+ return java.util.Collections.unmodifiableList(files_);
|
|
|
+ }
|
|
|
+ public int getFilesCount() {
|
|
|
+ return files_.size();
|
|
|
+ }
|
|
|
+ public String getFiles(int index) {
|
|
|
+ return files_.get(index);
|
|
|
+ }
|
|
|
+ public Builder setFiles(
|
|
|
+ int index, String value) {
|
|
|
+ if (value == null) {
|
|
|
+ throw new NullPointerException();
|
|
|
+ }
|
|
|
+ ensureFilesIsMutable();
|
|
|
+ files_.set(index, value);
|
|
|
+ onChanged();
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+ public Builder addFiles(String value) {
|
|
|
+ if (value == null) {
|
|
|
+ throw new NullPointerException();
|
|
|
+ }
|
|
|
+ ensureFilesIsMutable();
|
|
|
+ files_.add(value);
|
|
|
+ onChanged();
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+ public Builder addAllFiles(
|
|
|
+ java.lang.Iterable<String> values) {
|
|
|
+ ensureFilesIsMutable();
|
|
|
+ super.addAll(values, files_);
|
|
|
+ onChanged();
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+ public Builder clearFiles() {
|
|
|
+ files_ = com.google.protobuf.LazyStringArrayList.EMPTY;
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000001);
|
|
|
+ onChanged();
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+ void addFiles(com.google.protobuf.ByteString value) {
|
|
|
+ ensureFilesIsMutable();
|
|
|
+ files_.add(value);
|
|
|
+ onChanged();
|
|
|
+ }
|
|
|
+
|
|
|
+ // required string cookie = 2;
|
|
|
+ private java.lang.Object cookie_ = "";
|
|
|
+ public boolean hasCookie() {
|
|
|
+ return ((bitField0_ & 0x00000002) == 0x00000002);
|
|
|
+ }
|
|
|
+ public String getCookie() {
|
|
|
+ java.lang.Object ref = cookie_;
|
|
|
+ if (!(ref instanceof String)) {
|
|
|
+ String s = ((com.google.protobuf.ByteString) ref).toStringUtf8();
|
|
|
+ cookie_ = s;
|
|
|
+ return s;
|
|
|
+ } else {
|
|
|
+ return (String) ref;
|
|
|
+ }
|
|
|
+ }
|
|
|
+ public Builder setCookie(String value) {
|
|
|
+ if (value == null) {
|
|
|
+ throw new NullPointerException();
|
|
|
+ }
|
|
|
+ bitField0_ |= 0x00000002;
|
|
|
+ cookie_ = value;
|
|
|
+ onChanged();
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+ public Builder clearCookie() {
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000002);
|
|
|
+ cookie_ = getDefaultInstance().getCookie();
|
|
|
+ onChanged();
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+ void setCookie(com.google.protobuf.ByteString value) {
|
|
|
+ bitField0_ |= 0x00000002;
|
|
|
+ cookie_ = value;
|
|
|
+ onChanged();
|
|
|
+ }
|
|
|
+
|
|
|
+ // @@protoc_insertion_point(builder_scope:CorruptFileBlocksProto)
|
|
|
+ }
|
|
|
+
|
|
|
+ static {
|
|
|
+ defaultInstance = new CorruptFileBlocksProto(true);
|
|
|
+ defaultInstance.initFields();
|
|
|
+ }
|
|
|
+
+ // @@protoc_insertion_point(class_scope:CorruptFileBlocksProto)
+ }
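For orientation only, a minimal usage sketch of the class generated above; it is not part of the generated file or of this patch, it assumes the standard protobuf-java runtime is on the classpath, and the file path and cookie values are made up (the outer HdfsProtos qualifier is omitted for brevity):

    // Build a CorruptFileBlocksProto and round-trip it through bytes.
    CorruptFileBlocksProto proto = CorruptFileBlocksProto.newBuilder()
        .addFiles("/user/foo/part-00000")   // hypothetical corrupt-file path
        .setCookie("cookie-1")              // required field; build() throws if unset
        .build();
    byte[] bytes = proto.toByteArray();
    CorruptFileBlocksProto copy =
        CorruptFileBlocksProto.parseFrom(new java.io.ByteArrayInputStream(bytes));

The builder's isInitialized() check shown above is what makes the missing-cookie case fail at build() time rather than at serialization time.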
+
+ public interface FsPermissionProtoOrBuilder
|
|
|
+ extends com.google.protobuf.MessageOrBuilder {
|
|
|
+
|
|
|
+ // required uint32 perm = 1;
|
|
|
+ boolean hasPerm();
|
|
|
+ int getPerm();
|
|
|
+ }
|
|
|
+ public static final class FsPermissionProto extends
|
|
|
+ com.google.protobuf.GeneratedMessage
|
|
|
+ implements FsPermissionProtoOrBuilder {
|
|
|
+ // Use FsPermissionProto.newBuilder() to construct.
|
|
|
+ private FsPermissionProto(Builder builder) {
|
|
|
+ super(builder);
|
|
|
+ }
|
|
|
+ private FsPermissionProto(boolean noInit) {}
|
|
|
+
|
|
|
+ private static final FsPermissionProto defaultInstance;
|
|
|
+ public static FsPermissionProto getDefaultInstance() {
|
|
|
+ return defaultInstance;
|
|
|
+ }
|
|
|
+
|
|
|
+ public FsPermissionProto getDefaultInstanceForType() {
|
|
|
+ return defaultInstance;
|
|
|
+ }
|
|
|
+
|
|
|
+ public static final com.google.protobuf.Descriptors.Descriptor
|
|
|
+ getDescriptor() {
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_FsPermissionProto_descriptor;
|
|
|
+ }
|
|
|
+
|
|
|
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
|
|
+ internalGetFieldAccessorTable() {
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_FsPermissionProto_fieldAccessorTable;
|
|
|
+ }
|
|
|
+
|
|
|
+ private int bitField0_;
|
|
|
+ // required uint32 perm = 1;
|
|
|
+ public static final int PERM_FIELD_NUMBER = 1;
|
|
|
+ private int perm_;
|
|
|
+ public boolean hasPerm() {
|
|
|
+ return ((bitField0_ & 0x00000001) == 0x00000001);
|
|
|
+ }
|
|
|
+ public int getPerm() {
|
|
|
+ return perm_;
|
|
|
+ }
|
|
|
+
|
|
|
+ private void initFields() {
|
|
|
+ perm_ = 0;
|
|
|
+ }
|
|
|
+ private byte memoizedIsInitialized = -1;
|
|
|
+ public final boolean isInitialized() {
|
|
|
+ byte isInitialized = memoizedIsInitialized;
|
|
|
+ if (isInitialized != -1) return isInitialized == 1;
|
|
|
+
|
|
|
+ if (!hasPerm()) {
|
|
|
+ memoizedIsInitialized = 0;
|
|
|
+ return false;
|
|
|
+ }
|
|
|
+ memoizedIsInitialized = 1;
|
|
|
+ return true;
|
|
|
+ }
|
|
|
+
|
|
|
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
|
|
|
+ throws java.io.IOException {
|
|
|
+ getSerializedSize();
|
|
|
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
+ output.writeUInt32(1, perm_);
|
|
|
+ }
|
|
|
+ getUnknownFields().writeTo(output);
|
|
|
+ }
|
|
|
+
|
|
|
+ private int memoizedSerializedSize = -1;
|
|
|
+ public int getSerializedSize() {
|
|
|
+ int size = memoizedSerializedSize;
|
|
|
+ if (size != -1) return size;
|
|
|
+
|
|
|
+ size = 0;
|
|
|
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
+ size += com.google.protobuf.CodedOutputStream
|
|
|
+ .computeUInt32Size(1, perm_);
|
|
|
+ }
|
|
|
+ size += getUnknownFields().getSerializedSize();
|
|
|
+ memoizedSerializedSize = size;
|
|
|
+ return size;
|
|
|
+ }
|
|
|
+
|
|
|
+ private static final long serialVersionUID = 0L;
|
|
|
+ @java.lang.Override
|
|
|
+ protected java.lang.Object writeReplace()
|
|
|
+ throws java.io.ObjectStreamException {
|
|
|
+ return super.writeReplace();
|
|
|
+ }
|
|
|
+
|
|
|
+ @java.lang.Override
|
|
|
+ public boolean equals(final java.lang.Object obj) {
|
|
|
+ if (obj == this) {
|
|
|
+ return true;
|
|
|
+ }
|
|
|
+ if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto)) {
|
|
|
+ return super.equals(obj);
|
|
|
+ }
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto) obj;
|
|
|
+
|
|
|
+ boolean result = true;
|
|
|
+ result = result && (hasPerm() == other.hasPerm());
|
|
|
+ if (hasPerm()) {
|
|
|
+ result = result && (getPerm()
|
|
|
+ == other.getPerm());
|
|
|
+ }
|
|
|
+ result = result &&
|
|
|
+ getUnknownFields().equals(other.getUnknownFields());
|
|
|
+ return result;
|
|
|
+ }
|
|
|
+
|
|
|
+ @java.lang.Override
|
|
|
+ public int hashCode() {
|
|
|
+ int hash = 41;
|
|
|
+ hash = (19 * hash) + getDescriptorForType().hashCode();
|
|
|
+ if (hasPerm()) {
|
|
|
+ hash = (37 * hash) + PERM_FIELD_NUMBER;
|
|
|
+ hash = (53 * hash) + getPerm();
|
|
|
+ }
|
|
|
+ hash = (29 * hash) + getUnknownFields().hashCode();
|
|
|
+ return hash;
|
|
|
+ }
|
|
|
+
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto parseFrom(
|
|
|
+ com.google.protobuf.ByteString data)
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
+ return newBuilder().mergeFrom(data).buildParsed();
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto parseFrom(
|
|
|
+ com.google.protobuf.ByteString data,
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
+ return newBuilder().mergeFrom(data, extensionRegistry)
|
|
|
+ .buildParsed();
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto parseFrom(byte[] data)
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
+ return newBuilder().mergeFrom(data).buildParsed();
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto parseFrom(
|
|
|
+ byte[] data,
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
+ return newBuilder().mergeFrom(data, extensionRegistry)
|
|
|
+ .buildParsed();
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto parseFrom(java.io.InputStream input)
|
|
|
+ throws java.io.IOException {
|
|
|
+ return newBuilder().mergeFrom(input).buildParsed();
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto parseFrom(
|
|
|
+ java.io.InputStream input,
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
+ throws java.io.IOException {
|
|
|
+ return newBuilder().mergeFrom(input, extensionRegistry)
|
|
|
+ .buildParsed();
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto parseDelimitedFrom(java.io.InputStream input)
|
|
|
+ throws java.io.IOException {
|
|
|
+ Builder builder = newBuilder();
|
|
|
+ if (builder.mergeDelimitedFrom(input)) {
|
|
|
+ return builder.buildParsed();
|
|
|
+ } else {
|
|
|
+ return null;
|
|
|
+ }
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto parseDelimitedFrom(
|
|
|
+ java.io.InputStream input,
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
+ throws java.io.IOException {
|
|
|
+ Builder builder = newBuilder();
|
|
|
+ if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
|
|
|
+ return builder.buildParsed();
|
|
|
+ } else {
|
|
|
+ return null;
|
|
|
+ }
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto parseFrom(
|
|
|
+ com.google.protobuf.CodedInputStream input)
|
|
|
+ throws java.io.IOException {
|
|
|
+ return newBuilder().mergeFrom(input).buildParsed();
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto parseFrom(
|
|
|
+ com.google.protobuf.CodedInputStream input,
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
+ throws java.io.IOException {
|
|
|
+ return newBuilder().mergeFrom(input, extensionRegistry)
|
|
|
+ .buildParsed();
|
|
|
+ }
|
|
|
+
|
|
|
+ public static Builder newBuilder() { return Builder.create(); }
|
|
|
+ public Builder newBuilderForType() { return newBuilder(); }
|
|
|
+ public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto prototype) {
|
|
|
+ return newBuilder().mergeFrom(prototype);
|
|
|
+ }
|
|
|
+ public Builder toBuilder() { return newBuilder(this); }
|
|
|
+
|
|
|
+ @java.lang.Override
|
|
|
+ protected Builder newBuilderForType(
|
|
|
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
|
|
|
+ Builder builder = new Builder(parent);
|
|
|
+ return builder;
|
|
|
+ }
|
|
|
+ public static final class Builder extends
|
|
|
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
|
|
|
+ implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProtoOrBuilder {
|
|
|
+ public static final com.google.protobuf.Descriptors.Descriptor
|
|
|
+ getDescriptor() {
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_FsPermissionProto_descriptor;
|
|
|
+ }
|
|
|
+
|
|
|
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
|
|
+ internalGetFieldAccessorTable() {
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_FsPermissionProto_fieldAccessorTable;
|
|
|
+ }
|
|
|
+
|
|
|
+ // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.newBuilder()
|
|
|
+ private Builder() {
|
|
|
+ maybeForceBuilderInitialization();
|
|
|
+ }
|
|
|
+
|
|
|
+ private Builder(BuilderParent parent) {
|
|
|
+ super(parent);
|
|
|
+ maybeForceBuilderInitialization();
|
|
|
+ }
|
|
|
+ private void maybeForceBuilderInitialization() {
|
|
|
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
|
|
|
+ }
|
|
|
+ }
|
|
|
+ private static Builder create() {
|
|
|
+ return new Builder();
|
|
|
+ }
|
|
|
+
|
|
|
+ public Builder clear() {
|
|
|
+ super.clear();
|
|
|
+ perm_ = 0;
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000001);
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+
|
|
|
+ public Builder clone() {
|
|
|
+ return create().mergeFrom(buildPartial());
|
|
|
+ }
|
|
|
+
|
|
|
+ public com.google.protobuf.Descriptors.Descriptor
|
|
|
+ getDescriptorForType() {
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.getDescriptor();
|
|
|
+ }
|
|
|
+
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto getDefaultInstanceForType() {
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.getDefaultInstance();
|
|
|
+ }
|
|
|
+
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto build() {
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto result = buildPartial();
|
|
|
+ if (!result.isInitialized()) {
|
|
|
+ throw newUninitializedMessageException(result);
|
|
|
+ }
|
|
|
+ return result;
|
|
|
+ }
|
|
|
+
|
|
|
+ private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto buildParsed()
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto result = buildPartial();
|
|
|
+ if (!result.isInitialized()) {
|
|
|
+ throw newUninitializedMessageException(
|
|
|
+ result).asInvalidProtocolBufferException();
|
|
|
+ }
|
|
|
+ return result;
|
|
|
+ }
|
|
|
+
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto buildPartial() {
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto(this);
|
|
|
+ int from_bitField0_ = bitField0_;
|
|
|
+ int to_bitField0_ = 0;
|
|
|
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
+ to_bitField0_ |= 0x00000001;
|
|
|
+ }
|
|
|
+ result.perm_ = perm_;
|
|
|
+ result.bitField0_ = to_bitField0_;
|
|
|
+ onBuilt();
|
|
|
+ return result;
|
|
|
+ }
|
|
|
+
|
|
|
+ public Builder mergeFrom(com.google.protobuf.Message other) {
|
|
|
+ if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto) {
|
|
|
+ return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto)other);
|
|
|
+ } else {
|
|
|
+ super.mergeFrom(other);
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+ }
|
|
|
+
|
|
|
+ public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto other) {
|
|
|
+ if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.getDefaultInstance()) return this;
|
|
|
+ if (other.hasPerm()) {
|
|
|
+ setPerm(other.getPerm());
|
|
|
+ }
|
|
|
+ this.mergeUnknownFields(other.getUnknownFields());
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+
|
|
|
+ public final boolean isInitialized() {
|
|
|
+ if (!hasPerm()) {
|
|
|
+
|
|
|
+ return false;
|
|
|
+ }
|
|
|
+ return true;
|
|
|
+ }
|
|
|
+
|
|
|
+ public Builder mergeFrom(
|
|
|
+ com.google.protobuf.CodedInputStream input,
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
+ throws java.io.IOException {
|
|
|
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
|
|
|
+ com.google.protobuf.UnknownFieldSet.newBuilder(
|
|
|
+ this.getUnknownFields());
|
|
|
+ while (true) {
|
|
|
+ int tag = input.readTag();
|
|
|
+ switch (tag) {
|
|
|
+ case 0:
|
|
|
+ this.setUnknownFields(unknownFields.build());
|
|
|
+ onChanged();
|
|
|
+ return this;
|
|
|
+ default: {
|
|
|
+ if (!parseUnknownField(input, unknownFields,
|
|
|
+ extensionRegistry, tag)) {
|
|
|
+ this.setUnknownFields(unknownFields.build());
|
|
|
+ onChanged();
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+ break;
|
|
|
+ }
|
|
|
+ case 8: {
|
|
|
+ bitField0_ |= 0x00000001;
|
|
|
+ perm_ = input.readUInt32();
|
|
|
+ break;
|
|
|
+ }
|
|
|
+ }
|
|
|
+ }
|
|
|
+ }
|
|
|
+
|
|
|
+ private int bitField0_;
|
|
|
+
|
|
|
+ // required uint32 perm = 1;
|
|
|
+ private int perm_ ;
|
|
|
+ public boolean hasPerm() {
|
|
|
+ return ((bitField0_ & 0x00000001) == 0x00000001);
|
|
|
+ }
|
|
|
+ public int getPerm() {
|
|
|
+ return perm_;
|
|
|
+ }
|
|
|
+ public Builder setPerm(int value) {
|
|
|
+ bitField0_ |= 0x00000001;
|
|
|
+ perm_ = value;
|
|
|
+ onChanged();
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+ public Builder clearPerm() {
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000001);
|
|
|
+ perm_ = 0;
|
|
|
+ onChanged();
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+
|
|
|
+ // @@protoc_insertion_point(builder_scope:FsPermissionProto)
|
|
|
+ }
|
|
|
+
|
|
|
+ static {
|
|
|
+ defaultInstance = new FsPermissionProto(true);
|
|
|
+ defaultInstance.initFields();
|
|
|
+ }
|
|
|
+
+ // @@protoc_insertion_point(class_scope:FsPermissionProto)
+ }
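Likewise, a short illustrative sketch for the permission message defined above (again not part of the generated file; the octal permission value is an assumption, and the outer HdfsProtos qualifier is omitted):

    // Encode a permission value as a uint32 field and read it back.
    FsPermissionProto perm = FsPermissionProto.newBuilder()
        .setPerm(0755)   // hypothetical permission bits
        .build();
    FsPermissionProto decoded = FsPermissionProto.parseFrom(perm.toByteArray());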
+
+ public interface LocatedBlockProtoOrBuilder
|
|
|
+ extends com.google.protobuf.MessageOrBuilder {
|
|
|
+
|
|
|
+ // required .ExtendedBlockProto b = 1;
|
|
|
+ boolean hasB();
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getB();
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder getBOrBuilder();
|
|
|
+
|
|
|
+ // required uint64 offset = 2;
|
|
|
+ boolean hasOffset();
|
|
|
+ long getOffset();
|
|
|
+
|
|
|
+ // repeated .DatanodeInfoProto locs = 3;
|
|
|
+ java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto>
|
|
|
+ getLocsList();
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getLocs(int index);
|
|
|
+ int getLocsCount();
|
|
|
+ java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder>
|
|
|
+ getLocsOrBuilderList();
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder getLocsOrBuilder(
|
|
|
+ int index);
|
|
|
+
|
|
|
+ // required bool corrupt = 4;
|
|
|
+ boolean hasCorrupt();
|
|
|
+ boolean getCorrupt();
|
|
|
+
|
|
|
+ // required .BlockTokenIdentifierProto blockToken = 5;
|
|
|
+ boolean hasBlockToken();
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto getBlockToken();
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProtoOrBuilder getBlockTokenOrBuilder();
|
|
|
+ }
|
|
|
+ public static final class LocatedBlockProto extends
|
|
|
+ com.google.protobuf.GeneratedMessage
|
|
|
+ implements LocatedBlockProtoOrBuilder {
|
|
|
+ // Use LocatedBlockProto.newBuilder() to construct.
|
|
|
+ private LocatedBlockProto(Builder builder) {
|
|
|
+ super(builder);
|
|
|
+ }
|
|
|
+ private LocatedBlockProto(boolean noInit) {}
|
|
|
+
|
|
|
+ private static final LocatedBlockProto defaultInstance;
|
|
|
+ public static LocatedBlockProto getDefaultInstance() {
|
|
|
+ return defaultInstance;
|
|
|
+ }
|
|
|
+
|
|
|
+ public LocatedBlockProto getDefaultInstanceForType() {
|
|
|
+ return defaultInstance;
|
|
|
+ }
|
|
|
+
|
|
|
+ public static final com.google.protobuf.Descriptors.Descriptor
|
|
|
+ getDescriptor() {
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_LocatedBlockProto_descriptor;
|
|
|
+ }
|
|
|
+
|
|
|
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
|
|
+ internalGetFieldAccessorTable() {
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_LocatedBlockProto_fieldAccessorTable;
|
|
|
+ }
|
|
|
+
|
|
|
+ private int bitField0_;
|
|
|
+ // required .ExtendedBlockProto b = 1;
|
|
|
+ public static final int B_FIELD_NUMBER = 1;
|
|
|
+ private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto b_;
|
|
|
+ public boolean hasB() {
|
|
|
+ return ((bitField0_ & 0x00000001) == 0x00000001);
|
|
|
+ }
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getB() {
|
|
|
+ return b_;
|
|
|
+ }
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder getBOrBuilder() {
|
|
|
+ return b_;
|
|
|
+ }
|
|
|
+
|
|
|
+ // required uint64 offset = 2;
|
|
|
+ public static final int OFFSET_FIELD_NUMBER = 2;
|
|
|
+ private long offset_;
|
|
|
+ public boolean hasOffset() {
|
|
|
+ return ((bitField0_ & 0x00000002) == 0x00000002);
|
|
|
+ }
|
|
|
+ public long getOffset() {
|
|
|
+ return offset_;
|
|
|
+ }
|
|
|
+
|
|
|
+ // repeated .DatanodeInfoProto locs = 3;
|
|
|
+ public static final int LOCS_FIELD_NUMBER = 3;
|
|
|
+ private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto> locs_;
|
|
|
+ public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto> getLocsList() {
|
|
|
+ return locs_;
|
|
|
+ }
|
|
|
+ public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder>
|
|
|
+ getLocsOrBuilderList() {
|
|
|
+ return locs_;
|
|
|
+ }
|
|
|
+ public int getLocsCount() {
|
|
|
+ return locs_.size();
|
|
|
+ }
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getLocs(int index) {
|
|
|
+ return locs_.get(index);
|
|
|
+ }
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder getLocsOrBuilder(
|
|
|
+ int index) {
|
|
|
+ return locs_.get(index);
|
|
|
+ }
|
|
|
+
|
|
|
+ // required bool corrupt = 4;
|
|
|
+ public static final int CORRUPT_FIELD_NUMBER = 4;
|
|
|
+ private boolean corrupt_;
|
|
|
+ public boolean hasCorrupt() {
|
|
|
+ return ((bitField0_ & 0x00000004) == 0x00000004);
|
|
|
+ }
|
|
|
+ public boolean getCorrupt() {
|
|
|
+ return corrupt_;
|
|
|
+ }
|
|
|
+
|
|
|
+ // required .BlockTokenIdentifierProto blockToken = 5;
|
|
|
+ public static final int BLOCKTOKEN_FIELD_NUMBER = 5;
|
|
|
+ private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto blockToken_;
|
|
|
+ public boolean hasBlockToken() {
|
|
|
+ return ((bitField0_ & 0x00000008) == 0x00000008);
|
|
|
+ }
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto getBlockToken() {
|
|
|
+ return blockToken_;
|
|
|
+ }
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProtoOrBuilder getBlockTokenOrBuilder() {
|
|
|
+ return blockToken_;
|
|
|
+ }
|
|
|
+
|
|
|
+ private void initFields() {
|
|
|
+ b_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance();
|
|
|
+ offset_ = 0L;
|
|
|
+ locs_ = java.util.Collections.emptyList();
|
|
|
+ corrupt_ = false;
|
|
|
+ blockToken_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto.getDefaultInstance();
|
|
|
+ }
|
|
|
+ private byte memoizedIsInitialized = -1;
|
|
|
+ public final boolean isInitialized() {
|
|
|
+ byte isInitialized = memoizedIsInitialized;
|
|
|
+ if (isInitialized != -1) return isInitialized == 1;
|
|
|
+
|
|
|
+ if (!hasB()) {
|
|
|
+ memoizedIsInitialized = 0;
|
|
|
+ return false;
|
|
|
+ }
|
|
|
+ if (!hasOffset()) {
|
|
|
+ memoizedIsInitialized = 0;
|
|
|
+ return false;
|
|
|
+ }
|
|
|
+ if (!hasCorrupt()) {
|
|
|
+ memoizedIsInitialized = 0;
|
|
|
+ return false;
|
|
|
+ }
|
|
|
+ if (!hasBlockToken()) {
|
|
|
+ memoizedIsInitialized = 0;
|
|
|
+ return false;
|
|
|
+ }
|
|
|
+ if (!getB().isInitialized()) {
|
|
|
+ memoizedIsInitialized = 0;
|
|
|
+ return false;
|
|
|
+ }
|
|
|
+ for (int i = 0; i < getLocsCount(); i++) {
|
|
|
+ if (!getLocs(i).isInitialized()) {
|
|
|
+ memoizedIsInitialized = 0;
|
|
|
+ return false;
|
|
|
+ }
|
|
|
+ }
|
|
|
+ if (!getBlockToken().isInitialized()) {
|
|
|
+ memoizedIsInitialized = 0;
|
|
|
+ return false;
|
|
|
+ }
|
|
|
+ memoizedIsInitialized = 1;
|
|
|
+ return true;
|
|
|
+ }
|
|
|
+
|
|
|
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
|
|
|
+ throws java.io.IOException {
|
|
|
+ getSerializedSize();
|
|
|
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
+ output.writeMessage(1, b_);
|
|
|
+ }
|
|
|
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
|
|
|
+ output.writeUInt64(2, offset_);
|
|
|
+ }
|
|
|
+ for (int i = 0; i < locs_.size(); i++) {
|
|
|
+ output.writeMessage(3, locs_.get(i));
|
|
|
+ }
|
|
|
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
|
|
|
+ output.writeBool(4, corrupt_);
|
|
|
+ }
|
|
|
+ if (((bitField0_ & 0x00000008) == 0x00000008)) {
|
|
|
+ output.writeMessage(5, blockToken_);
|
|
|
+ }
|
|
|
+ getUnknownFields().writeTo(output);
|
|
|
+ }
|
|
|
+
|
|
|
+ private int memoizedSerializedSize = -1;
|
|
|
+ public int getSerializedSize() {
|
|
|
+ int size = memoizedSerializedSize;
|
|
|
+ if (size != -1) return size;
|
|
|
+
|
|
|
+ size = 0;
|
|
|
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
+ size += com.google.protobuf.CodedOutputStream
|
|
|
+ .computeMessageSize(1, b_);
|
|
|
+ }
|
|
|
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
|
|
|
+ size += com.google.protobuf.CodedOutputStream
|
|
|
+ .computeUInt64Size(2, offset_);
|
|
|
+ }
|
|
|
+ for (int i = 0; i < locs_.size(); i++) {
|
|
|
+ size += com.google.protobuf.CodedOutputStream
|
|
|
+ .computeMessageSize(3, locs_.get(i));
|
|
|
+ }
|
|
|
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
|
|
|
+ size += com.google.protobuf.CodedOutputStream
|
|
|
+ .computeBoolSize(4, corrupt_);
|
|
|
+ }
|
|
|
+ if (((bitField0_ & 0x00000008) == 0x00000008)) {
|
|
|
+ size += com.google.protobuf.CodedOutputStream
|
|
|
+ .computeMessageSize(5, blockToken_);
|
|
|
+ }
|
|
|
+ size += getUnknownFields().getSerializedSize();
|
|
|
+ memoizedSerializedSize = size;
|
|
|
+ return size;
|
|
|
+ }
|
|
|
+
|
|
|
+ private static final long serialVersionUID = 0L;
|
|
|
+ @java.lang.Override
|
|
|
+ protected java.lang.Object writeReplace()
|
|
|
+ throws java.io.ObjectStreamException {
|
|
|
+ return super.writeReplace();
|
|
|
+ }
|
|
|
+
|
|
|
+ @java.lang.Override
|
|
|
+ public boolean equals(final java.lang.Object obj) {
|
|
|
+ if (obj == this) {
|
|
|
+ return true;
|
|
|
+ }
|
|
|
+ if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto)) {
|
|
|
+ return super.equals(obj);
|
|
|
+ }
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto) obj;
|
|
|
+
|
|
|
+ boolean result = true;
|
|
|
+ result = result && (hasB() == other.hasB());
|
|
|
+ if (hasB()) {
|
|
|
+ result = result && getB()
|
|
|
+ .equals(other.getB());
|
|
|
+ }
|
|
|
+ result = result && (hasOffset() == other.hasOffset());
|
|
|
+ if (hasOffset()) {
|
|
|
+ result = result && (getOffset()
|
|
|
+ == other.getOffset());
|
|
|
+ }
|
|
|
+ result = result && getLocsList()
|
|
|
+ .equals(other.getLocsList());
|
|
|
+ result = result && (hasCorrupt() == other.hasCorrupt());
|
|
|
+ if (hasCorrupt()) {
|
|
|
+ result = result && (getCorrupt()
|
|
|
+ == other.getCorrupt());
|
|
|
+ }
|
|
|
+ result = result && (hasBlockToken() == other.hasBlockToken());
|
|
|
+ if (hasBlockToken()) {
|
|
|
+ result = result && getBlockToken()
|
|
|
+ .equals(other.getBlockToken());
|
|
|
+ }
|
|
|
+ result = result &&
|
|
|
+ getUnknownFields().equals(other.getUnknownFields());
|
|
|
+ return result;
|
|
|
+ }
|
|
|
+
|
|
|
+ @java.lang.Override
|
|
|
+ public int hashCode() {
|
|
|
+ int hash = 41;
|
|
|
+ hash = (19 * hash) + getDescriptorForType().hashCode();
|
|
|
+ if (hasB()) {
|
|
|
+ hash = (37 * hash) + B_FIELD_NUMBER;
|
|
|
+ hash = (53 * hash) + getB().hashCode();
|
|
|
+ }
|
|
|
+ if (hasOffset()) {
|
|
|
+ hash = (37 * hash) + OFFSET_FIELD_NUMBER;
|
|
|
+ hash = (53 * hash) + hashLong(getOffset());
|
|
|
+ }
|
|
|
+ if (getLocsCount() > 0) {
|
|
|
+ hash = (37 * hash) + LOCS_FIELD_NUMBER;
|
|
|
+ hash = (53 * hash) + getLocsList().hashCode();
|
|
|
+ }
|
|
|
+ if (hasCorrupt()) {
|
|
|
+ hash = (37 * hash) + CORRUPT_FIELD_NUMBER;
|
|
|
+ hash = (53 * hash) + hashBoolean(getCorrupt());
|
|
|
+ }
|
|
|
+ if (hasBlockToken()) {
|
|
|
+ hash = (37 * hash) + BLOCKTOKEN_FIELD_NUMBER;
|
|
|
+ hash = (53 * hash) + getBlockToken().hashCode();
|
|
|
+ }
|
|
|
+ hash = (29 * hash) + getUnknownFields().hashCode();
|
|
|
+ return hash;
|
|
|
+ }
|
|
|
+
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto parseFrom(
|
|
|
+ com.google.protobuf.ByteString data)
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
+ return newBuilder().mergeFrom(data).buildParsed();
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto parseFrom(
|
|
|
+ com.google.protobuf.ByteString data,
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
+ return newBuilder().mergeFrom(data, extensionRegistry)
|
|
|
+ .buildParsed();
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto parseFrom(byte[] data)
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
+ return newBuilder().mergeFrom(data).buildParsed();
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto parseFrom(
|
|
|
+ byte[] data,
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
+ return newBuilder().mergeFrom(data, extensionRegistry)
|
|
|
+ .buildParsed();
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto parseFrom(java.io.InputStream input)
|
|
|
+ throws java.io.IOException {
|
|
|
+ return newBuilder().mergeFrom(input).buildParsed();
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto parseFrom(
|
|
|
+ java.io.InputStream input,
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
+ throws java.io.IOException {
|
|
|
+ return newBuilder().mergeFrom(input, extensionRegistry)
|
|
|
+ .buildParsed();
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto parseDelimitedFrom(java.io.InputStream input)
|
|
|
+ throws java.io.IOException {
|
|
|
+ Builder builder = newBuilder();
|
|
|
+ if (builder.mergeDelimitedFrom(input)) {
|
|
|
+ return builder.buildParsed();
|
|
|
+ } else {
|
|
|
+ return null;
|
|
|
+ }
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto parseDelimitedFrom(
|
|
|
+ java.io.InputStream input,
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
+ throws java.io.IOException {
|
|
|
+ Builder builder = newBuilder();
|
|
|
+ if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
|
|
|
+ return builder.buildParsed();
|
|
|
+ } else {
|
|
|
+ return null;
|
|
|
+ }
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto parseFrom(
|
|
|
+ com.google.protobuf.CodedInputStream input)
|
|
|
+ throws java.io.IOException {
|
|
|
+ return newBuilder().mergeFrom(input).buildParsed();
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto parseFrom(
|
|
|
+ com.google.protobuf.CodedInputStream input,
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
+ throws java.io.IOException {
|
|
|
+ return newBuilder().mergeFrom(input, extensionRegistry)
|
|
|
+ .buildParsed();
|
|
|
+ }
|
|
|
+
|
|
|
+ public static Builder newBuilder() { return Builder.create(); }
|
|
|
+ public Builder newBuilderForType() { return newBuilder(); }
|
|
|
+ public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto prototype) {
|
|
|
+ return newBuilder().mergeFrom(prototype);
|
|
|
+ }
|
|
|
+ public Builder toBuilder() { return newBuilder(this); }
|
|
|
+
|
|
|
+ @java.lang.Override
|
|
|
+ protected Builder newBuilderForType(
|
|
|
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
|
|
|
+ Builder builder = new Builder(parent);
|
|
|
+ return builder;
|
|
|
+ }
|
|
|
+ public static final class Builder extends
|
|
|
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
|
|
|
+ implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder {
|
|
|
+ public static final com.google.protobuf.Descriptors.Descriptor
|
|
|
+ getDescriptor() {
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_LocatedBlockProto_descriptor;
|
|
|
+ }
|
|
|
+
|
|
|
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
|
|
+ internalGetFieldAccessorTable() {
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_LocatedBlockProto_fieldAccessorTable;
|
|
|
+ }
|
|
|
+
|
|
|
+ // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.newBuilder()
|
|
|
+ private Builder() {
|
|
|
+ maybeForceBuilderInitialization();
|
|
|
+ }
|
|
|
+
|
|
|
+ private Builder(BuilderParent parent) {
|
|
|
+ super(parent);
|
|
|
+ maybeForceBuilderInitialization();
|
|
|
+ }
|
|
|
+ private void maybeForceBuilderInitialization() {
|
|
|
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
|
|
|
+ getBFieldBuilder();
|
|
|
+ getLocsFieldBuilder();
|
|
|
+ getBlockTokenFieldBuilder();
|
|
|
+ }
|
|
|
+ }
|
|
|
+ private static Builder create() {
|
|
|
+ return new Builder();
|
|
|
+ }
|
|
|
+
|
|
|
+ public Builder clear() {
|
|
|
+ super.clear();
|
|
|
+ if (bBuilder_ == null) {
|
|
|
+ b_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance();
|
|
|
+ } else {
|
|
|
+ bBuilder_.clear();
|
|
|
+ }
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000001);
|
|
|
+ offset_ = 0L;
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000002);
|
|
|
+ if (locsBuilder_ == null) {
|
|
|
+ locs_ = java.util.Collections.emptyList();
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000004);
|
|
|
+ } else {
|
|
|
+ locsBuilder_.clear();
|
|
|
+ }
|
|
|
+ corrupt_ = false;
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000008);
|
|
|
+ if (blockTokenBuilder_ == null) {
|
|
|
+ blockToken_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto.getDefaultInstance();
|
|
|
+ } else {
|
|
|
+ blockTokenBuilder_.clear();
|
|
|
+ }
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000010);
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+
|
|
|
+ public Builder clone() {
|
|
|
+ return create().mergeFrom(buildPartial());
|
|
|
+ }
|
|
|
+
|
|
|
+ public com.google.protobuf.Descriptors.Descriptor
|
|
|
+ getDescriptorForType() {
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.getDescriptor();
|
|
|
+ }
|
|
|
+
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto getDefaultInstanceForType() {
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.getDefaultInstance();
|
|
|
+ }
|
|
|
+
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto build() {
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto result = buildPartial();
|
|
|
+ if (!result.isInitialized()) {
|
|
|
+ throw newUninitializedMessageException(result);
|
|
|
+ }
|
|
|
+ return result;
|
|
|
+ }
|
|
|
+
|
|
|
+ private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto buildParsed()
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto result = buildPartial();
|
|
|
+ if (!result.isInitialized()) {
|
|
|
+ throw newUninitializedMessageException(
|
|
|
+ result).asInvalidProtocolBufferException();
|
|
|
+ }
|
|
|
+ return result;
|
|
|
+ }
|
|
|
+
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto buildPartial() {
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto(this);
|
|
|
+ int from_bitField0_ = bitField0_;
|
|
|
+ int to_bitField0_ = 0;
|
|
|
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
+ to_bitField0_ |= 0x00000001;
|
|
|
+ }
|
|
|
+ if (bBuilder_ == null) {
|
|
|
+ result.b_ = b_;
|
|
|
+ } else {
|
|
|
+ result.b_ = bBuilder_.build();
|
|
|
+ }
|
|
|
+ if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
|
|
|
+ to_bitField0_ |= 0x00000002;
|
|
|
+ }
|
|
|
+ result.offset_ = offset_;
|
|
|
+ if (locsBuilder_ == null) {
|
|
|
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
|
|
|
+ locs_ = java.util.Collections.unmodifiableList(locs_);
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000004);
|
|
|
+ }
|
|
|
+ result.locs_ = locs_;
|
|
|
+ } else {
|
|
|
+ result.locs_ = locsBuilder_.build();
|
|
|
+ }
|
|
|
+ if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
|
|
|
+ to_bitField0_ |= 0x00000004;
|
|
|
+ }
|
|
|
+ result.corrupt_ = corrupt_;
|
|
|
+ if (((from_bitField0_ & 0x00000010) == 0x00000010)) {
|
|
|
+ to_bitField0_ |= 0x00000008;
|
|
|
+ }
|
|
|
+ if (blockTokenBuilder_ == null) {
|
|
|
+ result.blockToken_ = blockToken_;
|
|
|
+ } else {
|
|
|
+ result.blockToken_ = blockTokenBuilder_.build();
|
|
|
+ }
|
|
|
+ result.bitField0_ = to_bitField0_;
|
|
|
+ onBuilt();
|
|
|
+ return result;
|
|
|
+ }
|
|
|
+
|
|
|
+ public Builder mergeFrom(com.google.protobuf.Message other) {
|
|
|
+ if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto) {
|
|
|
+ return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto)other);
|
|
|
+ } else {
|
|
|
+ super.mergeFrom(other);
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+ }
|
|
|
+
|
|
|
+ public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto other) {
|
|
|
+ if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.getDefaultInstance()) return this;
|
|
|
+ if (other.hasB()) {
|
|
|
+ mergeB(other.getB());
|
|
|
+ }
|
|
|
+ if (other.hasOffset()) {
|
|
|
+ setOffset(other.getOffset());
|
|
|
+ }
|
|
|
+ if (locsBuilder_ == null) {
|
|
|
+ if (!other.locs_.isEmpty()) {
|
|
|
+ if (locs_.isEmpty()) {
|
|
|
+ locs_ = other.locs_;
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000004);
|
|
|
+ } else {
|
|
|
+ ensureLocsIsMutable();
|
|
|
+ locs_.addAll(other.locs_);
|
|
|
+ }
|
|
|
+ onChanged();
|
|
|
+ }
|
|
|
+ } else {
|
|
|
+ if (!other.locs_.isEmpty()) {
|
|
|
+ if (locsBuilder_.isEmpty()) {
|
|
|
+ locsBuilder_.dispose();
|
|
|
+ locsBuilder_ = null;
|
|
|
+ locs_ = other.locs_;
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000004);
|
|
|
+ locsBuilder_ =
|
|
|
+ com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
|
|
|
+ getLocsFieldBuilder() : null;
|
|
|
+ } else {
|
|
|
+ locsBuilder_.addAllMessages(other.locs_);
|
|
|
+ }
|
|
|
+ }
|
|
|
+ }
|
|
|
+ if (other.hasCorrupt()) {
|
|
|
+ setCorrupt(other.getCorrupt());
|
|
|
+ }
|
|
|
+ if (other.hasBlockToken()) {
|
|
|
+ mergeBlockToken(other.getBlockToken());
|
|
|
+ }
|
|
|
+ this.mergeUnknownFields(other.getUnknownFields());
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+
|
|
|
+ public final boolean isInitialized() {
|
|
|
+ if (!hasB()) {
|
|
|
+
|
|
|
+ return false;
|
|
|
+ }
|
|
|
+ if (!hasOffset()) {
|
|
|
+
|
|
|
+ return false;
|
|
|
+ }
|
|
|
+ if (!hasCorrupt()) {
|
|
|
+
|
|
|
+ return false;
|
|
|
+ }
|
|
|
+ if (!hasBlockToken()) {
|
|
|
+
|
|
|
+ return false;
|
|
|
+ }
|
|
|
+ if (!getB().isInitialized()) {
|
|
|
+
|
|
|
+ return false;
|
|
|
+ }
|
|
|
+ for (int i = 0; i < getLocsCount(); i++) {
|
|
|
+ if (!getLocs(i).isInitialized()) {
|
|
|
+
|
|
|
+ return false;
|
|
|
+ }
|
|
|
+ }
|
|
|
+ if (!getBlockToken().isInitialized()) {
|
|
|
+
|
|
|
+ return false;
|
|
|
+ }
|
|
|
+ return true;
|
|
|
+ }
|
|
|
+
|
|
|
+ public Builder mergeFrom(
|
|
|
+ com.google.protobuf.CodedInputStream input,
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
+ throws java.io.IOException {
|
|
|
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
|
|
|
+ com.google.protobuf.UnknownFieldSet.newBuilder(
|
|
|
+ this.getUnknownFields());
|
|
|
+ while (true) {
|
|
|
+ int tag = input.readTag();
|
|
|
+ switch (tag) {
|
|
|
+ case 0:
|
|
|
+ this.setUnknownFields(unknownFields.build());
|
|
|
+ onChanged();
|
|
|
+ return this;
|
|
|
+ default: {
|
|
|
+ if (!parseUnknownField(input, unknownFields,
|
|
|
+ extensionRegistry, tag)) {
|
|
|
+ this.setUnknownFields(unknownFields.build());
|
|
|
+ onChanged();
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+ break;
|
|
|
+ }
|
|
|
+ case 10: {
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.newBuilder();
|
|
|
+ if (hasB()) {
|
|
|
+ subBuilder.mergeFrom(getB());
|
|
|
+ }
|
|
|
+ input.readMessage(subBuilder, extensionRegistry);
|
|
|
+ setB(subBuilder.buildPartial());
|
|
|
+ break;
|
|
|
+ }
|
|
|
+ case 16: {
|
|
|
+ bitField0_ |= 0x00000002;
|
|
|
+ offset_ = input.readUInt64();
|
|
|
+ break;
|
|
|
+ }
|
|
|
+ case 26: {
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.newBuilder();
|
|
|
+ input.readMessage(subBuilder, extensionRegistry);
|
|
|
+ addLocs(subBuilder.buildPartial());
|
|
|
+ break;
|
|
|
+ }
|
|
|
+ case 32: {
|
|
|
+ bitField0_ |= 0x00000008;
|
|
|
+ corrupt_ = input.readBool();
|
|
|
+ break;
|
|
|
+ }
|
|
|
+ case 42: {
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto.newBuilder();
|
|
|
+ if (hasBlockToken()) {
|
|
|
+ subBuilder.mergeFrom(getBlockToken());
|
|
|
+ }
|
|
|
+ input.readMessage(subBuilder, extensionRegistry);
|
|
|
+ setBlockToken(subBuilder.buildPartial());
|
|
|
+ break;
|
|
|
+ }
|
|
|
+ }
|
|
|
+ }
|
|
|
+ }
|
|
|
+
|
|
|
+ private int bitField0_;
|
|
|
+
|
|
|
+ // required .ExtendedBlockProto b = 1;
|
|
|
+ private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto b_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance();
|
|
|
+ private com.google.protobuf.SingleFieldBuilder<
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder> bBuilder_;
|
|
|
+ public boolean hasB() {
|
|
|
+ return ((bitField0_ & 0x00000001) == 0x00000001);
|
|
|
+ }
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getB() {
|
|
|
+ if (bBuilder_ == null) {
|
|
|
+ return b_;
|
|
|
+ } else {
|
|
|
+ return bBuilder_.getMessage();
|
|
|
+ }
|
|
|
+ }
|
|
|
+ public Builder setB(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto value) {
|
|
|
+ if (bBuilder_ == null) {
|
|
|
+ if (value == null) {
|
|
|
+ throw new NullPointerException();
|
|
|
+ }
|
|
|
+ b_ = value;
|
|
|
+ onChanged();
|
|
|
+ } else {
|
|
|
+ bBuilder_.setMessage(value);
|
|
|
+ }
|
|
|
+ bitField0_ |= 0x00000001;
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+ public Builder setB(
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder builderForValue) {
|
|
|
+ if (bBuilder_ == null) {
|
|
|
+ b_ = builderForValue.build();
|
|
|
+ onChanged();
|
|
|
+ } else {
|
|
|
+ bBuilder_.setMessage(builderForValue.build());
|
|
|
+ }
|
|
|
+ bitField0_ |= 0x00000001;
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+ public Builder mergeB(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto value) {
|
|
|
+ if (bBuilder_ == null) {
|
|
|
+ if (((bitField0_ & 0x00000001) == 0x00000001) &&
|
|
|
+ b_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance()) {
|
|
|
+ b_ =
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.newBuilder(b_).mergeFrom(value).buildPartial();
|
|
|
+ } else {
|
|
|
+ b_ = value;
|
|
|
+ }
|
|
|
+ onChanged();
|
|
|
+ } else {
|
|
|
+ bBuilder_.mergeFrom(value);
|
|
|
+ }
|
|
|
+ bitField0_ |= 0x00000001;
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+ public Builder clearB() {
|
|
|
+ if (bBuilder_ == null) {
|
|
|
+ b_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance();
|
|
|
+ onChanged();
|
|
|
+ } else {
|
|
|
+ bBuilder_.clear();
|
|
|
+ }
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000001);
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder getBBuilder() {
|
|
|
+ bitField0_ |= 0x00000001;
|
|
|
+ onChanged();
|
|
|
+ return getBFieldBuilder().getBuilder();
|
|
|
+ }
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder getBOrBuilder() {
|
|
|
+ if (bBuilder_ != null) {
|
|
|
+ return bBuilder_.getMessageOrBuilder();
|
|
|
+ } else {
|
|
|
+ return b_;
|
|
|
+ }
|
|
|
+ }
|
|
|
+ private com.google.protobuf.SingleFieldBuilder<
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder>
|
|
|
+ getBFieldBuilder() {
|
|
|
+ if (bBuilder_ == null) {
|
|
|
+ bBuilder_ = new com.google.protobuf.SingleFieldBuilder<
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder>(
|
|
|
+ b_,
|
|
|
+ getParentForChildren(),
|
|
|
+ isClean());
|
|
|
+ b_ = null;
|
|
|
+ }
|
|
|
+ return bBuilder_;
|
|
|
+ }
|
|
|
+
|
|
|
+ // required uint64 offset = 2;
|
|
|
+ private long offset_ ;
|
|
|
+ public boolean hasOffset() {
|
|
|
+ return ((bitField0_ & 0x00000002) == 0x00000002);
|
|
|
+ }
|
|
|
+ public long getOffset() {
|
|
|
+ return offset_;
|
|
|
+ }
|
|
|
+ public Builder setOffset(long value) {
|
|
|
+ bitField0_ |= 0x00000002;
|
|
|
+ offset_ = value;
|
|
|
+ onChanged();
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+ public Builder clearOffset() {
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000002);
|
|
|
+ offset_ = 0L;
|
|
|
+ onChanged();
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+
|
|
|
+ // repeated .DatanodeInfoProto locs = 3;
|
|
|
+ private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto> locs_ =
|
|
|
+ java.util.Collections.emptyList();
|
|
|
+ private void ensureLocsIsMutable() {
|
|
|
+ if (!((bitField0_ & 0x00000004) == 0x00000004)) {
|
|
|
+ locs_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto>(locs_);
|
|
|
+ bitField0_ |= 0x00000004;
|
|
|
+ }
|
|
|
+ }
|
|
|
+
|
|
|
+ private com.google.protobuf.RepeatedFieldBuilder<
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder> locsBuilder_;
|
|
|
+
|
|
|
+ public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto> getLocsList() {
|
|
|
+ if (locsBuilder_ == null) {
|
|
|
+ return java.util.Collections.unmodifiableList(locs_);
|
|
|
+ } else {
|
|
|
+ return locsBuilder_.getMessageList();
|
|
|
+ }
|
|
|
+ }
|
|
|
+ public int getLocsCount() {
|
|
|
+ if (locsBuilder_ == null) {
|
|
|
+ return locs_.size();
|
|
|
+ } else {
|
|
|
+ return locsBuilder_.getCount();
|
|
|
+ }
|
|
|
+ }
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getLocs(int index) {
|
|
|
+ if (locsBuilder_ == null) {
|
|
|
+ return locs_.get(index);
|
|
|
+ } else {
|
|
|
+ return locsBuilder_.getMessage(index);
|
|
|
+ }
|
|
|
+ }
|
|
|
+ public Builder setLocs(
|
|
|
+ int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto value) {
|
|
|
+ if (locsBuilder_ == null) {
|
|
|
+ if (value == null) {
|
|
|
+ throw new NullPointerException();
|
|
|
+ }
|
|
|
+ ensureLocsIsMutable();
|
|
|
+ locs_.set(index, value);
|
|
|
+ onChanged();
|
|
|
+ } else {
|
|
|
+ locsBuilder_.setMessage(index, value);
|
|
|
+ }
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+ public Builder setLocs(
|
|
|
+ int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder builderForValue) {
|
|
|
+ if (locsBuilder_ == null) {
|
|
|
+ ensureLocsIsMutable();
|
|
|
+ locs_.set(index, builderForValue.build());
|
|
|
+ onChanged();
|
|
|
+ } else {
|
|
|
+ locsBuilder_.setMessage(index, builderForValue.build());
|
|
|
+ }
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+ public Builder addLocs(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto value) {
|
|
|
+ if (locsBuilder_ == null) {
|
|
|
+ if (value == null) {
|
|
|
+ throw new NullPointerException();
|
|
|
+ }
|
|
|
+ ensureLocsIsMutable();
|
|
|
+ locs_.add(value);
|
|
|
+ onChanged();
|
|
|
+ } else {
|
|
|
+ locsBuilder_.addMessage(value);
|
|
|
+ }
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+ public Builder addLocs(
|
|
|
+ int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto value) {
|
|
|
+ if (locsBuilder_ == null) {
|
|
|
+ if (value == null) {
|
|
|
+ throw new NullPointerException();
|
|
|
+ }
|
|
|
+ ensureLocsIsMutable();
|
|
|
+ locs_.add(index, value);
|
|
|
+ onChanged();
|
|
|
+ } else {
|
|
|
+ locsBuilder_.addMessage(index, value);
|
|
|
+ }
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+ public Builder addLocs(
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder builderForValue) {
|
|
|
+ if (locsBuilder_ == null) {
|
|
|
+ ensureLocsIsMutable();
|
|
|
+ locs_.add(builderForValue.build());
|
|
|
+ onChanged();
|
|
|
+ } else {
|
|
|
+ locsBuilder_.addMessage(builderForValue.build());
|
|
|
+ }
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+ public Builder addLocs(
|
|
|
+ int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder builderForValue) {
|
|
|
+ if (locsBuilder_ == null) {
|
|
|
+ ensureLocsIsMutable();
|
|
|
+ locs_.add(index, builderForValue.build());
|
|
|
+ onChanged();
|
|
|
+ } else {
|
|
|
+ locsBuilder_.addMessage(index, builderForValue.build());
|
|
|
+ }
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+ public Builder addAllLocs(
|
|
|
+ java.lang.Iterable<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto> values) {
|
|
|
+ if (locsBuilder_ == null) {
|
|
|
+ ensureLocsIsMutable();
|
|
|
+ super.addAll(values, locs_);
|
|
|
+ onChanged();
|
|
|
+ } else {
|
|
|
+ locsBuilder_.addAllMessages(values);
|
|
|
+ }
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+ public Builder clearLocs() {
|
|
|
+ if (locsBuilder_ == null) {
|
|
|
+ locs_ = java.util.Collections.emptyList();
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000004);
|
|
|
+ onChanged();
|
|
|
+ } else {
|
|
|
+ locsBuilder_.clear();
|
|
|
+ }
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+ public Builder removeLocs(int index) {
|
|
|
+ if (locsBuilder_ == null) {
|
|
|
+ ensureLocsIsMutable();
|
|
|
+ locs_.remove(index);
|
|
|
+ onChanged();
|
|
|
+ } else {
|
|
|
+ locsBuilder_.remove(index);
|
|
|
+ }
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder getLocsBuilder(
|
|
|
+ int index) {
|
|
|
+ return getLocsFieldBuilder().getBuilder(index);
|
|
|
+ }
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder getLocsOrBuilder(
|
|
|
+ int index) {
|
|
|
+ if (locsBuilder_ == null) {
|
|
|
+ return locs_.get(index); } else {
|
|
|
+ return locsBuilder_.getMessageOrBuilder(index);
|
|
|
+ }
|
|
|
+ }
|
|
|
+ public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder>
|
|
|
+ getLocsOrBuilderList() {
|
|
|
+ if (locsBuilder_ != null) {
|
|
|
+ return locsBuilder_.getMessageOrBuilderList();
|
|
|
+ } else {
|
|
|
+ return java.util.Collections.unmodifiableList(locs_);
|
|
|
+ }
|
|
|
+ }
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder addLocsBuilder() {
|
|
|
+ return getLocsFieldBuilder().addBuilder(
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDefaultInstance());
|
|
|
+ }
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder addLocsBuilder(
|
|
|
+ int index) {
|
|
|
+ return getLocsFieldBuilder().addBuilder(
|
|
|
+ index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDefaultInstance());
|
|
|
+ }
|
|
|
+ public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder>
|
|
|
+ getLocsBuilderList() {
|
|
|
+ return getLocsFieldBuilder().getBuilderList();
|
|
|
+ }
|
|
|
+ private com.google.protobuf.RepeatedFieldBuilder<
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder>
|
|
|
+ getLocsFieldBuilder() {
|
|
|
+ if (locsBuilder_ == null) {
|
|
|
+ locsBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder>(
|
|
|
+ locs_,
|
|
|
+ ((bitField0_ & 0x00000004) == 0x00000004),
|
|
|
+ getParentForChildren(),
|
|
|
+ isClean());
|
|
|
+ locs_ = null;
|
|
|
+ }
|
|
|
+ return locsBuilder_;
|
|
|
+ }
|
|
|
+
|
|
|
+ // required bool corrupt = 4;
|
|
|
+ private boolean corrupt_ ;
|
|
|
+ public boolean hasCorrupt() {
|
|
|
+ return ((bitField0_ & 0x00000008) == 0x00000008);
|
|
|
+ }
|
|
|
+ public boolean getCorrupt() {
|
|
|
+ return corrupt_;
|
|
|
+ }
|
|
|
+ public Builder setCorrupt(boolean value) {
|
|
|
+ bitField0_ |= 0x00000008;
|
|
|
+ corrupt_ = value;
|
|
|
+ onChanged();
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+ public Builder clearCorrupt() {
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000008);
|
|
|
+ corrupt_ = false;
|
|
|
+ onChanged();
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+
|
|
|
+ // required .BlockTokenIdentifierProto blockToken = 5;
|
|
|
+ private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto blockToken_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto.getDefaultInstance();
|
|
|
+ private com.google.protobuf.SingleFieldBuilder<
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProtoOrBuilder> blockTokenBuilder_;
|
|
|
+ public boolean hasBlockToken() {
|
|
|
+ return ((bitField0_ & 0x00000010) == 0x00000010);
|
|
|
+ }
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto getBlockToken() {
|
|
|
+ if (blockTokenBuilder_ == null) {
|
|
|
+ return blockToken_;
|
|
|
+ } else {
|
|
|
+ return blockTokenBuilder_.getMessage();
|
|
|
+ }
|
|
|
+ }
|
|
|
+ public Builder setBlockToken(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto value) {
|
|
|
+ if (blockTokenBuilder_ == null) {
|
|
|
+ if (value == null) {
|
|
|
+ throw new NullPointerException();
|
|
|
+ }
|
|
|
+ blockToken_ = value;
|
|
|
+ onChanged();
|
|
|
+ } else {
|
|
|
+ blockTokenBuilder_.setMessage(value);
|
|
|
+ }
|
|
|
+ bitField0_ |= 0x00000010;
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+ public Builder setBlockToken(
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto.Builder builderForValue) {
|
|
|
+ if (blockTokenBuilder_ == null) {
|
|
|
+ blockToken_ = builderForValue.build();
|
|
|
+ onChanged();
|
|
|
+ } else {
|
|
|
+ blockTokenBuilder_.setMessage(builderForValue.build());
|
|
|
+ }
|
|
|
+ bitField0_ |= 0x00000010;
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+ public Builder mergeBlockToken(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto value) {
|
|
|
+ if (blockTokenBuilder_ == null) {
|
|
|
+ if (((bitField0_ & 0x00000010) == 0x00000010) &&
|
|
|
+ blockToken_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto.getDefaultInstance()) {
|
|
|
+ blockToken_ =
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto.newBuilder(blockToken_).mergeFrom(value).buildPartial();
|
|
|
+ } else {
|
|
|
+ blockToken_ = value;
|
|
|
+ }
|
|
|
+ onChanged();
|
|
|
+ } else {
|
|
|
+ blockTokenBuilder_.mergeFrom(value);
|
|
|
+ }
|
|
|
+ bitField0_ |= 0x00000010;
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+ public Builder clearBlockToken() {
|
|
|
+ if (blockTokenBuilder_ == null) {
|
|
|
+ blockToken_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto.getDefaultInstance();
|
|
|
+ onChanged();
|
|
|
+ } else {
|
|
|
+ blockTokenBuilder_.clear();
|
|
|
+ }
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000010);
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto.Builder getBlockTokenBuilder() {
|
|
|
+ bitField0_ |= 0x00000010;
|
|
|
+ onChanged();
|
|
|
+ return getBlockTokenFieldBuilder().getBuilder();
|
|
|
+ }
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProtoOrBuilder getBlockTokenOrBuilder() {
|
|
|
+ if (blockTokenBuilder_ != null) {
|
|
|
+ return blockTokenBuilder_.getMessageOrBuilder();
|
|
|
+ } else {
|
|
|
+ return blockToken_;
|
|
|
+ }
|
|
|
+ }
|
|
|
+ private com.google.protobuf.SingleFieldBuilder<
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProtoOrBuilder>
|
|
|
+ getBlockTokenFieldBuilder() {
|
|
|
+ if (blockTokenBuilder_ == null) {
|
|
|
+ blockTokenBuilder_ = new com.google.protobuf.SingleFieldBuilder<
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProtoOrBuilder>(
|
|
|
+ blockToken_,
|
|
|
+ getParentForChildren(),
|
|
|
+ isClean());
|
|
|
+ blockToken_ = null;
|
|
|
+ }
|
|
|
+ return blockTokenBuilder_;
|
|
|
+ }
|
|
|
+
|
|
|
+ // @@protoc_insertion_point(builder_scope:LocatedBlockProto)
|
|
|
+ }
|
|
|
+
|
|
|
+ static {
|
|
|
+ defaultInstance = new LocatedBlockProto(true);
|
|
|
+ defaultInstance.initFields();
|
|
|
+ }
|
|
|
+
|
|
|
+ // @@protoc_insertion_point(class_scope:LocatedBlockProto)
|
|
|
+ }
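
For context on how the generated LocatedBlockProto API above is typically consumed, a minimal sketch follows. It is illustrative only and not part of the generated file or this patch; `dnInfo` and `token` are placeholder names for previously built DatanodeInfoProto and BlockTokenIdentifierProto instances, and the message's other required fields (declared earlier in this file) are skipped, so buildPartial() is used rather than build().

    // Illustrative sketch only: exercising the builder methods from the hunk above.
    // dnInfo and token are placeholder instances assumed to be built elsewhere.
    HdfsProtos.LocatedBlockProto block = HdfsProtos.LocatedBlockProto.newBuilder()
        .addLocs(dnInfo)        // repeated .DatanodeInfoProto locs
        .setCorrupt(false)      // required bool corrupt = 4
        .setBlockToken(token)   // required .BlockTokenIdentifierProto blockToken = 5
        .buildPartial();        // build() would reject the unset required fields
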
|
|
|
+
|
|
|
+ public interface LocatedBlocksProtoOrBuilder
|
|
|
+ extends com.google.protobuf.MessageOrBuilder {
|
|
|
+
|
|
|
+ // required uint64 fileLength = 1;
|
|
|
+ boolean hasFileLength();
|
|
|
+ long getFileLength();
|
|
|
+
|
|
|
+ // repeated .LocatedBlockProto blocks = 2;
|
|
|
+ java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto>
|
|
|
+ getBlocksList();
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto getBlocks(int index);
|
|
|
+ int getBlocksCount();
|
|
|
+ java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder>
|
|
|
+ getBlocksOrBuilderList();
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder getBlocksOrBuilder(
|
|
|
+ int index);
|
|
|
+
|
|
|
+ // required bool underConstruction = 3;
|
|
|
+ boolean hasUnderConstruction();
|
|
|
+ boolean getUnderConstruction();
|
|
|
+
|
|
|
+ // optional .LocatedBlockProto lastBlock = 4;
|
|
|
+ boolean hasLastBlock();
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto getLastBlock();
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder getLastBlockOrBuilder();
|
|
|
+
|
|
|
+ // required bool isLastBlockComplete = 5;
|
|
|
+ boolean hasIsLastBlockComplete();
|
|
|
+ boolean getIsLastBlockComplete();
|
|
|
+ }
|
|
|
+ public static final class LocatedBlocksProto extends
|
|
|
+ com.google.protobuf.GeneratedMessage
|
|
|
+ implements LocatedBlocksProtoOrBuilder {
|
|
|
+ // Use LocatedBlocksProto.newBuilder() to construct.
|
|
|
+ private LocatedBlocksProto(Builder builder) {
|
|
|
+ super(builder);
|
|
|
+ }
|
|
|
+ private LocatedBlocksProto(boolean noInit) {}
|
|
|
+
|
|
|
+ private static final LocatedBlocksProto defaultInstance;
|
|
|
+ public static LocatedBlocksProto getDefaultInstance() {
|
|
|
+ return defaultInstance;
|
|
|
+ }
|
|
|
+
|
|
|
+ public LocatedBlocksProto getDefaultInstanceForType() {
|
|
|
+ return defaultInstance;
|
|
|
+ }
|
|
|
+
|
|
|
+ public static final com.google.protobuf.Descriptors.Descriptor
|
|
|
+ getDescriptor() {
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_LocatedBlocksProto_descriptor;
|
|
|
+ }
|
|
|
+
|
|
|
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
|
|
+ internalGetFieldAccessorTable() {
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_LocatedBlocksProto_fieldAccessorTable;
|
|
|
+ }
|
|
|
+
|
|
|
+ private int bitField0_;
|
|
|
+ // required uint64 fileLength = 1;
|
|
|
+ public static final int FILELENGTH_FIELD_NUMBER = 1;
|
|
|
+ private long fileLength_;
|
|
|
+ public boolean hasFileLength() {
|
|
|
+ return ((bitField0_ & 0x00000001) == 0x00000001);
|
|
|
+ }
|
|
|
+ public long getFileLength() {
|
|
|
+ return fileLength_;
|
|
|
+ }
|
|
|
+
|
|
|
+ // repeated .LocatedBlockProto blocks = 2;
|
|
|
+ public static final int BLOCKS_FIELD_NUMBER = 2;
|
|
|
+ private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto> blocks_;
|
|
|
+ public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto> getBlocksList() {
|
|
|
+ return blocks_;
|
|
|
+ }
|
|
|
+ public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder>
|
|
|
+ getBlocksOrBuilderList() {
|
|
|
+ return blocks_;
|
|
|
+ }
|
|
|
+ public int getBlocksCount() {
|
|
|
+ return blocks_.size();
|
|
|
+ }
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto getBlocks(int index) {
|
|
|
+ return blocks_.get(index);
|
|
|
+ }
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder getBlocksOrBuilder(
|
|
|
+ int index) {
|
|
|
+ return blocks_.get(index);
|
|
|
+ }
|
|
|
+
|
|
|
+ // required bool underConstruction = 3;
|
|
|
+ public static final int UNDERCONSTRUCTION_FIELD_NUMBER = 3;
|
|
|
+ private boolean underConstruction_;
|
|
|
+ public boolean hasUnderConstruction() {
|
|
|
+ return ((bitField0_ & 0x00000002) == 0x00000002);
|
|
|
+ }
|
|
|
+ public boolean getUnderConstruction() {
|
|
|
+ return underConstruction_;
|
|
|
+ }
|
|
|
+
|
|
|
+ // optional .LocatedBlockProto lastBlock = 4;
|
|
|
+ public static final int LASTBLOCK_FIELD_NUMBER = 4;
|
|
|
+ private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto lastBlock_;
|
|
|
+ public boolean hasLastBlock() {
|
|
|
+ return ((bitField0_ & 0x00000004) == 0x00000004);
|
|
|
+ }
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto getLastBlock() {
|
|
|
+ return lastBlock_;
|
|
|
+ }
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder getLastBlockOrBuilder() {
|
|
|
+ return lastBlock_;
|
|
|
+ }
|
|
|
+
|
|
|
+ // required bool isLastBlockComplete = 5;
|
|
|
+ public static final int ISLASTBLOCKCOMPLETE_FIELD_NUMBER = 5;
|
|
|
+ private boolean isLastBlockComplete_;
|
|
|
+ public boolean hasIsLastBlockComplete() {
|
|
|
+ return ((bitField0_ & 0x00000008) == 0x00000008);
|
|
|
+ }
|
|
|
+ public boolean getIsLastBlockComplete() {
|
|
|
+ return isLastBlockComplete_;
|
|
|
+ }
|
|
|
+
|
|
|
+ private void initFields() {
|
|
|
+ fileLength_ = 0L;
|
|
|
+ blocks_ = java.util.Collections.emptyList();
|
|
|
+ underConstruction_ = false;
|
|
|
+ lastBlock_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.getDefaultInstance();
|
|
|
+ isLastBlockComplete_ = false;
|
|
|
+ }
|
|
|
+ private byte memoizedIsInitialized = -1;
|
|
|
+ public final boolean isInitialized() {
|
|
|
+ byte isInitialized = memoizedIsInitialized;
|
|
|
+ if (isInitialized != -1) return isInitialized == 1;
|
|
|
+
|
|
|
+ if (!hasFileLength()) {
|
|
|
+ memoizedIsInitialized = 0;
|
|
|
+ return false;
|
|
|
+ }
|
|
|
+ if (!hasUnderConstruction()) {
|
|
|
+ memoizedIsInitialized = 0;
|
|
|
+ return false;
|
|
|
+ }
|
|
|
+ if (!hasIsLastBlockComplete()) {
|
|
|
+ memoizedIsInitialized = 0;
|
|
|
+ return false;
|
|
|
+ }
|
|
|
+ for (int i = 0; i < getBlocksCount(); i++) {
|
|
|
+ if (!getBlocks(i).isInitialized()) {
|
|
|
+ memoizedIsInitialized = 0;
|
|
|
+ return false;
|
|
|
+ }
|
|
|
+ }
|
|
|
+ if (hasLastBlock()) {
|
|
|
+ if (!getLastBlock().isInitialized()) {
|
|
|
+ memoizedIsInitialized = 0;
|
|
|
+ return false;
|
|
|
+ }
|
|
|
+ }
|
|
|
+ memoizedIsInitialized = 1;
|
|
|
+ return true;
|
|
|
+ }
|
|
|
+
|
|
|
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
|
|
|
+ throws java.io.IOException {
|
|
|
+ getSerializedSize();
|
|
|
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
+ output.writeUInt64(1, fileLength_);
|
|
|
+ }
|
|
|
+ for (int i = 0; i < blocks_.size(); i++) {
|
|
|
+ output.writeMessage(2, blocks_.get(i));
|
|
|
+ }
|
|
|
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
|
|
|
+ output.writeBool(3, underConstruction_);
|
|
|
+ }
|
|
|
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
|
|
|
+ output.writeMessage(4, lastBlock_);
|
|
|
+ }
|
|
|
+ if (((bitField0_ & 0x00000008) == 0x00000008)) {
|
|
|
+ output.writeBool(5, isLastBlockComplete_);
|
|
|
+ }
|
|
|
+ getUnknownFields().writeTo(output);
|
|
|
+ }
|
|
|
+
|
|
|
+ private int memoizedSerializedSize = -1;
|
|
|
+ public int getSerializedSize() {
|
|
|
+ int size = memoizedSerializedSize;
|
|
|
+ if (size != -1) return size;
|
|
|
+
|
|
|
+ size = 0;
|
|
|
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
+ size += com.google.protobuf.CodedOutputStream
|
|
|
+ .computeUInt64Size(1, fileLength_);
|
|
|
+ }
|
|
|
+ for (int i = 0; i < blocks_.size(); i++) {
|
|
|
+ size += com.google.protobuf.CodedOutputStream
|
|
|
+ .computeMessageSize(2, blocks_.get(i));
|
|
|
+ }
|
|
|
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
|
|
|
+ size += com.google.protobuf.CodedOutputStream
|
|
|
+ .computeBoolSize(3, underConstruction_);
|
|
|
+ }
|
|
|
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
|
|
|
+ size += com.google.protobuf.CodedOutputStream
|
|
|
+ .computeMessageSize(4, lastBlock_);
|
|
|
+ }
|
|
|
+ if (((bitField0_ & 0x00000008) == 0x00000008)) {
|
|
|
+ size += com.google.protobuf.CodedOutputStream
|
|
|
+ .computeBoolSize(5, isLastBlockComplete_);
|
|
|
+ }
|
|
|
+ size += getUnknownFields().getSerializedSize();
|
|
|
+ memoizedSerializedSize = size;
|
|
|
+ return size;
|
|
|
+ }
|
|
|
+
|
|
|
+ private static final long serialVersionUID = 0L;
|
|
|
+ @java.lang.Override
|
|
|
+ protected java.lang.Object writeReplace()
|
|
|
+ throws java.io.ObjectStreamException {
|
|
|
+ return super.writeReplace();
|
|
|
+ }
|
|
|
+
|
|
|
+ @java.lang.Override
|
|
|
+ public boolean equals(final java.lang.Object obj) {
|
|
|
+ if (obj == this) {
|
|
|
+ return true;
|
|
|
+ }
|
|
|
+ if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto)) {
|
|
|
+ return super.equals(obj);
|
|
|
+ }
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto) obj;
|
|
|
+
|
|
|
+ boolean result = true;
|
|
|
+ result = result && (hasFileLength() == other.hasFileLength());
|
|
|
+ if (hasFileLength()) {
|
|
|
+ result = result && (getFileLength()
|
|
|
+ == other.getFileLength());
|
|
|
+ }
|
|
|
+ result = result && getBlocksList()
|
|
|
+ .equals(other.getBlocksList());
|
|
|
+ result = result && (hasUnderConstruction() == other.hasUnderConstruction());
|
|
|
+ if (hasUnderConstruction()) {
|
|
|
+ result = result && (getUnderConstruction()
|
|
|
+ == other.getUnderConstruction());
|
|
|
+ }
|
|
|
+ result = result && (hasLastBlock() == other.hasLastBlock());
|
|
|
+ if (hasLastBlock()) {
|
|
|
+ result = result && getLastBlock()
|
|
|
+ .equals(other.getLastBlock());
|
|
|
+ }
|
|
|
+ result = result && (hasIsLastBlockComplete() == other.hasIsLastBlockComplete());
|
|
|
+ if (hasIsLastBlockComplete()) {
|
|
|
+ result = result && (getIsLastBlockComplete()
|
|
|
+ == other.getIsLastBlockComplete());
|
|
|
+ }
|
|
|
+ result = result &&
|
|
|
+ getUnknownFields().equals(other.getUnknownFields());
|
|
|
+ return result;
|
|
|
+ }
|
|
|
+
|
|
|
+ @java.lang.Override
|
|
|
+ public int hashCode() {
|
|
|
+ int hash = 41;
|
|
|
+ hash = (19 * hash) + getDescriptorForType().hashCode();
|
|
|
+ if (hasFileLength()) {
|
|
|
+ hash = (37 * hash) + FILELENGTH_FIELD_NUMBER;
|
|
|
+ hash = (53 * hash) + hashLong(getFileLength());
|
|
|
+ }
|
|
|
+ if (getBlocksCount() > 0) {
|
|
|
+ hash = (37 * hash) + BLOCKS_FIELD_NUMBER;
|
|
|
+ hash = (53 * hash) + getBlocksList().hashCode();
|
|
|
+ }
|
|
|
+ if (hasUnderConstruction()) {
|
|
|
+ hash = (37 * hash) + UNDERCONSTRUCTION_FIELD_NUMBER;
|
|
|
+ hash = (53 * hash) + hashBoolean(getUnderConstruction());
|
|
|
+ }
|
|
|
+ if (hasLastBlock()) {
|
|
|
+ hash = (37 * hash) + LASTBLOCK_FIELD_NUMBER;
|
|
|
+ hash = (53 * hash) + getLastBlock().hashCode();
|
|
|
+ }
|
|
|
+ if (hasIsLastBlockComplete()) {
|
|
|
+ hash = (37 * hash) + ISLASTBLOCKCOMPLETE_FIELD_NUMBER;
|
|
|
+ hash = (53 * hash) + hashBoolean(getIsLastBlockComplete());
|
|
|
+ }
|
|
|
+ hash = (29 * hash) + getUnknownFields().hashCode();
|
|
|
+ return hash;
|
|
|
+ }
|
|
|
+
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto parseFrom(
|
|
|
+ com.google.protobuf.ByteString data)
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
+ return newBuilder().mergeFrom(data).buildParsed();
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto parseFrom(
|
|
|
+ com.google.protobuf.ByteString data,
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
+ return newBuilder().mergeFrom(data, extensionRegistry)
|
|
|
+ .buildParsed();
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto parseFrom(byte[] data)
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
+ return newBuilder().mergeFrom(data).buildParsed();
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto parseFrom(
|
|
|
+ byte[] data,
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
+ return newBuilder().mergeFrom(data, extensionRegistry)
|
|
|
+ .buildParsed();
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto parseFrom(java.io.InputStream input)
|
|
|
+ throws java.io.IOException {
|
|
|
+ return newBuilder().mergeFrom(input).buildParsed();
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto parseFrom(
|
|
|
+ java.io.InputStream input,
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
+ throws java.io.IOException {
|
|
|
+ return newBuilder().mergeFrom(input, extensionRegistry)
|
|
|
+ .buildParsed();
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto parseDelimitedFrom(java.io.InputStream input)
|
|
|
+ throws java.io.IOException {
|
|
|
+ Builder builder = newBuilder();
|
|
|
+ if (builder.mergeDelimitedFrom(input)) {
|
|
|
+ return builder.buildParsed();
|
|
|
+ } else {
|
|
|
+ return null;
|
|
|
+ }
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto parseDelimitedFrom(
|
|
|
+ java.io.InputStream input,
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
+ throws java.io.IOException {
|
|
|
+ Builder builder = newBuilder();
|
|
|
+ if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
|
|
|
+ return builder.buildParsed();
|
|
|
+ } else {
|
|
|
+ return null;
|
|
|
+ }
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto parseFrom(
|
|
|
+ com.google.protobuf.CodedInputStream input)
|
|
|
+ throws java.io.IOException {
|
|
|
+ return newBuilder().mergeFrom(input).buildParsed();
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto parseFrom(
|
|
|
+ com.google.protobuf.CodedInputStream input,
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
+ throws java.io.IOException {
|
|
|
+ return newBuilder().mergeFrom(input, extensionRegistry)
|
|
|
+ .buildParsed();
|
|
|
+ }
|
|
|
+
|
|
|
+ public static Builder newBuilder() { return Builder.create(); }
|
|
|
+ public Builder newBuilderForType() { return newBuilder(); }
|
|
|
+ public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto prototype) {
|
|
|
+ return newBuilder().mergeFrom(prototype);
|
|
|
+ }
|
|
|
+ public Builder toBuilder() { return newBuilder(this); }
|
|
|
+
|
|
|
+ @java.lang.Override
|
|
|
+ protected Builder newBuilderForType(
|
|
|
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
|
|
|
+ Builder builder = new Builder(parent);
|
|
|
+ return builder;
|
|
|
+ }
|
|
|
+ public static final class Builder extends
|
|
|
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
|
|
|
+ implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProtoOrBuilder {
|
|
|
+ public static final com.google.protobuf.Descriptors.Descriptor
|
|
|
+ getDescriptor() {
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_LocatedBlocksProto_descriptor;
|
|
|
+ }
|
|
|
+
|
|
|
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
|
|
+ internalGetFieldAccessorTable() {
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_LocatedBlocksProto_fieldAccessorTable;
|
|
|
+ }
|
|
|
+
|
|
|
+ // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.newBuilder()
|
|
|
+ private Builder() {
|
|
|
+ maybeForceBuilderInitialization();
|
|
|
+ }
|
|
|
+
|
|
|
+ private Builder(BuilderParent parent) {
|
|
|
+ super(parent);
|
|
|
+ maybeForceBuilderInitialization();
|
|
|
+ }
|
|
|
+ private void maybeForceBuilderInitialization() {
|
|
|
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
|
|
|
+ getBlocksFieldBuilder();
|
|
|
+ getLastBlockFieldBuilder();
|
|
|
+ }
|
|
|
+ }
|
|
|
+ private static Builder create() {
|
|
|
+ return new Builder();
|
|
|
+ }
|
|
|
+
|
|
|
+ public Builder clear() {
|
|
|
+ super.clear();
|
|
|
+ fileLength_ = 0L;
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000001);
|
|
|
+ if (blocksBuilder_ == null) {
|
|
|
+ blocks_ = java.util.Collections.emptyList();
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000002);
|
|
|
+ } else {
|
|
|
+ blocksBuilder_.clear();
|
|
|
+ }
|
|
|
+ underConstruction_ = false;
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000004);
|
|
|
+ if (lastBlockBuilder_ == null) {
|
|
|
+ lastBlock_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.getDefaultInstance();
|
|
|
+ } else {
|
|
|
+ lastBlockBuilder_.clear();
|
|
|
+ }
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000008);
|
|
|
+ isLastBlockComplete_ = false;
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000010);
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+
|
|
|
+ public Builder clone() {
|
|
|
+ return create().mergeFrom(buildPartial());
|
|
|
+ }
|
|
|
+
|
|
|
+ public com.google.protobuf.Descriptors.Descriptor
|
|
|
+ getDescriptorForType() {
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.getDescriptor();
|
|
|
+ }
|
|
|
+
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto getDefaultInstanceForType() {
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.getDefaultInstance();
|
|
|
+ }
|
|
|
+
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto build() {
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto result = buildPartial();
|
|
|
+ if (!result.isInitialized()) {
|
|
|
+ throw newUninitializedMessageException(result);
|
|
|
+ }
|
|
|
+ return result;
|
|
|
+ }
|
|
|
+
|
|
|
+ private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto buildParsed()
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto result = buildPartial();
|
|
|
+ if (!result.isInitialized()) {
|
|
|
+ throw newUninitializedMessageException(
|
|
|
+ result).asInvalidProtocolBufferException();
|
|
|
+ }
|
|
|
+ return result;
|
|
|
+ }
|
|
|
+
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto buildPartial() {
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto(this);
|
|
|
+ int from_bitField0_ = bitField0_;
|
|
|
+ int to_bitField0_ = 0;
|
|
|
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
+ to_bitField0_ |= 0x00000001;
|
|
|
+ }
|
|
|
+ result.fileLength_ = fileLength_;
|
|
|
+ if (blocksBuilder_ == null) {
|
|
|
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
|
|
|
+ blocks_ = java.util.Collections.unmodifiableList(blocks_);
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000002);
|
|
|
+ }
|
|
|
+ result.blocks_ = blocks_;
|
|
|
+ } else {
|
|
|
+ result.blocks_ = blocksBuilder_.build();
|
|
|
+ }
|
|
|
+ if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
|
|
|
+ to_bitField0_ |= 0x00000002;
|
|
|
+ }
|
|
|
+ result.underConstruction_ = underConstruction_;
|
|
|
+ if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
|
|
|
+ to_bitField0_ |= 0x00000004;
|
|
|
+ }
|
|
|
+ if (lastBlockBuilder_ == null) {
|
|
|
+ result.lastBlock_ = lastBlock_;
|
|
|
+ } else {
|
|
|
+ result.lastBlock_ = lastBlockBuilder_.build();
|
|
|
+ }
|
|
|
+ if (((from_bitField0_ & 0x00000010) == 0x00000010)) {
|
|
|
+ to_bitField0_ |= 0x00000008;
|
|
|
+ }
|
|
|
+ result.isLastBlockComplete_ = isLastBlockComplete_;
|
|
|
+ result.bitField0_ = to_bitField0_;
|
|
|
+ onBuilt();
|
|
|
+ return result;
|
|
|
+ }
|
|
|
+
|
|
|
+ public Builder mergeFrom(com.google.protobuf.Message other) {
|
|
|
+ if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto) {
|
|
|
+ return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto)other);
|
|
|
+ } else {
|
|
|
+ super.mergeFrom(other);
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+ }
|
|
|
+
|
|
|
+ public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto other) {
|
|
|
+ if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.getDefaultInstance()) return this;
|
|
|
+ if (other.hasFileLength()) {
|
|
|
+ setFileLength(other.getFileLength());
|
|
|
+ }
|
|
|
+ if (blocksBuilder_ == null) {
|
|
|
+ if (!other.blocks_.isEmpty()) {
|
|
|
+ if (blocks_.isEmpty()) {
|
|
|
+ blocks_ = other.blocks_;
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000002);
|
|
|
+ } else {
|
|
|
+ ensureBlocksIsMutable();
|
|
|
+ blocks_.addAll(other.blocks_);
|
|
|
+ }
|
|
|
+ onChanged();
|
|
|
+ }
|
|
|
+ } else {
|
|
|
+ if (!other.blocks_.isEmpty()) {
|
|
|
+ if (blocksBuilder_.isEmpty()) {
|
|
|
+ blocksBuilder_.dispose();
|
|
|
+ blocksBuilder_ = null;
|
|
|
+ blocks_ = other.blocks_;
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000002);
|
|
|
+ blocksBuilder_ =
|
|
|
+ com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
|
|
|
+ getBlocksFieldBuilder() : null;
|
|
|
+ } else {
|
|
|
+ blocksBuilder_.addAllMessages(other.blocks_);
|
|
|
+ }
|
|
|
+ }
|
|
|
+ }
|
|
|
+ if (other.hasUnderConstruction()) {
|
|
|
+ setUnderConstruction(other.getUnderConstruction());
|
|
|
+ }
|
|
|
+ if (other.hasLastBlock()) {
|
|
|
+ mergeLastBlock(other.getLastBlock());
|
|
|
+ }
|
|
|
+ if (other.hasIsLastBlockComplete()) {
|
|
|
+ setIsLastBlockComplete(other.getIsLastBlockComplete());
|
|
|
+ }
|
|
|
+ this.mergeUnknownFields(other.getUnknownFields());
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+
|
|
|
+ public final boolean isInitialized() {
|
|
|
+ if (!hasFileLength()) {
|
|
|
+
|
|
|
+ return false;
|
|
|
+ }
|
|
|
+ if (!hasUnderConstruction()) {
|
|
|
+
|
|
|
+ return false;
|
|
|
+ }
|
|
|
+ if (!hasIsLastBlockComplete()) {
|
|
|
+
|
|
|
+ return false;
|
|
|
+ }
|
|
|
+ for (int i = 0; i < getBlocksCount(); i++) {
|
|
|
+ if (!getBlocks(i).isInitialized()) {
|
|
|
+
|
|
|
+ return false;
|
|
|
+ }
|
|
|
+ }
|
|
|
+ if (hasLastBlock()) {
|
|
|
+ if (!getLastBlock().isInitialized()) {
|
|
|
+
|
|
|
+ return false;
|
|
|
+ }
|
|
|
+ }
|
|
|
+ return true;
|
|
|
+ }
|
|
|
+
|
|
|
+ public Builder mergeFrom(
|
|
|
+ com.google.protobuf.CodedInputStream input,
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
+ throws java.io.IOException {
|
|
|
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
|
|
|
+ com.google.protobuf.UnknownFieldSet.newBuilder(
|
|
|
+ this.getUnknownFields());
|
|
|
+ while (true) {
|
|
|
+ int tag = input.readTag();
|
|
|
+ switch (tag) {
|
|
|
+ case 0:
|
|
|
+ this.setUnknownFields(unknownFields.build());
|
|
|
+ onChanged();
|
|
|
+ return this;
|
|
|
+ default: {
|
|
|
+ if (!parseUnknownField(input, unknownFields,
|
|
|
+ extensionRegistry, tag)) {
|
|
|
+ this.setUnknownFields(unknownFields.build());
|
|
|
+ onChanged();
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+ break;
|
|
|
+ }
|
|
|
+ case 8: {
|
|
|
+ bitField0_ |= 0x00000001;
|
|
|
+ fileLength_ = input.readUInt64();
|
|
|
+ break;
|
|
|
+ }
|
|
|
+ case 18: {
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.newBuilder();
|
|
|
+ input.readMessage(subBuilder, extensionRegistry);
|
|
|
+ addBlocks(subBuilder.buildPartial());
|
|
|
+ break;
|
|
|
+ }
|
|
|
+ case 24: {
|
|
|
+ bitField0_ |= 0x00000004;
|
|
|
+ underConstruction_ = input.readBool();
|
|
|
+ break;
|
|
|
+ }
|
|
|
+ case 34: {
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.newBuilder();
|
|
|
+ if (hasLastBlock()) {
|
|
|
+ subBuilder.mergeFrom(getLastBlock());
|
|
|
+ }
|
|
|
+ input.readMessage(subBuilder, extensionRegistry);
|
|
|
+ setLastBlock(subBuilder.buildPartial());
|
|
|
+ break;
|
|
|
+ }
|
|
|
+ case 40: {
|
|
|
+ bitField0_ |= 0x00000010;
|
|
|
+ isLastBlockComplete_ = input.readBool();
|
|
|
+ break;
|
|
|
+ }
|
|
|
+ }
|
|
|
+ }
|
|
|
+ }
|
|
|
+
|
|
|
+ private int bitField0_;
|
|
|
+
|
|
|
+ // required uint64 fileLength = 1;
|
|
|
+ private long fileLength_ ;
|
|
|
+ public boolean hasFileLength() {
|
|
|
+ return ((bitField0_ & 0x00000001) == 0x00000001);
|
|
|
+ }
|
|
|
+ public long getFileLength() {
|
|
|
+ return fileLength_;
|
|
|
+ }
|
|
|
+ public Builder setFileLength(long value) {
|
|
|
+ bitField0_ |= 0x00000001;
|
|
|
+ fileLength_ = value;
|
|
|
+ onChanged();
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+ public Builder clearFileLength() {
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000001);
|
|
|
+ fileLength_ = 0L;
|
|
|
+ onChanged();
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+
|
|
|
+ // repeated .LocatedBlockProto blocks = 2;
|
|
|
+ private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto> blocks_ =
|
|
|
+ java.util.Collections.emptyList();
|
|
|
+ private void ensureBlocksIsMutable() {
|
|
|
+ if (!((bitField0_ & 0x00000002) == 0x00000002)) {
|
|
|
+ blocks_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto>(blocks_);
|
|
|
+ bitField0_ |= 0x00000002;
|
|
|
+ }
|
|
|
+ }
|
|
|
+
|
|
|
+ private com.google.protobuf.RepeatedFieldBuilder<
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder> blocksBuilder_;
|
|
|
+
|
|
|
+ public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto> getBlocksList() {
|
|
|
+ if (blocksBuilder_ == null) {
|
|
|
+ return java.util.Collections.unmodifiableList(blocks_);
|
|
|
+ } else {
|
|
|
+ return blocksBuilder_.getMessageList();
|
|
|
+ }
|
|
|
+ }
|
|
|
+ public int getBlocksCount() {
|
|
|
+ if (blocksBuilder_ == null) {
|
|
|
+ return blocks_.size();
|
|
|
+ } else {
|
|
|
+ return blocksBuilder_.getCount();
|
|
|
+ }
|
|
|
+ }
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto getBlocks(int index) {
|
|
|
+ if (blocksBuilder_ == null) {
|
|
|
+ return blocks_.get(index);
|
|
|
+ } else {
|
|
|
+ return blocksBuilder_.getMessage(index);
|
|
|
+ }
|
|
|
+ }
|
|
|
+ public Builder setBlocks(
|
|
|
+ int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto value) {
|
|
|
+ if (blocksBuilder_ == null) {
|
|
|
+ if (value == null) {
|
|
|
+ throw new NullPointerException();
|
|
|
+ }
|
|
|
+ ensureBlocksIsMutable();
|
|
|
+ blocks_.set(index, value);
|
|
|
+ onChanged();
|
|
|
+ } else {
|
|
|
+ blocksBuilder_.setMessage(index, value);
|
|
|
+ }
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+ public Builder setBlocks(
|
|
|
+ int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder builderForValue) {
|
|
|
+ if (blocksBuilder_ == null) {
|
|
|
+ ensureBlocksIsMutable();
|
|
|
+ blocks_.set(index, builderForValue.build());
|
|
|
+ onChanged();
|
|
|
+ } else {
|
|
|
+ blocksBuilder_.setMessage(index, builderForValue.build());
|
|
|
+ }
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+ public Builder addBlocks(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto value) {
|
|
|
+ if (blocksBuilder_ == null) {
|
|
|
+ if (value == null) {
|
|
|
+ throw new NullPointerException();
|
|
|
+ }
|
|
|
+ ensureBlocksIsMutable();
|
|
|
+ blocks_.add(value);
|
|
|
+ onChanged();
|
|
|
+ } else {
|
|
|
+ blocksBuilder_.addMessage(value);
|
|
|
+ }
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+ public Builder addBlocks(
|
|
|
+ int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto value) {
|
|
|
+ if (blocksBuilder_ == null) {
|
|
|
+ if (value == null) {
|
|
|
+ throw new NullPointerException();
|
|
|
+ }
|
|
|
+ ensureBlocksIsMutable();
|
|
|
+ blocks_.add(index, value);
|
|
|
+ onChanged();
|
|
|
+ } else {
|
|
|
+ blocksBuilder_.addMessage(index, value);
|
|
|
+ }
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+ public Builder addBlocks(
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder builderForValue) {
|
|
|
+ if (blocksBuilder_ == null) {
|
|
|
+ ensureBlocksIsMutable();
|
|
|
+ blocks_.add(builderForValue.build());
|
|
|
+ onChanged();
|
|
|
+ } else {
|
|
|
+ blocksBuilder_.addMessage(builderForValue.build());
|
|
|
+ }
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+ public Builder addBlocks(
|
|
|
+ int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder builderForValue) {
|
|
|
+ if (blocksBuilder_ == null) {
|
|
|
+ ensureBlocksIsMutable();
|
|
|
+ blocks_.add(index, builderForValue.build());
|
|
|
+ onChanged();
|
|
|
+ } else {
|
|
|
+ blocksBuilder_.addMessage(index, builderForValue.build());
|
|
|
+ }
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+ public Builder addAllBlocks(
|
|
|
+ java.lang.Iterable<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto> values) {
|
|
|
+ if (blocksBuilder_ == null) {
|
|
|
+ ensureBlocksIsMutable();
|
|
|
+ super.addAll(values, blocks_);
|
|
|
+ onChanged();
|
|
|
+ } else {
|
|
|
+ blocksBuilder_.addAllMessages(values);
|
|
|
+ }
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+ public Builder clearBlocks() {
|
|
|
+ if (blocksBuilder_ == null) {
|
|
|
+ blocks_ = java.util.Collections.emptyList();
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000002);
|
|
|
+ onChanged();
|
|
|
+ } else {
|
|
|
+ blocksBuilder_.clear();
|
|
|
+ }
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+ public Builder removeBlocks(int index) {
|
|
|
+ if (blocksBuilder_ == null) {
|
|
|
+ ensureBlocksIsMutable();
|
|
|
+ blocks_.remove(index);
|
|
|
+ onChanged();
|
|
|
+ } else {
|
|
|
+ blocksBuilder_.remove(index);
|
|
|
+ }
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder getBlocksBuilder(
|
|
|
+ int index) {
|
|
|
+ return getBlocksFieldBuilder().getBuilder(index);
|
|
|
+ }
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder getBlocksOrBuilder(
|
|
|
+ int index) {
|
|
|
+ if (blocksBuilder_ == null) {
|
|
|
+ return blocks_.get(index); } else {
|
|
|
+ return blocksBuilder_.getMessageOrBuilder(index);
|
|
|
+ }
|
|
|
+ }
|
|
|
+ public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder>
|
|
|
+ getBlocksOrBuilderList() {
|
|
|
+ if (blocksBuilder_ != null) {
|
|
|
+ return blocksBuilder_.getMessageOrBuilderList();
|
|
|
+ } else {
|
|
|
+ return java.util.Collections.unmodifiableList(blocks_);
|
|
|
+ }
|
|
|
+ }
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder addBlocksBuilder() {
|
|
|
+ return getBlocksFieldBuilder().addBuilder(
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.getDefaultInstance());
|
|
|
+ }
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder addBlocksBuilder(
|
|
|
+ int index) {
|
|
|
+ return getBlocksFieldBuilder().addBuilder(
|
|
|
+ index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.getDefaultInstance());
|
|
|
+ }
|
|
|
+ public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder>
|
|
|
+ getBlocksBuilderList() {
|
|
|
+ return getBlocksFieldBuilder().getBuilderList();
|
|
|
+ }
|
|
|
+ private com.google.protobuf.RepeatedFieldBuilder<
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder>
|
|
|
+ getBlocksFieldBuilder() {
|
|
|
+ if (blocksBuilder_ == null) {
|
|
|
+ blocksBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder>(
|
|
|
+ blocks_,
|
|
|
+ ((bitField0_ & 0x00000002) == 0x00000002),
|
|
|
+ getParentForChildren(),
|
|
|
+ isClean());
|
|
|
+ blocks_ = null;
|
|
|
+ }
|
|
|
+ return blocksBuilder_;
|
|
|
+ }
|
|
|
+
|
|
|
+ // required bool underConstruction = 3;
|
|
|
+ private boolean underConstruction_ ;
|
|
|
+ public boolean hasUnderConstruction() {
|
|
|
+ return ((bitField0_ & 0x00000004) == 0x00000004);
|
|
|
+ }
|
|
|
+ public boolean getUnderConstruction() {
|
|
|
+ return underConstruction_;
|
|
|
+ }
|
|
|
+ public Builder setUnderConstruction(boolean value) {
|
|
|
+ bitField0_ |= 0x00000004;
|
|
|
+ underConstruction_ = value;
|
|
|
+ onChanged();
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+ public Builder clearUnderConstruction() {
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000004);
|
|
|
+ underConstruction_ = false;
|
|
|
+ onChanged();
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+
|
|
|
+ // optional .LocatedBlockProto lastBlock = 4;
|
|
|
+ private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto lastBlock_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.getDefaultInstance();
|
|
|
+ private com.google.protobuf.SingleFieldBuilder<
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder> lastBlockBuilder_;
|
|
|
+ public boolean hasLastBlock() {
|
|
|
+ return ((bitField0_ & 0x00000008) == 0x00000008);
|
|
|
+ }
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto getLastBlock() {
|
|
|
+ if (lastBlockBuilder_ == null) {
|
|
|
+ return lastBlock_;
|
|
|
+ } else {
|
|
|
+ return lastBlockBuilder_.getMessage();
|
|
|
+ }
|
|
|
+ }
|
|
|
+ public Builder setLastBlock(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto value) {
|
|
|
+ if (lastBlockBuilder_ == null) {
|
|
|
+ if (value == null) {
|
|
|
+ throw new NullPointerException();
|
|
|
+ }
|
|
|
+ lastBlock_ = value;
|
|
|
+ onChanged();
|
|
|
+ } else {
|
|
|
+ lastBlockBuilder_.setMessage(value);
|
|
|
+ }
|
|
|
+ bitField0_ |= 0x00000008;
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+ public Builder setLastBlock(
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder builderForValue) {
|
|
|
+ if (lastBlockBuilder_ == null) {
|
|
|
+ lastBlock_ = builderForValue.build();
|
|
|
+ onChanged();
|
|
|
+ } else {
|
|
|
+ lastBlockBuilder_.setMessage(builderForValue.build());
|
|
|
+ }
|
|
|
+ bitField0_ |= 0x00000008;
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+ public Builder mergeLastBlock(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto value) {
|
|
|
+ if (lastBlockBuilder_ == null) {
|
|
|
+ if (((bitField0_ & 0x00000008) == 0x00000008) &&
|
|
|
+ lastBlock_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.getDefaultInstance()) {
|
|
|
+ lastBlock_ =
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.newBuilder(lastBlock_).mergeFrom(value).buildPartial();
|
|
|
+ } else {
|
|
|
+ lastBlock_ = value;
|
|
|
+ }
|
|
|
+ onChanged();
|
|
|
+ } else {
|
|
|
+ lastBlockBuilder_.mergeFrom(value);
|
|
|
+ }
|
|
|
+ bitField0_ |= 0x00000008;
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+ public Builder clearLastBlock() {
|
|
|
+ if (lastBlockBuilder_ == null) {
|
|
|
+ lastBlock_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.getDefaultInstance();
|
|
|
+ onChanged();
|
|
|
+ } else {
|
|
|
+ lastBlockBuilder_.clear();
|
|
|
+ }
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000008);
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder getLastBlockBuilder() {
|
|
|
+ bitField0_ |= 0x00000008;
|
|
|
+ onChanged();
|
|
|
+ return getLastBlockFieldBuilder().getBuilder();
|
|
|
+ }
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder getLastBlockOrBuilder() {
|
|
|
+ if (lastBlockBuilder_ != null) {
|
|
|
+ return lastBlockBuilder_.getMessageOrBuilder();
|
|
|
+ } else {
|
|
|
+ return lastBlock_;
|
|
|
+ }
|
|
|
+ }
|
|
|
+ private com.google.protobuf.SingleFieldBuilder<
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder>
|
|
|
+ getLastBlockFieldBuilder() {
|
|
|
+ if (lastBlockBuilder_ == null) {
|
|
|
+ lastBlockBuilder_ = new com.google.protobuf.SingleFieldBuilder<
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder>(
|
|
|
+ lastBlock_,
|
|
|
+ getParentForChildren(),
|
|
|
+ isClean());
|
|
|
+ lastBlock_ = null;
|
|
|
+ }
|
|
|
+ return lastBlockBuilder_;
|
|
|
+ }
|
|
|
+
|
|
|
+ // required bool isLastBlockComplete = 5;
|
|
|
+ private boolean isLastBlockComplete_ ;
|
|
|
+ public boolean hasIsLastBlockComplete() {
|
|
|
+ return ((bitField0_ & 0x00000010) == 0x00000010);
|
|
|
+ }
|
|
|
+ public boolean getIsLastBlockComplete() {
|
|
|
+ return isLastBlockComplete_;
|
|
|
+ }
|
|
|
+ public Builder setIsLastBlockComplete(boolean value) {
|
|
|
+ bitField0_ |= 0x00000010;
|
|
|
+ isLastBlockComplete_ = value;
|
|
|
+ onChanged();
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+ public Builder clearIsLastBlockComplete() {
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000010);
|
|
|
+ isLastBlockComplete_ = false;
|
|
|
+ onChanged();
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+
|
|
|
+ // @@protoc_insertion_point(builder_scope:LocatedBlocksProto)
|
|
|
+ }
|
|
|
+
|
|
|
+ static {
|
|
|
+ defaultInstance = new LocatedBlocksProto(true);
|
|
|
+ defaultInstance.initFields();
|
|
|
+ }
|
|
|
+
|
|
|
+ // @@protoc_insertion_point(class_scope:LocatedBlocksProto)
|
|
|
+ }
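
Similarly, a minimal sketch of how the LocatedBlocksProto message defined above might be built and re-parsed. It is illustrative only and not part of the patch; `block` is a placeholder for a fully initialized LocatedBlockProto, and the numeric values are made up.

    // Illustrative sketch only: round-tripping a LocatedBlocksProto through the
    // generated builder and the parseFrom(byte[]) overload shown above.
    // block is a placeholder for a fully initialized LocatedBlockProto.
    HdfsProtos.LocatedBlocksProto blocks = HdfsProtos.LocatedBlocksProto.newBuilder()
        .setFileLength(134217728L)      // required uint64 fileLength = 1
        .addBlocks(block)               // repeated .LocatedBlockProto blocks = 2
        .setUnderConstruction(false)    // required bool underConstruction = 3
        .setIsLastBlockComplete(true)   // required bool isLastBlockComplete = 5
        .build();                       // optional lastBlock (field 4) is left unset
    byte[] wire = blocks.toByteArray();
    HdfsProtos.LocatedBlocksProto parsed =
        HdfsProtos.LocatedBlocksProto.parseFrom(wire);
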
|
|
|
+
|
|
|
+ public interface HdfsFileStatusProtoOrBuilder
|
|
|
+ extends com.google.protobuf.MessageOrBuilder {
|
|
|
+
|
|
|
+ // required .HdfsFileStatusProto.FileType fileType = 1;
|
|
|
+ boolean hasFileType();
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.FileType getFileType();
|
|
|
+
|
|
|
+ // required bytes path = 2;
|
|
|
+ boolean hasPath();
|
|
|
+ com.google.protobuf.ByteString getPath();
|
|
|
+
|
|
|
+ // required uint64 length = 3;
|
|
|
+ boolean hasLength();
|
|
|
+ long getLength();
|
|
|
+
|
|
|
+ // required .FsPermissionProto permission = 4;
|
|
|
+ boolean hasPermission();
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto getPermission();
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProtoOrBuilder getPermissionOrBuilder();
|
|
|
+
|
|
|
+ // required string owner = 5;
|
|
|
+ boolean hasOwner();
|
|
|
+ String getOwner();
|
|
|
+
|
|
|
+ // required string group = 6;
|
|
|
+ boolean hasGroup();
|
|
|
+ String getGroup();
|
|
|
+
|
|
|
+ // required uint64 modification_time = 7;
|
|
|
+ boolean hasModificationTime();
|
|
|
+ long getModificationTime();
|
|
|
+
|
|
|
+ // required uint64 access_time = 8;
|
|
|
+ boolean hasAccessTime();
|
|
|
+ long getAccessTime();
|
|
|
+
|
|
|
+ // optional bytes symlink = 9;
|
|
|
+ boolean hasSymlink();
|
|
|
+ com.google.protobuf.ByteString getSymlink();
|
|
|
+
|
|
|
+ // optional uint32 block_replication = 10;
|
|
|
+ boolean hasBlockReplication();
|
|
|
+ int getBlockReplication();
|
|
|
+
|
|
|
+ // optional uint64 blocksize = 11;
|
|
|
+ boolean hasBlocksize();
|
|
|
+ long getBlocksize();
|
|
|
+
|
|
|
+ // optional .LocatedBlocksProto locations = 12;
|
|
|
+ boolean hasLocations();
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto getLocations();
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProtoOrBuilder getLocationsOrBuilder();
|
|
|
+ }
|
|
|
+ public static final class HdfsFileStatusProto extends
|
|
|
+ com.google.protobuf.GeneratedMessage
|
|
|
+ implements HdfsFileStatusProtoOrBuilder {
|
|
|
+ // Use HdfsFileStatusProto.newBuilder() to construct.
|
|
|
+ private HdfsFileStatusProto(Builder builder) {
|
|
|
+ super(builder);
|
|
|
+ }
|
|
|
+ private HdfsFileStatusProto(boolean noInit) {}
|
|
|
+
|
|
|
+ private static final HdfsFileStatusProto defaultInstance;
|
|
|
+ public static HdfsFileStatusProto getDefaultInstance() {
|
|
|
+ return defaultInstance;
|
|
|
+ }
|
|
|
+
|
|
|
+ public HdfsFileStatusProto getDefaultInstanceForType() {
|
|
|
+ return defaultInstance;
|
|
|
+ }
|
|
|
+
|
|
|
+ public static final com.google.protobuf.Descriptors.Descriptor
|
|
|
+ getDescriptor() {
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_HdfsFileStatusProto_descriptor;
|
|
|
+ }
|
|
|
+
|
|
|
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
|
|
+ internalGetFieldAccessorTable() {
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_HdfsFileStatusProto_fieldAccessorTable;
|
|
|
+ }
|
|
|
+
|
|
|
+ public enum FileType
|
|
|
+ implements com.google.protobuf.ProtocolMessageEnum {
|
|
|
+ IS_DIR(0, 1),
|
|
|
+ IS_FILE(1, 2),
|
|
|
+ IS_SYMLINK(2, 3),
|
|
|
+ ;
|
|
|
+
|
|
|
+ public static final int IS_DIR_VALUE = 1;
|
|
|
+ public static final int IS_FILE_VALUE = 2;
|
|
|
+ public static final int IS_SYMLINK_VALUE = 3;
|
|
|
+
|
|
|
+
|
|
|
+ public final int getNumber() { return value; }
|
|
|
+
|
|
|
+ public static FileType valueOf(int value) {
|
|
|
+ switch (value) {
|
|
|
+ case 1: return IS_DIR;
|
|
|
+ case 2: return IS_FILE;
|
|
|
+ case 3: return IS_SYMLINK;
|
|
|
+ default: return null;
|
|
|
+ }
|
|
|
+ }
|
|
|
+
|
|
|
+ public static com.google.protobuf.Internal.EnumLiteMap<FileType>
|
|
|
+ internalGetValueMap() {
|
|
|
+ return internalValueMap;
|
|
|
+ }
|
|
|
+ private static com.google.protobuf.Internal.EnumLiteMap<FileType>
|
|
|
+ internalValueMap =
|
|
|
+ new com.google.protobuf.Internal.EnumLiteMap<FileType>() {
|
|
|
+ public FileType findValueByNumber(int number) {
|
|
|
+ return FileType.valueOf(number);
|
|
|
+ }
|
|
|
+ };
|
|
|
+
|
|
|
+ public final com.google.protobuf.Descriptors.EnumValueDescriptor
|
|
|
+ getValueDescriptor() {
|
|
|
+ return getDescriptor().getValues().get(index);
|
|
|
+ }
|
|
|
+ public final com.google.protobuf.Descriptors.EnumDescriptor
|
|
|
+ getDescriptorForType() {
|
|
|
+ return getDescriptor();
|
|
|
+ }
|
|
|
+ public static final com.google.protobuf.Descriptors.EnumDescriptor
|
|
|
+ getDescriptor() {
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.getDescriptor().getEnumTypes().get(0);
|
|
|
+ }
|
|
|
+
|
|
|
+ private static final FileType[] VALUES = {
|
|
|
+ IS_DIR, IS_FILE, IS_SYMLINK,
|
|
|
+ };
|
|
|
+
|
|
|
+ public static FileType valueOf(
|
|
|
+ com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
|
|
|
+ if (desc.getType() != getDescriptor()) {
|
|
|
+ throw new java.lang.IllegalArgumentException(
|
|
|
+ "EnumValueDescriptor is not for this type.");
|
|
|
+ }
|
|
|
+ return VALUES[desc.getIndex()];
|
|
|
+ }
|
|
|
+
|
|
|
+ private final int index;
|
|
|
+ private final int value;
|
|
|
+
|
|
|
+ private FileType(int index, int value) {
|
|
|
+ this.index = index;
|
|
|
+ this.value = value;
|
|
|
+ }
|
|
|
+
|
|
|
+ // @@protoc_insertion_point(enum_scope:HdfsFileStatusProto.FileType)
|
|
|
+ }
|
|
|
+
|
|
|
+ private int bitField0_;
|
|
|
+ // required .HdfsFileStatusProto.FileType fileType = 1;
|
|
|
+ public static final int FILETYPE_FIELD_NUMBER = 1;
|
|
|
+ private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.FileType fileType_;
|
|
|
+ public boolean hasFileType() {
|
|
|
+ return ((bitField0_ & 0x00000001) == 0x00000001);
|
|
|
+ }
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.FileType getFileType() {
|
|
|
+ return fileType_;
|
|
|
+ }
|
|
|
+
|
|
|
+ // required bytes path = 2;
|
|
|
+ public static final int PATH_FIELD_NUMBER = 2;
|
|
|
+ private com.google.protobuf.ByteString path_;
|
|
|
+ public boolean hasPath() {
|
|
|
+ return ((bitField0_ & 0x00000002) == 0x00000002);
|
|
|
+ }
|
|
|
+ public com.google.protobuf.ByteString getPath() {
|
|
|
+ return path_;
|
|
|
+ }
|
|
|
+
|
|
|
+ // required uint64 length = 3;
|
|
|
+ public static final int LENGTH_FIELD_NUMBER = 3;
|
|
|
+ private long length_;
|
|
|
+ public boolean hasLength() {
|
|
|
+ return ((bitField0_ & 0x00000004) == 0x00000004);
|
|
|
+ }
|
|
|
+ public long getLength() {
|
|
|
+ return length_;
|
|
|
+ }
|
|
|
+
|
|
|
+ // required .FsPermissionProto permission = 4;
|
|
|
+ public static final int PERMISSION_FIELD_NUMBER = 4;
|
|
|
+ private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto permission_;
|
|
|
+ public boolean hasPermission() {
|
|
|
+ return ((bitField0_ & 0x00000008) == 0x00000008);
|
|
|
+ }
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto getPermission() {
|
|
|
+ return permission_;
|
|
|
+ }
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProtoOrBuilder getPermissionOrBuilder() {
|
|
|
+ return permission_;
|
|
|
+ }
|
|
|
+
|
|
|
+ // required string owner = 5;
|
|
|
+ public static final int OWNER_FIELD_NUMBER = 5;
|
|
|
+ private java.lang.Object owner_;
|
|
|
+ public boolean hasOwner() {
|
|
|
+ return ((bitField0_ & 0x00000010) == 0x00000010);
|
|
|
+ }
|
|
|
+ public String getOwner() {
|
|
|
+ java.lang.Object ref = owner_;
|
|
|
+ if (ref instanceof String) {
|
|
|
+ return (String) ref;
|
|
|
+ } else {
|
|
|
+ com.google.protobuf.ByteString bs =
|
|
|
+ (com.google.protobuf.ByteString) ref;
|
|
|
+ String s = bs.toStringUtf8();
|
|
|
+ if (com.google.protobuf.Internal.isValidUtf8(bs)) {
|
|
|
+ owner_ = s;
|
|
|
+ }
|
|
|
+ return s;
|
|
|
+ }
|
|
|
+ }
|
|
|
+ private com.google.protobuf.ByteString getOwnerBytes() {
|
|
|
+ java.lang.Object ref = owner_;
|
|
|
+ if (ref instanceof String) {
|
|
|
+ com.google.protobuf.ByteString b =
|
|
|
+ com.google.protobuf.ByteString.copyFromUtf8((String) ref);
|
|
|
+ owner_ = b;
|
|
|
+ return b;
|
|
|
+ } else {
|
|
|
+ return (com.google.protobuf.ByteString) ref;
|
|
|
+ }
|
|
|
+ }
|
|
|
+
|
|
|
+ // required string group = 6;
|
|
|
+ public static final int GROUP_FIELD_NUMBER = 6;
|
|
|
+ private java.lang.Object group_;
|
|
|
+ public boolean hasGroup() {
|
|
|
+ return ((bitField0_ & 0x00000020) == 0x00000020);
|
|
|
+ }
|
|
|
+ public String getGroup() {
|
|
|
+ java.lang.Object ref = group_;
|
|
|
+ if (ref instanceof String) {
|
|
|
+ return (String) ref;
|
|
|
+ } else {
|
|
|
+ com.google.protobuf.ByteString bs =
|
|
|
+ (com.google.protobuf.ByteString) ref;
|
|
|
+ String s = bs.toStringUtf8();
|
|
|
+ if (com.google.protobuf.Internal.isValidUtf8(bs)) {
|
|
|
+ group_ = s;
|
|
|
+ }
|
|
|
+ return s;
|
|
|
+ }
|
|
|
+ }
|
|
|
+ private com.google.protobuf.ByteString getGroupBytes() {
|
|
|
+ java.lang.Object ref = group_;
|
|
|
+ if (ref instanceof String) {
|
|
|
+ com.google.protobuf.ByteString b =
|
|
|
+ com.google.protobuf.ByteString.copyFromUtf8((String) ref);
|
|
|
+ group_ = b;
|
|
|
+ return b;
|
|
|
+ } else {
|
|
|
+ return (com.google.protobuf.ByteString) ref;
|
|
|
+ }
|
|
|
+ }
|
|
|
+
|
|
|
+ // required uint64 modification_time = 7;
|
|
|
+ public static final int MODIFICATION_TIME_FIELD_NUMBER = 7;
|
|
|
+ private long modificationTime_;
|
|
|
+ public boolean hasModificationTime() {
|
|
|
+ return ((bitField0_ & 0x00000040) == 0x00000040);
|
|
|
+ }
|
|
|
+ public long getModificationTime() {
|
|
|
+ return modificationTime_;
|
|
|
+ }
|
|
|
+
|
|
|
+ // required uint64 access_time = 8;
|
|
|
+ public static final int ACCESS_TIME_FIELD_NUMBER = 8;
|
|
|
+ private long accessTime_;
|
|
|
+ public boolean hasAccessTime() {
|
|
|
+ return ((bitField0_ & 0x00000080) == 0x00000080);
|
|
|
+ }
|
|
|
+ public long getAccessTime() {
|
|
|
+ return accessTime_;
|
|
|
+ }
|
|
|
+
|
|
|
+ // optional bytes symlink = 9;
|
|
|
+ public static final int SYMLINK_FIELD_NUMBER = 9;
|
|
|
+ private com.google.protobuf.ByteString symlink_;
|
|
|
+ public boolean hasSymlink() {
|
|
|
+ return ((bitField0_ & 0x00000100) == 0x00000100);
|
|
|
+ }
|
|
|
+ public com.google.protobuf.ByteString getSymlink() {
|
|
|
+ return symlink_;
|
|
|
+ }
|
|
|
+
|
|
|
+ // optional uint32 block_replication = 10;
|
|
|
+ public static final int BLOCK_REPLICATION_FIELD_NUMBER = 10;
|
|
|
+ private int blockReplication_;
|
|
|
+ public boolean hasBlockReplication() {
|
|
|
+ return ((bitField0_ & 0x00000200) == 0x00000200);
|
|
|
+ }
|
|
|
+ public int getBlockReplication() {
|
|
|
+ return blockReplication_;
|
|
|
+ }
|
|
|
+
|
|
|
+ // optional uint64 blocksize = 11;
|
|
|
+ public static final int BLOCKSIZE_FIELD_NUMBER = 11;
|
|
|
+ private long blocksize_;
|
|
|
+ public boolean hasBlocksize() {
|
|
|
+ return ((bitField0_ & 0x00000400) == 0x00000400);
|
|
|
+ }
|
|
|
+ public long getBlocksize() {
|
|
|
+ return blocksize_;
|
|
|
+ }
|
|
|
+
|
|
|
+ // optional .LocatedBlocksProto locations = 12;
|
|
|
+ public static final int LOCATIONS_FIELD_NUMBER = 12;
|
|
|
+ private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto locations_;
|
|
|
+ public boolean hasLocations() {
|
|
|
+ return ((bitField0_ & 0x00000800) == 0x00000800);
|
|
|
+ }
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto getLocations() {
|
|
|
+ return locations_;
|
|
|
+ }
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProtoOrBuilder getLocationsOrBuilder() {
|
|
|
+ return locations_;
|
|
|
+ }
|
|
|
+
|
|
|
+ private void initFields() {
|
|
|
+ fileType_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.FileType.IS_DIR;
|
|
|
+ path_ = com.google.protobuf.ByteString.EMPTY;
|
|
|
+ length_ = 0L;
|
|
|
+ permission_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.getDefaultInstance();
|
|
|
+ owner_ = "";
|
|
|
+ group_ = "";
|
|
|
+ modificationTime_ = 0L;
|
|
|
+ accessTime_ = 0L;
|
|
|
+ symlink_ = com.google.protobuf.ByteString.EMPTY;
|
|
|
+ blockReplication_ = 0;
|
|
|
+ blocksize_ = 0L;
|
|
|
+ locations_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.getDefaultInstance();
|
|
|
+ }
|
|
|
+ private byte memoizedIsInitialized = -1;
|
|
|
+ public final boolean isInitialized() {
|
|
|
+ byte isInitialized = memoizedIsInitialized;
|
|
|
+ if (isInitialized != -1) return isInitialized == 1;
|
|
|
+
|
|
|
+ if (!hasFileType()) {
|
|
|
+ memoizedIsInitialized = 0;
|
|
|
+ return false;
|
|
|
+ }
|
|
|
+ if (!hasPath()) {
|
|
|
+ memoizedIsInitialized = 0;
|
|
|
+ return false;
|
|
|
+ }
|
|
|
+ if (!hasLength()) {
|
|
|
+ memoizedIsInitialized = 0;
|
|
|
+ return false;
|
|
|
+ }
|
|
|
+ if (!hasPermission()) {
|
|
|
+ memoizedIsInitialized = 0;
|
|
|
+ return false;
|
|
|
+ }
|
|
|
+ if (!hasOwner()) {
|
|
|
+ memoizedIsInitialized = 0;
|
|
|
+ return false;
|
|
|
+ }
|
|
|
+ if (!hasGroup()) {
|
|
|
+ memoizedIsInitialized = 0;
|
|
|
+ return false;
|
|
|
+ }
|
|
|
+ if (!hasModificationTime()) {
|
|
|
+ memoizedIsInitialized = 0;
|
|
|
+ return false;
|
|
|
+ }
|
|
|
+ if (!hasAccessTime()) {
|
|
|
+ memoizedIsInitialized = 0;
|
|
|
+ return false;
|
|
|
+ }
|
|
|
+ if (!getPermission().isInitialized()) {
|
|
|
+ memoizedIsInitialized = 0;
|
|
|
+ return false;
|
|
|
+ }
|
|
|
+ if (hasLocations()) {
|
|
|
+ if (!getLocations().isInitialized()) {
|
|
|
+ memoizedIsInitialized = 0;
|
|
|
+ return false;
|
|
|
+ }
|
|
|
+ }
|
|
|
+ memoizedIsInitialized = 1;
|
|
|
+ return true;
|
|
|
+ }
|
|
|
+
|
|
|
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
|
|
|
+ throws java.io.IOException {
|
|
|
+ getSerializedSize();
|
|
|
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
+ output.writeEnum(1, fileType_.getNumber());
|
|
|
+ }
|
|
|
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
|
|
|
+ output.writeBytes(2, path_);
|
|
|
+ }
|
|
|
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
|
|
|
+ output.writeUInt64(3, length_);
|
|
|
+ }
|
|
|
+ if (((bitField0_ & 0x00000008) == 0x00000008)) {
|
|
|
+ output.writeMessage(4, permission_);
|
|
|
+ }
|
|
|
+ if (((bitField0_ & 0x00000010) == 0x00000010)) {
|
|
|
+ output.writeBytes(5, getOwnerBytes());
|
|
|
+ }
|
|
|
+ if (((bitField0_ & 0x00000020) == 0x00000020)) {
|
|
|
+ output.writeBytes(6, getGroupBytes());
|
|
|
+ }
|
|
|
+ if (((bitField0_ & 0x00000040) == 0x00000040)) {
|
|
|
+ output.writeUInt64(7, modificationTime_);
|
|
|
+ }
|
|
|
+ if (((bitField0_ & 0x00000080) == 0x00000080)) {
|
|
|
+ output.writeUInt64(8, accessTime_);
|
|
|
+ }
|
|
|
+ if (((bitField0_ & 0x00000100) == 0x00000100)) {
|
|
|
+ output.writeBytes(9, symlink_);
|
|
|
+ }
|
|
|
+ if (((bitField0_ & 0x00000200) == 0x00000200)) {
|
|
|
+ output.writeUInt32(10, blockReplication_);
|
|
|
+ }
|
|
|
+ if (((bitField0_ & 0x00000400) == 0x00000400)) {
|
|
|
+ output.writeUInt64(11, blocksize_);
|
|
|
+ }
|
|
|
+ if (((bitField0_ & 0x00000800) == 0x00000800)) {
|
|
|
+ output.writeMessage(12, locations_);
|
|
|
+ }
|
|
|
+ getUnknownFields().writeTo(output);
|
|
|
+ }
|
|
|
+
|
|
|
+ private int memoizedSerializedSize = -1;
|
|
|
+ public int getSerializedSize() {
|
|
|
+ int size = memoizedSerializedSize;
|
|
|
+ if (size != -1) return size;
|
|
|
+
|
|
|
+ size = 0;
|
|
|
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
+ size += com.google.protobuf.CodedOutputStream
|
|
|
+ .computeEnumSize(1, fileType_.getNumber());
|
|
|
+ }
|
|
|
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
|
|
|
+ size += com.google.protobuf.CodedOutputStream
|
|
|
+ .computeBytesSize(2, path_);
|
|
|
+ }
|
|
|
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
|
|
|
+ size += com.google.protobuf.CodedOutputStream
|
|
|
+ .computeUInt64Size(3, length_);
|
|
|
+ }
|
|
|
+ if (((bitField0_ & 0x00000008) == 0x00000008)) {
|
|
|
+ size += com.google.protobuf.CodedOutputStream
|
|
|
+ .computeMessageSize(4, permission_);
|
|
|
+ }
|
|
|
+ if (((bitField0_ & 0x00000010) == 0x00000010)) {
|
|
|
+ size += com.google.protobuf.CodedOutputStream
|
|
|
+ .computeBytesSize(5, getOwnerBytes());
|
|
|
+ }
|
|
|
+ if (((bitField0_ & 0x00000020) == 0x00000020)) {
|
|
|
+ size += com.google.protobuf.CodedOutputStream
|
|
|
+ .computeBytesSize(6, getGroupBytes());
|
|
|
+ }
|
|
|
+ if (((bitField0_ & 0x00000040) == 0x00000040)) {
|
|
|
+ size += com.google.protobuf.CodedOutputStream
|
|
|
+ .computeUInt64Size(7, modificationTime_);
|
|
|
+ }
|
|
|
+ if (((bitField0_ & 0x00000080) == 0x00000080)) {
|
|
|
+ size += com.google.protobuf.CodedOutputStream
|
|
|
+ .computeUInt64Size(8, accessTime_);
|
|
|
+ }
|
|
|
+ if (((bitField0_ & 0x00000100) == 0x00000100)) {
|
|
|
+ size += com.google.protobuf.CodedOutputStream
|
|
|
+ .computeBytesSize(9, symlink_);
|
|
|
+ }
|
|
|
+ if (((bitField0_ & 0x00000200) == 0x00000200)) {
|
|
|
+ size += com.google.protobuf.CodedOutputStream
|
|
|
+ .computeUInt32Size(10, blockReplication_);
|
|
|
+ }
|
|
|
+ if (((bitField0_ & 0x00000400) == 0x00000400)) {
|
|
|
+ size += com.google.protobuf.CodedOutputStream
|
|
|
+ .computeUInt64Size(11, blocksize_);
|
|
|
+ }
|
|
|
+ if (((bitField0_ & 0x00000800) == 0x00000800)) {
|
|
|
+ size += com.google.protobuf.CodedOutputStream
|
|
|
+ .computeMessageSize(12, locations_);
|
|
|
+ }
|
|
|
+ size += getUnknownFields().getSerializedSize();
|
|
|
+ memoizedSerializedSize = size;
|
|
|
+ return size;
|
|
|
+ }
|
|
|
+
|
|
|
+ private static final long serialVersionUID = 0L;
|
|
|
+ @java.lang.Override
|
|
|
+ protected java.lang.Object writeReplace()
|
|
|
+ throws java.io.ObjectStreamException {
|
|
|
+ return super.writeReplace();
|
|
|
+ }
|
|
|
+
|
|
|
+ @java.lang.Override
|
|
|
+ public boolean equals(final java.lang.Object obj) {
|
|
|
+ if (obj == this) {
|
|
|
+ return true;
|
|
|
+ }
|
|
|
+ if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto)) {
|
|
|
+ return super.equals(obj);
|
|
|
+ }
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto) obj;
|
|
|
+
|
|
|
+ boolean result = true;
|
|
|
+ result = result && (hasFileType() == other.hasFileType());
|
|
|
+ if (hasFileType()) {
|
|
|
+ result = result &&
|
|
|
+ (getFileType() == other.getFileType());
|
|
|
+ }
|
|
|
+ result = result && (hasPath() == other.hasPath());
|
|
|
+ if (hasPath()) {
|
|
|
+ result = result && getPath()
|
|
|
+ .equals(other.getPath());
|
|
|
+ }
|
|
|
+ result = result && (hasLength() == other.hasLength());
|
|
|
+ if (hasLength()) {
|
|
|
+ result = result && (getLength()
|
|
|
+ == other.getLength());
|
|
|
+ }
|
|
|
+ result = result && (hasPermission() == other.hasPermission());
|
|
|
+ if (hasPermission()) {
|
|
|
+ result = result && getPermission()
|
|
|
+ .equals(other.getPermission());
|
|
|
+ }
|
|
|
+ result = result && (hasOwner() == other.hasOwner());
|
|
|
+ if (hasOwner()) {
|
|
|
+ result = result && getOwner()
|
|
|
+ .equals(other.getOwner());
|
|
|
+ }
|
|
|
+ result = result && (hasGroup() == other.hasGroup());
|
|
|
+ if (hasGroup()) {
|
|
|
+ result = result && getGroup()
|
|
|
+ .equals(other.getGroup());
|
|
|
+ }
|
|
|
+ result = result && (hasModificationTime() == other.hasModificationTime());
|
|
|
+ if (hasModificationTime()) {
|
|
|
+ result = result && (getModificationTime()
|
|
|
+ == other.getModificationTime());
|
|
|
+ }
|
|
|
+ result = result && (hasAccessTime() == other.hasAccessTime());
|
|
|
+ if (hasAccessTime()) {
|
|
|
+ result = result && (getAccessTime()
|
|
|
+ == other.getAccessTime());
|
|
|
+ }
|
|
|
+ result = result && (hasSymlink() == other.hasSymlink());
|
|
|
+ if (hasSymlink()) {
|
|
|
+ result = result && getSymlink()
|
|
|
+ .equals(other.getSymlink());
|
|
|
+ }
|
|
|
+ result = result && (hasBlockReplication() == other.hasBlockReplication());
|
|
|
+ if (hasBlockReplication()) {
|
|
|
+ result = result && (getBlockReplication()
|
|
|
+ == other.getBlockReplication());
|
|
|
+ }
|
|
|
+ result = result && (hasBlocksize() == other.hasBlocksize());
|
|
|
+ if (hasBlocksize()) {
|
|
|
+ result = result && (getBlocksize()
|
|
|
+ == other.getBlocksize());
|
|
|
+ }
|
|
|
+ result = result && (hasLocations() == other.hasLocations());
|
|
|
+ if (hasLocations()) {
|
|
|
+ result = result && getLocations()
|
|
|
+ .equals(other.getLocations());
|
|
|
+ }
|
|
|
+ result = result &&
|
|
|
+ getUnknownFields().equals(other.getUnknownFields());
|
|
|
+ return result;
|
|
|
+ }
|
|
|
+
|
|
|
+ @java.lang.Override
|
|
|
+ public int hashCode() {
|
|
|
+ int hash = 41;
|
|
|
+ hash = (19 * hash) + getDescriptorForType().hashCode();
|
|
|
+ if (hasFileType()) {
|
|
|
+ hash = (37 * hash) + FILETYPE_FIELD_NUMBER;
|
|
|
+ hash = (53 * hash) + hashEnum(getFileType());
|
|
|
+ }
|
|
|
+ if (hasPath()) {
|
|
|
+ hash = (37 * hash) + PATH_FIELD_NUMBER;
|
|
|
+ hash = (53 * hash) + getPath().hashCode();
|
|
|
+ }
|
|
|
+ if (hasLength()) {
|
|
|
+ hash = (37 * hash) + LENGTH_FIELD_NUMBER;
|
|
|
+ hash = (53 * hash) + hashLong(getLength());
|
|
|
+ }
|
|
|
+ if (hasPermission()) {
|
|
|
+ hash = (37 * hash) + PERMISSION_FIELD_NUMBER;
|
|
|
+ hash = (53 * hash) + getPermission().hashCode();
|
|
|
+ }
|
|
|
+ if (hasOwner()) {
|
|
|
+ hash = (37 * hash) + OWNER_FIELD_NUMBER;
|
|
|
+ hash = (53 * hash) + getOwner().hashCode();
|
|
|
+ }
|
|
|
+ if (hasGroup()) {
|
|
|
+ hash = (37 * hash) + GROUP_FIELD_NUMBER;
|
|
|
+ hash = (53 * hash) + getGroup().hashCode();
|
|
|
+ }
|
|
|
+ if (hasModificationTime()) {
|
|
|
+ hash = (37 * hash) + MODIFICATION_TIME_FIELD_NUMBER;
|
|
|
+ hash = (53 * hash) + hashLong(getModificationTime());
|
|
|
+ }
|
|
|
+ if (hasAccessTime()) {
|
|
|
+ hash = (37 * hash) + ACCESS_TIME_FIELD_NUMBER;
|
|
|
+ hash = (53 * hash) + hashLong(getAccessTime());
|
|
|
+ }
|
|
|
+ if (hasSymlink()) {
|
|
|
+ hash = (37 * hash) + SYMLINK_FIELD_NUMBER;
|
|
|
+ hash = (53 * hash) + getSymlink().hashCode();
|
|
|
+ }
|
|
|
+ if (hasBlockReplication()) {
|
|
|
+ hash = (37 * hash) + BLOCK_REPLICATION_FIELD_NUMBER;
|
|
|
+ hash = (53 * hash) + getBlockReplication();
|
|
|
+ }
|
|
|
+ if (hasBlocksize()) {
|
|
|
+ hash = (37 * hash) + BLOCKSIZE_FIELD_NUMBER;
|
|
|
+ hash = (53 * hash) + hashLong(getBlocksize());
|
|
|
+ }
|
|
|
+ if (hasLocations()) {
|
|
|
+ hash = (37 * hash) + LOCATIONS_FIELD_NUMBER;
|
|
|
+ hash = (53 * hash) + getLocations().hashCode();
|
|
|
+ }
|
|
|
+ hash = (29 * hash) + getUnknownFields().hashCode();
|
|
|
+ return hash;
|
|
|
+ }
|
|
|
+
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto parseFrom(
|
|
|
+ com.google.protobuf.ByteString data)
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
+ return newBuilder().mergeFrom(data).buildParsed();
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto parseFrom(
|
|
|
+ com.google.protobuf.ByteString data,
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
+ return newBuilder().mergeFrom(data, extensionRegistry)
|
|
|
+ .buildParsed();
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto parseFrom(byte[] data)
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
+ return newBuilder().mergeFrom(data).buildParsed();
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto parseFrom(
|
|
|
+ byte[] data,
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
+ return newBuilder().mergeFrom(data, extensionRegistry)
|
|
|
+ .buildParsed();
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto parseFrom(java.io.InputStream input)
|
|
|
+ throws java.io.IOException {
|
|
|
+ return newBuilder().mergeFrom(input).buildParsed();
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto parseFrom(
|
|
|
+ java.io.InputStream input,
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
+ throws java.io.IOException {
|
|
|
+ return newBuilder().mergeFrom(input, extensionRegistry)
|
|
|
+ .buildParsed();
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto parseDelimitedFrom(java.io.InputStream input)
|
|
|
+ throws java.io.IOException {
|
|
|
+ Builder builder = newBuilder();
|
|
|
+ if (builder.mergeDelimitedFrom(input)) {
|
|
|
+ return builder.buildParsed();
|
|
|
+ } else {
|
|
|
+ return null;
|
|
|
+ }
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto parseDelimitedFrom(
|
|
|
+ java.io.InputStream input,
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
+ throws java.io.IOException {
|
|
|
+ Builder builder = newBuilder();
|
|
|
+ if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
|
|
|
+ return builder.buildParsed();
|
|
|
+ } else {
|
|
|
+ return null;
|
|
|
+ }
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto parseFrom(
|
|
|
+ com.google.protobuf.CodedInputStream input)
|
|
|
+ throws java.io.IOException {
|
|
|
+ return newBuilder().mergeFrom(input).buildParsed();
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto parseFrom(
|
|
|
+ com.google.protobuf.CodedInputStream input,
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
+ throws java.io.IOException {
|
|
|
+ return newBuilder().mergeFrom(input, extensionRegistry)
|
|
|
+ .buildParsed();
|
|
|
+ }
|
|
|
+
|
|
|
+ public static Builder newBuilder() { return Builder.create(); }
|
|
|
+ public Builder newBuilderForType() { return newBuilder(); }
|
|
|
+ public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto prototype) {
|
|
|
+ return newBuilder().mergeFrom(prototype);
|
|
|
+ }
|
|
|
+ public Builder toBuilder() { return newBuilder(this); }
|
|
|
+
|
|
|
+ @java.lang.Override
|
|
|
+ protected Builder newBuilderForType(
|
|
|
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
|
|
|
+ Builder builder = new Builder(parent);
|
|
|
+ return builder;
|
|
|
+ }
|
|
|
+ public static final class Builder extends
|
|
|
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
|
|
|
+ implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder {
|
|
|
+ public static final com.google.protobuf.Descriptors.Descriptor
|
|
|
+ getDescriptor() {
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_HdfsFileStatusProto_descriptor;
|
|
|
+ }
|
|
|
+
|
|
|
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
|
|
+ internalGetFieldAccessorTable() {
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_HdfsFileStatusProto_fieldAccessorTable;
|
|
|
+ }
|
|
|
+
|
|
|
+ // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.newBuilder()
|
|
|
+ private Builder() {
|
|
|
+ maybeForceBuilderInitialization();
|
|
|
+ }
|
|
|
+
|
|
|
+ private Builder(BuilderParent parent) {
|
|
|
+ super(parent);
|
|
|
+ maybeForceBuilderInitialization();
|
|
|
+ }
|
|
|
+ private void maybeForceBuilderInitialization() {
|
|
|
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
|
|
|
+ getPermissionFieldBuilder();
|
|
|
+ getLocationsFieldBuilder();
|
|
|
+ }
|
|
|
+ }
|
|
|
+ private static Builder create() {
|
|
|
+ return new Builder();
|
|
|
+ }
|
|
|
+
|
|
|
+ public Builder clear() {
|
|
|
+ super.clear();
|
|
|
+ fileType_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.FileType.IS_DIR;
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000001);
|
|
|
+ path_ = com.google.protobuf.ByteString.EMPTY;
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000002);
|
|
|
+ length_ = 0L;
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000004);
|
|
|
+ if (permissionBuilder_ == null) {
|
|
|
+ permission_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.getDefaultInstance();
|
|
|
+ } else {
|
|
|
+ permissionBuilder_.clear();
|
|
|
+ }
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000008);
|
|
|
+ owner_ = "";
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000010);
|
|
|
+ group_ = "";
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000020);
|
|
|
+ modificationTime_ = 0L;
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000040);
|
|
|
+ accessTime_ = 0L;
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000080);
|
|
|
+ symlink_ = com.google.protobuf.ByteString.EMPTY;
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000100);
|
|
|
+ blockReplication_ = 0;
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000200);
|
|
|
+ blocksize_ = 0L;
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000400);
|
|
|
+ if (locationsBuilder_ == null) {
|
|
|
+ locations_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.getDefaultInstance();
|
|
|
+ } else {
|
|
|
+ locationsBuilder_.clear();
|
|
|
+ }
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000800);
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+
|
|
|
+ public Builder clone() {
|
|
|
+ return create().mergeFrom(buildPartial());
|
|
|
+ }
|
|
|
+
|
|
|
+ public com.google.protobuf.Descriptors.Descriptor
|
|
|
+ getDescriptorForType() {
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.getDescriptor();
|
|
|
+ }
|
|
|
+
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto getDefaultInstanceForType() {
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.getDefaultInstance();
|
|
|
+ }
|
|
|
+
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto build() {
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto result = buildPartial();
|
|
|
+ if (!result.isInitialized()) {
|
|
|
+ throw newUninitializedMessageException(result);
|
|
|
+ }
|
|
|
+ return result;
|
|
|
+ }
|
|
|
+
|
|
|
+ private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto buildParsed()
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto result = buildPartial();
|
|
|
+ if (!result.isInitialized()) {
|
|
|
+ throw newUninitializedMessageException(
|
|
|
+ result).asInvalidProtocolBufferException();
|
|
|
+ }
|
|
|
+ return result;
|
|
|
+ }
|
|
|
+
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto buildPartial() {
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto(this);
|
|
|
+ int from_bitField0_ = bitField0_;
|
|
|
+ int to_bitField0_ = 0;
|
|
|
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
+ to_bitField0_ |= 0x00000001;
|
|
|
+ }
|
|
|
+ result.fileType_ = fileType_;
|
|
|
+ if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
|
|
|
+ to_bitField0_ |= 0x00000002;
|
|
|
+ }
|
|
|
+ result.path_ = path_;
|
|
|
+ if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
|
|
|
+ to_bitField0_ |= 0x00000004;
|
|
|
+ }
|
|
|
+ result.length_ = length_;
|
|
|
+ if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
|
|
|
+ to_bitField0_ |= 0x00000008;
|
|
|
+ }
|
|
|
+ if (permissionBuilder_ == null) {
|
|
|
+ result.permission_ = permission_;
|
|
|
+ } else {
|
|
|
+ result.permission_ = permissionBuilder_.build();
|
|
|
+ }
|
|
|
+ if (((from_bitField0_ & 0x00000010) == 0x00000010)) {
|
|
|
+ to_bitField0_ |= 0x00000010;
|
|
|
+ }
|
|
|
+ result.owner_ = owner_;
|
|
|
+ if (((from_bitField0_ & 0x00000020) == 0x00000020)) {
|
|
|
+ to_bitField0_ |= 0x00000020;
|
|
|
+ }
|
|
|
+ result.group_ = group_;
|
|
|
+ if (((from_bitField0_ & 0x00000040) == 0x00000040)) {
|
|
|
+ to_bitField0_ |= 0x00000040;
|
|
|
+ }
|
|
|
+ result.modificationTime_ = modificationTime_;
|
|
|
+ if (((from_bitField0_ & 0x00000080) == 0x00000080)) {
|
|
|
+ to_bitField0_ |= 0x00000080;
|
|
|
+ }
|
|
|
+ result.accessTime_ = accessTime_;
|
|
|
+ if (((from_bitField0_ & 0x00000100) == 0x00000100)) {
|
|
|
+ to_bitField0_ |= 0x00000100;
|
|
|
+ }
|
|
|
+ result.symlink_ = symlink_;
|
|
|
+ if (((from_bitField0_ & 0x00000200) == 0x00000200)) {
|
|
|
+ to_bitField0_ |= 0x00000200;
|
|
|
+ }
|
|
|
+ result.blockReplication_ = blockReplication_;
|
|
|
+ if (((from_bitField0_ & 0x00000400) == 0x00000400)) {
|
|
|
+ to_bitField0_ |= 0x00000400;
|
|
|
+ }
|
|
|
+ result.blocksize_ = blocksize_;
|
|
|
+ if (((from_bitField0_ & 0x00000800) == 0x00000800)) {
|
|
|
+ to_bitField0_ |= 0x00000800;
|
|
|
+ }
|
|
|
+ if (locationsBuilder_ == null) {
|
|
|
+ result.locations_ = locations_;
|
|
|
+ } else {
|
|
|
+ result.locations_ = locationsBuilder_.build();
|
|
|
+ }
|
|
|
+ result.bitField0_ = to_bitField0_;
|
|
|
+ onBuilt();
|
|
|
+ return result;
|
|
|
+ }
|
|
|
+
|
|
|
+ public Builder mergeFrom(com.google.protobuf.Message other) {
|
|
|
+ if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto) {
|
|
|
+ return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto)other);
|
|
|
+ } else {
|
|
|
+ super.mergeFrom(other);
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+ }
|
|
|
+
|
|
|
+ public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto other) {
|
|
|
+ if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.getDefaultInstance()) return this;
|
|
|
+ if (other.hasFileType()) {
|
|
|
+ setFileType(other.getFileType());
|
|
|
+ }
|
|
|
+ if (other.hasPath()) {
|
|
|
+ setPath(other.getPath());
|
|
|
+ }
|
|
|
+ if (other.hasLength()) {
|
|
|
+ setLength(other.getLength());
|
|
|
+ }
|
|
|
+ if (other.hasPermission()) {
|
|
|
+ mergePermission(other.getPermission());
|
|
|
+ }
|
|
|
+ if (other.hasOwner()) {
|
|
|
+ setOwner(other.getOwner());
|
|
|
+ }
|
|
|
+ if (other.hasGroup()) {
|
|
|
+ setGroup(other.getGroup());
|
|
|
+ }
|
|
|
+ if (other.hasModificationTime()) {
|
|
|
+ setModificationTime(other.getModificationTime());
|
|
|
+ }
|
|
|
+ if (other.hasAccessTime()) {
|
|
|
+ setAccessTime(other.getAccessTime());
|
|
|
+ }
|
|
|
+ if (other.hasSymlink()) {
|
|
|
+ setSymlink(other.getSymlink());
|
|
|
+ }
|
|
|
+ if (other.hasBlockReplication()) {
|
|
|
+ setBlockReplication(other.getBlockReplication());
|
|
|
+ }
|
|
|
+ if (other.hasBlocksize()) {
|
|
|
+ setBlocksize(other.getBlocksize());
|
|
|
+ }
|
|
|
+ if (other.hasLocations()) {
|
|
|
+ mergeLocations(other.getLocations());
|
|
|
+ }
|
|
|
+ this.mergeUnknownFields(other.getUnknownFields());
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+
|
|
|
+ public final boolean isInitialized() {
|
|
|
+ if (!hasFileType()) {
|
|
|
+
|
|
|
+ return false;
|
|
|
+ }
|
|
|
+ if (!hasPath()) {
|
|
|
+
|
|
|
+ return false;
|
|
|
+ }
|
|
|
+ if (!hasLength()) {
|
|
|
+
|
|
|
+ return false;
|
|
|
+ }
|
|
|
+ if (!hasPermission()) {
|
|
|
+
|
|
|
+ return false;
|
|
|
+ }
|
|
|
+ if (!hasOwner()) {
|
|
|
+
|
|
|
+ return false;
|
|
|
+ }
|
|
|
+ if (!hasGroup()) {
|
|
|
+
|
|
|
+ return false;
|
|
|
+ }
|
|
|
+ if (!hasModificationTime()) {
|
|
|
+
|
|
|
+ return false;
|
|
|
+ }
|
|
|
+ if (!hasAccessTime()) {
|
|
|
+
|
|
|
+ return false;
|
|
|
+ }
|
|
|
+ if (!getPermission().isInitialized()) {
|
|
|
+
|
|
|
+ return false;
|
|
|
+ }
|
|
|
+ if (hasLocations()) {
|
|
|
+ if (!getLocations().isInitialized()) {
|
|
|
+
|
|
|
+ return false;
|
|
|
+ }
|
|
|
+ }
|
|
|
+ return true;
|
|
|
+ }
|
|
|
+
|
|
|
+ public Builder mergeFrom(
|
|
|
+ com.google.protobuf.CodedInputStream input,
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
+ throws java.io.IOException {
|
|
|
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
|
|
|
+ com.google.protobuf.UnknownFieldSet.newBuilder(
|
|
|
+ this.getUnknownFields());
|
|
|
+ while (true) {
|
|
|
+ int tag = input.readTag();
|
|
|
+ switch (tag) {
|
|
|
+ case 0:
|
|
|
+ this.setUnknownFields(unknownFields.build());
|
|
|
+ onChanged();
|
|
|
+ return this;
|
|
|
+ default: {
|
|
|
+ if (!parseUnknownField(input, unknownFields,
|
|
|
+ extensionRegistry, tag)) {
|
|
|
+ this.setUnknownFields(unknownFields.build());
|
|
|
+ onChanged();
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+ break;
|
|
|
+ }
|
|
|
+ case 8: {
|
|
|
+ int rawValue = input.readEnum();
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.FileType value = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.FileType.valueOf(rawValue);
|
|
|
+ if (value == null) {
|
|
|
+ unknownFields.mergeVarintField(1, rawValue);
|
|
|
+ } else {
|
|
|
+ bitField0_ |= 0x00000001;
|
|
|
+ fileType_ = value;
|
|
|
+ }
|
|
|
+ break;
|
|
|
+ }
|
|
|
+ case 18: {
|
|
|
+ bitField0_ |= 0x00000002;
|
|
|
+ path_ = input.readBytes();
|
|
|
+ break;
|
|
|
+ }
|
|
|
+ case 24: {
|
|
|
+ bitField0_ |= 0x00000004;
|
|
|
+ length_ = input.readUInt64();
|
|
|
+ break;
|
|
|
+ }
|
|
|
+ case 34: {
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.newBuilder();
|
|
|
+ if (hasPermission()) {
|
|
|
+ subBuilder.mergeFrom(getPermission());
|
|
|
+ }
|
|
|
+ input.readMessage(subBuilder, extensionRegistry);
|
|
|
+ setPermission(subBuilder.buildPartial());
|
|
|
+ break;
|
|
|
+ }
|
|
|
+ case 42: {
|
|
|
+ bitField0_ |= 0x00000010;
|
|
|
+ owner_ = input.readBytes();
|
|
|
+ break;
|
|
|
+ }
|
|
|
+ case 50: {
|
|
|
+ bitField0_ |= 0x00000020;
|
|
|
+ group_ = input.readBytes();
|
|
|
+ break;
|
|
|
+ }
|
|
|
+ case 56: {
|
|
|
+ bitField0_ |= 0x00000040;
|
|
|
+ modificationTime_ = input.readUInt64();
|
|
|
+ break;
|
|
|
+ }
|
|
|
+ case 64: {
|
|
|
+ bitField0_ |= 0x00000080;
|
|
|
+ accessTime_ = input.readUInt64();
|
|
|
+ break;
|
|
|
+ }
|
|
|
+ case 74: {
|
|
|
+ bitField0_ |= 0x00000100;
|
|
|
+ symlink_ = input.readBytes();
|
|
|
+ break;
|
|
|
+ }
|
|
|
+ case 80: {
|
|
|
+ bitField0_ |= 0x00000200;
|
|
|
+ blockReplication_ = input.readUInt32();
|
|
|
+ break;
|
|
|
+ }
|
|
|
+ case 88: {
|
|
|
+ bitField0_ |= 0x00000400;
|
|
|
+ blocksize_ = input.readUInt64();
|
|
|
+ break;
|
|
|
+ }
|
|
|
+ case 98: {
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.newBuilder();
|
|
|
+ if (hasLocations()) {
|
|
|
+ subBuilder.mergeFrom(getLocations());
|
|
|
+ }
|
|
|
+ input.readMessage(subBuilder, extensionRegistry);
|
|
|
+ setLocations(subBuilder.buildPartial());
|
|
|
+ break;
|
|
|
+ }
|
|
|
+ }
|
|
|
+ }
|
|
|
+ }
|
|
|
+
|
|
|
+ private int bitField0_;
|
|
|
+
|
|
|
+ // required .HdfsFileStatusProto.FileType fileType = 1;
|
|
|
+ private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.FileType fileType_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.FileType.IS_DIR;
|
|
|
+ public boolean hasFileType() {
|
|
|
+ return ((bitField0_ & 0x00000001) == 0x00000001);
|
|
|
+ }
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.FileType getFileType() {
|
|
|
+ return fileType_;
|
|
|
+ }
|
|
|
+ public Builder setFileType(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.FileType value) {
|
|
|
+ if (value == null) {
|
|
|
+ throw new NullPointerException();
|
|
|
+ }
|
|
|
+ bitField0_ |= 0x00000001;
|
|
|
+ fileType_ = value;
|
|
|
+ onChanged();
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+ public Builder clearFileType() {
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000001);
|
|
|
+ fileType_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.FileType.IS_DIR;
|
|
|
+ onChanged();
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+
|
|
|
+ // required bytes path = 2;
|
|
|
+ private com.google.protobuf.ByteString path_ = com.google.protobuf.ByteString.EMPTY;
|
|
|
+ public boolean hasPath() {
|
|
|
+ return ((bitField0_ & 0x00000002) == 0x00000002);
|
|
|
+ }
|
|
|
+ public com.google.protobuf.ByteString getPath() {
|
|
|
+ return path_;
|
|
|
+ }
|
|
|
+ public Builder setPath(com.google.protobuf.ByteString value) {
|
|
|
+ if (value == null) {
|
|
|
+ throw new NullPointerException();
|
|
|
+ }
|
|
|
+ bitField0_ |= 0x00000002;
|
|
|
+ path_ = value;
|
|
|
+ onChanged();
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+ public Builder clearPath() {
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000002);
|
|
|
+ path_ = getDefaultInstance().getPath();
|
|
|
+ onChanged();
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+
|
|
|
+ // required uint64 length = 3;
|
|
|
+ private long length_ ;
|
|
|
+ public boolean hasLength() {
|
|
|
+ return ((bitField0_ & 0x00000004) == 0x00000004);
|
|
|
+ }
|
|
|
+ public long getLength() {
|
|
|
+ return length_;
|
|
|
+ }
|
|
|
+ public Builder setLength(long value) {
|
|
|
+ bitField0_ |= 0x00000004;
|
|
|
+ length_ = value;
|
|
|
+ onChanged();
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+ public Builder clearLength() {
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000004);
|
|
|
+ length_ = 0L;
|
|
|
+ onChanged();
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+
|
|
|
+ // required .FsPermissionProto permission = 4;
|
|
|
+ private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto permission_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.getDefaultInstance();
|
|
|
+ private com.google.protobuf.SingleFieldBuilder<
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProtoOrBuilder> permissionBuilder_;
|
|
|
+ public boolean hasPermission() {
|
|
|
+ return ((bitField0_ & 0x00000008) == 0x00000008);
|
|
|
+ }
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto getPermission() {
|
|
|
+ if (permissionBuilder_ == null) {
|
|
|
+ return permission_;
|
|
|
+ } else {
|
|
|
+ return permissionBuilder_.getMessage();
|
|
|
+ }
|
|
|
+ }
|
|
|
+ public Builder setPermission(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto value) {
|
|
|
+ if (permissionBuilder_ == null) {
|
|
|
+ if (value == null) {
|
|
|
+ throw new NullPointerException();
|
|
|
+ }
|
|
|
+ permission_ = value;
|
|
|
+ onChanged();
|
|
|
+ } else {
|
|
|
+ permissionBuilder_.setMessage(value);
|
|
|
+ }
|
|
|
+ bitField0_ |= 0x00000008;
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+ public Builder setPermission(
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.Builder builderForValue) {
|
|
|
+ if (permissionBuilder_ == null) {
|
|
|
+ permission_ = builderForValue.build();
|
|
|
+ onChanged();
|
|
|
+ } else {
|
|
|
+ permissionBuilder_.setMessage(builderForValue.build());
|
|
|
+ }
|
|
|
+ bitField0_ |= 0x00000008;
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+ public Builder mergePermission(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto value) {
|
|
|
+ if (permissionBuilder_ == null) {
|
|
|
+ if (((bitField0_ & 0x00000008) == 0x00000008) &&
|
|
|
+ permission_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.getDefaultInstance()) {
|
|
|
+ permission_ =
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.newBuilder(permission_).mergeFrom(value).buildPartial();
|
|
|
+ } else {
|
|
|
+ permission_ = value;
|
|
|
+ }
|
|
|
+ onChanged();
|
|
|
+ } else {
|
|
|
+ permissionBuilder_.mergeFrom(value);
|
|
|
+ }
|
|
|
+ bitField0_ |= 0x00000008;
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+ public Builder clearPermission() {
|
|
|
+ if (permissionBuilder_ == null) {
|
|
|
+ permission_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.getDefaultInstance();
|
|
|
+ onChanged();
|
|
|
+ } else {
|
|
|
+ permissionBuilder_.clear();
|
|
|
+ }
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000008);
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.Builder getPermissionBuilder() {
|
|
|
+ bitField0_ |= 0x00000008;
|
|
|
+ onChanged();
|
|
|
+ return getPermissionFieldBuilder().getBuilder();
|
|
|
+ }
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProtoOrBuilder getPermissionOrBuilder() {
|
|
|
+ if (permissionBuilder_ != null) {
|
|
|
+ return permissionBuilder_.getMessageOrBuilder();
|
|
|
+ } else {
|
|
|
+ return permission_;
|
|
|
+ }
|
|
|
+ }
|
|
|
+ private com.google.protobuf.SingleFieldBuilder<
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProtoOrBuilder>
|
|
|
+ getPermissionFieldBuilder() {
|
|
|
+ if (permissionBuilder_ == null) {
|
|
|
+ permissionBuilder_ = new com.google.protobuf.SingleFieldBuilder<
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProtoOrBuilder>(
|
|
|
+ permission_,
|
|
|
+ getParentForChildren(),
|
|
|
+ isClean());
|
|
|
+ permission_ = null;
|
|
|
+ }
|
|
|
+ return permissionBuilder_;
|
|
|
+ }
|
|
|
+
|
|
|
+ // required string owner = 5;
|
|
|
+ private java.lang.Object owner_ = "";
|
|
|
+ public boolean hasOwner() {
|
|
|
+ return ((bitField0_ & 0x00000010) == 0x00000010);
|
|
|
+ }
|
|
|
+ public String getOwner() {
|
|
|
+ java.lang.Object ref = owner_;
|
|
|
+ if (!(ref instanceof String)) {
|
|
|
+ String s = ((com.google.protobuf.ByteString) ref).toStringUtf8();
|
|
|
+ owner_ = s;
|
|
|
+ return s;
|
|
|
+ } else {
|
|
|
+ return (String) ref;
|
|
|
+ }
|
|
|
+ }
|
|
|
+ public Builder setOwner(String value) {
|
|
|
+ if (value == null) {
|
|
|
+ throw new NullPointerException();
|
|
|
+ }
|
|
|
+ bitField0_ |= 0x00000010;
|
|
|
+ owner_ = value;
|
|
|
+ onChanged();
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+ public Builder clearOwner() {
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000010);
|
|
|
+ owner_ = getDefaultInstance().getOwner();
|
|
|
+ onChanged();
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+ void setOwner(com.google.protobuf.ByteString value) {
|
|
|
+ bitField0_ |= 0x00000010;
|
|
|
+ owner_ = value;
|
|
|
+ onChanged();
|
|
|
+ }
|
|
|
+
|
|
|
+ // required string group = 6;
|
|
|
+ private java.lang.Object group_ = "";
|
|
|
+ public boolean hasGroup() {
|
|
|
+ return ((bitField0_ & 0x00000020) == 0x00000020);
|
|
|
+ }
|
|
|
+ public String getGroup() {
|
|
|
+ java.lang.Object ref = group_;
|
|
|
+ if (!(ref instanceof String)) {
|
|
|
+ String s = ((com.google.protobuf.ByteString) ref).toStringUtf8();
|
|
|
+ group_ = s;
|
|
|
+ return s;
|
|
|
+ } else {
|
|
|
+ return (String) ref;
|
|
|
+ }
|
|
|
+ }
|
|
|
+ public Builder setGroup(String value) {
|
|
|
+ if (value == null) {
|
|
|
+ throw new NullPointerException();
|
|
|
+ }
|
|
|
+ bitField0_ |= 0x00000020;
|
|
|
+ group_ = value;
|
|
|
+ onChanged();
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+ public Builder clearGroup() {
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000020);
|
|
|
+ group_ = getDefaultInstance().getGroup();
|
|
|
+ onChanged();
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+ void setGroup(com.google.protobuf.ByteString value) {
|
|
|
+ bitField0_ |= 0x00000020;
|
|
|
+ group_ = value;
|
|
|
+ onChanged();
|
|
|
+ }
|
|
|
+
|
|
|
+ // required uint64 modification_time = 7;
|
|
|
+ private long modificationTime_ ;
|
|
|
+ public boolean hasModificationTime() {
|
|
|
+ return ((bitField0_ & 0x00000040) == 0x00000040);
|
|
|
+ }
|
|
|
+ public long getModificationTime() {
|
|
|
+ return modificationTime_;
|
|
|
+ }
|
|
|
+ public Builder setModificationTime(long value) {
|
|
|
+ bitField0_ |= 0x00000040;
|
|
|
+ modificationTime_ = value;
|
|
|
+ onChanged();
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+ public Builder clearModificationTime() {
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000040);
|
|
|
+ modificationTime_ = 0L;
|
|
|
+ onChanged();
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+
|
|
|
+ // required uint64 access_time = 8;
|
|
|
+ private long accessTime_ ;
|
|
|
+ public boolean hasAccessTime() {
|
|
|
+ return ((bitField0_ & 0x00000080) == 0x00000080);
|
|
|
+ }
|
|
|
+ public long getAccessTime() {
|
|
|
+ return accessTime_;
|
|
|
+ }
|
|
|
+ public Builder setAccessTime(long value) {
|
|
|
+ bitField0_ |= 0x00000080;
|
|
|
+ accessTime_ = value;
|
|
|
+ onChanged();
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+ public Builder clearAccessTime() {
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000080);
|
|
|
+ accessTime_ = 0L;
|
|
|
+ onChanged();
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+
|
|
|
+ // optional bytes symlink = 9;
|
|
|
+ private com.google.protobuf.ByteString symlink_ = com.google.protobuf.ByteString.EMPTY;
|
|
|
+ public boolean hasSymlink() {
|
|
|
+ return ((bitField0_ & 0x00000100) == 0x00000100);
|
|
|
+ }
|
|
|
+ public com.google.protobuf.ByteString getSymlink() {
|
|
|
+ return symlink_;
|
|
|
+ }
|
|
|
+ public Builder setSymlink(com.google.protobuf.ByteString value) {
|
|
|
+ if (value == null) {
|
|
|
+ throw new NullPointerException();
|
|
|
+ }
|
|
|
+ bitField0_ |= 0x00000100;
|
|
|
+ symlink_ = value;
|
|
|
+ onChanged();
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+ public Builder clearSymlink() {
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000100);
|
|
|
+ symlink_ = getDefaultInstance().getSymlink();
|
|
|
+ onChanged();
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+
|
|
|
+ // optional uint32 block_replication = 10;
|
|
|
+ private int blockReplication_ ;
|
|
|
+ public boolean hasBlockReplication() {
|
|
|
+ return ((bitField0_ & 0x00000200) == 0x00000200);
|
|
|
+ }
|
|
|
+ public int getBlockReplication() {
|
|
|
+ return blockReplication_;
|
|
|
+ }
|
|
|
+ public Builder setBlockReplication(int value) {
|
|
|
+ bitField0_ |= 0x00000200;
|
|
|
+ blockReplication_ = value;
|
|
|
+ onChanged();
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+ public Builder clearBlockReplication() {
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000200);
|
|
|
+ blockReplication_ = 0;
|
|
|
+ onChanged();
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+
|
|
|
+ // optional uint64 blocksize = 11;
|
|
|
+ private long blocksize_ ;
|
|
|
+ public boolean hasBlocksize() {
|
|
|
+ return ((bitField0_ & 0x00000400) == 0x00000400);
|
|
|
+ }
|
|
|
+ public long getBlocksize() {
|
|
|
+ return blocksize_;
|
|
|
+ }
|
|
|
+ public Builder setBlocksize(long value) {
|
|
|
+ bitField0_ |= 0x00000400;
|
|
|
+ blocksize_ = value;
|
|
|
+ onChanged();
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+ public Builder clearBlocksize() {
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000400);
|
|
|
+ blocksize_ = 0L;
|
|
|
+ onChanged();
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+
|
|
|
+ // optional .LocatedBlocksProto locations = 12;
|
|
|
+ private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto locations_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.getDefaultInstance();
|
|
|
+ private com.google.protobuf.SingleFieldBuilder<
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProtoOrBuilder> locationsBuilder_;
|
|
|
+ public boolean hasLocations() {
|
|
|
+ return ((bitField0_ & 0x00000800) == 0x00000800);
|
|
|
+ }
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto getLocations() {
|
|
|
+ if (locationsBuilder_ == null) {
|
|
|
+ return locations_;
|
|
|
+ } else {
|
|
|
+ return locationsBuilder_.getMessage();
|
|
|
+ }
|
|
|
+ }
|
|
|
+ public Builder setLocations(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto value) {
|
|
|
+ if (locationsBuilder_ == null) {
|
|
|
+ if (value == null) {
|
|
|
+ throw new NullPointerException();
|
|
|
+ }
|
|
|
+ locations_ = value;
|
|
|
+ onChanged();
|
|
|
+ } else {
|
|
|
+ locationsBuilder_.setMessage(value);
|
|
|
+ }
|
|
|
+ bitField0_ |= 0x00000800;
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+ public Builder setLocations(
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.Builder builderForValue) {
|
|
|
+ if (locationsBuilder_ == null) {
|
|
|
+ locations_ = builderForValue.build();
|
|
|
+ onChanged();
|
|
|
+ } else {
|
|
|
+ locationsBuilder_.setMessage(builderForValue.build());
|
|
|
+ }
|
|
|
+ bitField0_ |= 0x00000800;
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+ public Builder mergeLocations(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto value) {
|
|
|
+ if (locationsBuilder_ == null) {
|
|
|
+ if (((bitField0_ & 0x00000800) == 0x00000800) &&
|
|
|
+ locations_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.getDefaultInstance()) {
|
|
|
+ locations_ =
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.newBuilder(locations_).mergeFrom(value).buildPartial();
|
|
|
+ } else {
|
|
|
+ locations_ = value;
|
|
|
+ }
|
|
|
+ onChanged();
|
|
|
+ } else {
|
|
|
+ locationsBuilder_.mergeFrom(value);
|
|
|
+ }
|
|
|
+ bitField0_ |= 0x00000800;
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+ public Builder clearLocations() {
|
|
|
+ if (locationsBuilder_ == null) {
|
|
|
+ locations_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.getDefaultInstance();
|
|
|
+ onChanged();
|
|
|
+ } else {
|
|
|
+ locationsBuilder_.clear();
|
|
|
+ }
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000800);
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.Builder getLocationsBuilder() {
|
|
|
+ bitField0_ |= 0x00000800;
|
|
|
+ onChanged();
|
|
|
+ return getLocationsFieldBuilder().getBuilder();
|
|
|
+ }
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProtoOrBuilder getLocationsOrBuilder() {
|
|
|
+ if (locationsBuilder_ != null) {
|
|
|
+ return locationsBuilder_.getMessageOrBuilder();
|
|
|
+ } else {
|
|
|
+ return locations_;
|
|
|
+ }
|
|
|
+ }
|
|
|
+ private com.google.protobuf.SingleFieldBuilder<
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProtoOrBuilder>
|
|
|
+ getLocationsFieldBuilder() {
|
|
|
+ if (locationsBuilder_ == null) {
|
|
|
+ locationsBuilder_ = new com.google.protobuf.SingleFieldBuilder<
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProtoOrBuilder>(
|
|
|
+ locations_,
|
|
|
+ getParentForChildren(),
|
|
|
+ isClean());
|
|
|
+ locations_ = null;
|
|
|
+ }
|
|
|
+ return locationsBuilder_;
|
|
|
+ }
|
|
|
+
|
|
|
+ // @@protoc_insertion_point(builder_scope:HdfsFileStatusProto)
|
|
|
+ }
|
|
|
+
|
|
|
+ static {
|
|
|
+ defaultInstance = new HdfsFileStatusProto(true);
|
|
|
+ defaultInstance.initFields();
|
|
|
+ }
|
|
|
+
|
|
|
+ // @@protoc_insertion_point(class_scope:HdfsFileStatusProto)
|
|
|
+ }
|
|
|
+
|
|
|
+ public interface FsServerDefaultsProtoOrBuilder
|
|
|
+ extends com.google.protobuf.MessageOrBuilder {
|
|
|
+
|
|
|
+ // required uint64 blockSize = 1;
|
|
|
+ boolean hasBlockSize();
|
|
|
+ long getBlockSize();
|
|
|
+
|
|
|
+ // required uint32 bytesPerChecksum = 2;
|
|
|
+ boolean hasBytesPerChecksum();
|
|
|
+ int getBytesPerChecksum();
|
|
|
+
|
|
|
+ // required uint32 writePacketSize = 3;
|
|
|
+ boolean hasWritePacketSize();
|
|
|
+ int getWritePacketSize();
|
|
|
+
|
|
|
+ // required uint32 replication = 4;
|
|
|
+ boolean hasReplication();
|
|
|
+ int getReplication();
|
|
|
+
|
|
|
+ // required uint32 fileBufferSize = 5;
|
|
|
+ boolean hasFileBufferSize();
|
|
|
+ int getFileBufferSize();
|
|
|
+ }
|
|
|
+ public static final class FsServerDefaultsProto extends
|
|
|
+ com.google.protobuf.GeneratedMessage
|
|
|
+ implements FsServerDefaultsProtoOrBuilder {
|
|
|
+ // Use FsServerDefaultsProto.newBuilder() to construct.
|
|
|
+ private FsServerDefaultsProto(Builder builder) {
|
|
|
+ super(builder);
|
|
|
+ }
|
|
|
+ private FsServerDefaultsProto(boolean noInit) {}
|
|
|
+
|
|
|
+ private static final FsServerDefaultsProto defaultInstance;
|
|
|
+ public static FsServerDefaultsProto getDefaultInstance() {
|
|
|
+ return defaultInstance;
|
|
|
+ }
|
|
|
+
|
|
|
+ public FsServerDefaultsProto getDefaultInstanceForType() {
|
|
|
+ return defaultInstance;
|
|
|
+ }
|
|
|
+
|
|
|
+ public static final com.google.protobuf.Descriptors.Descriptor
|
|
|
+ getDescriptor() {
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_FsServerDefaultsProto_descriptor;
|
|
|
+ }
|
|
|
+
|
|
|
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
|
|
+ internalGetFieldAccessorTable() {
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_FsServerDefaultsProto_fieldAccessorTable;
|
|
|
+ }
|
|
|
+
|
|
|
+ private int bitField0_;
|
|
|
+ // required uint64 blockSize = 1;
|
|
|
+ public static final int BLOCKSIZE_FIELD_NUMBER = 1;
|
|
|
+ private long blockSize_;
|
|
|
+ public boolean hasBlockSize() {
|
|
|
+ return ((bitField0_ & 0x00000001) == 0x00000001);
|
|
|
+ }
|
|
|
+ public long getBlockSize() {
|
|
|
+ return blockSize_;
|
|
|
+ }
|
|
|
+
|
|
|
+ // required uint32 bytesPerChecksum = 2;
|
|
|
+ public static final int BYTESPERCHECKSUM_FIELD_NUMBER = 2;
|
|
|
+ private int bytesPerChecksum_;
|
|
|
+ public boolean hasBytesPerChecksum() {
|
|
|
+ return ((bitField0_ & 0x00000002) == 0x00000002);
|
|
|
+ }
|
|
|
+ public int getBytesPerChecksum() {
|
|
|
+ return bytesPerChecksum_;
|
|
|
+ }
|
|
|
+
|
|
|
+ // required uint32 writePacketSize = 3;
|
|
|
+ public static final int WRITEPACKETSIZE_FIELD_NUMBER = 3;
|
|
|
+ private int writePacketSize_;
|
|
|
+ public boolean hasWritePacketSize() {
|
|
|
+ return ((bitField0_ & 0x00000004) == 0x00000004);
|
|
|
+ }
|
|
|
+ public int getWritePacketSize() {
|
|
|
+ return writePacketSize_;
|
|
|
+ }
|
|
|
+
|
|
|
+ // required uint32 replication = 4;
|
|
|
+ public static final int REPLICATION_FIELD_NUMBER = 4;
|
|
|
+ private int replication_;
|
|
|
+ public boolean hasReplication() {
|
|
|
+ return ((bitField0_ & 0x00000008) == 0x00000008);
|
|
|
+ }
|
|
|
+ public int getReplication() {
|
|
|
+ return replication_;
|
|
|
+ }
|
|
|
+
|
|
|
+ // required uint32 fileBufferSize = 5;
|
|
|
+ public static final int FILEBUFFERSIZE_FIELD_NUMBER = 5;
|
|
|
+ private int fileBufferSize_;
|
|
|
+ public boolean hasFileBufferSize() {
|
|
|
+ return ((bitField0_ & 0x00000010) == 0x00000010);
|
|
|
+ }
|
|
|
+ public int getFileBufferSize() {
|
|
|
+ return fileBufferSize_;
|
|
|
+ }
|
|
|
+
|
|
|
+ private void initFields() {
|
|
|
+ blockSize_ = 0L;
|
|
|
+ bytesPerChecksum_ = 0;
|
|
|
+ writePacketSize_ = 0;
|
|
|
+ replication_ = 0;
|
|
|
+ fileBufferSize_ = 0;
|
|
|
+ }
|
|
|
+ private byte memoizedIsInitialized = -1;
|
|
|
+ public final boolean isInitialized() {
|
|
|
+ byte isInitialized = memoizedIsInitialized;
|
|
|
+ if (isInitialized != -1) return isInitialized == 1;
|
|
|
+
|
|
|
+ if (!hasBlockSize()) {
|
|
|
+ memoizedIsInitialized = 0;
|
|
|
+ return false;
|
|
|
+ }
|
|
|
+ if (!hasBytesPerChecksum()) {
|
|
|
+ memoizedIsInitialized = 0;
|
|
|
+ return false;
|
|
|
+ }
|
|
|
+ if (!hasWritePacketSize()) {
|
|
|
+ memoizedIsInitialized = 0;
|
|
|
+ return false;
|
|
|
+ }
|
|
|
+ if (!hasReplication()) {
|
|
|
+ memoizedIsInitialized = 0;
|
|
|
+ return false;
|
|
|
+ }
|
|
|
+ if (!hasFileBufferSize()) {
|
|
|
+ memoizedIsInitialized = 0;
|
|
|
+ return false;
|
|
|
+ }
|
|
|
+ memoizedIsInitialized = 1;
|
|
|
+ return true;
|
|
|
+ }
|
|
|
+
|
|
|
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
|
|
|
+ throws java.io.IOException {
|
|
|
+ getSerializedSize();
|
|
|
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
+ output.writeUInt64(1, blockSize_);
|
|
|
+ }
|
|
|
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
|
|
|
+ output.writeUInt32(2, bytesPerChecksum_);
|
|
|
+ }
|
|
|
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
|
|
|
+ output.writeUInt32(3, writePacketSize_);
|
|
|
+ }
|
|
|
+ if (((bitField0_ & 0x00000008) == 0x00000008)) {
|
|
|
+ output.writeUInt32(4, replication_);
|
|
|
+ }
|
|
|
+ if (((bitField0_ & 0x00000010) == 0x00000010)) {
|
|
|
+ output.writeUInt32(5, fileBufferSize_);
|
|
|
+ }
|
|
|
+ getUnknownFields().writeTo(output);
|
|
|
+ }
|
|
|
+
|
|
|
+ private int memoizedSerializedSize = -1;
|
|
|
+ public int getSerializedSize() {
|
|
|
+ int size = memoizedSerializedSize;
|
|
|
+ if (size != -1) return size;
|
|
|
+
|
|
|
+ size = 0;
|
|
|
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
+ size += com.google.protobuf.CodedOutputStream
|
|
|
+ .computeUInt64Size(1, blockSize_);
|
|
|
+ }
|
|
|
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
|
|
|
+ size += com.google.protobuf.CodedOutputStream
|
|
|
+ .computeUInt32Size(2, bytesPerChecksum_);
|
|
|
+ }
|
|
|
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
|
|
|
+ size += com.google.protobuf.CodedOutputStream
|
|
|
+ .computeUInt32Size(3, writePacketSize_);
|
|
|
+ }
|
|
|
+ if (((bitField0_ & 0x00000008) == 0x00000008)) {
|
|
|
+ size += com.google.protobuf.CodedOutputStream
|
|
|
+ .computeUInt32Size(4, replication_);
|
|
|
+ }
|
|
|
+ if (((bitField0_ & 0x00000010) == 0x00000010)) {
|
|
|
+ size += com.google.protobuf.CodedOutputStream
|
|
|
+ .computeUInt32Size(5, fileBufferSize_);
|
|
|
+ }
|
|
|
+ size += getUnknownFields().getSerializedSize();
|
|
|
+ memoizedSerializedSize = size;
|
|
|
+ return size;
|
|
|
+ }
|
|
|
+
|
|
|
+ private static final long serialVersionUID = 0L;
|
|
|
+ @java.lang.Override
|
|
|
+ protected java.lang.Object writeReplace()
|
|
|
+ throws java.io.ObjectStreamException {
|
|
|
+ return super.writeReplace();
|
|
|
+ }
|
|
|
+
|
|
|
+ @java.lang.Override
|
|
|
+ public boolean equals(final java.lang.Object obj) {
|
|
|
+ if (obj == this) {
|
|
|
+ return true;
|
|
|
+ }
|
|
|
+ if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto)) {
|
|
|
+ return super.equals(obj);
|
|
|
+ }
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto) obj;
|
|
|
+
|
|
|
+ boolean result = true;
|
|
|
+ result = result && (hasBlockSize() == other.hasBlockSize());
|
|
|
+ if (hasBlockSize()) {
|
|
|
+ result = result && (getBlockSize()
|
|
|
+ == other.getBlockSize());
|
|
|
+ }
|
|
|
+ result = result && (hasBytesPerChecksum() == other.hasBytesPerChecksum());
|
|
|
+ if (hasBytesPerChecksum()) {
|
|
|
+ result = result && (getBytesPerChecksum()
|
|
|
+ == other.getBytesPerChecksum());
|
|
|
+ }
|
|
|
+ result = result && (hasWritePacketSize() == other.hasWritePacketSize());
|
|
|
+ if (hasWritePacketSize()) {
|
|
|
+ result = result && (getWritePacketSize()
|
|
|
+ == other.getWritePacketSize());
|
|
|
+ }
|
|
|
+ result = result && (hasReplication() == other.hasReplication());
|
|
|
+ if (hasReplication()) {
|
|
|
+ result = result && (getReplication()
|
|
|
+ == other.getReplication());
|
|
|
+ }
|
|
|
+ result = result && (hasFileBufferSize() == other.hasFileBufferSize());
|
|
|
+ if (hasFileBufferSize()) {
|
|
|
+ result = result && (getFileBufferSize()
|
|
|
+ == other.getFileBufferSize());
|
|
|
+ }
|
|
|
+ result = result &&
|
|
|
+ getUnknownFields().equals(other.getUnknownFields());
|
|
|
+ return result;
|
|
|
+ }
|
|
|
+
|
|
|
+ @java.lang.Override
|
|
|
+ public int hashCode() {
|
|
|
+ int hash = 41;
|
|
|
+ hash = (19 * hash) + getDescriptorForType().hashCode();
|
|
|
+ if (hasBlockSize()) {
|
|
|
+ hash = (37 * hash) + BLOCKSIZE_FIELD_NUMBER;
|
|
|
+ hash = (53 * hash) + hashLong(getBlockSize());
|
|
|
+ }
|
|
|
+ if (hasBytesPerChecksum()) {
|
|
|
+ hash = (37 * hash) + BYTESPERCHECKSUM_FIELD_NUMBER;
|
|
|
+ hash = (53 * hash) + getBytesPerChecksum();
|
|
|
+ }
|
|
|
+ if (hasWritePacketSize()) {
|
|
|
+ hash = (37 * hash) + WRITEPACKETSIZE_FIELD_NUMBER;
|
|
|
+ hash = (53 * hash) + getWritePacketSize();
|
|
|
+ }
|
|
|
+ if (hasReplication()) {
|
|
|
+ hash = (37 * hash) + REPLICATION_FIELD_NUMBER;
|
|
|
+ hash = (53 * hash) + getReplication();
|
|
|
+ }
|
|
|
+ if (hasFileBufferSize()) {
|
|
|
+ hash = (37 * hash) + FILEBUFFERSIZE_FIELD_NUMBER;
|
|
|
+ hash = (53 * hash) + getFileBufferSize();
|
|
|
+ }
|
|
|
+ hash = (29 * hash) + getUnknownFields().hashCode();
|
|
|
+ return hash;
|
|
|
+ }
|
|
|
+
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto parseFrom(
|
|
|
+ com.google.protobuf.ByteString data)
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
+ return newBuilder().mergeFrom(data).buildParsed();
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto parseFrom(
|
|
|
+ com.google.protobuf.ByteString data,
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
+ return newBuilder().mergeFrom(data, extensionRegistry)
|
|
|
+ .buildParsed();
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto parseFrom(byte[] data)
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
+ return newBuilder().mergeFrom(data).buildParsed();
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto parseFrom(
|
|
|
+ byte[] data,
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
+ return newBuilder().mergeFrom(data, extensionRegistry)
|
|
|
+ .buildParsed();
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto parseFrom(java.io.InputStream input)
|
|
|
+ throws java.io.IOException {
|
|
|
+ return newBuilder().mergeFrom(input).buildParsed();
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto parseFrom(
|
|
|
+ java.io.InputStream input,
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
+ throws java.io.IOException {
|
|
|
+ return newBuilder().mergeFrom(input, extensionRegistry)
|
|
|
+ .buildParsed();
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto parseDelimitedFrom(java.io.InputStream input)
|
|
|
+ throws java.io.IOException {
|
|
|
+ Builder builder = newBuilder();
|
|
|
+ if (builder.mergeDelimitedFrom(input)) {
|
|
|
+ return builder.buildParsed();
|
|
|
+ } else {
|
|
|
+ return null;
|
|
|
+ }
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto parseDelimitedFrom(
|
|
|
+ java.io.InputStream input,
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
+ throws java.io.IOException {
+ Builder builder = newBuilder();
+ if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
+ return builder.buildParsed();
+ } else {
+ return null;
+ }
+ }
+ public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
+ implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProtoOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_FsServerDefaultsProto_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_FsServerDefaultsProto_fieldAccessorTable;
+ }
+
+ // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ blockSize_ = 0L;
+ bitField0_ = (bitField0_ & ~0x00000001);
+ bytesPerChecksum_ = 0;
+ bitField0_ = (bitField0_ & ~0x00000002);
+ writePacketSize_ = 0;
+ bitField0_ = (bitField0_ & ~0x00000004);
+ replication_ = 0;
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000008);
|
|
|
+ fileBufferSize_ = 0;
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000010);
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+
|
|
|
+ public Builder clone() {
|
|
|
+ return create().mergeFrom(buildPartial());
|
|
|
+ }
|
|
|
+
|
|
|
+ public com.google.protobuf.Descriptors.Descriptor
|
|
|
+ getDescriptorForType() {
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto.getDescriptor();
|
|
|
+ }
|
|
|
+
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto getDefaultInstanceForType() {
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto.getDefaultInstance();
|
|
|
+ }
|
|
|
+
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto build() {
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto result = buildPartial();
|
|
|
+ if (!result.isInitialized()) {
|
|
|
+ throw newUninitializedMessageException(result);
|
|
|
+ }
|
|
|
+ return result;
|
|
|
+ }
|
|
|
+
|
|
|
+ private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto buildParsed()
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto result = buildPartial();
|
|
|
+ if (!result.isInitialized()) {
|
|
|
+ throw newUninitializedMessageException(
|
|
|
+ result).asInvalidProtocolBufferException();
|
|
|
+ }
|
|
|
+ return result;
|
|
|
+ }
|
|
|
+
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto buildPartial() {
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto(this);
|
|
|
+ int from_bitField0_ = bitField0_;
|
|
|
+ int to_bitField0_ = 0;
|
|
|
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
+ to_bitField0_ |= 0x00000001;
|
|
|
+ }
|
|
|
+ result.blockSize_ = blockSize_;
|
|
|
+ if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
|
|
|
+ to_bitField0_ |= 0x00000002;
|
|
|
+ }
|
|
|
+ result.bytesPerChecksum_ = bytesPerChecksum_;
|
|
|
+ if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
|
|
|
+ to_bitField0_ |= 0x00000004;
|
|
|
+ }
|
|
|
+ result.writePacketSize_ = writePacketSize_;
|
|
|
+ if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
|
|
|
+ to_bitField0_ |= 0x00000008;
|
|
|
+ }
|
|
|
+ result.replication_ = replication_;
|
|
|
+ if (((from_bitField0_ & 0x00000010) == 0x00000010)) {
|
|
|
+ to_bitField0_ |= 0x00000010;
|
|
|
+ }
|
|
|
+ result.fileBufferSize_ = fileBufferSize_;
|
|
|
+ result.bitField0_ = to_bitField0_;
|
|
|
+ onBuilt();
|
|
|
+ return result;
|
|
|
+ }
|
|
|
+
|
|
|
+ public Builder mergeFrom(com.google.protobuf.Message other) {
|
|
|
+ if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto) {
|
|
|
+ return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto)other);
|
|
|
+ } else {
|
|
|
+ super.mergeFrom(other);
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+ }
|
|
|
+
|
|
|
+ public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto other) {
|
|
|
+ if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto.getDefaultInstance()) return this;
|
|
|
+ if (other.hasBlockSize()) {
|
|
|
+ setBlockSize(other.getBlockSize());
|
|
|
+ }
|
|
|
+ if (other.hasBytesPerChecksum()) {
|
|
|
+ setBytesPerChecksum(other.getBytesPerChecksum());
|
|
|
+ }
|
|
|
+ if (other.hasWritePacketSize()) {
|
|
|
+ setWritePacketSize(other.getWritePacketSize());
|
|
|
+ }
|
|
|
+ if (other.hasReplication()) {
|
|
|
+ setReplication(other.getReplication());
|
|
|
+ }
|
|
|
+ if (other.hasFileBufferSize()) {
|
|
|
+ setFileBufferSize(other.getFileBufferSize());
|
|
|
+ }
|
|
|
+ this.mergeUnknownFields(other.getUnknownFields());
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+
|
|
|
+ public final boolean isInitialized() {
|
|
|
+ if (!hasBlockSize()) {
|
|
|
+
|
|
|
+ return false;
|
|
|
+ }
|
|
|
+ if (!hasBytesPerChecksum()) {
|
|
|
+
|
|
|
+ return false;
|
|
|
+ }
|
|
|
+ if (!hasWritePacketSize()) {
|
|
|
+
|
|
|
+ return false;
|
|
|
+ }
|
|
|
+ if (!hasReplication()) {
|
|
|
+
|
|
|
+ return false;
|
|
|
+ }
|
|
|
+ if (!hasFileBufferSize()) {
|
|
|
+
|
|
|
+ return false;
|
|
|
+ }
|
|
|
+ return true;
|
|
|
+ }
|
|
|
+
|
|
|
+ public Builder mergeFrom(
|
|
|
+ com.google.protobuf.CodedInputStream input,
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
+ throws java.io.IOException {
|
|
|
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
|
|
|
+ com.google.protobuf.UnknownFieldSet.newBuilder(
|
|
|
+ this.getUnknownFields());
|
|
|
+ while (true) {
|
|
|
+ int tag = input.readTag();
|
|
|
+ switch (tag) {
|
|
|
+ case 0:
|
|
|
+ this.setUnknownFields(unknownFields.build());
|
|
|
+ onChanged();
|
|
|
+ return this;
|
|
|
+ default: {
|
|
|
+ if (!parseUnknownField(input, unknownFields,
|
|
|
+ extensionRegistry, tag)) {
|
|
|
+ this.setUnknownFields(unknownFields.build());
|
|
|
+ onChanged();
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+ break;
|
|
|
+ }
|
|
|
+ case 8: {
|
|
|
+ bitField0_ |= 0x00000001;
|
|
|
+ blockSize_ = input.readUInt64();
|
|
|
+ break;
|
|
|
+ }
|
|
|
+ case 16: {
|
|
|
+ bitField0_ |= 0x00000002;
|
|
|
+ bytesPerChecksum_ = input.readUInt32();
|
|
|
+ break;
|
|
|
+ }
|
|
|
+ case 24: {
|
|
|
+ bitField0_ |= 0x00000004;
|
|
|
+ writePacketSize_ = input.readUInt32();
|
|
|
+ break;
|
|
|
+ }
|
|
|
+ case 32: {
|
|
|
+ bitField0_ |= 0x00000008;
|
|
|
+ replication_ = input.readUInt32();
|
|
|
+ break;
|
|
|
+ }
|
|
|
+ case 40: {
|
|
|
+ bitField0_ |= 0x00000010;
|
|
|
+ fileBufferSize_ = input.readUInt32();
|
|
|
+ break;
|
|
|
+ }
|
|
|
+ }
|
|
|
+ }
|
|
|
+ }
|
|
|
+
|
|
|
+ private int bitField0_;
|
|
|
+
|
|
|
+ // required uint64 blockSize = 1;
|
|
|
+ private long blockSize_ ;
|
|
|
+ public boolean hasBlockSize() {
|
|
|
+ return ((bitField0_ & 0x00000001) == 0x00000001);
|
|
|
+ }
|
|
|
+ public long getBlockSize() {
|
|
|
+ return blockSize_;
|
|
|
+ }
|
|
|
+ public Builder setBlockSize(long value) {
|
|
|
+ bitField0_ |= 0x00000001;
|
|
|
+ blockSize_ = value;
|
|
|
+ onChanged();
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+ public Builder clearBlockSize() {
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000001);
|
|
|
+ blockSize_ = 0L;
|
|
|
+ onChanged();
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+
|
|
|
+ // required uint32 bytesPerChecksum = 2;
|
|
|
+ private int bytesPerChecksum_ ;
|
|
|
+ public boolean hasBytesPerChecksum() {
|
|
|
+ return ((bitField0_ & 0x00000002) == 0x00000002);
|
|
|
+ }
|
|
|
+ public int getBytesPerChecksum() {
|
|
|
+ return bytesPerChecksum_;
|
|
|
+ }
|
|
|
+ public Builder setBytesPerChecksum(int value) {
|
|
|
+ bitField0_ |= 0x00000002;
|
|
|
+ bytesPerChecksum_ = value;
|
|
|
+ onChanged();
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+ public Builder clearBytesPerChecksum() {
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000002);
|
|
|
+ bytesPerChecksum_ = 0;
|
|
|
+ onChanged();
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+
|
|
|
+ // required uint32 writePacketSize = 3;
|
|
|
+ private int writePacketSize_ ;
|
|
|
+ public boolean hasWritePacketSize() {
|
|
|
+ return ((bitField0_ & 0x00000004) == 0x00000004);
|
|
|
+ }
|
|
|
+ public int getWritePacketSize() {
|
|
|
+ return writePacketSize_;
|
|
|
+ }
|
|
|
+ public Builder setWritePacketSize(int value) {
|
|
|
+ bitField0_ |= 0x00000004;
|
|
|
+ writePacketSize_ = value;
|
|
|
+ onChanged();
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+ public Builder clearWritePacketSize() {
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000004);
|
|
|
+ writePacketSize_ = 0;
|
|
|
+ onChanged();
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+
|
|
|
+ // required uint32 replication = 4;
|
|
|
+ private int replication_ ;
|
|
|
+ public boolean hasReplication() {
|
|
|
+ return ((bitField0_ & 0x00000008) == 0x00000008);
|
|
|
+ }
|
|
|
+ public int getReplication() {
|
|
|
+ return replication_;
|
|
|
+ }
|
|
|
+ public Builder setReplication(int value) {
|
|
|
+ bitField0_ |= 0x00000008;
|
|
|
+ replication_ = value;
|
|
|
+ onChanged();
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+ public Builder clearReplication() {
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000008);
|
|
|
+ replication_ = 0;
|
|
|
+ onChanged();
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+
|
|
|
+ // required uint32 fileBufferSize = 5;
|
|
|
+ private int fileBufferSize_ ;
|
|
|
+ public boolean hasFileBufferSize() {
|
|
|
+ return ((bitField0_ & 0x00000010) == 0x00000010);
|
|
|
+ }
|
|
|
+ public int getFileBufferSize() {
|
|
|
+ return fileBufferSize_;
|
|
|
+ }
|
|
|
+ public Builder setFileBufferSize(int value) {
|
|
|
+ bitField0_ |= 0x00000010;
|
|
|
+ fileBufferSize_ = value;
|
|
|
+ onChanged();
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+ public Builder clearFileBufferSize() {
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000010);
|
|
|
+ fileBufferSize_ = 0;
|
|
|
+ onChanged();
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+
|
|
|
+ // @@protoc_insertion_point(builder_scope:FsServerDefaultsProto)
|
|
|
+ }
|
|
|
+
|
|
|
+ static {
|
|
|
+ defaultInstance = new FsServerDefaultsProto(true);
|
|
|
+ defaultInstance.initFields();
|
|
|
+ }
|
|
|
+
|
|
|
+ // @@protoc_insertion_point(class_scope:FsServerDefaultsProto)
|
|
|
+ }
|
|
|
+
|
|
|
+ public interface DirectoryListingProtoOrBuilder
|
|
|
+ extends com.google.protobuf.MessageOrBuilder {
|
|
|
+
|
|
|
+ // repeated .HdfsFileStatusProto partialListing = 1;
|
|
|
+ java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto>
|
|
|
+ getPartialListingList();
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto getPartialListing(int index);
|
|
|
+ int getPartialListingCount();
|
|
|
+ java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder>
|
|
|
+ getPartialListingOrBuilderList();
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder getPartialListingOrBuilder(
|
|
|
+ int index);
|
|
|
+
|
|
|
+ // required uint32 remainingEntries = 2;
|
|
|
+ boolean hasRemainingEntries();
|
|
|
+ int getRemainingEntries();
|
|
|
+ }
|
|
|
+ public static final class DirectoryListingProto extends
|
|
|
+ com.google.protobuf.GeneratedMessage
|
|
|
+ implements DirectoryListingProtoOrBuilder {
|
|
|
+ // Use DirectoryListingProto.newBuilder() to construct.
|
|
|
+ private DirectoryListingProto(Builder builder) {
|
|
|
+ super(builder);
|
|
|
+ }
|
|
|
+ private DirectoryListingProto(boolean noInit) {}
|
|
|
+
|
|
|
+ private static final DirectoryListingProto defaultInstance;
|
|
|
+ public static DirectoryListingProto getDefaultInstance() {
|
|
|
+ return defaultInstance;
|
|
|
+ }
|
|
|
+
|
|
|
+ public DirectoryListingProto getDefaultInstanceForType() {
|
|
|
+ return defaultInstance;
|
|
|
+ }
|
|
|
+
|
|
|
+ public static final com.google.protobuf.Descriptors.Descriptor
|
|
|
+ getDescriptor() {
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_DirectoryListingProto_descriptor;
|
|
|
+ }
|
|
|
+
|
|
|
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
|
|
+ internalGetFieldAccessorTable() {
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_DirectoryListingProto_fieldAccessorTable;
|
|
|
+ }
|
|
|
+
|
|
|
+ private int bitField0_;
|
|
|
+ // repeated .HdfsFileStatusProto partialListing = 1;
|
|
|
+ public static final int PARTIALLISTING_FIELD_NUMBER = 1;
|
|
|
+ private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto> partialListing_;
|
|
|
+ public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto> getPartialListingList() {
|
|
|
+ return partialListing_;
|
|
|
+ }
|
|
|
+ public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder>
|
|
|
+ getPartialListingOrBuilderList() {
|
|
|
+ return partialListing_;
|
|
|
+ }
|
|
|
+ public int getPartialListingCount() {
|
|
|
+ return partialListing_.size();
|
|
|
+ }
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto getPartialListing(int index) {
|
|
|
+ return partialListing_.get(index);
|
|
|
+ }
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder getPartialListingOrBuilder(
|
|
|
+ int index) {
|
|
|
+ return partialListing_.get(index);
|
|
|
+ }
|
|
|
+
|
|
|
+ // required uint32 remainingEntries = 2;
|
|
|
+ public static final int REMAININGENTRIES_FIELD_NUMBER = 2;
|
|
|
+ private int remainingEntries_;
|
|
|
+ public boolean hasRemainingEntries() {
|
|
|
+ return ((bitField0_ & 0x00000001) == 0x00000001);
|
|
|
+ }
|
|
|
+ public int getRemainingEntries() {
|
|
|
+ return remainingEntries_;
|
|
|
+ }
|
|
|
+
|
|
|
+ private void initFields() {
|
|
|
+ partialListing_ = java.util.Collections.emptyList();
|
|
|
+ remainingEntries_ = 0;
|
|
|
+ }
|
|
|
+ private byte memoizedIsInitialized = -1;
|
|
|
+ public final boolean isInitialized() {
|
|
|
+ byte isInitialized = memoizedIsInitialized;
|
|
|
+ if (isInitialized != -1) return isInitialized == 1;
|
|
|
+
|
|
|
+ if (!hasRemainingEntries()) {
|
|
|
+ memoizedIsInitialized = 0;
|
|
|
+ return false;
|
|
|
+ }
|
|
|
+ for (int i = 0; i < getPartialListingCount(); i++) {
|
|
|
+ if (!getPartialListing(i).isInitialized()) {
|
|
|
+ memoizedIsInitialized = 0;
|
|
|
+ return false;
|
|
|
+ }
|
|
|
+ }
|
|
|
+ memoizedIsInitialized = 1;
|
|
|
+ return true;
|
|
|
+ }
|
|
|
+
|
|
|
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
|
|
|
+ throws java.io.IOException {
|
|
|
+ getSerializedSize();
|
|
|
+ for (int i = 0; i < partialListing_.size(); i++) {
|
|
|
+ output.writeMessage(1, partialListing_.get(i));
|
|
|
+ }
|
|
|
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
+ output.writeUInt32(2, remainingEntries_);
|
|
|
+ }
|
|
|
+ getUnknownFields().writeTo(output);
|
|
|
+ }
|
|
|
+
|
|
|
+ private int memoizedSerializedSize = -1;
|
|
|
+ public int getSerializedSize() {
|
|
|
+ int size = memoizedSerializedSize;
|
|
|
+ if (size != -1) return size;
|
|
|
+
|
|
|
+ size = 0;
|
|
|
+ for (int i = 0; i < partialListing_.size(); i++) {
|
|
|
+ size += com.google.protobuf.CodedOutputStream
|
|
|
+ .computeMessageSize(1, partialListing_.get(i));
|
|
|
+ }
|
|
|
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
+ size += com.google.protobuf.CodedOutputStream
|
|
|
+ .computeUInt32Size(2, remainingEntries_);
|
|
|
+ }
|
|
|
+ size += getUnknownFields().getSerializedSize();
|
|
|
+ memoizedSerializedSize = size;
|
|
|
+ return size;
|
|
|
+ }
|
|
|
+
|
|
|
+ private static final long serialVersionUID = 0L;
|
|
|
+ @java.lang.Override
|
|
|
+ protected java.lang.Object writeReplace()
|
|
|
+ throws java.io.ObjectStreamException {
|
|
|
+ return super.writeReplace();
|
|
|
+ }
|
|
|
+
|
|
|
+ @java.lang.Override
|
|
|
+ public boolean equals(final java.lang.Object obj) {
|
|
|
+ if (obj == this) {
|
|
|
+ return true;
|
|
|
+ }
|
|
|
+ if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto)) {
|
|
|
+ return super.equals(obj);
|
|
|
+ }
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto) obj;
|
|
|
+
|
|
|
+ boolean result = true;
|
|
|
+ result = result && getPartialListingList()
|
|
|
+ .equals(other.getPartialListingList());
|
|
|
+ result = result && (hasRemainingEntries() == other.hasRemainingEntries());
|
|
|
+ if (hasRemainingEntries()) {
|
|
|
+ result = result && (getRemainingEntries()
|
|
|
+ == other.getRemainingEntries());
|
|
|
+ }
|
|
|
+ result = result &&
|
|
|
+ getUnknownFields().equals(other.getUnknownFields());
|
|
|
+ return result;
|
|
|
+ }
|
|
|
+
|
|
|
+ @java.lang.Override
|
|
|
+ public int hashCode() {
|
|
|
+ int hash = 41;
|
|
|
+ hash = (19 * hash) + getDescriptorForType().hashCode();
|
|
|
+ if (getPartialListingCount() > 0) {
|
|
|
+ hash = (37 * hash) + PARTIALLISTING_FIELD_NUMBER;
|
|
|
+ hash = (53 * hash) + getPartialListingList().hashCode();
|
|
|
+ }
|
|
|
+ if (hasRemainingEntries()) {
|
|
|
+ hash = (37 * hash) + REMAININGENTRIES_FIELD_NUMBER;
|
|
|
+ hash = (53 * hash) + getRemainingEntries();
|
|
|
+ }
|
|
|
+ hash = (29 * hash) + getUnknownFields().hashCode();
|
|
|
+ return hash;
|
|
|
+ }
|
|
|
+
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto parseFrom(
|
|
|
+ com.google.protobuf.ByteString data)
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
+ return newBuilder().mergeFrom(data).buildParsed();
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto parseFrom(
|
|
|
+ com.google.protobuf.ByteString data,
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
+ return newBuilder().mergeFrom(data, extensionRegistry)
|
|
|
+ .buildParsed();
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto parseFrom(byte[] data)
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
+ return newBuilder().mergeFrom(data).buildParsed();
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto parseFrom(
|
|
|
+ byte[] data,
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
+ return newBuilder().mergeFrom(data, extensionRegistry)
|
|
|
+ .buildParsed();
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto parseFrom(java.io.InputStream input)
|
|
|
+ throws java.io.IOException {
|
|
|
+ return newBuilder().mergeFrom(input).buildParsed();
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto parseFrom(
|
|
|
+ java.io.InputStream input,
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
+ throws java.io.IOException {
|
|
|
+ return newBuilder().mergeFrom(input, extensionRegistry)
|
|
|
+ .buildParsed();
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto parseDelimitedFrom(java.io.InputStream input)
|
|
|
+ throws java.io.IOException {
|
|
|
+ Builder builder = newBuilder();
|
|
|
+ if (builder.mergeDelimitedFrom(input)) {
|
|
|
+ return builder.buildParsed();
|
|
|
+ } else {
|
|
|
+ return null;
|
|
|
+ }
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto parseDelimitedFrom(
|
|
|
+ java.io.InputStream input,
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
+ throws java.io.IOException {
|
|
|
+ Builder builder = newBuilder();
|
|
|
+ if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
|
|
|
+ return builder.buildParsed();
|
|
|
+ } else {
|
|
|
+ return null;
|
|
|
+ }
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto parseFrom(
|
|
|
+ com.google.protobuf.CodedInputStream input)
|
|
|
+ throws java.io.IOException {
|
|
|
+ return newBuilder().mergeFrom(input).buildParsed();
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto parseFrom(
|
|
|
+ com.google.protobuf.CodedInputStream input,
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
+ throws java.io.IOException {
|
|
|
+ return newBuilder().mergeFrom(input, extensionRegistry)
|
|
|
+ .buildParsed();
|
|
|
+ }
|
|
|
+
|
|
|
+ public static Builder newBuilder() { return Builder.create(); }
|
|
|
+ public Builder newBuilderForType() { return newBuilder(); }
|
|
|
+ public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto prototype) {
|
|
|
+ return newBuilder().mergeFrom(prototype);
|
|
|
+ }
|
|
|
+ public Builder toBuilder() { return newBuilder(this); }
|
|
|
+
|
|
|
+ @java.lang.Override
|
|
|
+ protected Builder newBuilderForType(
|
|
|
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
|
|
|
+ Builder builder = new Builder(parent);
|
|
|
+ return builder;
|
|
|
+ }
|
|
|
+ public static final class Builder extends
|
|
|
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
|
|
|
+ implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProtoOrBuilder {
|
|
|
+ public static final com.google.protobuf.Descriptors.Descriptor
|
|
|
+ getDescriptor() {
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_DirectoryListingProto_descriptor;
|
|
|
+ }
|
|
|
+
|
|
|
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
|
|
+ internalGetFieldAccessorTable() {
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_DirectoryListingProto_fieldAccessorTable;
|
|
|
+ }
|
|
|
+
|
|
|
+ // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto.newBuilder()
|
|
|
+ private Builder() {
|
|
|
+ maybeForceBuilderInitialization();
|
|
|
+ }
|
|
|
+
|
|
|
+ private Builder(BuilderParent parent) {
|
|
|
+ super(parent);
|
|
|
+ maybeForceBuilderInitialization();
|
|
|
+ }
|
|
|
+ private void maybeForceBuilderInitialization() {
|
|
|
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
|
|
|
+ getPartialListingFieldBuilder();
|
|
|
+ }
|
|
|
+ }
|
|
|
+ private static Builder create() {
|
|
|
+ return new Builder();
|
|
|
+ }
|
|
|
+
|
|
|
+ public Builder clear() {
|
|
|
+ super.clear();
|
|
|
+ if (partialListingBuilder_ == null) {
|
|
|
+ partialListing_ = java.util.Collections.emptyList();
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000001);
|
|
|
+ } else {
|
|
|
+ partialListingBuilder_.clear();
|
|
|
+ }
|
|
|
+ remainingEntries_ = 0;
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000002);
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+
|
|
|
+ public Builder clone() {
|
|
|
+ return create().mergeFrom(buildPartial());
|
|
|
+ }
|
|
|
+
|
|
|
+ public com.google.protobuf.Descriptors.Descriptor
|
|
|
+ getDescriptorForType() {
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto.getDescriptor();
|
|
|
+ }
|
|
|
+
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto getDefaultInstanceForType() {
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto.getDefaultInstance();
|
|
|
+ }
|
|
|
+
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto build() {
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto result = buildPartial();
|
|
|
+ if (!result.isInitialized()) {
|
|
|
+ throw newUninitializedMessageException(result);
|
|
|
+ }
|
|
|
+ return result;
|
|
|
+ }
|
|
|
+
|
|
|
+ private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto buildParsed()
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto result = buildPartial();
|
|
|
+ if (!result.isInitialized()) {
|
|
|
+ throw newUninitializedMessageException(
|
|
|
+ result).asInvalidProtocolBufferException();
|
|
|
+ }
|
|
|
+ return result;
|
|
|
+ }
|
|
|
+
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto buildPartial() {
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto(this);
|
|
|
+ int from_bitField0_ = bitField0_;
|
|
|
+ int to_bitField0_ = 0;
|
|
|
+ if (partialListingBuilder_ == null) {
|
|
|
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
+ partialListing_ = java.util.Collections.unmodifiableList(partialListing_);
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000001);
|
|
|
+ }
|
|
|
+ result.partialListing_ = partialListing_;
|
|
|
+ } else {
|
|
|
+ result.partialListing_ = partialListingBuilder_.build();
|
|
|
+ }
|
|
|
+ if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
|
|
|
+ to_bitField0_ |= 0x00000001;
|
|
|
+ }
|
|
|
+ result.remainingEntries_ = remainingEntries_;
|
|
|
+ result.bitField0_ = to_bitField0_;
|
|
|
+ onBuilt();
|
|
|
+ return result;
|
|
|
+ }
|
|
|
+
|
|
|
+ public Builder mergeFrom(com.google.protobuf.Message other) {
|
|
|
+ if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto) {
|
|
|
+ return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto)other);
|
|
|
+ } else {
|
|
|
+ super.mergeFrom(other);
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+ }
|
|
|
+
|
|
|
+ public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto other) {
|
|
|
+ if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto.getDefaultInstance()) return this;
|
|
|
+ if (partialListingBuilder_ == null) {
|
|
|
+ if (!other.partialListing_.isEmpty()) {
|
|
|
+ if (partialListing_.isEmpty()) {
|
|
|
+ partialListing_ = other.partialListing_;
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000001);
|
|
|
+ } else {
|
|
|
+ ensurePartialListingIsMutable();
|
|
|
+ partialListing_.addAll(other.partialListing_);
|
|
|
+ }
|
|
|
+ onChanged();
|
|
|
+ }
|
|
|
+ } else {
|
|
|
+ if (!other.partialListing_.isEmpty()) {
|
|
|
+ if (partialListingBuilder_.isEmpty()) {
|
|
|
+ partialListingBuilder_.dispose();
|
|
|
+ partialListingBuilder_ = null;
|
|
|
+ partialListing_ = other.partialListing_;
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000001);
|
|
|
+ partialListingBuilder_ =
|
|
|
+ com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
|
|
|
+ getPartialListingFieldBuilder() : null;
|
|
|
+ } else {
|
|
|
+ partialListingBuilder_.addAllMessages(other.partialListing_);
|
|
|
+ }
|
|
|
+ }
|
|
|
+ }
|
|
|
+ if (other.hasRemainingEntries()) {
|
|
|
+ setRemainingEntries(other.getRemainingEntries());
|
|
|
+ }
|
|
|
+ this.mergeUnknownFields(other.getUnknownFields());
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+
|
|
|
+ public final boolean isInitialized() {
|
|
|
+ if (!hasRemainingEntries()) {
|
|
|
+
|
|
|
+ return false;
|
|
|
+ }
|
|
|
+ for (int i = 0; i < getPartialListingCount(); i++) {
|
|
|
+ if (!getPartialListing(i).isInitialized()) {
|
|
|
+
|
|
|
+ return false;
|
|
|
+ }
|
|
|
+ }
|
|
|
+ return true;
|
|
|
+ }
|
|
|
+
|
|
|
+ public Builder mergeFrom(
|
|
|
+ com.google.protobuf.CodedInputStream input,
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
+ throws java.io.IOException {
|
|
|
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
|
|
|
+ com.google.protobuf.UnknownFieldSet.newBuilder(
|
|
|
+ this.getUnknownFields());
|
|
|
+ while (true) {
|
|
|
+ int tag = input.readTag();
|
|
|
+ switch (tag) {
|
|
|
+ case 0:
|
|
|
+ this.setUnknownFields(unknownFields.build());
|
|
|
+ onChanged();
|
|
|
+ return this;
|
|
|
+ default: {
|
|
|
+ if (!parseUnknownField(input, unknownFields,
|
|
|
+ extensionRegistry, tag)) {
|
|
|
+ this.setUnknownFields(unknownFields.build());
|
|
|
+ onChanged();
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+ break;
|
|
|
+ }
|
|
|
+ case 10: {
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.newBuilder();
|
|
|
+ input.readMessage(subBuilder, extensionRegistry);
|
|
|
+ addPartialListing(subBuilder.buildPartial());
|
|
|
+ break;
|
|
|
+ }
|
|
|
+ case 16: {
|
|
|
+ bitField0_ |= 0x00000002;
|
|
|
+ remainingEntries_ = input.readUInt32();
|
|
|
+ break;
|
|
|
+ }
|
|
|
+ }
|
|
|
+ }
|
|
|
+ }
|
|
|
+
|
|
|
+ private int bitField0_;
|
|
|
+
|
|
|
+ // repeated .HdfsFileStatusProto partialListing = 1;
|
|
|
+ private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto> partialListing_ =
|
|
|
+ java.util.Collections.emptyList();
|
|
|
+ private void ensurePartialListingIsMutable() {
|
|
|
+ if (!((bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
+ partialListing_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto>(partialListing_);
|
|
|
+ bitField0_ |= 0x00000001;
|
|
|
+ }
|
|
|
+ }
|
|
|
+
|
|
|
+ private com.google.protobuf.RepeatedFieldBuilder<
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder> partialListingBuilder_;
|
|
|
+
|
|
|
+ public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto> getPartialListingList() {
|
|
|
+ if (partialListingBuilder_ == null) {
|
|
|
+ return java.util.Collections.unmodifiableList(partialListing_);
|
|
|
+ } else {
|
|
|
+ return partialListingBuilder_.getMessageList();
|
|
|
+ }
|
|
|
+ }
|
|
|
+ public int getPartialListingCount() {
|
|
|
+ if (partialListingBuilder_ == null) {
|
|
|
+ return partialListing_.size();
|
|
|
+ } else {
|
|
|
+ return partialListingBuilder_.getCount();
|
|
|
+ }
|
|
|
+ }
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto getPartialListing(int index) {
|
|
|
+ if (partialListingBuilder_ == null) {
|
|
|
+ return partialListing_.get(index);
|
|
|
+ } else {
|
|
|
+ return partialListingBuilder_.getMessage(index);
|
|
|
+ }
|
|
|
+ }
|
|
|
+ public Builder setPartialListing(
|
|
|
+ int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto value) {
|
|
|
+ if (partialListingBuilder_ == null) {
|
|
|
+ if (value == null) {
|
|
|
+ throw new NullPointerException();
|
|
|
+ }
|
|
|
+ ensurePartialListingIsMutable();
|
|
|
+ partialListing_.set(index, value);
|
|
|
+ onChanged();
|
|
|
+ } else {
|
|
|
+ partialListingBuilder_.setMessage(index, value);
|
|
|
+ }
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+ public Builder setPartialListing(
|
|
|
+ int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder builderForValue) {
|
|
|
+ if (partialListingBuilder_ == null) {
|
|
|
+ ensurePartialListingIsMutable();
|
|
|
+ partialListing_.set(index, builderForValue.build());
|
|
|
+ onChanged();
|
|
|
+ } else {
|
|
|
+ partialListingBuilder_.setMessage(index, builderForValue.build());
|
|
|
+ }
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+ public Builder addPartialListing(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto value) {
|
|
|
+ if (partialListingBuilder_ == null) {
|
|
|
+ if (value == null) {
|
|
|
+ throw new NullPointerException();
|
|
|
+ }
|
|
|
+ ensurePartialListingIsMutable();
|
|
|
+ partialListing_.add(value);
|
|
|
+ onChanged();
|
|
|
+ } else {
|
|
|
+ partialListingBuilder_.addMessage(value);
|
|
|
+ }
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+ public Builder addPartialListing(
|
|
|
+ int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto value) {
|
|
|
+ if (partialListingBuilder_ == null) {
|
|
|
+ if (value == null) {
|
|
|
+ throw new NullPointerException();
|
|
|
+ }
|
|
|
+ ensurePartialListingIsMutable();
|
|
|
+ partialListing_.add(index, value);
|
|
|
+ onChanged();
|
|
|
+ } else {
|
|
|
+ partialListingBuilder_.addMessage(index, value);
|
|
|
+ }
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+ public Builder addPartialListing(
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder builderForValue) {
|
|
|
+ if (partialListingBuilder_ == null) {
|
|
|
+ ensurePartialListingIsMutable();
|
|
|
+ partialListing_.add(builderForValue.build());
|
|
|
+ onChanged();
|
|
|
+ } else {
|
|
|
+ partialListingBuilder_.addMessage(builderForValue.build());
|
|
|
+ }
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+ public Builder addPartialListing(
|
|
|
+ int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder builderForValue) {
|
|
|
+ if (partialListingBuilder_ == null) {
|
|
|
+ ensurePartialListingIsMutable();
|
|
|
+ partialListing_.add(index, builderForValue.build());
|
|
|
+ onChanged();
|
|
|
+ } else {
|
|
|
+ partialListingBuilder_.addMessage(index, builderForValue.build());
|
|
|
+ }
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+ public Builder addAllPartialListing(
|
|
|
+ java.lang.Iterable<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto> values) {
|
|
|
+ if (partialListingBuilder_ == null) {
|
|
|
+ ensurePartialListingIsMutable();
|
|
|
+ super.addAll(values, partialListing_);
|
|
|
+ onChanged();
|
|
|
+ } else {
|
|
|
+ partialListingBuilder_.addAllMessages(values);
|
|
|
+ }
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+ public Builder clearPartialListing() {
|
|
|
+ if (partialListingBuilder_ == null) {
|
|
|
+ partialListing_ = java.util.Collections.emptyList();
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000001);
|
|
|
+ onChanged();
|
|
|
+ } else {
|
|
|
+ partialListingBuilder_.clear();
|
|
|
+ }
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+ public Builder removePartialListing(int index) {
|
|
|
+ if (partialListingBuilder_ == null) {
|
|
|
+ ensurePartialListingIsMutable();
|
|
|
+ partialListing_.remove(index);
|
|
|
+ onChanged();
|
|
|
+ } else {
|
|
|
+ partialListingBuilder_.remove(index);
|
|
|
+ }
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder getPartialListingBuilder(
|
|
|
+ int index) {
|
|
|
+ return getPartialListingFieldBuilder().getBuilder(index);
|
|
|
+ }
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder getPartialListingOrBuilder(
|
|
|
+ int index) {
|
|
|
+ if (partialListingBuilder_ == null) {
|
|
|
+ return partialListing_.get(index); } else {
|
|
|
+ return partialListingBuilder_.getMessageOrBuilder(index);
|
|
|
+ }
|
|
|
+ }
|
|
|
+ public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder>
|
|
|
+ getPartialListingOrBuilderList() {
|
|
|
+ if (partialListingBuilder_ != null) {
|
|
|
+ return partialListingBuilder_.getMessageOrBuilderList();
|
|
|
+ } else {
|
|
|
+ return java.util.Collections.unmodifiableList(partialListing_);
|
|
|
+ }
|
|
|
+ }
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder addPartialListingBuilder() {
|
|
|
+ return getPartialListingFieldBuilder().addBuilder(
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.getDefaultInstance());
|
|
|
+ }
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder addPartialListingBuilder(
|
|
|
+ int index) {
|
|
|
+ return getPartialListingFieldBuilder().addBuilder(
|
|
|
+ index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.getDefaultInstance());
|
|
|
+ }
|
|
|
+ public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder>
|
|
|
+ getPartialListingBuilderList() {
|
|
|
+ return getPartialListingFieldBuilder().getBuilderList();
|
|
|
+ }
|
|
|
+ private com.google.protobuf.RepeatedFieldBuilder<
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder>
|
|
|
+ getPartialListingFieldBuilder() {
|
|
|
+ if (partialListingBuilder_ == null) {
|
|
|
+ partialListingBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder>(
|
|
|
+ partialListing_,
|
|
|
+ ((bitField0_ & 0x00000001) == 0x00000001),
|
|
|
+ getParentForChildren(),
|
|
|
+ isClean());
|
|
|
+ partialListing_ = null;
|
|
|
+ }
|
|
|
+ return partialListingBuilder_;
|
|
|
+ }
|
|
|
+
|
|
|
+ // required uint32 remainingEntries = 2;
|
|
|
+ private int remainingEntries_ ;
|
|
|
+ public boolean hasRemainingEntries() {
|
|
|
+ return ((bitField0_ & 0x00000002) == 0x00000002);
|
|
|
+ }
|
|
|
+ public int getRemainingEntries() {
|
|
|
+ return remainingEntries_;
|
|
|
+ }
|
|
|
+ public Builder setRemainingEntries(int value) {
|
|
|
+ bitField0_ |= 0x00000002;
|
|
|
+ remainingEntries_ = value;
|
|
|
+ onChanged();
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+ public Builder clearRemainingEntries() {
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000002);
|
|
|
+ remainingEntries_ = 0;
|
|
|
+ onChanged();
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+
|
|
|
+ // @@protoc_insertion_point(builder_scope:DirectoryListingProto)
|
|
|
+ }
|
|
|
+
|
|
|
+ static {
|
|
|
+ defaultInstance = new DirectoryListingProto(true);
|
|
|
+ defaultInstance.initFields();
|
|
|
+ }
|
|
|
+
|
|
|
+ // @@protoc_insertion_point(class_scope:DirectoryListingProto)
|
|
|
+ }
|
|
|
+
|
|
|
+ public interface UpgradeStatusReportProtoOrBuilder
|
|
|
+ extends com.google.protobuf.MessageOrBuilder {
|
|
|
+
|
|
|
+ // required uint32 version = 1;
|
|
|
+ boolean hasVersion();
|
|
|
+ int getVersion();
|
|
|
+
|
|
|
+ // required uint32 upgradeStatus = 2;
|
|
|
+ boolean hasUpgradeStatus();
|
|
|
+ int getUpgradeStatus();
|
|
|
+ }
|
|
|
+ public static final class UpgradeStatusReportProto extends
|
|
|
+ com.google.protobuf.GeneratedMessage
|
|
|
+ implements UpgradeStatusReportProtoOrBuilder {
|
|
|
+ // Use UpgradeStatusReportProto.newBuilder() to construct.
|
|
|
+ private UpgradeStatusReportProto(Builder builder) {
|
|
|
+ super(builder);
|
|
|
+ }
|
|
|
+ private UpgradeStatusReportProto(boolean noInit) {}
|
|
|
+
|
|
|
+ private static final UpgradeStatusReportProto defaultInstance;
|
|
|
+ public static UpgradeStatusReportProto getDefaultInstance() {
|
|
|
+ return defaultInstance;
|
|
|
+ }
|
|
|
+
|
|
|
+ public UpgradeStatusReportProto getDefaultInstanceForType() {
|
|
|
+ return defaultInstance;
|
|
|
+ }
|
|
|
+
|
|
|
+ public static final com.google.protobuf.Descriptors.Descriptor
|
|
|
+ getDescriptor() {
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_UpgradeStatusReportProto_descriptor;
|
|
|
+ }
|
|
|
+
|
|
|
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
|
|
+ internalGetFieldAccessorTable() {
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_UpgradeStatusReportProto_fieldAccessorTable;
|
|
|
+ }
|
|
|
+
|
|
|
+ private int bitField0_;
|
|
|
+ // required uint32 version = 1;
|
|
|
+ public static final int VERSION_FIELD_NUMBER = 1;
|
|
|
+ private int version_;
|
|
|
+ public boolean hasVersion() {
|
|
|
+ return ((bitField0_ & 0x00000001) == 0x00000001);
|
|
|
+ }
|
|
|
+ public int getVersion() {
|
|
|
+ return version_;
|
|
|
+ }
|
|
|
+
|
|
|
+ // required uint32 upgradeStatus = 2;
|
|
|
+ public static final int UPGRADESTATUS_FIELD_NUMBER = 2;
|
|
|
+ private int upgradeStatus_;
|
|
|
+ public boolean hasUpgradeStatus() {
|
|
|
+ return ((bitField0_ & 0x00000002) == 0x00000002);
|
|
|
+ }
|
|
|
+ public int getUpgradeStatus() {
|
|
|
+ return upgradeStatus_;
|
|
|
+ }
|
|
|
+
|
|
|
+ private void initFields() {
|
|
|
+ version_ = 0;
|
|
|
+ upgradeStatus_ = 0;
|
|
|
+ }
|
|
|
+ private byte memoizedIsInitialized = -1;
|
|
|
+ public final boolean isInitialized() {
|
|
|
+ byte isInitialized = memoizedIsInitialized;
|
|
|
+ if (isInitialized != -1) return isInitialized == 1;
|
|
|
+
|
|
|
+ if (!hasVersion()) {
|
|
|
+ memoizedIsInitialized = 0;
|
|
|
+ return false;
|
|
|
+ }
|
|
|
+ if (!hasUpgradeStatus()) {
|
|
|
+ memoizedIsInitialized = 0;
|
|
|
+ return false;
|
|
|
+ }
|
|
|
+ memoizedIsInitialized = 1;
|
|
|
+ return true;
|
|
|
+ }
|
|
|
+
|
|
|
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
|
|
|
+ throws java.io.IOException {
|
|
|
+ getSerializedSize();
|
|
|
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
+ output.writeUInt32(1, version_);
|
|
|
+ }
|
|
|
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
|
|
|
+ output.writeUInt32(2, upgradeStatus_);
|
|
|
+ }
|
|
|
+ getUnknownFields().writeTo(output);
|
|
|
+ }
|
|
|
+
|
|
|
+ private int memoizedSerializedSize = -1;
|
|
|
+ public int getSerializedSize() {
|
|
|
+ int size = memoizedSerializedSize;
|
|
|
+ if (size != -1) return size;
|
|
|
+
|
|
|
+ size = 0;
|
|
|
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
+ size += com.google.protobuf.CodedOutputStream
|
|
|
+ .computeUInt32Size(1, version_);
|
|
|
+ }
|
|
|
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
|
|
|
+ size += com.google.protobuf.CodedOutputStream
|
|
|
+ .computeUInt32Size(2, upgradeStatus_);
|
|
|
+ }
|
|
|
+ size += getUnknownFields().getSerializedSize();
|
|
|
+ memoizedSerializedSize = size;
|
|
|
+ return size;
|
|
|
+ }
|
|
|
+
|
|
|
+ private static final long serialVersionUID = 0L;
|
|
|
+ @java.lang.Override
|
|
|
+ protected java.lang.Object writeReplace()
|
|
|
+ throws java.io.ObjectStreamException {
|
|
|
+ return super.writeReplace();
|
|
|
+ }
|
|
|
+
|
|
|
+ @java.lang.Override
|
|
|
+ public boolean equals(final java.lang.Object obj) {
|
|
|
+ if (obj == this) {
|
|
|
+ return true;
|
|
|
+ }
|
|
|
+ if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.UpgradeStatusReportProto)) {
|
|
|
+ return super.equals(obj);
|
|
|
+ }
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.UpgradeStatusReportProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.UpgradeStatusReportProto) obj;
|
|
|
+
|
|
|
+ boolean result = true;
|
|
|
+ result = result && (hasVersion() == other.hasVersion());
|
|
|
+ if (hasVersion()) {
|
|
|
+ result = result && (getVersion()
|
|
|
+ == other.getVersion());
|
|
|
+ }
|
|
|
+ result = result && (hasUpgradeStatus() == other.hasUpgradeStatus());
|
|
|
+ if (hasUpgradeStatus()) {
|
|
|
+ result = result && (getUpgradeStatus()
|
|
|
+ == other.getUpgradeStatus());
|
|
|
+ }
|
|
|
+ result = result &&
|
|
|
+ getUnknownFields().equals(other.getUnknownFields());
|
|
|
+ return result;
|
|
|
+ }
|
|
|
+
|
|
|
+ @java.lang.Override
|
|
|
+ public int hashCode() {
|
|
|
+ int hash = 41;
|
|
|
+ hash = (19 * hash) + getDescriptorForType().hashCode();
|
|
|
+ if (hasVersion()) {
|
|
|
+ hash = (37 * hash) + VERSION_FIELD_NUMBER;
|
|
|
+ hash = (53 * hash) + getVersion();
|
|
|
+ }
|
|
|
+ if (hasUpgradeStatus()) {
|
|
|
+ hash = (37 * hash) + UPGRADESTATUS_FIELD_NUMBER;
|
|
|
+ hash = (53 * hash) + getUpgradeStatus();
|
|
|
+ }
|
|
|
+ hash = (29 * hash) + getUnknownFields().hashCode();
|
|
|
+ return hash;
|
|
|
+ }
|
|
|
+
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.UpgradeStatusReportProto parseFrom(
|
|
|
+ com.google.protobuf.ByteString data)
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
+ return newBuilder().mergeFrom(data).buildParsed();
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.UpgradeStatusReportProto parseFrom(
|
|
|
+ com.google.protobuf.ByteString data,
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
+ return newBuilder().mergeFrom(data, extensionRegistry)
|
|
|
+ .buildParsed();
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.UpgradeStatusReportProto parseFrom(byte[] data)
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
+ return newBuilder().mergeFrom(data).buildParsed();
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.UpgradeStatusReportProto parseFrom(
|
|
|
+ byte[] data,
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
+ return newBuilder().mergeFrom(data, extensionRegistry)
|
|
|
+ .buildParsed();
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.UpgradeStatusReportProto parseFrom(java.io.InputStream input)
|
|
|
+ throws java.io.IOException {
|
|
|
+ return newBuilder().mergeFrom(input).buildParsed();
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.UpgradeStatusReportProto parseFrom(
|
|
|
+ java.io.InputStream input,
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
+ throws java.io.IOException {
|
|
|
+ return newBuilder().mergeFrom(input, extensionRegistry)
|
|
|
+ .buildParsed();
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.UpgradeStatusReportProto parseDelimitedFrom(java.io.InputStream input)
|
|
|
+ throws java.io.IOException {
|
|
|
+ Builder builder = newBuilder();
|
|
|
+ if (builder.mergeDelimitedFrom(input)) {
|
|
|
+ return builder.buildParsed();
|
|
|
+ } else {
|
|
|
+ return null;
|
|
|
+ }
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.UpgradeStatusReportProto parseDelimitedFrom(
|
|
|
+ java.io.InputStream input,
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
+ throws java.io.IOException {
|
|
|
+ Builder builder = newBuilder();
|
|
|
+ if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
|
|
|
+ return builder.buildParsed();
|
|
|
+ } else {
|
|
|
+ return null;
|
|
|
+ }
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.UpgradeStatusReportProto parseFrom(
|
|
|
+ com.google.protobuf.CodedInputStream input)
|
|
|
+ throws java.io.IOException {
|
|
|
+ return newBuilder().mergeFrom(input).buildParsed();
|
|
|
+ }
|
|
|
+ public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.UpgradeStatusReportProto parseFrom(
|
|
|
+ com.google.protobuf.CodedInputStream input,
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
+ throws java.io.IOException {
|
|
|
+ return newBuilder().mergeFrom(input, extensionRegistry)
|
|
|
+ .buildParsed();
|
|
|
+ }
|
|
|
+
|
|
|
+ public static Builder newBuilder() { return Builder.create(); }
|
|
|
+ public Builder newBuilderForType() { return newBuilder(); }
|
|
|
+ public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.UpgradeStatusReportProto prototype) {
|
|
|
+ return newBuilder().mergeFrom(prototype);
|
|
|
+ }
|
|
|
+ public Builder toBuilder() { return newBuilder(this); }
|
|
|
+
|
|
|
+ @java.lang.Override
|
|
|
+ protected Builder newBuilderForType(
|
|
|
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
|
|
|
+ Builder builder = new Builder(parent);
|
|
|
+ return builder;
|
|
|
+ }
|
|
|
+ public static final class Builder extends
|
|
|
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
|
|
|
+ implements org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.UpgradeStatusReportProtoOrBuilder {
|
|
|
+ public static final com.google.protobuf.Descriptors.Descriptor
|
|
|
+ getDescriptor() {
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_UpgradeStatusReportProto_descriptor;
|
|
|
+ }
|
|
|
+
|
|
|
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
|
|
|
+ internalGetFieldAccessorTable() {
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_UpgradeStatusReportProto_fieldAccessorTable;
|
|
|
+ }
|
|
|
+
|
|
|
+ // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.UpgradeStatusReportProto.newBuilder()
|
|
|
+ private Builder() {
|
|
|
+ maybeForceBuilderInitialization();
|
|
|
+ }
|
|
|
+
|
|
|
+ private Builder(BuilderParent parent) {
|
|
|
+ super(parent);
|
|
|
+ maybeForceBuilderInitialization();
|
|
|
+ }
|
|
|
+ private void maybeForceBuilderInitialization() {
|
|
|
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
|
|
|
+ }
|
|
|
+ }
|
|
|
+ private static Builder create() {
|
|
|
+ return new Builder();
|
|
|
+ }
|
|
|
+
|
|
|
+ public Builder clear() {
|
|
|
+ super.clear();
|
|
|
+ version_ = 0;
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000001);
|
|
|
+ upgradeStatus_ = 0;
|
|
|
+ bitField0_ = (bitField0_ & ~0x00000002);
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+
|
|
|
+ public Builder clone() {
|
|
|
+ return create().mergeFrom(buildPartial());
|
|
|
+ }
|
|
|
+
|
|
|
+ public com.google.protobuf.Descriptors.Descriptor
|
|
|
+ getDescriptorForType() {
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.UpgradeStatusReportProto.getDescriptor();
|
|
|
+ }
|
|
|
+
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.UpgradeStatusReportProto getDefaultInstanceForType() {
|
|
|
+ return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.UpgradeStatusReportProto.getDefaultInstance();
|
|
|
+ }
|
|
|
+
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.UpgradeStatusReportProto build() {
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.UpgradeStatusReportProto result = buildPartial();
|
|
|
+ if (!result.isInitialized()) {
|
|
|
+ throw newUninitializedMessageException(result);
|
|
|
+ }
|
|
|
+ return result;
|
|
|
+ }
|
|
|
+
|
|
|
+ private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.UpgradeStatusReportProto buildParsed()
|
|
|
+ throws com.google.protobuf.InvalidProtocolBufferException {
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.UpgradeStatusReportProto result = buildPartial();
|
|
|
+ if (!result.isInitialized()) {
|
|
|
+ throw newUninitializedMessageException(
|
|
|
+ result).asInvalidProtocolBufferException();
|
|
|
+ }
|
|
|
+ return result;
|
|
|
+ }
|
|
|
+
|
|
|
+ public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.UpgradeStatusReportProto buildPartial() {
|
|
|
+ org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.UpgradeStatusReportProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.UpgradeStatusReportProto(this);
|
|
|
+ int from_bitField0_ = bitField0_;
|
|
|
+ int to_bitField0_ = 0;
|
|
|
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
|
|
|
+ to_bitField0_ |= 0x00000001;
|
|
|
+ }
|
|
|
+ result.version_ = version_;
|
|
|
+ if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
|
|
|
+ to_bitField0_ |= 0x00000002;
|
|
|
+ }
|
|
|
+ result.upgradeStatus_ = upgradeStatus_;
|
|
|
+ result.bitField0_ = to_bitField0_;
|
|
|
+ onBuilt();
|
|
|
+ return result;
|
|
|
+ }
|
|
|
+
|
|
|
+ public Builder mergeFrom(com.google.protobuf.Message other) {
|
|
|
+ if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.UpgradeStatusReportProto) {
|
|
|
+ return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.UpgradeStatusReportProto)other);
|
|
|
+ } else {
|
|
|
+ super.mergeFrom(other);
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+ }
|
|
|
+
|
|
|
+ public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.UpgradeStatusReportProto other) {
|
|
|
+ if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.UpgradeStatusReportProto.getDefaultInstance()) return this;
|
|
|
+ if (other.hasVersion()) {
|
|
|
+ setVersion(other.getVersion());
|
|
|
+ }
|
|
|
+ if (other.hasUpgradeStatus()) {
|
|
|
+ setUpgradeStatus(other.getUpgradeStatus());
|
|
|
+ }
|
|
|
+ this.mergeUnknownFields(other.getUnknownFields());
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+
|
|
|
+ public final boolean isInitialized() {
|
|
|
+ if (!hasVersion()) {
|
|
|
+
|
|
|
+ return false;
|
|
|
+ }
|
|
|
+ if (!hasUpgradeStatus()) {
|
|
|
+
|
|
|
+ return false;
|
|
|
+ }
|
|
|
+ return true;
|
|
|
+ }
|
|
|
+
|
|
|
+ public Builder mergeFrom(
|
|
|
+ com.google.protobuf.CodedInputStream input,
|
|
|
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
|
|
|
+ throws java.io.IOException {
|
|
|
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
|
|
|
+ com.google.protobuf.UnknownFieldSet.newBuilder(
|
|
|
+ this.getUnknownFields());
|
|
|
+ while (true) {
|
|
|
+ int tag = input.readTag();
|
|
|
+ switch (tag) {
|
|
|
+ case 0:
|
|
|
+ this.setUnknownFields(unknownFields.build());
|
|
|
+ onChanged();
|
|
|
+ return this;
|
|
|
+ default: {
|
|
|
+ if (!parseUnknownField(input, unknownFields,
|
|
|
+ extensionRegistry, tag)) {
|
|
|
+ this.setUnknownFields(unknownFields.build());
|
|
|
+ onChanged();
|
|
|
+ return this;
|
|
|
+ }
|
|
|
+ break;
|
|
|
+ }
|
|
|
+ case 8: {
|
|
|
+ bitField0_ |= 0x00000001;
|
|
|
+ version_ = input.readUInt32();
|
|
|
+ break;
|
|
|
+ }
|
|
|
+ case 16: {
|
|
|
+ bitField0_ |= 0x00000002;
|
|
|
+ upgradeStatus_ = input.readUInt32();
|
|
|
+ break;
|
|
|
+ }
|
|
|
+ }
|
|
|
+ }
|
|
|
+ }
|
|
|
+
|
|
|
+      private int bitField0_;
+
+      // required uint32 version = 1;
+      private int version_ ;
+      public boolean hasVersion() {
+        return ((bitField0_ & 0x00000001) == 0x00000001);
+      }
+      public int getVersion() {
+        return version_;
+      }
+      public Builder setVersion(int value) {
+        bitField0_ |= 0x00000001;
+        version_ = value;
+        onChanged();
+        return this;
+      }
+      public Builder clearVersion() {
+        bitField0_ = (bitField0_ & ~0x00000001);
+        version_ = 0;
+        onChanged();
+        return this;
+      }
+
+      // required uint32 upgradeStatus = 2;
+      private int upgradeStatus_ ;
+      public boolean hasUpgradeStatus() {
+        return ((bitField0_ & 0x00000002) == 0x00000002);
+      }
+      public int getUpgradeStatus() {
+        return upgradeStatus_;
+      }
+      public Builder setUpgradeStatus(int value) {
+        bitField0_ |= 0x00000002;
+        upgradeStatus_ = value;
+        onChanged();
+        return this;
+      }
+      public Builder clearUpgradeStatus() {
+        bitField0_ = (bitField0_ & ~0x00000002);
+        upgradeStatus_ = 0;
+        onChanged();
+        return this;
+      }
+
+      // @@protoc_insertion_point(builder_scope:UpgradeStatusReportProto)
+    }
+
+    static {
+      defaultInstance = new UpgradeStatusReportProto(true);
+      defaultInstance.initFields();
+    }
+
+    // @@protoc_insertion_point(class_scope:UpgradeStatusReportProto)
+  }
+
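[Reviewer note, not part of the generated file] The sketch below shows how the UpgradeStatusReportProto builder defined above would typically be exercised by client code. It is an illustrative, hedged example: it assumes the standard generated entry points newBuilder()/build()/toByteArray()/parseFrom() that protoc emits for every message, together with the setVersion()/setUpgradeStatus() builder methods introduced in this patch; the class and file names are hypothetical.

    // Illustrative only: round-trips an UpgradeStatusReportProto through the
    // generated builder and parser (standard protobuf 2.x generated API).
    import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.UpgradeStatusReportProto;

    public class UpgradeStatusReportProtoExample {
      public static void main(String[] args) throws Exception {
        // Both fields are declared "required", so build() only succeeds once
        // version and upgradeStatus have been set.
        UpgradeStatusReportProto report = UpgradeStatusReportProto.newBuilder()
            .setVersion(1)           // required uint32 version = 1
            .setUpgradeStatus(42)    // required uint32 upgradeStatus = 2
            .build();

        // Serialize to the wire format and parse it back.
        byte[] bytes = report.toByteArray();
        UpgradeStatusReportProto parsed = UpgradeStatusReportProto.parseFrom(bytes);

        System.out.println(parsed.getVersion());        // 1
        System.out.println(parsed.getUpgradeStatus());  // 42
      }
    }
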
|
|
|
+  private static com.google.protobuf.Descriptors.Descriptor
+    internal_static_ExtendedBlockProto_descriptor;
+  private static
+    com.google.protobuf.GeneratedMessage.FieldAccessorTable
+      internal_static_ExtendedBlockProto_fieldAccessorTable;
+  private static com.google.protobuf.Descriptors.Descriptor
+    internal_static_BlockTokenIdentifierProto_descriptor;
+  private static
+    com.google.protobuf.GeneratedMessage.FieldAccessorTable
+      internal_static_BlockTokenIdentifierProto_fieldAccessorTable;
+  private static com.google.protobuf.Descriptors.Descriptor
+    internal_static_DatanodeIDProto_descriptor;
+  private static
+    com.google.protobuf.GeneratedMessage.FieldAccessorTable
+      internal_static_DatanodeIDProto_fieldAccessorTable;
+  private static com.google.protobuf.Descriptors.Descriptor
+    internal_static_DatanodeInfoProto_descriptor;
+  private static
+    com.google.protobuf.GeneratedMessage.FieldAccessorTable
+      internal_static_DatanodeInfoProto_fieldAccessorTable;
+  private static com.google.protobuf.Descriptors.Descriptor
+    internal_static_ContentSummaryProto_descriptor;
+  private static
+    com.google.protobuf.GeneratedMessage.FieldAccessorTable
+      internal_static_ContentSummaryProto_fieldAccessorTable;
+  private static com.google.protobuf.Descriptors.Descriptor
+    internal_static_CorruptFileBlocksProto_descriptor;
+  private static
+    com.google.protobuf.GeneratedMessage.FieldAccessorTable
+      internal_static_CorruptFileBlocksProto_fieldAccessorTable;
+  private static com.google.protobuf.Descriptors.Descriptor
+    internal_static_FsPermissionProto_descriptor;
+  private static
+    com.google.protobuf.GeneratedMessage.FieldAccessorTable
+      internal_static_FsPermissionProto_fieldAccessorTable;
+  private static com.google.protobuf.Descriptors.Descriptor
+    internal_static_LocatedBlockProto_descriptor;
+  private static
+    com.google.protobuf.GeneratedMessage.FieldAccessorTable
+      internal_static_LocatedBlockProto_fieldAccessorTable;
+  private static com.google.protobuf.Descriptors.Descriptor
+    internal_static_LocatedBlocksProto_descriptor;
+  private static
+    com.google.protobuf.GeneratedMessage.FieldAccessorTable
+      internal_static_LocatedBlocksProto_fieldAccessorTable;
+  private static com.google.protobuf.Descriptors.Descriptor
+    internal_static_HdfsFileStatusProto_descriptor;
+  private static
+    com.google.protobuf.GeneratedMessage.FieldAccessorTable
+      internal_static_HdfsFileStatusProto_fieldAccessorTable;
+  private static com.google.protobuf.Descriptors.Descriptor
+    internal_static_FsServerDefaultsProto_descriptor;
+  private static
+    com.google.protobuf.GeneratedMessage.FieldAccessorTable
+      internal_static_FsServerDefaultsProto_fieldAccessorTable;
+  private static com.google.protobuf.Descriptors.Descriptor
+    internal_static_DirectoryListingProto_descriptor;
+  private static
+    com.google.protobuf.GeneratedMessage.FieldAccessorTable
+      internal_static_DirectoryListingProto_fieldAccessorTable;
+  private static com.google.protobuf.Descriptors.Descriptor
+    internal_static_UpgradeStatusReportProto_descriptor;
+  private static
+    com.google.protobuf.GeneratedMessage.FieldAccessorTable
+      internal_static_UpgradeStatusReportProto_fieldAccessorTable;
+
|
|
|
+  public static com.google.protobuf.Descriptors.FileDescriptor
+      getDescriptor() {
+    return descriptor;
+  }
+  private static com.google.protobuf.Descriptors.FileDescriptor
+      descriptor;
+  static {
+    java.lang.String[] descriptorData = {
+      "\n\nhdfs.proto\"`\n\022ExtendedBlockProto\022\016\n\006po" +
+      "olId\030\001 \002(\t\022\017\n\007blockId\030\002 \002(\004\022\027\n\017generatio" +
+      "nStamp\030\003 \002(\004\022\020\n\010numBytes\030\004 \001(\004\"`\n\031BlockT" +
+      "okenIdentifierProto\022\022\n\nidentifier\030\001 \002(\014\022" +
+      "\020\n\010password\030\002 \002(\014\022\014\n\004kind\030\003 \002(\t\022\017\n\007servi" +
+      "ce\030\004 \002(\t\"U\n\017DatanodeIDProto\022\014\n\004name\030\001 \002(" +
+      "\t\022\021\n\tstorageID\030\002 \002(\t\022\020\n\010infoPort\030\003 \002(\r\022\017" +
+      "\n\007ipcPort\030\004 \002(\r\"\312\002\n\021DatanodeInfoProto\022\034\n" +
+      "\002id\030\001 \002(\0132\020.DatanodeIDProto\022\020\n\010capacity\030" +
+      "\002 \001(\004\022\017\n\007dfsUsed\030\003 \001(\004\022\021\n\tremaining\030\004 \001(",
+      "\004\022\025\n\rblockPoolUsed\030\005 \001(\004\022\022\n\nlastUpdate\030\006" +
+      " \001(\004\022\024\n\014xceiverCount\030\007 \001(\r\022\020\n\010location\030\010" +
+      " \001(\t\022\020\n\010hostName\030\t \001(\t\0221\n\nadminState\030\n \001" +
+      "(\0162\035.DatanodeInfoProto.AdminState\"I\n\nAdm" +
+      "inState\022\n\n\006NORMAL\020\000\022\033\n\027DECOMMISSION_INPR" +
+      "OGRESS\020\001\022\022\n\016DECOMMISSIONED\020\002\"\212\001\n\023Content" +
+      "SummaryProto\022\016\n\006length\030\001 \002(\004\022\021\n\tfileCoun" +
+      "t\030\002 \002(\004\022\026\n\016directoryCount\030\003 \002(\004\022\r\n\005quota" +
+      "\030\004 \002(\004\022\025\n\rspaceConsumed\030\005 \002(\004\022\022\n\nspaceQu" +
+      "ota\030\006 \002(\004\"7\n\026CorruptFileBlocksProto\022\r\n\005f",
+      "iles\030\001 \003(\t\022\016\n\006cookie\030\002 \002(\t\"!\n\021FsPermissi" +
+      "onProto\022\014\n\004perm\030\001 \002(\r\"\246\001\n\021LocatedBlockPr" +
+      "oto\022\036\n\001b\030\001 \002(\0132\023.ExtendedBlockProto\022\016\n\006o" +
+      "ffset\030\002 \002(\004\022 \n\004locs\030\003 \003(\0132\022.DatanodeInfo" +
+      "Proto\022\017\n\007corrupt\030\004 \002(\010\022.\n\nblockToken\030\005 \002" +
+      "(\0132\032.BlockTokenIdentifierProto\"\253\001\n\022Locat" +
+      "edBlocksProto\022\022\n\nfileLength\030\001 \002(\004\022\"\n\006blo" +
+      "cks\030\002 \003(\0132\022.LocatedBlockProto\022\031\n\021underCo" +
+      "nstruction\030\003 \002(\010\022%\n\tlastBlock\030\004 \001(\0132\022.Lo" +
+      "catedBlockProto\022\033\n\023isLastBlockComplete\030\005",
+      " \002(\010\"\366\002\n\023HdfsFileStatusProto\022/\n\010fileType" +
+      "\030\001 \002(\0162\035.HdfsFileStatusProto.FileType\022\014\n" +
+      "\004path\030\002 \002(\014\022\016\n\006length\030\003 \002(\004\022&\n\npermissio" +
+      "n\030\004 \002(\0132\022.FsPermissionProto\022\r\n\005owner\030\005 \002" +
+      "(\t\022\r\n\005group\030\006 \002(\t\022\031\n\021modification_time\030\007" +
+      " \002(\004\022\023\n\013access_time\030\010 \002(\004\022\017\n\007symlink\030\t \001" +
+      "(\014\022\031\n\021block_replication\030\n \001(\r\022\021\n\tblocksi" +
+      "ze\030\013 \001(\004\022&\n\tlocations\030\014 \001(\0132\023.LocatedBlo" +
+      "cksProto\"3\n\010FileType\022\n\n\006IS_DIR\020\001\022\013\n\007IS_F" +
+      "ILE\020\002\022\016\n\nIS_SYMLINK\020\003\"\212\001\n\025FsServerDefaul",
+      "tsProto\022\021\n\tblockSize\030\001 \002(\004\022\030\n\020bytesPerCh" +
+      "ecksum\030\002 \002(\r\022\027\n\017writePacketSize\030\003 \002(\r\022\023\n" +
+      "\013replication\030\004 \002(\r\022\026\n\016fileBufferSize\030\005 \002" +
+      "(\r\"_\n\025DirectoryListingProto\022,\n\016partialLi" +
+      "sting\030\001 \003(\0132\024.HdfsFileStatusProto\022\030\n\020rem" +
+      "ainingEntries\030\002 \002(\r\"B\n\030UpgradeStatusRepo" +
+      "rtProto\022\017\n\007version\030\001 \002(\r\022\025\n\rupgradeStatu" +
+      "s\030\002 \002(\rB6\n%org.apache.hadoop.hdfs.protoc" +
+      "ol.protoB\nHdfsProtos\240\001\001"
+    };
|
|
|
+    com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
+      new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
+        public com.google.protobuf.ExtensionRegistry assignDescriptors(
+            com.google.protobuf.Descriptors.FileDescriptor root) {
+          descriptor = root;
+          internal_static_ExtendedBlockProto_descriptor =
+            getDescriptor().getMessageTypes().get(0);
+          internal_static_ExtendedBlockProto_fieldAccessorTable = new
+            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+              internal_static_ExtendedBlockProto_descriptor,
+              new java.lang.String[] { "PoolId", "BlockId", "GenerationStamp", "NumBytes", },
+              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.class,
+              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder.class);
+          internal_static_BlockTokenIdentifierProto_descriptor =
+            getDescriptor().getMessageTypes().get(1);
+          internal_static_BlockTokenIdentifierProto_fieldAccessorTable = new
+            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+              internal_static_BlockTokenIdentifierProto_descriptor,
+              new java.lang.String[] { "Identifier", "Password", "Kind", "Service", },
+              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto.class,
+              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenIdentifierProto.Builder.class);
+          internal_static_DatanodeIDProto_descriptor =
+            getDescriptor().getMessageTypes().get(2);
+          internal_static_DatanodeIDProto_fieldAccessorTable = new
+            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+              internal_static_DatanodeIDProto_descriptor,
+              new java.lang.String[] { "Name", "StorageID", "InfoPort", "IpcPort", },
+              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.class,
+              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder.class);
+          internal_static_DatanodeInfoProto_descriptor =
+            getDescriptor().getMessageTypes().get(3);
+          internal_static_DatanodeInfoProto_fieldAccessorTable = new
+            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+              internal_static_DatanodeInfoProto_descriptor,
+              new java.lang.String[] { "Id", "Capacity", "DfsUsed", "Remaining", "BlockPoolUsed", "LastUpdate", "XceiverCount", "Location", "HostName", "AdminState", },
+              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.class,
+              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder.class);
+          internal_static_ContentSummaryProto_descriptor =
+            getDescriptor().getMessageTypes().get(4);
+          internal_static_ContentSummaryProto_fieldAccessorTable = new
+            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+              internal_static_ContentSummaryProto_descriptor,
+              new java.lang.String[] { "Length", "FileCount", "DirectoryCount", "Quota", "SpaceConsumed", "SpaceQuota", },
+              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto.class,
+              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto.Builder.class);
+          internal_static_CorruptFileBlocksProto_descriptor =
+            getDescriptor().getMessageTypes().get(5);
+          internal_static_CorruptFileBlocksProto_fieldAccessorTable = new
+            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+              internal_static_CorruptFileBlocksProto_descriptor,
+              new java.lang.String[] { "Files", "Cookie", },
+              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto.class,
+              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto.Builder.class);
+          internal_static_FsPermissionProto_descriptor =
+            getDescriptor().getMessageTypes().get(6);
+          internal_static_FsPermissionProto_fieldAccessorTable = new
+            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+              internal_static_FsPermissionProto_descriptor,
+              new java.lang.String[] { "Perm", },
+              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.class,
+              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto.Builder.class);
|
|
|
+          internal_static_LocatedBlockProto_descriptor =
+            getDescriptor().getMessageTypes().get(7);
+          internal_static_LocatedBlockProto_fieldAccessorTable = new
+            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+              internal_static_LocatedBlockProto_descriptor,
+              new java.lang.String[] { "B", "Offset", "Locs", "Corrupt", "BlockToken", },
+              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.class,
+              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder.class);
+          internal_static_LocatedBlocksProto_descriptor =
+            getDescriptor().getMessageTypes().get(8);
+          internal_static_LocatedBlocksProto_fieldAccessorTable = new
+            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+              internal_static_LocatedBlocksProto_descriptor,
+              new java.lang.String[] { "FileLength", "Blocks", "UnderConstruction", "LastBlock", "IsLastBlockComplete", },
+              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.class,
+              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.Builder.class);
+          internal_static_HdfsFileStatusProto_descriptor =
+            getDescriptor().getMessageTypes().get(9);
+          internal_static_HdfsFileStatusProto_fieldAccessorTable = new
+            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+              internal_static_HdfsFileStatusProto_descriptor,
+              new java.lang.String[] { "FileType", "Path", "Length", "Permission", "Owner", "Group", "ModificationTime", "AccessTime", "Symlink", "BlockReplication", "Blocksize", "Locations", },
+              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.class,
+              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder.class);
+          internal_static_FsServerDefaultsProto_descriptor =
+            getDescriptor().getMessageTypes().get(10);
+          internal_static_FsServerDefaultsProto_fieldAccessorTable = new
+            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+              internal_static_FsServerDefaultsProto_descriptor,
+              new java.lang.String[] { "BlockSize", "BytesPerChecksum", "WritePacketSize", "Replication", "FileBufferSize", },
+              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto.class,
+              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto.Builder.class);
+          internal_static_DirectoryListingProto_descriptor =
+            getDescriptor().getMessageTypes().get(11);
+          internal_static_DirectoryListingProto_fieldAccessorTable = new
+            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+              internal_static_DirectoryListingProto_descriptor,
+              new java.lang.String[] { "PartialListing", "RemainingEntries", },
+              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto.class,
+              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto.Builder.class);
+          internal_static_UpgradeStatusReportProto_descriptor =
+            getDescriptor().getMessageTypes().get(12);
+          internal_static_UpgradeStatusReportProto_fieldAccessorTable = new
+            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+              internal_static_UpgradeStatusReportProto_descriptor,
+              new java.lang.String[] { "Version", "UpgradeStatus", },
+              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.UpgradeStatusReportProto.class,
+              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.UpgradeStatusReportProto.Builder.class);
|
|
|
          return null;
        }
      };