datatransfer.proto
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/**
 * These .proto interfaces are private and stable.
 * Please see http://wiki.apache.org/hadoop/Compatibility
 * for what changes are allowed for a *stable* .proto interface.
 */

// This file contains protocol buffers that are used to transfer data
// to and from the datanode, as well as between datanodes.

option java_package = "org.apache.hadoop.hdfs.protocol.proto";
option java_outer_classname = "DataTransferProtos";
option java_generate_equals_and_hash = true;
package hadoop.hdfs;

import "Security.proto";
import "hdfs.proto";

message DataTransferEncryptorMessageProto {
  enum DataTransferEncryptorStatus {
    SUCCESS = 0;
    ERROR_UNKNOWN_KEY = 1;
    ERROR = 2;
  }
  required DataTransferEncryptorStatus status = 1;
  optional bytes payload = 2;
  optional string message = 3;
}
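
/*
 * Example (illustrative only, not part of the protocol definition): the
 * message a datanode might send back when it does not recognize the client's
 * encryption key, built with the Java classes protoc generates from this file
 * (outer class DataTransferProtos). The error text is made up.
 *
 *   DataTransferEncryptorMessageProto reply =
 *       DataTransferEncryptorMessageProto.newBuilder()
 *           .setStatus(DataTransferEncryptorMessageProto
 *               .DataTransferEncryptorStatus.ERROR_UNKNOWN_KEY)
 *           .setMessage("Unknown encryption key id")   // hypothetical text
 *           .build();
 *   reply.writeDelimitedTo(out);   // 'out' is some OutputStream
 */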

message BaseHeaderProto {
  required ExtendedBlockProto block = 1;
  optional hadoop.common.TokenProto token = 2;
}

message ClientOperationHeaderProto {
  required BaseHeaderProto baseHeader = 1;
  required string clientName = 2;
}

message CachingStrategyProto {
  optional bool dropBehind = 1;
  optional int64 readahead = 2;
}

message OpReadBlockProto {
  required ClientOperationHeaderProto header = 1;
  required uint64 offset = 2;
  required uint64 len = 3;
  optional bool sendChecksums = 4 [default = true];
  optional CachingStrategyProto cachingStrategy = 5;
}
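
/*
 * Example (illustrative only): a client-side sketch of a READ_BLOCK request,
 * assuming the DataTransferProtos and HdfsProtos classes generated from this
 * file and hdfs.proto are available. Block pool id, block id, generation
 * stamp, offsets, and the client name are made-up values.
 *
 *   ExtendedBlockProto block = ExtendedBlockProto.newBuilder()
 *       .setPoolId("BP-1234-10.0.0.1-1400000000000")   // hypothetical pool id
 *       .setBlockId(1073741825L)
 *       .setGenerationStamp(1001L)
 *       .build();
 *   ClientOperationHeaderProto header = ClientOperationHeaderProto.newBuilder()
 *       .setBaseHeader(BaseHeaderProto.newBuilder().setBlock(block))
 *       .setClientName("DFSClient_example")
 *       .build();
 *   OpReadBlockProto op = OpReadBlockProto.newBuilder()
 *       .setHeader(header)
 *       .setOffset(0)
 *       .setLen(64 * 1024)
 *       .build();                     // sendChecksums defaults to true
 */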

message ChecksumProto {
  required ChecksumTypeProto type = 1;
  required uint32 bytesPerChecksum = 2;
}
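
/*
 * Example (illustrative only): HDFS checksums data in fixed-size chunks, so a
 * typical requested checksum is CRC32C over 512-byte chunks. ChecksumTypeProto
 * comes from hdfs.proto; the constant name below assumes the usual
 * CHECKSUM_CRC32C value defined there.
 *
 *   ChecksumProto checksum = ChecksumProto.newBuilder()
 *       .setType(ChecksumTypeProto.CHECKSUM_CRC32C)
 *       .setBytesPerChecksum(512)
 *       .build();
 */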

message OpWriteBlockProto {
  required ClientOperationHeaderProto header = 1;
  repeated DatanodeInfoProto targets = 2;
  optional DatanodeInfoProto source = 3;
  enum BlockConstructionStage {
    PIPELINE_SETUP_APPEND = 0;
    // pipeline set up for failed PIPELINE_SETUP_APPEND recovery
    PIPELINE_SETUP_APPEND_RECOVERY = 1;
    // data streaming
    DATA_STREAMING = 2;
    // pipeline setup for failed data streaming recovery
    PIPELINE_SETUP_STREAMING_RECOVERY = 3;
    // close the block and pipeline
    PIPELINE_CLOSE = 4;
    // Recover a failed PIPELINE_CLOSE
    PIPELINE_CLOSE_RECOVERY = 5;
    // pipeline set up for block creation
    PIPELINE_SETUP_CREATE = 6;
    // transfer RBW for adding datanodes
    TRANSFER_RBW = 7;
    // transfer Finalized for adding datanodes
    TRANSFER_FINALIZED = 8;
  }
  required BlockConstructionStage stage = 4;
  required uint32 pipelineSize = 5;
  required uint64 minBytesRcvd = 6;
  required uint64 maxBytesRcvd = 7;
  required uint64 latestGenerationStamp = 8;

  /**
   * The requested checksum mechanism for this block write.
   */
  required ChecksumProto requestedChecksum = 9;
  optional CachingStrategyProto cachingStrategy = 10;
}
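
/*
 * Example (illustrative only): the required fields of a WRITE_BLOCK request
 * for a brand-new block going to a pipeline of three datanodes. 'header' and
 * 'checksum' are the hypothetical objects built in the sketches above; the
 * repeated 'targets' field (the downstream datanodes) is omitted to keep the
 * sketch short.
 *
 *   OpWriteBlockProto write = OpWriteBlockProto.newBuilder()
 *       .setHeader(header)
 *       .setStage(OpWriteBlockProto.BlockConstructionStage.PIPELINE_SETUP_CREATE)
 *       .setPipelineSize(3)
 *       .setMinBytesRcvd(0)            // nothing received yet for a new block
 *       .setMaxBytesRcvd(0)
 *       .setLatestGenerationStamp(1001L)
 *       .setRequestedChecksum(checksum)
 *       .build();
 */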

message OpTransferBlockProto {
  required ClientOperationHeaderProto header = 1;
  repeated DatanodeInfoProto targets = 2;
}

message OpReplaceBlockProto {
  required BaseHeaderProto header = 1;
  required string delHint = 2;
  required DatanodeInfoProto source = 3;
}

message OpCopyBlockProto {
  required BaseHeaderProto header = 1;
}

message OpBlockChecksumProto {
  required BaseHeaderProto header = 1;
}

message OpRequestShortCircuitAccessProto {
  required BaseHeaderProto header = 1;

  /** In order to get short-circuit access to block data, clients must set this
   * to the highest version of the block data that they can understand.
   * Currently 1 is the only version, but more versions may exist in the future
   * if the on-disk format changes.
   */
  required uint32 maxVersion = 2;
}
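
/*
 * Example (illustrative only): a short-circuit access request. Per the
 * comment on maxVersion above, 1 is currently the only block-data version,
 * so a client that understands it would send:
 *
 *   OpRequestShortCircuitAccessProto req =
 *       OpRequestShortCircuitAccessProto.newBuilder()
 *           .setHeader(BaseHeaderProto.newBuilder().setBlock(block))  // 'block' as above
 *           .setMaxVersion(1)
 *           .build();
 */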

message PacketHeaderProto {
  // All fields must be fixed-length!
  required sfixed64 offsetInBlock = 1;
  required sfixed64 seqno = 2;
  required bool lastPacketInBlock = 3;
  required sfixed32 dataLen = 4;
  optional bool syncBlock = 5 [default = false];
}
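
/*
 * Example (illustrative only): because every field uses a fixed-width wire
 * type, a header with all fields set always serializes to the same number of
 * bytes, so a receiver can treat the header as a known-length prefix of each
 * packet instead of scanning for a delimiter.
 *
 *   PacketHeaderProto hdr = PacketHeaderProto.newBuilder()
 *       .setOffsetInBlock(0)
 *       .setSeqno(0)
 *       .setLastPacketInBlock(false)
 *       .setDataLen(64 * 1024)
 *       .setSyncBlock(false)           // set explicitly so the size stays fixed
 *       .build();
 *   int fixedLen = hdr.getSerializedSize();   // same for every such header
 */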

enum Status {
  SUCCESS = 0;
  ERROR = 1;
  ERROR_CHECKSUM = 2;
  ERROR_INVALID = 3;
  ERROR_EXISTS = 4;
  ERROR_ACCESS_TOKEN = 5;
  CHECKSUM_OK = 6;
  ERROR_UNSUPPORTED = 7;
}

message PipelineAckProto {
  required sint64 seqno = 1;
  repeated Status status = 2;
  optional uint64 downstreamAckTimeNanos = 3 [default = 0];
}
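
/*
 * Example (illustrative only): an ack for packet seqno 7 in a three-node
 * write pipeline, with one entry in the repeated 'status' field per datanode.
 *
 *   PipelineAckProto ack = PipelineAckProto.newBuilder()
 *       .setSeqno(7)
 *       .addStatus(Status.SUCCESS)     // this datanode
 *       .addStatus(Status.SUCCESS)     // first downstream datanode
 *       .addStatus(Status.SUCCESS)     // second downstream datanode
 *       .build();
 */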

/**
 * Sent as part of the BlockOpResponseProto
 * for READ_BLOCK and COPY_BLOCK operations.
 */
message ReadOpChecksumInfoProto {
  required ChecksumProto checksum = 1;

  /**
   * The offset into the block at which the first packet
   * will start. This is necessary since reads will align
   * backwards to a checksum chunk boundary.
   */
  required uint64 chunkOffset = 2;
}
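
// Illustrative arithmetic (made-up values): with 512-byte checksum chunks, a
// read that asks for offset 1000 is aligned back to the chunk boundary at 512,
// so the server reports chunkOffset = 512 and the client must skip the first
// 488 bytes of the first packet.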

message BlockOpResponseProto {
  required Status status = 1;

  optional string firstBadLink = 2;
  optional OpBlockChecksumResponseProto checksumResponse = 3;
  optional ReadOpChecksumInfoProto readOpChecksumInfo = 4;

  /** explanatory text which may be useful to log on the client side */
  optional string message = 5;

  /** If the server chooses to agree to the request of a client for
   * short-circuit access, it will send a response message with the relevant
   * file descriptors attached.
   *
   * In the body of the message, this version number will be set to the
   * specific version number of the block data that the client is about to
   * read.
   */
  optional uint32 shortCircuitAccessVersion = 6;
}
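
/*
 * Example (illustrative only): client-side handling of the response that
 * follows a WRITE_BLOCK request, assuming the response is framed with
 * protobuf's standard varint length prefix. 'in' is some InputStream.
 *
 *   BlockOpResponseProto resp = BlockOpResponseProto.parseDelimitedFrom(in);
 *   if (resp.getStatus() != Status.SUCCESS) {
 *     // firstBadLink, when present, names the first datanode in the
 *     // pipeline that could not be reached.
 *     String culprit = resp.hasFirstBadLink() ? resp.getFirstBadLink() : "";
 *     throw new IOException("Pipeline setup failed at " + culprit
 *         + (resp.hasMessage() ? ": " + resp.getMessage() : ""));
 *   }
 */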

/**
 * Message sent from the client to the DN after reading the entire
 * read request.
 */
message ClientReadStatusProto {
  required Status status = 1;
}
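
/*
 * Example (illustrative only): after consuming the whole read and verifying
 * checksums locally, a client can report that back to the datanode:
 *
 *   ClientReadStatusProto.newBuilder()
 *       .setStatus(Status.CHECKSUM_OK)
 *       .build()
 *       .writeDelimitedTo(out);        // 'out' is the socket's OutputStream
 */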

message DNTransferAckProto {
  required Status status = 1;
}

message OpBlockChecksumResponseProto {
  required uint32 bytesPerCrc = 1;
  required uint64 crcPerBlock = 2;
  required bytes md5 = 3;
  optional ChecksumTypeProto crcType = 4;
}
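
/*
 * Example (illustrative only): the datanode's answer to a BLOCK_CHECKSUM
 * operation arrives as the checksumResponse field of a BlockOpResponseProto
 * (see above). In HDFS the md5 field carries, as raw bytes, the MD5 digest of
 * the block's per-chunk CRCs.
 *
 *   OpBlockChecksumResponseProto cs = resp.getChecksumResponse();  // 'resp' as above
 *   byte[] md5OfCrcs = cs.getMd5().toByteArray();
 *   long crcCount = cs.getCrcPerBlock();
 *   int bytesPerCrc = cs.getBytesPerCrc();
 */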