// StorageContainerLocationProtocol.proto
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/**
 * These .proto interfaces are private and unstable.
 * Please see http://wiki.apache.org/hadoop/Compatibility
 * for what changes are allowed for an *unstable* .proto interface.
 */
  23. option java_package = "org.apache.hadoop.ozone.protocol.proto";
  24. option java_outer_classname = "StorageContainerLocationProtocolProtos";
  25. option java_generic_services = true;
  26. option java_generate_equals_and_hash = true;
  27. package hadoop.hdfs;
  28. import "hdfs.proto";
  29. import "Ozone.proto";
  30. /**
  31. * Request send to SCM asking where the container should be created.
  32. */
  33. message ContainerRequestProto {
  34. required string containerName = 1;
  35. // Ozone only support replciation of either 1 or 3.
  36. required hadoop.hdfs.ozone.ReplicationFactor replicationFactor = 2;
  37. required hadoop.hdfs.ozone.ReplicationType replicationType = 3;
  38. }
  39. /**
  40. * Reply from SCM indicating that the container.
  41. */
  42. message ContainerResponseProto {
  43. enum Error {
  44. success = 1;
  45. errorContainerAlreadyExists = 2;
  46. errorContainerMissing = 3;
  47. }
  48. required Error errorCode = 1;
  49. required hadoop.hdfs.ozone.Pipeline pipeline = 2;
  50. optional string errorMessage = 3;
  51. }
  52. message GetContainerRequestProto {
  53. required string containerName = 1;
  54. }
  55. message GetContainerResponseProto {
  56. required hadoop.hdfs.ozone.Pipeline pipeline = 1;
  57. }
  58. message ListContainerRequestProto {
  59. required uint32 count = 1;
  60. optional string startName = 2;
  61. optional string prefixName = 3;
  62. }
  63. message ListContainerResponseProto {
  64. repeated hadoop.hdfs.ozone.Pipeline pipeline = 1;
  65. }
  66. message DeleteContainerRequestProto {
  67. required string containerName = 1;
  68. }
  69. message DeleteContainerResponseProto {
  70. // Empty response
  71. }
  72. /*
  73. NodeQueryRequest sends a request to SCM asking to send a list of nodes that
  74. match the NodeState that we are requesting.
  75. */
  76. message NodeQueryRequestProto {
  77. // Repeated, So we can specify more than one status type.
  78. // These NodeState types are additive for now, in the sense that
  79. // if you specify HEALTHY and FREE_NODE members --
  80. // Then you get all healthy node which are not raft members.
  81. //
  82. // if you specify all healthy and dead nodes, you will get nothing
  83. // back. Server is not going to dictate what combinations make sense,
  84. // it is entirely up to the caller.
  85. // TODO: Support operators like OR and NOT. Currently it is always an
  86. // implied AND.
  87. repeated hadoop.hdfs.ozone.NodeState query = 1;
  88. required hadoop.hdfs.ozone.QueryScope scope = 2;
  89. optional string poolName = 3; // if scope is pool, then pool name is needed.
  90. }
  91. message NodeQueryResponseProto {
  92. required hadoop.hdfs.ozone.NodePool datanodes = 1;
  93. }
  94. /**
  95. Request to create a replication pipeline.
  96. */
  97. message PipelineRequestProto {
  98. required hadoop.hdfs.ozone.ReplicationType replicationType = 1;
  99. required hadoop.hdfs.ozone.ReplicationFactor replicationFactor = 2;
  100. // if datanodes are specified then pipelines are created using those
  101. // datanodes.
  102. optional hadoop.hdfs.ozone.NodePool nodePool = 3;
  103. optional string pipelineID = 4;
  104. }
  105. message PipelineResponseProto {
  106. enum Error {
  107. success = 1;
  108. errorPipelineAlreadyExists = 2;
  109. }
  110. required Error errorCode = 1;
  111. optional hadoop.hdfs.ozone.Pipeline pipeline = 2;
  112. optional string errorMessage = 3;
  113. }
  114. /**
  115. * Protocol used from an HDFS node to StorageContainerManager. See the request
  116. * and response messages for details of the RPC calls.
  117. */
  118. service StorageContainerLocationProtocolService {
  119. /**
  120. * Creates a container entry in SCM.
  121. */
  122. rpc allocateContainer(ContainerRequestProto) returns (ContainerResponseProto);
  123. /**
  124. * Returns the pipeline for a given container.
  125. */
  126. rpc getContainer(GetContainerRequestProto) returns (GetContainerResponseProto);
  127. rpc listContainer(ListContainerRequestProto) returns (ListContainerResponseProto);
  128. /**
  129. * Deletes a container in SCM.
  130. */
  131. rpc deleteContainer(DeleteContainerRequestProto) returns (DeleteContainerResponseProto);
  132. /**
  133. * Returns a set of Nodes that meet a criteria.
  134. */
  135. rpc queryNode(NodeQueryRequestProto) returns (NodeQueryResponseProto);
  136. /*
  137. * Apis that Manage Pipelines.
  138. *
  139. * Pipelines are abstractions offered by SCM and Datanode that allows users
  140. * to create a replication pipeline.
  141. *
  142. * These following APIs allow command line programs like SCM CLI to list
  143. * and manage pipelines.
  144. */
  145. /**
  146. * Creates a replication pipeline.
  147. */
  148. rpc allocatePipeline(PipelineRequestProto)
  149. returns (PipelineResponseProto);
  150. }