- /**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
- /**
- * These .proto interfaces are private and unstable.
- * Please see http://wiki.apache.org/hadoop/Compatibility
- * for what changes are allowed for an *unstable* .proto interface.
- */
- option java_package = "org.apache.hadoop.ozone.protocol.proto";
- option java_outer_classname = "StorageContainerLocationProtocolProtos";
- option java_generic_services = true;
- option java_generate_equals_and_hash = true;
- package hadoop.hdfs;
- import "hdfs.proto";
- import "Ozone.proto";
- /**
- * Request sent to SCM asking where the container should be created.
- */
- message ContainerRequestProto {
- required string containerName = 1;
- // Ozone only supports a replication factor of either 1 or 3.
- required hadoop.hdfs.ozone.ReplicationFactor replicationFactor = 2;
- required hadoop.hdfs.ozone.ReplicationType replicationType = 3;
- }
- /**
- * Reply from SCM with the allocation status and the pipeline for the container.
- */
- message ContainerResponseProto {
- enum Error {
- success = 1;
- errorContainerAlreadyExists = 2;
- errorContainerMissing = 3;
- }
- required Error errorCode = 1;
- required hadoop.hdfs.ozone.Pipeline pipeline = 2;
- optional string errorMessage = 3;
- }
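- /**
- * Request to look up an existing container by name.
- */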
- message GetContainerRequestProto {
- required string containerName = 1;
- }
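- /**
- * Reply from SCM with the pipeline that serves the requested container.
- */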
- message GetContainerResponseProto {
- required hadoop.hdfs.ozone.Pipeline pipeline = 1;
- }
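- /**
- * Request a listing of containers. count caps the number of results;
- * startName and prefixName are assumed to act as the resume point and
- * name-prefix filter for paging through the listing.
- */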
- message ListContainerRequestProto {
- required uint32 count = 1;
- optional string startName = 2;
- optional string prefixName = 3;
- }
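- /**
- * Reply with one pipeline per container matched by the listing request.
- */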
- message ListContainerResponseProto {
- repeated hadoop.hdfs.ozone.Pipeline pipeline = 1;
- }
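- /**
- * Request to delete the named container from SCM.
- */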
- message DeleteContainerRequestProto {
- required string containerName = 1;
- }
- message DeleteContainerResponseProto {
- // Empty response
- }
- /*
- NodeQueryRequest sends a request to SCM asking to send a list of nodes that
- match the NodeState that we are requesting.
- */
- message NodeQueryRequestProto {
- // Repeated, so we can specify more than one status type.
- // These NodeState types are additive for now, in the sense that
- // if you specify HEALTHY and FREE_NODE members --
- // then you get all healthy nodes which are not Raft members.
- //
- // If you specify all healthy and dead nodes, you will get nothing
- // back, because the server does not dictate which combinations make
- // sense; that is entirely up to the caller.
- // TODO: Support operators like OR and NOT. Currently it is always an
- // implied AND.
- repeated hadoop.hdfs.ozone.NodeState query = 1;
- required hadoop.hdfs.ozone.QueryScope scope = 2;
- optional string poolName = 3; // If the scope is a pool, the pool name is needed.
- }
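- /**
- * Reply with the pool of datanodes that matched the node query.
- */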
- message NodeQueryResponseProto {
- required hadoop.hdfs.ozone.NodePool datanodes = 1;
- }
- /**
- Request to create a replication pipeline.
- */
- message PipelineRequestProto {
- required hadoop.hdfs.ozone.ReplicationType replicationType = 1;
- required hadoop.hdfs.ozone.ReplicationFactor replicationFactor = 2;
- // If datanodes are specified, then pipelines are created using those
- // datanodes.
- optional hadoop.hdfs.ozone.NodePool nodePool = 3;
- optional string pipelineID = 4;
- }
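- /**
- * Reply from SCM indicating whether the pipeline was created; the
- * pipeline field is expected to be present only on success.
- */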
- message PipelineResponseProto {
- enum Error {
- success = 1;
- errorPipelineAlreadyExists = 2;
- }
- required Error errorCode = 1;
- optional hadoop.hdfs.ozone.Pipeline pipeline = 2;
- optional string errorMessage = 3;
- }
- /**
- * Protocol used from an HDFS node to StorageContainerManager. See the request
- * and response messages for details of the RPC calls.
- */
- service StorageContainerLocationProtocolService {
- /**
- * Creates a container entry in SCM.
- */
- rpc allocateContainer(ContainerRequestProto) returns (ContainerResponseProto);
- /**
- * Returns the pipeline for a given container.
- */
- rpc getContainer(GetContainerRequestProto) returns (GetContainerResponseProto);
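- /**
- * Returns pipelines for containers matching the listing request.
- */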
- rpc listContainer(ListContainerRequestProto) returns (ListContainerResponseProto);
- /**
- * Deletes a container in SCM.
- */
- rpc deleteContainer(DeleteContainerRequestProto) returns (DeleteContainerResponseProto);
- /**
- * Returns a set of Nodes that meet a criteria.
- */
- rpc queryNode(NodeQueryRequestProto) returns (NodeQueryResponseProto);
- /*
- * APIs that manage pipelines.
- *
- * Pipelines are abstractions offered by SCM and datanodes that allow users
- * to create a replication pipeline.
- *
- * The following APIs allow command-line programs like the SCM CLI to list
- * and manage pipelines.
- */
- /**
- * Creates a replication pipeline.
- */
- rpc allocatePipeline(PipelineRequestProto)
- returns (PipelineResponseProto);
- }