- /**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
- /**
- * These .proto interfaces are private and unstable.
- * Please see http://wiki.apache.org/hadoop/Compatibility
- * for what changes are allowed for an *unstable* .proto interface.
- */
- option java_package = "org.apache.hadoop.ozone.protocol.proto";
- option java_outer_classname = "StorageContainerDatanodeProtocolProtos";
- option java_generic_services = true;
- option java_generate_equals_and_hash = true;
- package hadoop.hdfs;
- import "hdfs.proto";
- import "HdfsServer.proto";
- import "DatanodeProtocol.proto";
- import "Ozone.proto";
- /**
- * This message is sent by the datanode to indicate that it is alive or that
- * it is registering with the node manager.
- */
- message SCMHeartbeatRequestProto {
- required DatanodeIDProto datanodeID = 1;
- optional SCMNodeReport nodeReport = 2;
- optional ReportState containerReportState = 3;
- }
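- /**
- * Illustrative only, not part of the protocol definition: a minimal sketch of
- * how a datanode might build this request with the generated Java classes
- * (StorageContainerDatanodeProtocolProtos). The DatanodeIDProto builder and
- * its fields come from hdfs.proto (HdfsProtos); all values below are
- * hypothetical.
- *
- *   DatanodeIDProto dnId = DatanodeIDProto.newBuilder()
- *       .setIpAddr("10.0.0.5")
- *       .setHostName("dn1.example.com")
- *       .setDatanodeUuid("dn-uuid-example")
- *       .setXferPort(50010)
- *       .setInfoPort(50075)
- *       .setIpcPort(50020)
- *       .build();
- *
- *   ReportState reportState = ReportState.newBuilder()
- *       .setState(ReportState.states.noContainerReports)
- *       .setCount(0)
- *       .build();
- *
- *   SCMHeartbeatRequestProto request = SCMHeartbeatRequestProto.newBuilder()
- *       .setDatanodeID(dnId)
- *       .setContainerReportState(reportState)
- *       .build();
- */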
- enum ContainerState {
- closed = 0;
- open = 1;
- }
- /**
- ReportState contains messages from the datanode to SCM saying that it has
- some information that SCM might be interested in.*/
- message ReportState {
- enum states {
- noContainerReports = 0;
- completeContainerReport = 1;
- deltaContainerReport = 2;
- }
- required states state = 1;
- required int64 count = 2 [default = 0];
- }
- /**
- This message is used to persist the information about a container in the
- SCM database. This information allows SCM to start up faster and avoids
- having to keep all container info in memory all the time.
- */
- message ContainerPersistenceProto {
- required ContainerState state = 1;
- required hadoop.hdfs.ozone.Pipeline pipeline = 2;
- required ContainerInfo info = 3;
- }
- /**
- This message is used to do a quick lookup of which containers are affected
- if a node goes down.
- */
- message NodeContainerMapping {
- repeated string containerName = 1;
- }
- /**
- A container report contains the following information.
- */
- message ContainerInfo {
- required string containerName = 1;
- required string finalhash = 2;
- optional int64 size = 3;
- optional int64 keycount = 4;
- }
- /**
- A set of container reports. The max count is generally set to
- 8192, since that keeps the size of the report under 1 MB.
- */
- message ContainerReportsProto {
- enum reportType {
- fullReport = 0;
- deltaReport = 1;
- }
- repeated ContainerInfo reports = 1;
- required reportType type = 2;
- }
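- /**
- * Illustrative only: a sketch of assembling a report with the generated Java
- * classes. The 8192 cap implies roughly 128 bytes per ContainerInfo entry on
- * average; the names and numbers below are hypothetical.
- *
- *   ContainerInfo info = ContainerInfo.newBuilder()
- *       .setContainerName("container-0001")
- *       .setFinalhash("example-final-hash")
- *       .setSize(5L * 1024 * 1024 * 1024)
- *       .setKeycount(1024)
- *       .build();
- *
- *   ContainerReportsProto report = ContainerReportsProto.newBuilder()
- *       .addReports(info)
- *       .setType(ContainerReportsProto.reportType.fullReport)
- *       .build();
- */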
- /**
- * This message is sent along with the heartbeat to report datanode
- * storage utilization to SCM.
- */
- message SCMNodeReport {
- repeated SCMStorageReport storageReport = 1;
- }
- message SCMStorageReport {
- required string storageUuid = 1;
- optional uint64 capacity = 2 [default = 0];
- optional uint64 scmUsed = 3 [default = 0];
- optional uint64 remaining = 4 [default = 0];
- optional StorageTypeProto storageType = 5 [default = DISK];
- }
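- /**
- * Illustrative only: a sketch of one storage report inside a node report,
- * built with the generated Java classes. StorageTypeProto comes from
- * hdfs.proto; the UUID and byte counts below are hypothetical.
- *
- *   SCMStorageReport disk = SCMStorageReport.newBuilder()
- *       .setStorageUuid("DS-example-1")
- *       .setCapacity(100L * 1024 * 1024 * 1024)   // 100 GB
- *       .setScmUsed(1L * 1024 * 1024 * 1024)      // 1 GB
- *       .setRemaining(99L * 1024 * 1024 * 1024)
- *       .setStorageType(StorageTypeProto.DISK)
- *       .build();
- *
- *   SCMNodeReport nodeReport = SCMNodeReport.newBuilder()
- *       .addStorageReport(disk)
- *       .build();
- */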
- message SCMRegisterRequestProto {
- required DatanodeIDProto datanodeID = 1;
- optional SCMNodeAddressList addressList = 2;
- }
- /**
- * Request for version info of the software stack on the server.
- */
- message SCMVersionRequestProto {
- }
- /**
- * Generic response that is sent to a version request. This allows keys to be
- * added on the fly and the protocol to remain stable.
- */
- message SCMVersionResponseProto {
- required uint32 softwareVersion = 1;
- repeated hadoop.hdfs.ozone.KeyValue keys = 2;
- }
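- /**
- * Illustrative only: a sketch of how a server might build this response and
- * attach an extra key without changing the message definition. KeyValue is
- * assumed to be the string key/value pair defined in Ozone.proto; the key
- * name here is hypothetical.
- *
- *   SCMVersionResponseProto versionResponse = SCMVersionResponseProto.newBuilder()
- *       .setSoftwareVersion(1)
- *       .addKeys(KeyValue.newBuilder()
- *           .setKey("scmUuid")
- *           .setValue("scm-cluster-example"))
- *       .build();
- */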
- message SCMNodeAddressList {
- repeated string addressList = 1;
- }
- /**
- * Datanode ID returned by the SCM. This is similar to a datanode's
- * registration with the namenode.
- */
- message SCMRegisteredCmdResponseProto {
- enum ErrorCode {
- success = 1;
- errorNodeNotPermitted = 2;
- }
- required ErrorCode errorCode = 2;
- optional string datanodeUUID = 3;
- optional string clusterID = 4;
- optional SCMNodeAddressList addressList = 5;
- }
- /**
- * ContainerNodeIDProto maintains the datanode's identity along with the
- * cluster ID after the registration is done.
- */
- message ContainerNodeIDProto {
- required DatanodeIDProto datanodeID = 1;
- optional string clusterID = 2;
- }
- /**
- This command tells the datanode to send in the container report when possible.
- */
- message SendContainerReportProto {
- }
- /**
- Types of commands supported by the SCM-to-datanode protocol.
- */
- enum Type {
- versionCommand = 2;
- registeredCommand = 3;
- sendContainerReport = 4;
- }
- /*
- * These are commands returned by SCM for the datanode to execute.
- */
- message SCMCommandResponseProto {
- required Type cmdType = 2; // Type of the command
- optional SCMRegisteredCmdResponseProto registeredProto = 3;
- optional SCMVersionResponseProto versionProto = 4;
- optional SendContainerReportProto sendReport = 5;
- }
- /*
- * A group of commands for the datanode to execute
- */
- message SCMHeartbeatResponseProto {
- repeated SCMCommandResponseProto commands = 1;
- }
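- /**
- * Illustrative only: a sketch of how the datanode side might dispatch on the
- * commands carried in a heartbeat response; the handler methods are
- * hypothetical.
- *
- *   for (SCMCommandResponseProto cmd : response.getCommandsList()) {
- *     switch (cmd.getCmdType()) {
- *     case registeredCommand:
- *       handleRegistered(cmd.getRegisteredProto());    // hypothetical handler
- *       break;
- *     case versionCommand:
- *       handleVersion(cmd.getVersionProto());          // hypothetical handler
- *       break;
- *     case sendContainerReport:
- *       scheduleContainerReport(cmd.getSendReport());  // hypothetical handler
- *       break;
- *     }
- *   }
- */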
- /**
- * Protocol used from a datanode to StorageContainerManager.
- *
- * Please see the request and response messages for details of the RPC calls.
- *
- * Here is a simple state diagram that shows how a datanode would boot up and
- * communicate with SCM.
- *
- *                  -----------------------
- *                 |         Start         |
- *                  ---------- ------------
- *                            |
- *                            |
- *                            |
- *                  ----------v-------------
- *                 |   Searching for SCM    |-----------
- *                  ---------- -------------            |
- *                            |                         |
- *                            |               ----------v-------------
- *                            |              |   Register if needed   |
- *                            |               ----------- ------------
- *                            |                          |
- *                            v                          |
- *                  ---------- ---------------           |
- *        ---------|     Heartbeat state      |<----------
- *       |          --------^-----------------
- *       |                  |
- *       |                  |
- *        -------------------
- *
- *
- *
- * Here is how this protocol is used by the datanode. When a datanode boots up
- * it moves into a state called SEARCHING_SCM. In this state the datanode is
- * trying to establish communication with the SCM. The addresses of the SCMs are
- * retrieved from the configuration information.
- *
- * In the SEARCHING_SCM state, the only RPC call made by the datanode is a
- * getVersion call to SCM. Once any of the SCMs replies, the datanode checks
- * whether it has a locally persisted datanode ID. If it does, this datanode is
- * already registered with some SCM. If this file is not found, the datanode assumes that
- * it needs to do a registration.
- *
- * If registration is needed, the datanode moves into the REGISTER state. It will
- * send a register call with the datanodeID data structure and persist that info.
- *
- * The response to the command contains the clusterID. This information is
- * also persisted by the datanode, which then moves into the heartbeat state.
- *
- * Once in the heartbeat state, the datanode sends heartbeats and container reports
- * to SCM and processes commands issued by SCM until it is shut down.
- *
- */
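- /**
- * Illustrative only: a rough client-side sketch of the boot sequence described
- * above. It assumes rpcProxy is a client stub for the BlockingInterface that
- * protoc generates for the service below; how that stub is wired up through
- * Hadoop RPC, and all error handling, are omitted here.
- *
- *   // SEARCHING_SCM: only getVersion is called until some SCM answers.
- *   SCMVersionResponseProto version =
- *       rpcProxy.getVersion(null, SCMVersionRequestProto.newBuilder().build());
- *
- *   // REGISTER: only if no locally persisted datanode ID was found.
- *   SCMRegisteredCmdResponseProto registered =
- *       rpcProxy.register(null, SCMRegisterRequestProto.newBuilder()
- *           .setDatanodeID(dnId)              // dnId as in the heartbeat sketch
- *           .build());
- *   // persist registered.getClusterID() and registered.getDatanodeUUID()
- *
- *   // HEARTBEAT: repeat until shutdown, executing any commands in the reply.
- *   SCMHeartbeatResponseProto reply = rpcProxy.sendHeartbeat(null, request);
- */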
- service StorageContainerDatanodeProtocolService {
- /**
- * Gets the version information from the SCM.
- */
- rpc getVersion (SCMVersionRequestProto) returns (SCMVersionResponseProto);
- /**
- * Registers a datanode with SCM.
- */
- rpc register (SCMRegisterRequestProto) returns (SCMRegisteredCmdResponseProto);
- /**
- * Sends a heartbeat from the datanode to SCM. Heartbeats to SCM look more
- * like a lifeline protocol than heartbeats in HDFS. In other words, it is
- * extremely lightweight and contains no data payload.
- */
- rpc sendHeartbeat (SCMHeartbeatRequestProto) returns (SCMHeartbeatResponseProto);
- /**
- sendContainerReport sends the container report to SCM. This will
- return a null command as the response.
- */
- rpc sendContainerReport(ContainerReportsProto) returns (SCMHeartbeatResponseProto);
- }