/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef LIB_RPC_RPC_CONNECTION_H_
#define LIB_RPC_RPC_CONNECTION_H_

#include "rpc_engine.h"
#include "common/auth_info.h"
#include "common/logging.h"
#include "common/util.h"
#include "common/libhdfs_events_impl.h"
#include "sasl_protocol.h"

#include <asio/connect.hpp>
#include <asio/read.hpp>
#include <asio/write.hpp>

#include <system_error>

namespace hdfs {
// Concrete RpcConnection parameterized on the transport layer.
// NextLayer is typically an asio TCP socket; tests substitute a mock.
// NextLayer must provide:
//   connect(endpoint) and async_connect(endpoint, handler)
//   an asio-compatible async read/write stream interface
template <class NextLayer>
class RpcConnectionImpl : public RpcConnection {
public:
  RpcConnectionImpl(RpcEngine *engine);
  virtual ~RpcConnectionImpl() override;

  // Queue a sentinel request that fires handler with the connection status,
  // then connect to the first reachable endpoint in server.
  virtual void Connect(const std::vector<::asio::ip::tcp::endpoint> &server,
                       const AuthInfo & auth_info,
                       RpcCallback &handler);
  // Connect (if not already connected) and push out any queued requests.
  virtual void ConnectAndFlush(
      const std::vector<::asio::ip::tcp::endpoint> &server) override;
  virtual void SendHandshake(RpcCallback &handler) override;
  virtual void SendContext(RpcCallback &handler) override;
  virtual void Disconnect() override;
  virtual void OnSendCompleted(const ::asio::error_code &ec,
                               size_t transferred) override;
  virtual void OnRecvCompleted(const ::asio::error_code &ec,
                               size_t transferred) override;
  virtual void FlushPendingRequests() override;

  NextLayer &next_layer() { return next_layer_; }

  // Test hook: force the connection state without doing any I/O.
  void TEST_set_connected(bool connected) { connected_ = connected ? kConnected : kNotYetConnected; }

private:
  const Options options_;
  // Endpoint currently being connected to; used to ignore stale callbacks.
  ::asio::ip::tcp::endpoint current_endpoint_;
  // Fallback endpoints to try, in order, if the current connect fails.
  std::vector<::asio::ip::tcp::endpoint> additional_endpoints_;
  NextLayer next_layer_;
  ::asio::deadline_timer connect_timer_;

  // Shared completion handler for both the async connect and its timeout.
  void ConnectComplete(const ::asio::error_code &ec, const ::asio::ip::tcp::endpoint &remote);
};
// Construct an unconnected RpcConnection bound to engine's io_service.
// Options are snapshotted here; later engine option changes are not seen.
template <class NextLayer>
RpcConnectionImpl<NextLayer>::RpcConnectionImpl(RpcEngine *engine)
    : RpcConnection(engine),
      options_(engine->options()),
      next_layer_(engine->io_service()),
      connect_timer_(engine->io_service())
{
  LOG_TRACE(kRPC, << "RpcConnectionImpl::RpcConnectionImpl called &" << (void*)this);
}
  68. template <class NextLayer>
  69. RpcConnectionImpl<NextLayer>::~RpcConnectionImpl() {
  70. LOG_DEBUG(kRPC, << "RpcConnectionImpl::~RpcConnectionImpl called &" << (void*)this);
  71. std::lock_guard<std::mutex> state_lock(connection_state_lock_);
  72. if (pending_requests_.size() > 0)
  73. LOG_WARN(kRPC, << "RpcConnectionImpl::~RpcConnectionImpl called with items in the pending queue");
  74. if (requests_on_fly_.size() > 0)
  75. LOG_WARN(kRPC, << "RpcConnectionImpl::~RpcConnectionImpl called with items in the requests_on_fly queue");
  76. }
  77. template <class NextLayer>
  78. void RpcConnectionImpl<NextLayer>::Connect(
  79. const std::vector<::asio::ip::tcp::endpoint> &server,
  80. const AuthInfo & auth_info,
  81. RpcCallback &handler) {
  82. LOG_TRACE(kRPC, << "RpcConnectionImpl::Connect called");
  83. this->auth_info_ = auth_info;
  84. auto connectionSuccessfulReq = std::make_shared<Request>(
  85. engine_, [handler](::google::protobuf::io::CodedInputStream *is,
  86. const Status &status) {
  87. (void)is;
  88. handler(status);
  89. });
  90. pending_requests_.push_back(connectionSuccessfulReq);
  91. this->ConnectAndFlush(server); // need "this" so compiler can infer type of CAF
  92. }
  93. template <class NextLayer>
  94. void RpcConnectionImpl<NextLayer>::ConnectAndFlush(
  95. const std::vector<::asio::ip::tcp::endpoint> &server) {
  96. std::lock_guard<std::mutex> state_lock(connection_state_lock_);
  97. if (server.empty()) {
  98. Status s = Status::InvalidArgument("No endpoints provided");
  99. CommsError(s);
  100. return;
  101. }
  102. if (connected_ == kConnected) {
  103. FlushPendingRequests();
  104. return;
  105. }
  106. if (connected_ != kNotYetConnected) {
  107. LOG_WARN(kRPC, << "RpcConnectionImpl::ConnectAndFlush called while connected=" << ToString(connected_));
  108. return;
  109. }
  110. connected_ = kConnecting;
  111. // Take the first endpoint, but remember the alternatives for later
  112. additional_endpoints_ = server;
  113. ::asio::ip::tcp::endpoint first_endpoint = additional_endpoints_.front();
  114. additional_endpoints_.erase(additional_endpoints_.begin());
  115. current_endpoint_ = first_endpoint;
  116. auto shared_this = shared_from_this();
  117. next_layer_.async_connect(first_endpoint, [shared_this, this, first_endpoint](const ::asio::error_code &ec) {
  118. ConnectComplete(ec, first_endpoint);
  119. });
  120. // Prompt the timer to timeout
  121. auto weak_this = std::weak_ptr<RpcConnection>(shared_this);
  122. connect_timer_.expires_from_now(
  123. std::chrono::milliseconds(options_.rpc_connect_timeout));
  124. connect_timer_.async_wait([shared_this, this, first_endpoint](const ::asio::error_code &ec) {
  125. if (ec)
  126. ConnectComplete(ec, first_endpoint);
  127. else
  128. ConnectComplete(make_error_code(asio::error::host_unreachable), first_endpoint);
  129. });
  130. }
// Completion handler shared by the async connect and its timeout timer.
// Both may fire for the same attempt; the endpoint/state guards below make
// sure only the first relevant invocation is acted on.  On failure, the
// next endpoint in additional_endpoints_ is tried before reporting a
// CommsError.
template <class NextLayer>
void RpcConnectionImpl<NextLayer>::ConnectComplete(const ::asio::error_code &ec, const ::asio::ip::tcp::endpoint & remote) {
  // Keep *this alive for the async operations that may be scheduled below.
  auto shared_this = RpcConnectionImpl<NextLayer>::shared_from_this();
  std::lock_guard<std::mutex> state_lock(connection_state_lock_);
  connect_timer_.cancel();

  LOG_TRACE(kRPC, << "RpcConnectionImpl::ConnectComplete called");

  // Could be an old async connect returning a result after we've moved on
  if (remote != current_endpoint_) {
    LOG_DEBUG(kRPC, << "Got ConnectComplete for " << remote << " but current_endpoint_ is " << current_endpoint_);
    return;
  }
  // A connect/timeout pair races; only the first one sees kConnecting.
  if (connected_ != kConnecting) {
    LOG_DEBUG(kRPC, << "Got ConnectComplete but current state is " << connected_);;
    return;
  }

  Status status = ToStatus(ec);
  // Give the (test) event hooks a chance to observe the connect and, in
  // error-simulation builds, inject a failure.
  if(event_handlers_) {
    event_response event_resp = event_handlers_->call(FS_NN_CONNECT_EVENT, cluster_name_.c_str(), 0);
#ifndef LIBHDFSPP_SIMULATE_ERROR_DISABLED
    if (event_resp.response() == event_response::kTest_Error) {
      status = event_resp.status();
    }
#endif
  }

  if (status.ok()) {
    // Connected: start pumping reads and begin the RPC handshake.
    StartReading();
    SendHandshake([shared_this, this](const Status & s) {
      HandshakeComplete(s);
    });
  } else {
    LOG_DEBUG(kRPC, << "Rpc connection failed; err=" << status.ToString());;
    // Best-effort close of the half-open socket before retrying.
    std::string err = SafeDisconnect(get_asio_socket_ptr(&next_layer_));
    if(!err.empty()) {
      LOG_INFO(kRPC, << "Rpc connection failed to connect to endpoint, error closing connection: " << err);
    }

    if (!additional_endpoints_.empty()) {
      // If we have additional endpoints, keep trying until we either run out or
      // hit one
      ::asio::ip::tcp::endpoint next_endpoint = additional_endpoints_.front();
      additional_endpoints_.erase(additional_endpoints_.begin());
      current_endpoint_ = next_endpoint;

      // Same connect + timeout pattern as ConnectAndFlush, for the fallback.
      next_layer_.async_connect(next_endpoint, [shared_this, this, next_endpoint](const ::asio::error_code &ec) {
        ConnectComplete(ec, next_endpoint);
      });
      connect_timer_.expires_from_now(
          std::chrono::milliseconds(options_.rpc_connect_timeout));
      connect_timer_.async_wait([shared_this, this, next_endpoint](const ::asio::error_code &ec) {
        if (ec)
          ConnectComplete(ec, next_endpoint);
        else
          ConnectComplete(make_error_code(asio::error::host_unreachable), next_endpoint);
      });
    } else {
      // Out of endpoints: surface the last failure to all queued requests.
      CommsError(status);
    }
  }
}
  188. template <class NextLayer>
  189. void RpcConnectionImpl<NextLayer>::SendHandshake(RpcCallback &handler) {
  190. assert(lock_held(connection_state_lock_)); // Must be holding lock before calling
  191. LOG_TRACE(kRPC, << "RpcConnectionImpl::SendHandshake called");
  192. connected_ = kHandshaking;
  193. auto shared_this = shared_from_this();
  194. auto handshake_packet = PrepareHandshakePacket();
  195. ::asio::async_write(next_layer_, asio::buffer(*handshake_packet),
  196. [handshake_packet, handler, shared_this, this](
  197. const ::asio::error_code &ec, size_t) {
  198. Status status = ToStatus(ec);
  199. handler(status);
  200. });
  201. }
  202. template <class NextLayer>
  203. void RpcConnectionImpl<NextLayer>::SendContext(RpcCallback &handler) {
  204. assert(lock_held(connection_state_lock_)); // Must be holding lock before calling
  205. LOG_TRACE(kRPC, << "RpcConnectionImpl::SendContext called");
  206. auto shared_this = shared_from_this();
  207. auto context_packet = PrepareContextPacket();
  208. ::asio::async_write(next_layer_, asio::buffer(*context_packet),
  209. [context_packet, handler, shared_this, this](
  210. const ::asio::error_code &ec, size_t) {
  211. Status status = ToStatus(ec);
  212. handler(status);
  213. });
  214. }
  215. template <class NextLayer>
  216. void RpcConnectionImpl<NextLayer>::OnSendCompleted(const ::asio::error_code &ec,
  217. size_t) {
  218. using std::placeholders::_1;
  219. using std::placeholders::_2;
  220. std::lock_guard<std::mutex> state_lock(connection_state_lock_);
  221. LOG_TRACE(kRPC, << "RpcConnectionImpl::OnSendCompleted called");
  222. request_over_the_wire_.reset();
  223. if (ec) {
  224. LOG_WARN(kRPC, << "Network error during RPC write: " << ec.message());
  225. CommsError(ToStatus(ec));
  226. return;
  227. }
  228. FlushPendingRequests();
  229. }
  230. template <class NextLayer>
  231. void RpcConnectionImpl<NextLayer>::FlushPendingRequests() {
  232. using namespace ::std::placeholders;
  233. // Lock should be held
  234. assert(lock_held(connection_state_lock_));
  235. LOG_TRACE(kRPC, << "RpcConnectionImpl::FlushPendingRequests called");
  236. // Don't send if we don't need to
  237. if (request_over_the_wire_) {
  238. return;
  239. }
  240. std::shared_ptr<Request> req;
  241. switch (connected_) {
  242. case kNotYetConnected:
  243. return;
  244. case kConnecting:
  245. return;
  246. case kHandshaking:
  247. return;
  248. case kAuthenticating:
  249. if (auth_requests_.empty()) {
  250. return;
  251. }
  252. req = auth_requests_.front();
  253. auth_requests_.erase(auth_requests_.begin());
  254. break;
  255. case kConnected:
  256. if (pending_requests_.empty()) {
  257. return;
  258. }
  259. req = pending_requests_.front();
  260. pending_requests_.erase(pending_requests_.begin());
  261. break;
  262. case kDisconnected:
  263. LOG_DEBUG(kRPC, << "RpcConnectionImpl::FlushPendingRequests attempted to flush a " << ToString(connected_) << " connection");
  264. return;
  265. default:
  266. LOG_DEBUG(kRPC, << "RpcConnectionImpl::FlushPendingRequests invalid state: " << ToString(connected_));
  267. return;
  268. }
  269. std::shared_ptr<RpcConnection> shared_this = shared_from_this();
  270. auto weak_this = std::weak_ptr<RpcConnection>(shared_this);
  271. auto weak_req = std::weak_ptr<Request>(req);
  272. std::shared_ptr<std::string> payload = std::make_shared<std::string>();
  273. req->GetPacket(payload.get());
  274. if (!payload->empty()) {
  275. assert(requests_on_fly_.find(req->call_id()) == requests_on_fly_.end());
  276. requests_on_fly_[req->call_id()] = req;
  277. request_over_the_wire_ = req;
  278. req->timer().expires_from_now(
  279. std::chrono::milliseconds(options_.rpc_timeout));
  280. req->timer().async_wait([weak_this, weak_req, this](const ::asio::error_code &ec) {
  281. auto timeout_this = weak_this.lock();
  282. auto timeout_req = weak_req.lock();
  283. if (timeout_this && timeout_req)
  284. this->HandleRpcTimeout(timeout_req, ec);
  285. });
  286. asio::async_write(next_layer_, asio::buffer(*payload),
  287. [shared_this, this, payload](const ::asio::error_code &ec,
  288. size_t size) {
  289. OnSendCompleted(ec, size);
  290. });
  291. } else { // Nothing to send for this request, inform the handler immediately
  292. io_service().post(
  293. // Never hold locks when calling a callback
  294. [req]() { req->OnResponseArrived(nullptr, Status::OK()); }
  295. );
  296. // Reschedule to flush the next one
  297. AsyncFlushPendingRequests();
  298. }
  299. }
  300. template <class NextLayer>
  301. void RpcConnectionImpl<NextLayer>::OnRecvCompleted(const ::asio::error_code &original_ec,
  302. size_t) {
  303. using std::placeholders::_1;
  304. using std::placeholders::_2;
  305. std::lock_guard<std::mutex> state_lock(connection_state_lock_);
  306. ::asio::error_code my_ec(original_ec);
  307. LOG_TRACE(kRPC, << "RpcConnectionImpl::OnRecvCompleted called");
  308. std::shared_ptr<RpcConnection> shared_this = shared_from_this();
  309. if(event_handlers_) {
  310. event_response event_resp = event_handlers_->call(FS_NN_READ_EVENT, cluster_name_.c_str(), 0);
  311. #ifndef LIBHDFSPP_SIMULATE_ERROR_DISABLED
  312. if (event_resp.response() == event_response::kTest_Error) {
  313. my_ec = std::make_error_code(std::errc::network_down);
  314. }
  315. #endif
  316. }
  317. switch (my_ec.value()) {
  318. case 0:
  319. // No errors
  320. break;
  321. case asio::error::operation_aborted:
  322. // The event loop has been shut down. Ignore the error.
  323. return;
  324. default:
  325. LOG_WARN(kRPC, << "Network error during RPC read: " << my_ec.message());
  326. CommsError(ToStatus(my_ec));
  327. return;
  328. }
  329. if (!current_response_state_) { /* start a new one */
  330. current_response_state_ = std::make_shared<Response>();
  331. }
  332. if (current_response_state_->state_ == Response::kReadLength) {
  333. current_response_state_->state_ = Response::kReadContent;
  334. auto buf = ::asio::buffer(reinterpret_cast<char *>(&current_response_state_->length_),
  335. sizeof(current_response_state_->length_));
  336. asio::async_read(
  337. next_layer_, buf,
  338. [shared_this, this](const ::asio::error_code &ec, size_t size) {
  339. OnRecvCompleted(ec, size);
  340. });
  341. } else if (current_response_state_->state_ == Response::kReadContent) {
  342. current_response_state_->state_ = Response::kParseResponse;
  343. current_response_state_->length_ = ntohl(current_response_state_->length_);
  344. current_response_state_->data_.resize(current_response_state_->length_);
  345. asio::async_read(
  346. next_layer_, ::asio::buffer(current_response_state_->data_),
  347. [shared_this, this](const ::asio::error_code &ec, size_t size) {
  348. OnRecvCompleted(ec, size);
  349. });
  350. } else if (current_response_state_->state_ == Response::kParseResponse) {
  351. HandleRpcResponse(current_response_state_);
  352. current_response_state_ = nullptr;
  353. StartReading();
  354. }
  355. }
  356. template <class NextLayer>
  357. void RpcConnectionImpl<NextLayer>::Disconnect() {
  358. assert(lock_held(connection_state_lock_)); // Must be holding lock before calling
  359. LOG_INFO(kRPC, << "RpcConnectionImpl::Disconnect called");
  360. request_over_the_wire_.reset();
  361. if (connected_ == kConnecting || connected_ == kHandshaking || connected_ == kAuthenticating || connected_ == kConnected) {
  362. // Don't print out errors, we were expecting a disconnect here
  363. SafeDisconnect(get_asio_socket_ptr(&next_layer_));
  364. }
  365. connected_ = kDisconnected;
  366. }
} // namespace hdfs

#endif // LIB_RPC_RPC_CONNECTION_H_