rpc_connection.h

/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef LIB_RPC_RPC_CONNECTION_H_
#define LIB_RPC_RPC_CONNECTION_H_

#include "rpc_engine.h"
#include "common/auth_info.h"
#include "common/logging.h"
#include "common/util.h"
#include "common/libhdfs_events_impl.h"
#include "sasl_protocol.h"

#include <asio/connect.hpp>
#include <asio/read.hpp>
#include <asio/write.hpp>

#include <system_error>

namespace hdfs {

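// RpcConnectionImpl is the concrete, templated side of RpcConnection.
// NextLayer is the transport the connection reads from and writes to; it
// must model asio's async stream concepts (async_connect, async_read_some,
// async_write_some). A plain ::asio::ip::tcp::socket is the natural choice;
// tests can substitute a mock stream via TEST_set_connected/next_layer().
//
// A minimal construction sketch (assuming an RpcEngine *engine and a vector
// of already-resolved endpoints, neither of which is shown in this header):
//
//   auto conn =
//       std::make_shared<RpcConnectionImpl<::asio::ip::tcp::socket>>(engine);
//   conn->ConnectAndFlush(endpoints);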
template <class NextLayer>
class RpcConnectionImpl : public RpcConnection {
public:
  RpcConnectionImpl(RpcEngine *engine);
  virtual ~RpcConnectionImpl() override;

  virtual void Connect(const std::vector<::asio::ip::tcp::endpoint> &server,
                       const AuthInfo &auth_info,
                       RpcCallback &handler) override;
  virtual void ConnectAndFlush(
      const std::vector<::asio::ip::tcp::endpoint> &server) override;
  virtual void SendHandshake(RpcCallback &handler) override;
  virtual void SendContext(RpcCallback &handler) override;
  virtual void Disconnect() override;
  virtual void OnSendCompleted(const ::asio::error_code &ec,
                               size_t transferred) override;
  virtual void OnRecvCompleted(const ::asio::error_code &ec,
                               size_t transferred) override;
  virtual void FlushPendingRequests() override;

  NextLayer &next_layer() { return next_layer_; }

  void TEST_set_connected(bool connected) {
    connected_ = connected ? kConnected : kNotYetConnected;
  }

private:
  const Options options_;
  ::asio::ip::tcp::endpoint current_endpoint_;
  std::vector<::asio::ip::tcp::endpoint> additional_endpoints_;
  NextLayer next_layer_;
  ::asio::deadline_timer connect_timer_;

  void ConnectComplete(const ::asio::error_code &ec,
                       const ::asio::ip::tcp::endpoint &remote);
};
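
// The constructor copies the engine's Options and binds both the socket and
// the connect timer to the engine's io_service, so all I/O for this
// connection runs on that service.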
template <class NextLayer>
RpcConnectionImpl<NextLayer>::RpcConnectionImpl(RpcEngine *engine)
    : RpcConnection(engine),
      options_(engine->options()),
      next_layer_(engine->io_service()),
      connect_timer_(engine->io_service()) {
  LOG_TRACE(kRPC, << "RpcConnectionImpl::RpcConnectionImpl called &" << (void*)this);
}

template <class NextLayer>
RpcConnectionImpl<NextLayer>::~RpcConnectionImpl() {
  LOG_DEBUG(kRPC, << "RpcConnectionImpl::~RpcConnectionImpl called &" << (void*)this);

  std::lock_guard<std::mutex> state_lock(connection_state_lock_);
  if (pending_requests_.size() > 0)
    LOG_WARN(kRPC, << "RpcConnectionImpl::~RpcConnectionImpl called with items in the pending queue");
  if (requests_on_fly_.size() > 0)
    LOG_WARN(kRPC, << "RpcConnectionImpl::~RpcConnectionImpl called with items in the requests_on_fly queue");
}

template <class NextLayer>
void RpcConnectionImpl<NextLayer>::Connect(
    const std::vector<::asio::ip::tcp::endpoint> &server,
    const AuthInfo &auth_info,
    RpcCallback &handler) {
  LOG_TRACE(kRPC, << "RpcConnectionImpl::Connect called");

  this->auth_info_ = auth_info;

  // Queue a sentinel request whose only job is to invoke the caller's
  // handler once the connect/handshake sequence finishes.
  auto connectionSuccessfulReq = std::make_shared<Request>(
      engine_, [handler](::google::protobuf::io::CodedInputStream *is,
                         const Status &status) {
        (void)is;
        handler(status);
      });
  pending_requests_.push_back(connectionSuccessfulReq);
  this->ConnectAndFlush(server);  // need "this" so compiler can infer type of CAF
}
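
// Starts an asynchronous connect against the first endpoint in `server` and
// stashes the remaining endpoints in additional_endpoints_ for fail-over.
// If the connection is already established this just flushes the pending
// request queue.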
template <class NextLayer>
void RpcConnectionImpl<NextLayer>::ConnectAndFlush(
    const std::vector<::asio::ip::tcp::endpoint> &server) {
  LOG_INFO(kRPC, << "ConnectAndFlush called");
  std::lock_guard<std::mutex> state_lock(connection_state_lock_);

  if (server.empty()) {
    Status s = Status::InvalidArgument("No endpoints provided");
    CommsError(s);
    return;
  }

  if (connected_ == kConnected) {
    FlushPendingRequests();
    return;
  }
  if (connected_ != kNotYetConnected) {
    LOG_WARN(kRPC, << "RpcConnectionImpl::ConnectAndFlush called while connected=" << ToString(connected_));
    return;
  }
  connected_ = kConnecting;

  // Take the first endpoint, but remember the alternatives for later
  additional_endpoints_ = server;
  ::asio::ip::tcp::endpoint first_endpoint = additional_endpoints_.front();
  additional_endpoints_.erase(additional_endpoints_.begin());
  current_endpoint_ = first_endpoint;

  auto shared_this = shared_from_this();
  next_layer_.async_connect(first_endpoint, [shared_this, this, first_endpoint](const ::asio::error_code &ec) {
    ConnectComplete(ec, first_endpoint);
  });

  // Arm the connect timer; if it fires before the async connect completes,
  // report the endpoint as unreachable so we move on to the next one.
  auto weak_this = std::weak_ptr<RpcConnection>(shared_this);
  connect_timer_.expires_from_now(
      std::chrono::milliseconds(options_.rpc_connect_timeout));
  connect_timer_.async_wait([shared_this, this, first_endpoint](const ::asio::error_code &ec) {
    if (ec)
      ConnectComplete(ec, first_endpoint);
    else
      ConnectComplete(make_error_code(asio::error::host_unreachable), first_endpoint);
  });
}
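
// Completion handler shared by the async connect and the connect timer.
// Stale results (an endpoint we have already given up on, or any state other
// than kConnecting) are ignored; on failure the next endpoint, if any, is
// tried before surfacing a CommsError.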
template <class NextLayer>
void RpcConnectionImpl<NextLayer>::ConnectComplete(const ::asio::error_code &ec, const ::asio::ip::tcp::endpoint &remote) {
  auto shared_this = RpcConnectionImpl<NextLayer>::shared_from_this();
  std::lock_guard<std::mutex> state_lock(connection_state_lock_);
  connect_timer_.cancel();

  LOG_TRACE(kRPC, << "RpcConnectionImpl::ConnectComplete called");

  // Could be an old async connect returning a result after we've moved on
  if (remote != current_endpoint_) {
    LOG_DEBUG(kRPC, << "Got ConnectComplete for " << remote << " but current_endpoint_ is " << current_endpoint_);
    return;
  }
  if (connected_ != kConnecting) {
    LOG_DEBUG(kRPC, << "Got ConnectComplete but current state is " << connected_);
    return;
  }

  Status status = ToStatus(ec);
  if (event_handlers_) {
    event_response event_resp = event_handlers_->call(FS_NN_CONNECT_EVENT, cluster_name_.c_str(), 0);
#ifndef LIBHDFSPP_SIMULATE_ERROR_DISABLED
    if (event_resp.response() == event_response::kTest_Error) {
      status = event_resp.status();
    }
#endif
  }

  if (status.ok()) {
    StartReading();
    SendHandshake([shared_this, this](const Status &s) {
      HandshakeComplete(s);
    });
  } else {
    LOG_DEBUG(kRPC, << "Rpc connection failed; err=" << status.ToString());

    std::string err = SafeDisconnect(get_asio_socket_ptr(&next_layer_));
    if (!err.empty()) {
      LOG_INFO(kRPC, << "Rpc connection failed to connect to endpoint, error closing connection: " << err);
    }

    if (!additional_endpoints_.empty()) {
      // If we have additional endpoints, keep trying until we either run out
      // or hit one that works
      ::asio::ip::tcp::endpoint next_endpoint = additional_endpoints_.front();
      additional_endpoints_.erase(additional_endpoints_.begin());
      current_endpoint_ = next_endpoint;

      next_layer_.async_connect(next_endpoint, [shared_this, this, next_endpoint](const ::asio::error_code &ec) {
        ConnectComplete(ec, next_endpoint);
      });
      connect_timer_.expires_from_now(
          std::chrono::milliseconds(options_.rpc_connect_timeout));
      connect_timer_.async_wait([shared_this, this, next_endpoint](const ::asio::error_code &ec) {
        if (ec)
          ConnectComplete(ec, next_endpoint);
        else
          ConnectComplete(make_error_code(asio::error::host_unreachable), next_endpoint);
      });
    } else {
      CommsError(status);
    }
  }
}
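
// Transitions to kHandshaking and writes the packet built by
// PrepareHandshakePacket(); the asio write result is reported to the handler
// as a Status.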
template <class NextLayer>
void RpcConnectionImpl<NextLayer>::SendHandshake(RpcCallback &handler) {
  assert(lock_held(connection_state_lock_));  // Must be holding lock before calling

  LOG_TRACE(kRPC, << "RpcConnectionImpl::SendHandshake called");

  connected_ = kHandshaking;
  auto shared_this = shared_from_this();

  auto handshake_packet = PrepareHandshakePacket();
  ::asio::async_write(next_layer_, asio::buffer(*handshake_packet),
                      [handshake_packet, handler, shared_this, this](
                          const ::asio::error_code &ec, size_t) {
                        Status status = ToStatus(ec);
                        handler(status);
                      });
}
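
// Writes the connection context packet; structurally identical to
// SendHandshake, but leaves connected_ unchanged.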
template <class NextLayer>
void RpcConnectionImpl<NextLayer>::SendContext(RpcCallback &handler) {
  assert(lock_held(connection_state_lock_));  // Must be holding lock before calling

  LOG_TRACE(kRPC, << "RpcConnectionImpl::SendContext called");

  auto shared_this = shared_from_this();
  auto context_packet = PrepareContextPacket();
  ::asio::async_write(next_layer_, asio::buffer(*context_packet),
                      [context_packet, handler, shared_this, this](
                          const ::asio::error_code &ec, size_t) {
                        Status status = ToStatus(ec);
                        handler(status);
                      });
}
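
// Called when an async_write of a request finishes. Clears the in-flight
// marker (request_over_the_wire_) and, on success, sends the next queued
// request.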
template <class NextLayer>
void RpcConnectionImpl<NextLayer>::OnSendCompleted(const ::asio::error_code &ec,
                                                   size_t) {
  using std::placeholders::_1;
  using std::placeholders::_2;
  std::lock_guard<std::mutex> state_lock(connection_state_lock_);

  LOG_TRACE(kRPC, << "RpcConnectionImpl::OnSendCompleted called");

  request_over_the_wire_.reset();
  if (ec) {
    LOG_WARN(kRPC, << "Network error during RPC write: " << ec.message());
    CommsError(ToStatus(ec));
    return;
  }

  FlushPendingRequests();
}
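
// Sends at most one request at a time; request_over_the_wire_ serves as the
// in-flight marker. The queue drained depends on the connection state:
// auth_requests_ while authenticating, pending_requests_ once connected.
// Each request that goes on the wire is moved to requests_on_fly_ and armed
// with an rpc_timeout timer; requests with an empty payload complete
// immediately without touching the socket.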
template <class NextLayer>
void RpcConnectionImpl<NextLayer>::FlushPendingRequests() {
  using namespace ::std::placeholders;

  // Lock should be held
  assert(lock_held(connection_state_lock_));

  LOG_TRACE(kRPC, << "RpcConnectionImpl::FlushPendingRequests called");

  // Don't send if we don't need to
  if (request_over_the_wire_) {
    return;
  }

  std::shared_ptr<Request> req;
  switch (connected_) {
  case kNotYetConnected:
    return;
  case kConnecting:
    return;
  case kHandshaking:
    return;
  case kAuthenticating:
    if (auth_requests_.empty()) {
      return;
    }
    req = auth_requests_.front();
    auth_requests_.erase(auth_requests_.begin());
    break;
  case kConnected:
    if (pending_requests_.empty()) {
      return;
    }
    req = pending_requests_.front();
    pending_requests_.erase(pending_requests_.begin());
    break;
  case kDisconnected:
    LOG_DEBUG(kRPC, << "RpcConnectionImpl::FlushPendingRequests attempted to flush a " << ToString(connected_) << " connection");
    return;
  default:
    LOG_DEBUG(kRPC, << "RpcConnectionImpl::FlushPendingRequests invalid state: " << ToString(connected_));
    return;
  }

  std::shared_ptr<RpcConnection> shared_this = shared_from_this();
  auto weak_this = std::weak_ptr<RpcConnection>(shared_this);
  auto weak_req = std::weak_ptr<Request>(req);

  std::shared_ptr<std::string> payload = std::make_shared<std::string>();
  req->GetPacket(payload.get());
  if (!payload->empty()) {
    assert(requests_on_fly_.find(req->call_id()) == requests_on_fly_.end());
    requests_on_fly_[req->call_id()] = req;
    request_over_the_wire_ = req;

    req->timer().expires_from_now(
        std::chrono::milliseconds(options_.rpc_timeout));
    req->timer().async_wait([weak_this, weak_req, this](const ::asio::error_code &ec) {
      auto timeout_this = weak_this.lock();
      auto timeout_req = weak_req.lock();
      if (timeout_this && timeout_req)
        this->HandleRpcTimeout(timeout_req, ec);
    });

    asio::async_write(next_layer_, asio::buffer(*payload),
                      [shared_this, this, payload](const ::asio::error_code &ec,
                                                   size_t size) {
                        OnSendCompleted(ec, size);
                      });
  } else {  // Nothing to send for this request, inform the handler immediately
    io_service().post(
        // Never hold locks when calling a callback
        [req]() { req->OnResponseArrived(nullptr, Status::OK()); }
    );

    // Reschedule to flush the next one
    AsyncFlushPendingRequests();
  }
}
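
// Read-side state machine for length-prefixed response frames:
//   kReadLength    -> async_read the 4-byte big-endian frame length
//   kReadContent   -> ntohl the length, then async_read the frame body
//   kParseResponse -> hand the buffer to HandleRpcResponse, then rearm
// The state is advanced *before* each async_read is issued, so every
// OnRecvCompleted invocation handles the data requested by the previous one.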
template <class NextLayer>
void RpcConnectionImpl<NextLayer>::OnRecvCompleted(const ::asio::error_code &original_ec,
                                                   size_t) {
  using std::placeholders::_1;
  using std::placeholders::_2;
  std::lock_guard<std::mutex> state_lock(connection_state_lock_);

  ::asio::error_code my_ec(original_ec);

  LOG_TRACE(kRPC, << "RpcConnectionImpl::OnRecvCompleted called");

  std::shared_ptr<RpcConnection> shared_this = shared_from_this();

  if (event_handlers_) {
    event_response event_resp = event_handlers_->call(FS_NN_READ_EVENT, cluster_name_.c_str(), 0);
#ifndef LIBHDFSPP_SIMULATE_ERROR_DISABLED
    if (event_resp.response() == event_response::kTest_Error) {
      my_ec = std::make_error_code(std::errc::network_down);
    }
#endif
  }

  switch (my_ec.value()) {
  case 0:
    // No errors
    break;
  case asio::error::operation_aborted:
    // The event loop has been shut down. Ignore the error.
    return;
  default:
    LOG_WARN(kRPC, << "Network error during RPC read: " << my_ec.message());
    CommsError(ToStatus(my_ec));
    return;
  }

  if (!current_response_state_) { /* start a new one */
    current_response_state_ = std::make_shared<Response>();
  }

  if (current_response_state_->state_ == Response::kReadLength) {
    current_response_state_->state_ = Response::kReadContent;

    auto buf = ::asio::buffer(reinterpret_cast<char *>(&current_response_state_->length_),
                              sizeof(current_response_state_->length_));
    asio::async_read(next_layer_, buf,
                     [shared_this, this](const ::asio::error_code &ec, size_t size) {
                       OnRecvCompleted(ec, size);
                     });
  } else if (current_response_state_->state_ == Response::kReadContent) {
    current_response_state_->state_ = Response::kParseResponse;

    current_response_state_->length_ = ntohl(current_response_state_->length_);
    current_response_state_->data_.resize(current_response_state_->length_);
    asio::async_read(next_layer_,
                     ::asio::buffer(current_response_state_->data_),
                     [shared_this, this](const ::asio::error_code &ec, size_t size) {
                       OnRecvCompleted(ec, size);
                     });
  } else if (current_response_state_->state_ == Response::kParseResponse) {
    // Check return status from the RPC response. We may have received a msg
    // indicating a server side error.
    Status stat = HandleRpcResponse(current_response_state_);

    if (stat.get_server_exception_type() == Status::kStandbyException) {
      // May need to bail out, connect to new NN, and restart loop
      LOG_INFO(kRPC, << "Communicating with standby NN, attempting to reconnect");
    }

    current_response_state_ = nullptr;
    StartReading();
  }
}
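
// Closes the underlying socket if a connection attempt or an established
// session is active, then transitions to kDisconnected. Callers must hold
// connection_state_lock_.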
template <class NextLayer>
void RpcConnectionImpl<NextLayer>::Disconnect() {
  assert(lock_held(connection_state_lock_));  // Must be holding lock before calling

  LOG_INFO(kRPC, << "RpcConnectionImpl::Disconnect called");

  request_over_the_wire_.reset();
  if (connected_ == kConnecting || connected_ == kHandshaking ||
      connected_ == kAuthenticating || connected_ == kConnected) {
    // Don't print out errors, we were expecting a disconnect here
    SafeDisconnect(get_asio_socket_ptr(&next_layer_));
  }
  connected_ = kDisconnected;
}

}  // namespace hdfs

#endif  // LIB_RPC_RPC_CONNECTION_H_