rpc_engine.cc

/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
  18. #include "rpc_engine.h"
  19. #include "rpc_connection_impl.h"
  20. #include "common/util.h"
  21. #include "common/logging.h"
  22. #include "common/namenode_info.h"
  23. #include "common/optional_wrapper.h"
  24. #include <algorithm>
  25. #include <memory>
  26. #include <string>
  27. #include <boost/date_time/posix_time/posix_time_duration.hpp>
  28. #include <openssl/rand.h>
  29. #include <openssl/err.h>
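
// RpcEngine owns the single active RpcConnection to the Namenode and drives
// reconnects, retries, and (when configured) HA failover on its behalf.  A
// rough usage sketch follows; the method name, protobuf messages, and
// callbacks are illustrative placeholders, not definitions from this file:
//
//   auto engine = std::make_shared<RpcEngine>(io_service, options, client_name,
//                                             user_name, protocol_name, version);
//   engine->Connect(cluster_name, resolved_namenodes, connect_handler);
//   engine->AsyncRpc("getFileInfo", &request_proto, response_proto,
//                    [](const Status &s) { /* check s.ok() */ });
//   engine->Shutdown();
//
// Note that RpcEngine must be owned by a std::shared_ptr, since NewConnection()
// below calls shared_from_this().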
namespace hdfs {

template <class T>
using optional = std::experimental::optional<T>;

RpcEngine::RpcEngine(std::shared_ptr<IoService> io_service, const Options &options,
                     const std::shared_ptr<std::string> &client_name, const std::string &user_name,
                     const char *protocol_name, int protocol_version)
    : io_service_(io_service),
      options_(options),
      client_name_(client_name),
      client_id_(getRandomClientId()),
      protocol_name_(protocol_name),
      protocol_version_(protocol_version),
      call_id_(0),
      retry_timer(io_service->GetRaw()),
      event_handlers_(std::make_shared<LibhdfsEvents>()),
      connect_canceled_(false)
{
  LOG_DEBUG(kRPC, << "RpcEngine::RpcEngine called");

  auth_info_.setUser(user_name);
  if (options.authentication == Options::kKerberos) {
    auth_info_.setMethod(AuthInfo::kKerberos);
  }
}

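// Establishes the initial connection to the first Namenode in `servers`.
// If the configuration is HA, an HANamenodeTracker is kept so that
// RpcCommsError() can later fail over to the alternate Namenode; the retry
// policy is only built after that determination because HA clusters get a
// failover-aware policy.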
void RpcEngine::Connect(const std::string &cluster_name,
                        const std::vector<ResolvedNamenodeInfo> servers,
                        RpcCallback &handler) {
  std::lock_guard<std::mutex> state_lock(engine_state_lock_);
  LOG_DEBUG(kRPC, << "RpcEngine::Connect called");

  last_endpoints_ = servers[0].endpoints;
  cluster_name_ = cluster_name;
  LOG_TRACE(kRPC, << "Got cluster name \"" << cluster_name << "\" in RpcEngine::Connect");

  ha_persisted_info_.reset(new HANamenodeTracker(servers, io_service_, event_handlers_));
  if (!ha_persisted_info_->is_enabled()) {
    ha_persisted_info_.reset();
  }

  // Construct the retry policy after we determine whether the config is HA
  retry_policy_ = MakeRetryPolicy(options_);

  conn_ = InitializeConnection();
  conn_->Connect(last_endpoints_, auth_info_, handler);
}

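// Marks the pending connect as canceled.  Returns true on the first call and
// false on repeat calls; once the flag is set, AsyncRpc() and RpcCommsError()
// fail outstanding work with a canceled status.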
bool RpcEngine::CancelPendingConnect() {
  if (connect_canceled_) {
    LOG_DEBUG(kRPC, << "RpcEngine@" << this << "::CancelPendingConnect called more than once");
    return false;
  }
  connect_canceled_ = true;
  return true;
}

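// Drops the current connection.  The reset is posted to the IoService so it
// runs on a worker thread while holding engine_state_lock_, rather than on
// the caller's thread.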
void RpcEngine::Shutdown() {
  LOG_DEBUG(kRPC, << "RpcEngine::Shutdown called");
  io_service_->PostLambda([this]() {
    std::lock_guard<std::mutex> state_lock(engine_state_lock_);
    conn_.reset();
  });
}

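// Builds the retry policy from the supplied Options: HA clusters get a
// failover-aware fixed-delay policy, non-HA clusters get a plain fixed-delay
// policy when retries are enabled, and otherwise no policy (nullptr).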
std::unique_ptr<const RetryPolicy> RpcEngine::MakeRetryPolicy(const Options &options) {
  LOG_DEBUG(kRPC, << "RpcEngine::MakeRetryPolicy called");

  if (ha_persisted_info_) {
    LOG_INFO(kRPC, << "Cluster is HA configured so policy will default to HA until a knob is implemented");
    return std::unique_ptr<RetryPolicy>(new FixedDelayWithFailover(options.rpc_retry_delay_ms,
                                                                   options.max_rpc_retries,
                                                                   options.failover_max_retries,
                                                                   options.failover_connection_max_retries));
  } else if (options.max_rpc_retries > 0) {
    return std::unique_ptr<RetryPolicy>(new FixedDelayRetryPolicy(options.rpc_retry_delay_ms,
                                                                  options.max_rpc_retries));
  } else {
    return nullptr;
  }
}

std::unique_ptr<std::string> RpcEngine::getRandomClientId() {
  /**
   * The server expects a 16-byte UUID:
   * https://github.com/c9n/hadoop/blob/master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ClientId.java
   *
   * This function generates a 16-byte UUID (version 4):
   * https://en.wikipedia.org/wiki/Universally_unique_identifier#Version_4_.28random.29
   **/
  std::vector<unsigned char> buf(16);
  if (RAND_bytes(&buf[0], static_cast<int>(buf.size())) != 1) {
    const auto *error = ERR_reason_error_string(ERR_get_error());
    LOG_ERROR(kRPC, << "Unable to generate random client ID, err : " << error);
    return nullptr;
  }

  // Set the version field to 4: clear the high four bits of byte 6, then set them to 0100
  buf[6] = (buf[6] & 0x0f) | 0x40;
  // Set the RFC 4122 variant: clear bit 6 of byte 8 and set bit 7 (10xxxxxx)
  buf[8] = (buf[8] & 0xbf) | 0x80;

  return std::unique_ptr<std::string>(
      new std::string(reinterpret_cast<const char *>(&buf[0]), buf.size()));
}

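// The TEST_* methods below are hooks that let unit tests inject a mock
// connection or a custom retry policy.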
void RpcEngine::TEST_SetRpcConnection(std::shared_ptr<RpcConnection> conn) {
  conn_ = conn;
  retry_policy_ = MakeRetryPolicy(options_);
}

void RpcEngine::TEST_SetRetryPolicy(std::unique_ptr<const RetryPolicy> policy) {
  retry_policy_ = std::move(policy);
}

std::unique_ptr<const RetryPolicy> RpcEngine::TEST_GenerateRetryPolicyUsingOptions() {
  return MakeRetryPolicy(options_);
}

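// Issues a single RPC on the current connection, lazily creating and
// connecting a new one if none exists (e.g. after Shutdown() or after a
// comms error dropped it).  If the connect was canceled, the handler is
// invoked asynchronously with Status::Canceled() instead.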
void RpcEngine::AsyncRpc(
    const std::string &method_name, const ::google::protobuf::MessageLite *req,
    const std::shared_ptr<::google::protobuf::MessageLite> &resp,
    const std::function<void(const Status &)> &handler) {
  std::lock_guard<std::mutex> state_lock(engine_state_lock_);
  LOG_TRACE(kRPC, << "RpcEngine::AsyncRpc called");

  // In case user-side code isn't checking the status of Connect before doing RPC
  if (connect_canceled_) {
    io_service_->PostLambda(
        [handler]() { handler(Status::Canceled()); });
    return;
  }

  if (!conn_) {
    conn_ = InitializeConnection();
    conn_->ConnectAndFlush(last_endpoints_);
  }
  conn_->AsyncRpc(method_name, req, resp, handler);
}

std::shared_ptr<RpcConnection> RpcEngine::NewConnection()
{
  LOG_DEBUG(kRPC, << "RpcEngine::NewConnection called");
  return std::make_shared<RpcConnectionImpl<boost::asio::ip::tcp::socket>>(shared_from_this());
}

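// Wraps NewConnection() and stamps the fresh connection with the engine's
// event handlers, cluster name, and auth info before it is used.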
std::shared_ptr<RpcConnection> RpcEngine::InitializeConnection()
{
  std::shared_ptr<RpcConnection> newConn = NewConnection();
  newConn->SetEventHandlers(event_handlers_);
  newConn->SetClusterName(cluster_name_);
  newConn->SetAuthInfo(auth_info_);
  return newConn;
}

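// Error path: a failing RpcConnection reports through AsyncRpcCommsError(),
// which just re-posts to the IoService so recovery never runs on the
// connection's own call stack.  RpcCommsError() then applies the retry policy
// to each pending request, fails the hopeless ones, and re-sends the rest on
// a fresh connection, failing over to the alternate Namenode when HA info is
// available.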
void RpcEngine::AsyncRpcCommsError(
    const Status &status,
    std::shared_ptr<RpcConnection> failedConnection,
    std::vector<std::shared_ptr<Request>> pendingRequests) {
  LOG_ERROR(kRPC, << "RpcEngine::AsyncRpcCommsError called; status=\"" << status.ToString()
                  << "\" conn=" << failedConnection.get()
                  << " reqs=" << std::to_string(pendingRequests.size()));

  io_service_->PostLambda([this, status, failedConnection, pendingRequests]() {
    RpcCommsError(status, failedConnection, pendingRequests);
  });
}

void RpcEngine::RpcCommsError(
    const Status &status,
    std::shared_ptr<RpcConnection> failedConnection,
    std::vector<std::shared_ptr<Request>> pendingRequests) {
  LOG_WARN(kRPC, << "RpcEngine::RpcCommsError called; status=\"" << status.ToString()
                 << "\" conn=" << failedConnection.get()
                 << " reqs=" << std::to_string(pendingRequests.size()));

  std::lock_guard<std::mutex> state_lock(engine_state_lock_);

  // If the failed connection is the current one, shut it down.
  // It will be reconnected when there is work to do.
  if (failedConnection == conn_) {
    LOG_INFO(kRPC, << "Disconnecting from failed RpcConnection");
    conn_.reset();
  }

  optional<RetryAction> head_action = optional<RetryAction>();

  if (event_handlers_) {
    event_handlers_->call(FS_NN_PRE_RPC_RETRY_EVENT, "RpcCommsError",
                          reinterpret_cast<int64_t>(this));
  }

  // Filter out anything that has already used up its retries
  for (auto it = pendingRequests.begin(); it < pendingRequests.end();) {
    auto req = *it;

    LOG_DEBUG(kRPC, << req->GetDebugString());

    RetryAction retry = RetryAction::fail(""); // Default to fail
    if (connect_canceled_) {
      retry = RetryAction::fail("Operation canceled");
    } else if (status.notWorthRetry()) {
      retry = RetryAction::fail(status.ToString().c_str());
    } else if (retry_policy()) {
      retry = retry_policy()->ShouldRetry(status, req->IncrementRetryCount(),
                                          req->get_failover_count(), true);
    }

    if (retry.action == RetryAction::FAIL) {
      // If we've exceeded the maximum retries, take the latest error and pass it
      // on.  There might be a good argument for caching the first error rather
      // than the last one, but that gets messy.
      io_service()->PostLambda([req, status]() {
        req->OnResponseArrived(nullptr, status); // Never call back while holding a lock
      });
      it = pendingRequests.erase(it);
    } else {
      if (!head_action) {
        head_action = retry;
      }
      ++it;
    }
  }

  // If we have requests that need to be re-sent, ensure that we have a connection
  // and send the requests to it
  bool haveRequests = !pendingRequests.empty() &&
                      head_action && head_action->action != RetryAction::FAIL;

  if (haveRequests) {
    LOG_TRACE(kRPC, << "Have " << std::to_string(pendingRequests.size()) << " requests to resend");
    bool needNewConnection = !conn_;
    if (needNewConnection) {
      LOG_DEBUG(kRPC, << "Creating a new NN connection");

      // If HA is enabled and we have valid HA info then fail over to the standby (hopefully now active)
      if (head_action->action == RetryAction::FAILOVER_AND_RETRY && ha_persisted_info_) {
        for (unsigned int i = 0; i < pendingRequests.size(); i++) {
          pendingRequests[i]->IncrementFailoverCount();
        }

        ResolvedNamenodeInfo new_active_nn_info;
        bool failoverInfoFound = ha_persisted_info_->GetFailoverAndUpdate(last_endpoints_,
                                                                          new_active_nn_info);
        if (!failoverInfoFound) {
          // This shouldn't be a common case: the set of endpoints was empty, most likely
          // due to DNS issues.  Another possibility is that a network device was added or
          // removed because a VM started or stopped.
          LOG_ERROR(kRPC, << "Failed to find endpoints for the alternate namenode. "
                          << "Make sure Namenode hostnames can be found with a DNS lookup.");

          // Kill all pending RPC requests since there's nowhere for them to go
          Status badEndpointStatus = Status::Error("No endpoints found for namenode");

          for (unsigned int i = 0; i < pendingRequests.size(); i++) {
            std::shared_ptr<Request> sharedCurrentRequest = pendingRequests[i];
            io_service()->PostLambda([sharedCurrentRequest, badEndpointStatus]() {
              sharedCurrentRequest->OnResponseArrived(nullptr, badEndpointStatus); // Never call back while holding a lock
            });
          }

          // Clear the request vector.  This isn't a recoverable error.
          pendingRequests.clear();
        }

        if (ha_persisted_info_->is_resolved()) {
          LOG_INFO(kRPC, << "Going to try connecting to alternate Namenode: "
                         << new_active_nn_info.uri.str());
          last_endpoints_ = new_active_nn_info.endpoints;
        } else {
          LOG_WARN(kRPC, << "It looks like HA is turned on, but unable to fail over. has info="
                         << ha_persisted_info_->is_enabled()
                         << " resolved=" << ha_persisted_info_->is_resolved());
        }
      }

      conn_ = InitializeConnection();
      conn_->PreEnqueueRequests(pendingRequests);

      if (head_action->delayMillis > 0) {
        auto weak_conn = std::weak_ptr<RpcConnection>(conn_);
        retry_timer.expires_from_now(
            boost::posix_time::milliseconds(head_action->delayMillis));
        retry_timer.async_wait([this, weak_conn](boost::system::error_code ec) {
          auto strong_conn = weak_conn.lock();
          if ((!ec) && (strong_conn)) {
            strong_conn->ConnectAndFlush(last_endpoints_);
          }
        });
      } else {
        conn_->ConnectAndFlush(last_endpoints_);
      }
    } else {
      // We have an existing connection (which might already be closed; we won't know
      // until we hold the connection's lock) and should just add the new requests
      conn_->AsyncRpc(pendingRequests);
    }
  }
}

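// Registers a filesystem-level event callback on the shared LibhdfsEvents
// bundle; connections created by InitializeConnection() see it via
// SetEventHandlers().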
void RpcEngine::SetFsEventCallback(fs_event_callback callback) {
  event_handlers_->set_fs_callback(callback);
}

} // namespace hdfs