#!/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from stacks.utils.RMFTestCase import *
from ambari_commons import OSCheck
from mock.mock import MagicMock, patch
from resource_management.core import shell

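# Unit tests for the ZkfcSlave command script (zkfc_slave.py) in the HDFS
# common-services package. Each test replays a recorded command JSON through
# RMFTestCase.executeScript() and asserts the exact sequence of resources the
# script declares.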
class TestZkfc(RMFTestCase):
  COMMON_SERVICES_PACKAGE_DIR = "HDFS/2.1.0.2.0/package"
  STACK_VERSION = "2.0.6"

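  # Start with the default (non-secure) HA configuration: the ZKFC start command
  # should lay down the snappy symlinks, limits.d entry, hdfs-site/core-site
  # configs, slaves file and pid/log directories, clean up a stale pid file, and
  # launch the daemon via hadoop-daemon.sh guarded by a pid-file/pgrep check.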
  def test_start_default(self):
    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/zkfc_slave.py",
        classname = "ZkfcSlave",
        command = "start",
        config_file = "ha_default.json",
        stack_version = self.STACK_VERSION,
        target = RMFTestCase.TARGET_COMMON_SERVICES
    )
    self.assertResourceCalled('Directory', '/usr/lib/hadoop/lib/native/Linux-i386-32',
        create_parents = True,
    )
    self.assertResourceCalled('Directory', '/usr/lib/hadoop/lib/native/Linux-amd64-64',
        create_parents = True,
    )
    self.assertResourceCalled('Link', '/usr/lib/hadoop/lib/native/Linux-i386-32/libsnappy.so',
        to = '/usr/lib/hadoop/lib/libsnappy.so',
    )
    self.assertResourceCalled('Link', '/usr/lib/hadoop/lib/native/Linux-amd64-64/libsnappy.so',
        to = '/usr/lib/hadoop/lib64/libsnappy.so',
    )
    self.assertResourceCalled('Directory', '/etc/security/limits.d',
        owner = 'root',
        group = 'root',
        create_parents = True,
    )
    self.assertResourceCalled('File', '/etc/security/limits.d/hdfs.conf',
        content = Template('hdfs.conf.j2'),
        owner = 'root',
        group = 'root',
        mode = 0644,
    )
    self.assertResourceCalled('XmlConfig', 'hdfs-site.xml',
        owner = 'hdfs',
        group = 'hadoop',
        conf_dir = '/etc/hadoop/conf',
        configurations = self.getConfig()['configurations']['hdfs-site'],
        configuration_attributes = self.getConfig()['configuration_attributes']['hdfs-site']
    )
    self.assertResourceCalled('XmlConfig', 'core-site.xml',
        owner = 'hdfs',
        group = 'hadoop',
        conf_dir = '/etc/hadoop/conf',
        configurations = self.getConfig()['configurations']['core-site'],
        configuration_attributes = self.getConfig()['configuration_attributes']['core-site'],
        mode = 0644
    )
    self.assertResourceCalled('File', '/etc/hadoop/conf/slaves',
        content = Template('slaves.j2'),
        owner = 'hdfs',
    )
    self.assertResourceCalled('Directory', '/var/run/hadoop',
        owner = 'hdfs',
        group = 'hadoop',
        mode = 0755
    )
    self.assertResourceCalled('Directory', '/var/run/hadoop',
        owner = 'hdfs',
        group = 'hadoop',
        mode = 0755,
    )
    self.assertResourceCalled('Directory', '/var/run/hadoop/hdfs',
        owner = 'hdfs',
        create_parents = True,
        group = 'hadoop'
    )
    self.assertResourceCalled('Directory', '/var/log/hadoop/hdfs',
        owner = 'hdfs',
        create_parents = True,
        group = 'hadoop'
    )
    self.assertResourceCalled('File', '/var/run/hadoop/hdfs/hadoop-hdfs-zkfc.pid',
        action = ['delete'],
        not_if = "ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E test -f /var/run/hadoop/hdfs/hadoop-hdfs-zkfc.pid && ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E pgrep -F /var/run/hadoop/hdfs/hadoop-hdfs-zkfc.pid",
    )
    self.assertResourceCalled('Execute', "ambari-sudo.sh su hdfs -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]ulimit -c unlimited ; /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf start zkfc'",
        environment = {'HADOOP_LIBEXEC_DIR': '/usr/lib/hadoop/libexec'},
        not_if = "ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E test -f /var/run/hadoop/hdfs/hadoop-hdfs-zkfc.pid && ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E pgrep -F /var/run/hadoop/hdfs/hadoop-hdfs-zkfc.pid",
    )
    self.assertNoMoreResources()

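  # Stop with the default configuration: hadoop-daemon.sh "stop zkfc" runs only
  # if the pid file exists and the process is alive, then the pid file is deleted.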
  def test_stop_default(self):
    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/zkfc_slave.py",
        classname = "ZkfcSlave",
        command = "stop",
        config_file = "ha_default.json",
        stack_version = self.STACK_VERSION,
        target = RMFTestCase.TARGET_COMMON_SERVICES
    )
    self.assertResourceCalled('Execute', "ambari-sudo.sh su hdfs -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]ulimit -c unlimited ; /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf stop zkfc'",
        environment = {'HADOOP_LIBEXEC_DIR': '/usr/lib/hadoop/libexec'},
        only_if = "ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E test -f /var/run/hadoop/hdfs/hadoop-hdfs-zkfc.pid && ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E pgrep -F /var/run/hadoop/hdfs/hadoop-hdfs-zkfc.pid")
    self.assertResourceCalled('File', '/var/run/hadoop/hdfs/hadoop-hdfs-zkfc.pid', action = ['delete'])
    self.assertNoMoreResources()

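  # The same start sequence, driven by the secured (ha_secured.json) configuration.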
  def test_start_secured(self):
    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/zkfc_slave.py",
        classname = "ZkfcSlave",
        command = "start",
        config_file = "ha_secured.json",
        stack_version = self.STACK_VERSION,
        target = RMFTestCase.TARGET_COMMON_SERVICES
    )
    self.assertResourceCalled('Directory', '/usr/lib/hadoop/lib/native/Linux-i386-32',
        create_parents = True,
    )
    self.assertResourceCalled('Directory', '/usr/lib/hadoop/lib/native/Linux-amd64-64',
        create_parents = True,
    )
    self.assertResourceCalled('Link', '/usr/lib/hadoop/lib/native/Linux-i386-32/libsnappy.so',
        to = '/usr/lib/hadoop/lib/libsnappy.so',
    )
    self.assertResourceCalled('Link', '/usr/lib/hadoop/lib/native/Linux-amd64-64/libsnappy.so',
        to = '/usr/lib/hadoop/lib64/libsnappy.so',
    )
    self.assertResourceCalled('Directory', '/etc/security/limits.d',
        owner = 'root',
        group = 'root',
        create_parents = True,
    )
    self.assertResourceCalled('File', '/etc/security/limits.d/hdfs.conf',
        content = Template('hdfs.conf.j2'),
        owner = 'root',
        group = 'root',
        mode = 0644,
    )
    self.assertResourceCalled('XmlConfig', 'hdfs-site.xml',
        owner = 'hdfs',
        group = 'hadoop',
        conf_dir = '/etc/hadoop/conf',
        configurations = self.getConfig()['configurations']['hdfs-site'],
        configuration_attributes = self.getConfig()['configuration_attributes']['hdfs-site']
    )
    self.assertResourceCalled('XmlConfig', 'core-site.xml',
        owner = 'hdfs',
        group = 'hadoop',
        conf_dir = '/etc/hadoop/conf',
        configurations = self.getConfig()['configurations']['core-site'],
        configuration_attributes = self.getConfig()['configuration_attributes']['core-site'],
        mode = 0644
    )
    self.assertResourceCalled('File', '/etc/hadoop/conf/slaves',
        content = Template('slaves.j2'),
        owner = 'root',
    )
    self.assertResourceCalled('Directory', '/var/run/hadoop',
        owner = 'hdfs',
        group = 'hadoop',
        mode = 0755
    )
    self.assertResourceCalled('Directory', '/var/run/hadoop',
        owner = 'hdfs',
        group = 'hadoop',
        mode = 0755,
    )
    self.assertResourceCalled('Directory', '/var/run/hadoop/hdfs',
        owner = 'hdfs',
        create_parents = True,
        group = 'hadoop'
    )
    self.assertResourceCalled('Directory', '/var/log/hadoop/hdfs',
        owner = 'hdfs',
        create_parents = True,
        group = 'hadoop'
    )
    self.assertResourceCalled('File', '/var/run/hadoop/hdfs/hadoop-hdfs-zkfc.pid',
        action = ['delete'],
        not_if = "ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E test -f /var/run/hadoop/hdfs/hadoop-hdfs-zkfc.pid && ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E pgrep -F /var/run/hadoop/hdfs/hadoop-hdfs-zkfc.pid",
    )
    self.assertResourceCalled('Execute', "ambari-sudo.sh su hdfs -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]ulimit -c unlimited ; /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf start zkfc'",
        environment = {'HADOOP_LIBEXEC_DIR': '/usr/lib/hadoop/libexec'},
        not_if = "ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E test -f /var/run/hadoop/hdfs/hadoop-hdfs-zkfc.pid && ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E pgrep -F /var/run/hadoop/hdfs/hadoop-hdfs-zkfc.pid",
    )
    self.assertNoMoreResources()

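  # Stop under the secured configuration mirrors the default stop sequence.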
  def test_stop_secured(self):
    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/zkfc_slave.py",
        classname = "ZkfcSlave",
        command = "stop",
        config_file = "ha_secured.json",
        stack_version = self.STACK_VERSION,
        target = RMFTestCase.TARGET_COMMON_SERVICES
    )
    self.assertResourceCalled('Execute', "ambari-sudo.sh su hdfs -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]ulimit -c unlimited ; /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf stop zkfc'",
        environment = {'HADOOP_LIBEXEC_DIR': '/usr/lib/hadoop/libexec'},
        only_if = "ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E test -f /var/run/hadoop/hdfs/hadoop-hdfs-zkfc.pid && ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E pgrep -F /var/run/hadoop/hdfs/hadoop-hdfs-zkfc.pid")
    self.assertResourceCalled('File', '/var/run/hadoop/hdfs/hadoop-hdfs-zkfc.pid', action = ['delete'])
    self.assertNoMoreResources()

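  # Start using a configuration (ha_bootstrap_active_node.json) in which the HA
  # state is being bootstrapped on the active NameNode side.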
  def test_start_with_ha_active_namenode_bootstrap(self):
    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/zkfc_slave.py",
        classname = "ZkfcSlave",
        command = "start",
        config_file = "ha_bootstrap_active_node.json",
        stack_version = self.STACK_VERSION,
        target = RMFTestCase.TARGET_COMMON_SERVICES
    )
    self.assertResourceCalled('Directory', '/usr/lib/hadoop/lib/native/Linux-i386-32',
        create_parents = True,
    )
    self.assertResourceCalled('Directory', '/usr/lib/hadoop/lib/native/Linux-amd64-64',
        create_parents = True,
    )
    self.assertResourceCalled('Link', '/usr/lib/hadoop/lib/native/Linux-i386-32/libsnappy.so',
        to = '/usr/lib/hadoop/lib/libsnappy.so',
    )
    self.assertResourceCalled('Link', '/usr/lib/hadoop/lib/native/Linux-amd64-64/libsnappy.so',
        to = '/usr/lib/hadoop/lib64/libsnappy.so',
    )
    self.assertResourceCalled('Directory', '/etc/security/limits.d',
        owner = 'root',
        group = 'root',
        create_parents = True,
    )
    self.assertResourceCalled('File', '/etc/security/limits.d/hdfs.conf',
        content = Template('hdfs.conf.j2'),
        owner = 'root',
        group = 'root',
        mode = 0644,
    )
    self.assertResourceCalled('XmlConfig', 'hdfs-site.xml',
        owner = 'hdfs',
        group = 'hadoop',
        conf_dir = '/etc/hadoop/conf',
        configurations = self.getConfig()['configurations']['hdfs-site'],
        configuration_attributes = self.getConfig()['configuration_attributes']['hdfs-site']
    )
    self.assertResourceCalled('XmlConfig', 'core-site.xml',
        owner = 'hdfs',
        group = 'hadoop',
        conf_dir = '/etc/hadoop/conf',
        configurations = self.getConfig()['configurations']['core-site'],
        configuration_attributes = self.getConfig()['configuration_attributes']['core-site'],
        mode = 0644
    )
    self.assertResourceCalled('File', '/etc/hadoop/conf/slaves',
        content = Template('slaves.j2'),
        owner = 'hdfs',
    )
    self.assertResourceCalled('Directory', '/var/run/hadoop',
        owner = 'hdfs',
        group = 'hadoop',
        mode = 0755
    )
    # TODO: verify that the znode initialization occurs prior to ZKFC startup
    self.assertResourceCalled('Directory', '/var/run/hadoop',
        owner = 'hdfs',
        group = 'hadoop',
        mode = 0755,
    )
    self.assertResourceCalled('Directory', '/var/run/hadoop/hdfs',
        owner = 'hdfs',
        create_parents = True,
        group = 'hadoop'
    )
    self.assertResourceCalled('Directory', '/var/log/hadoop/hdfs',
        owner = 'hdfs',
        create_parents = True,
        group = 'hadoop'
    )
    self.assertResourceCalled('File', '/var/run/hadoop/hdfs/hadoop-hdfs-zkfc.pid',
        action = ['delete'],
        not_if = "ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E test -f /var/run/hadoop/hdfs/hadoop-hdfs-zkfc.pid && ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E pgrep -F /var/run/hadoop/hdfs/hadoop-hdfs-zkfc.pid",
    )
    self.assertResourceCalled('Execute', "ambari-sudo.sh su hdfs -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]ulimit -c unlimited ; /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf start zkfc'",
        environment = {'HADOOP_LIBEXEC_DIR': '/usr/lib/hadoop/libexec'},
        not_if = "ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E test -f /var/run/hadoop/hdfs/hadoop-hdfs-zkfc.pid && ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E pgrep -F /var/run/hadoop/hdfs/hadoop-hdfs-zkfc.pid",
    )
    self.assertNoMoreResources()

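  # The same bootstrap scenario from the standby NameNode side
  # (ha_bootstrap_standby_node.json).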
  def test_start_with_ha_standby_namenode_bootstrap(self):
    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/zkfc_slave.py",
        classname = "ZkfcSlave",
        command = "start",
        config_file = "ha_bootstrap_standby_node.json",
        stack_version = self.STACK_VERSION,
        target = RMFTestCase.TARGET_COMMON_SERVICES
    )
    self.assertResourceCalled('Directory', '/usr/lib/hadoop/lib/native/Linux-i386-32',
        create_parents = True,
    )
    self.assertResourceCalled('Directory', '/usr/lib/hadoop/lib/native/Linux-amd64-64',
        create_parents = True,
    )
    self.assertResourceCalled('Link', '/usr/lib/hadoop/lib/native/Linux-i386-32/libsnappy.so',
        to = '/usr/lib/hadoop/lib/libsnappy.so',
    )
    self.assertResourceCalled('Link', '/usr/lib/hadoop/lib/native/Linux-amd64-64/libsnappy.so',
        to = '/usr/lib/hadoop/lib64/libsnappy.so',
    )
    self.assertResourceCalled('Directory', '/etc/security/limits.d',
        owner = 'root',
        group = 'root',
        create_parents = True,
    )
    self.assertResourceCalled('File', '/etc/security/limits.d/hdfs.conf',
        content = Template('hdfs.conf.j2'),
        owner = 'root',
        group = 'root',
        mode = 0644,
    )
    self.assertResourceCalled('XmlConfig', 'hdfs-site.xml',
        owner = 'hdfs',
        group = 'hadoop',
        conf_dir = '/etc/hadoop/conf',
        configurations = self.getConfig()['configurations']['hdfs-site'],
        configuration_attributes = self.getConfig()['configuration_attributes']['hdfs-site']
    )
    self.assertResourceCalled('XmlConfig', 'core-site.xml',
        owner = 'hdfs',
        group = 'hadoop',
        conf_dir = '/etc/hadoop/conf',
        configurations = self.getConfig()['configurations']['core-site'],
        configuration_attributes = self.getConfig()['configuration_attributes']['core-site'],
        mode = 0644
    )
    self.assertResourceCalled('File', '/etc/hadoop/conf/slaves',
        content = Template('slaves.j2'),
        owner = 'hdfs',
    )
    self.assertResourceCalled('Directory', '/var/run/hadoop',
        owner = 'hdfs',
        group = 'hadoop',
        mode = 0755
    )
    # TODO: verify that the znode initialization occurs prior to ZKFC startup
    self.assertResourceCalled('Directory', '/var/run/hadoop',
        owner = 'hdfs',
        group = 'hadoop',
        mode = 0755,
    )
    self.assertResourceCalled('Directory', '/var/run/hadoop/hdfs',
        owner = 'hdfs',
        create_parents = True,
        group = 'hadoop'
    )
    self.assertResourceCalled('Directory', '/var/log/hadoop/hdfs',
        owner = 'hdfs',
        group = 'hadoop',
        create_parents = True,
    )
    self.assertResourceCalled('File', '/var/run/hadoop/hdfs/hadoop-hdfs-zkfc.pid',
        action = ['delete'],
        not_if = "ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E test -f /var/run/hadoop/hdfs/hadoop-hdfs-zkfc.pid && ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E pgrep -F /var/run/hadoop/hdfs/hadoop-hdfs-zkfc.pid",
    )
    self.assertResourceCalled('Execute', "ambari-sudo.sh su hdfs -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]ulimit -c unlimited ; /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf start zkfc'",
        environment = {'HADOOP_LIBEXEC_DIR': '/usr/lib/hadoop/libexec'},
        not_if = "ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E test -f /var/run/hadoop/hdfs/hadoop-hdfs-zkfc.pid && ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E pgrep -F /var/run/hadoop/hdfs/hadoop-hdfs-zkfc.pid",
    )
    self.assertNoMoreResources()

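  # Illustrative sketch only, not part of the original suite (no "test_" prefix,
  # so unittest will not collect it): shows how the imported patch/MagicMock and
  # resource_management.core.shell could be used to stub out shell.call while
  # replaying a start command. The stubbed return value (0, "") is an assumption
  # made for illustration, not behavior asserted by the original tests.
  @patch.object(shell, "call", new = MagicMock(return_value = (0, "")))
  def _example_start_with_stubbed_shell_call(self):
    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/zkfc_slave.py",
        classname = "ZkfcSlave",
        command = "start",
        config_file = "ha_default.json",
        stack_version = self.STACK_VERSION,
        target = RMFTestCase.TARGET_COMMON_SERVICES
    )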