determine-flaky-tests-hadoop.py

#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Given a Jenkins test job, this script examines all runs of the job done
# within a specified period of time (a number of days prior to the execution
# time of this script), and reports all failed tests.
#
# The output of this script includes a section for each run that has failed
# tests, with each failed test name listed.
#
# More importantly, at the end it outputs a summary section listing all failed
# tests across all examined runs, indicating in how many runs each test failed,
# and sorting the failed tests by that count.
#
# This way, when we see failed tests in a PreCommit build, we can quickly tell
# whether a failed test is a new failure, or whether it has failed before and
# how often, and so get an idea of whether it may just be a flaky test.
#
# Of course, to be 100% sure about the reason for a test failure, a closer look
# at the failed test in the specific run is necessary.
#
import sys
import platform
sysversion = sys.hexversion
onward30 = False
if sysversion < 0x020600F0:
  sys.exit("Minimum supported python version is 2.6, the current version is " +
      "Python" + platform.python_version())

if sysversion == 0x030000F0:
  sys.exit("There is a known bug with Python" + platform.python_version() +
      ", please try a different version")

if sysversion < 0x03000000:
  import urllib2
else:
  onward30 = True
  import urllib.request

import datetime
import json as simplejson
import logging
from optparse import OptionParser
import time

# Configuration
DEFAULT_JENKINS_URL = "https://builds.apache.org"
DEFAULT_JOB_NAME = "Hadoop-Common-trunk"
DEFAULT_NUM_PREVIOUS_DAYS = 14

SECONDS_PER_DAY = 86400

# total number of runs to examine
numRunsToExamine = 0

def parse_args():
  """ Parse arguments """
  parser = OptionParser()
  parser.add_option("-J", "--jenkins-url", type="string",
                    dest="jenkins_url", help="Jenkins URL",
                    default=DEFAULT_JENKINS_URL)
  parser.add_option("-j", "--job-name", type="string",
                    dest="job_name", help="Job name to look at",
                    default=DEFAULT_JOB_NAME)
  parser.add_option("-n", "--num-days", type="int",
                    dest="num_prev_days", help="Number of days to examine",
                    default=DEFAULT_NUM_PREVIOUS_DAYS)

  (options, args) = parser.parse_args()
  if args:
    parser.error("unexpected arguments: " + repr(args))
  return options

def load_url_data(url):
  """ Load data from specified url """
  if onward30:
    ourl = urllib.request.urlopen(url)
    codec = ourl.info().get_param('charset')
    content = ourl.read().decode(codec)
    data = simplejson.loads(content, strict=False)
  else:
    ourl = urllib2.urlopen(url)
    data = simplejson.load(ourl, strict=False)
  return data

def list_builds(jenkins_url, job_name):
  """ List all builds of the target project. """
  url = "%(jenkins)s/job/%(job_name)s/api/json?tree=builds[url,result,timestamp]" % dict(
      jenkins=jenkins_url,
      job_name=job_name)

  try:
    data = load_url_data(url)
  except:
    logging.error("Could not fetch: %s" % url)
    raise
  return data['builds']

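# For reference, list_builds() above returns the 'builds' array of a JSON
# document shaped roughly as follows (values are illustrative; the timestamp
# is in milliseconds since the epoch, which is why it is divided by 1000
# further below):
#
#   {"builds": [{"result": "UNSTABLE",
#                "timestamp": 1400000000000,
#                "url": "https://builds.apache.org/job/Hadoop-Common-trunk/1234/"},
#               ...]}
#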
def find_failing_tests(testReportApiJson, jobConsoleOutput):
  """ Find the names of any tests which failed in the given build output URL. """
  ret = set()
  try:
    data = load_url_data(testReportApiJson)
  except:
    logging.error(" Could not open testReport, check " +
        jobConsoleOutput + " for why it was reported failed")
    return ret

  for suite in data['suites']:
    for cs in suite['cases']:
      status = cs['status']
      errDetails = cs['errorDetails']
      if (status == 'REGRESSION' or status == 'FAILED' or (errDetails is not None)):
        ret.add(cs['className'] + "." + cs['name'])

  if len(ret) == 0:
    logging.info(" No failed tests in testReport, check " +
        jobConsoleOutput + " for why it was reported failed.")
  return ret

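# For reference, the testReport JSON walked by find_failing_tests() above has
# roughly this shape (field values are illustrative):
#
#   {"suites": [{"cases": [{"className": "org.example.FooTest",
#                           "name": "testBar",
#                           "status": "REGRESSION",
#                           "errorDetails": "expected:<1> but was:<2>"},
#                          ...]},
#               ...]}
#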
def find_flaky_tests(jenkins_url, job_name, num_prev_days):
  """ Iterate runs of the specified job within num_prev_days and collect results """
  global numRunsToExamine
  all_failing = dict()
  # First list all builds
  builds = list_builds(jenkins_url, job_name)

  # Select only those in the last N days
  min_time = int(time.time()) - SECONDS_PER_DAY * num_prev_days
  builds = [b for b in builds if (int(b['timestamp']) / 1000) > min_time]

  # Keep only those that failed
  failing_build_urls = [(b['url'], b['timestamp']) for b in builds
      if (b['result'] in ('UNSTABLE', 'FAILURE'))]

  tnum = len(builds)
  num = len(failing_build_urls)
  numRunsToExamine = tnum
  # The tuple index below selects "." when num == 0 and the longer suffix otherwise.
  logging.info(" THERE ARE " + str(num) + " builds (out of " + str(tnum)
      + ") that have failed tests in the past " + str(num_prev_days) + " days"
      + ((".", ", as listed below:\n")[num > 0]))

  for failed_build_with_time in failing_build_urls:
    failed_build = failed_build_with_time[0]
    jobConsoleOutput = failed_build + "Console"
    testReport = failed_build + "testReport"
    testReportApiJson = testReport + "/api/json"

    ts = float(failed_build_with_time[1]) / 1000.
    st = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')
    logging.info("===>%s" % str(testReport) + " (" + st + ")")
    failing = find_failing_tests(testReportApiJson, jobConsoleOutput)
    if failing:
      for ftest in failing:
        logging.info(" Failed test: %s" % ftest)
        all_failing[ftest] = all_failing.get(ftest, 0) + 1

  return all_failing

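# find_flaky_tests() returns a dict mapping each failed test's full name to the
# number of examined runs in which it failed; main() below sorts and prints it.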
def main():
  global numRunsToExamine
  logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.INFO)

  # set up logger to write to stdout
  soh = logging.StreamHandler(sys.stdout)
  soh.setLevel(logging.INFO)
  logger = logging.getLogger()
  logger.removeHandler(logger.handlers[0])
  logger.addHandler(soh)

  opts = parse_args()
  logging.info("****Recently FAILED builds in url: " + opts.jenkins_url
      + "/job/" + opts.job_name + "")

  all_failing = find_flaky_tests(opts.jenkins_url, opts.job_name,
      opts.num_prev_days)

  if len(all_failing) == 0:
    raise SystemExit(0)

  logging.info("\nAmong " + str(numRunsToExamine) + " runs examined, all failed "
      + "tests <#failedRuns: testName>:")

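  # Each summary line has the form "<#failedRuns>: <testName>", e.g.
  # (the test name here is purely illustrative):
  #   3: org.apache.hadoop.example.TestExample.testSomething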
  # print summary section: all failed tests sorted by how many times they failed
  for tn in sorted(all_failing, key=all_failing.get, reverse=True):
    logging.info(" " + str(all_failing[tn]) + ": " + tn)

if __name__ == "__main__":
  main()