skipping.py
  1. """ support for skip/xfail functions and markers. """
  2. import os
  3. import sys
  4. import traceback
  5. import py
  6. import pytest
  7. from _pytest.mark import MarkInfo, MarkDecorator


def pytest_addoption(parser):
    group = parser.getgroup("general")
    group.addoption('--runxfail',
                    action="store_true", dest="runxfail", default=False,
                    help="run tests even if they are marked xfail")

    parser.addini("xfail_strict", "default for the strict parameter of xfail "
                                  "markers when not given explicitly (default: "
                                  "False)",
                  default=False,
                  type="bool")
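
# A minimal usage sketch (not part of this module; the test file name is
# hypothetical): the ini value and command line option registered above
# would typically be driven like this.
#
#   # pytest.ini
#   [pytest]
#   xfail_strict = true
#
#   $ py.test --runxfail test_example.py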


def pytest_configure(config):
    if config.option.runxfail:
        old = pytest.xfail
        config._cleanup.append(lambda: setattr(pytest, "xfail", old))

        def nop(*args, **kwargs):
            pass

        nop.Exception = XFailed
        setattr(pytest, "xfail", nop)

    config.addinivalue_line("markers",
        "skip(reason=None): skip the given test function with an optional reason. "
        "Example: skip(reason=\"no way of currently testing this\") skips the "
        "test."
    )
    config.addinivalue_line("markers",
        "skipif(condition): skip the given test function if eval(condition) "
        "results in a True value. Evaluation happens within the "
        "module global context. Example: skipif('sys.platform == \"win32\"') "
        "skips the test if we are on the win32 platform. See "
        "http://pytest.org/latest/skipping.html"
    )
    config.addinivalue_line("markers",
        "xfail(condition, reason=None, run=True, raises=None, strict=False): "
        "mark the test function as an expected failure if eval(condition) "
        "has a True value. Optionally specify a reason for better reporting "
        "and run=False if you don't even want to execute the test function. "
        "If only specific exception(s) are expected, you can list them in "
        "raises, and if the test fails in other ways, it will be reported as "
        "a true failure. See http://pytest.org/latest/skipping.html"
    )


def pytest_namespace():
    return dict(xfail=xfail)


class XFailed(pytest.fail.Exception):
    """ raised from an explicit call to pytest.xfail() """


def xfail(reason=""):
    """ xfail an executing test or setup function with the given reason. """
    __tracebackhide__ = True
    raise XFailed(reason)
xfail.Exception = XFailed
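
# A minimal usage sketch (hypothetical test module, not part of this file):
# pytest.xfail() can be called imperatively inside a test or fixture to mark
# it as an expected failure at runtime.
#
#   import pytest
#
#   def test_feature():
#       if backend_missing():               # hypothetical helper
#           pytest.xfail("backend not available")
#       assert run_feature() == "ok"        # hypothetical call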


class MarkEvaluator:
    def __init__(self, item, name):
        self.item = item
        self.name = name

    @property
    def holder(self):
        return self.item.keywords.get(self.name)

    def __bool__(self):
        return bool(self.holder)
    __nonzero__ = __bool__

    def wasvalid(self):
        return not hasattr(self, 'exc')

    def invalidraise(self, exc):
        raises = self.get('raises')
        if not raises:
            return
        return not isinstance(exc, raises)

    def istrue(self):
        try:
            return self._istrue()
        except Exception:
            self.exc = sys.exc_info()
            if isinstance(self.exc[1], SyntaxError):
                msg = [" " * (self.exc[1].offset + 4) + "^"]
                msg.append("SyntaxError: invalid syntax")
            else:
                msg = traceback.format_exception_only(*self.exc[:2])
            pytest.fail("Error evaluating %r expression\n"
                        "    %s\n"
                        "%s"
                        % (self.name, self.expr, "\n".join(msg)),
                        pytrace=False)

    def _getglobals(self):
        d = {'os': os, 'sys': sys, 'config': self.item.config}
        func = self.item.obj
        try:
            d.update(func.__globals__)
        except AttributeError:
            # Python 2 fallback: the globals attribute was named func_globals
            d.update(func.func_globals)
        return d

    def _istrue(self):
        if hasattr(self, 'result'):
            return self.result
        if self.holder:
            d = self._getglobals()
            if self.holder.args or 'condition' in self.holder.kwargs:
                self.result = False
                # "holder" might be a MarkInfo or a MarkDecorator; only
                # MarkInfo keeps track of all parameters it received in an
                # _arglist attribute
                if hasattr(self.holder, '_arglist'):
                    arglist = self.holder._arglist
                else:
                    arglist = [(self.holder.args, self.holder.kwargs)]
                for args, kwargs in arglist:
                    if 'condition' in kwargs:
                        args = (kwargs['condition'],)
                    for expr in args:
                        self.expr = expr
                        if isinstance(expr, py.builtin._basestring):
                            result = cached_eval(self.item.config, expr, d)
                        else:
                            if "reason" not in kwargs:
                                # XXX better be checked at collection time
                                msg = "you need to specify reason=STRING " \
                                      "when using booleans as conditions."
                                pytest.fail(msg)
                            result = bool(expr)
                        if result:
                            self.result = True
                            self.reason = kwargs.get('reason', None)
                            self.expr = expr
                            return self.result
            else:
                self.result = True
        return getattr(self, 'result', False)

    def get(self, attr, default=None):
        return self.holder.kwargs.get(attr, default)

    def getexplanation(self):
        expl = getattr(self, 'reason', None) or self.get('reason', None)
        if not expl:
            if not hasattr(self, 'expr'):
                return ""
            else:
                return "condition: " + str(self.expr)
        return expl
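
# A rough illustration (hypothetical, not executed here) of how MarkEvaluator
# is driven: it wraps the mark found in item.keywords and lazily evaluates
# string conditions in the test module's globals.
#
#   ev = MarkEvaluator(item, 'skipif')
#   if ev.istrue():                  # evaluates e.g. 'sys.platform == "win32"'
#       pytest.skip(ev.getexplanation())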


@pytest.hookimpl(tryfirst=True)
def pytest_runtest_setup(item):
    # Check if skip or skipif are specified as pytest marks.

    skipif_info = item.keywords.get('skipif')
    if isinstance(skipif_info, (MarkInfo, MarkDecorator)):
        eval_skipif = MarkEvaluator(item, 'skipif')
        if eval_skipif.istrue():
            item._evalskip = eval_skipif
            pytest.skip(eval_skipif.getexplanation())

    skip_info = item.keywords.get('skip')
    if isinstance(skip_info, (MarkInfo, MarkDecorator)):
        item._evalskip = True
        if 'reason' in skip_info.kwargs:
            pytest.skip(skip_info.kwargs['reason'])
        elif skip_info.args:
            pytest.skip(skip_info.args[0])
        else:
            pytest.skip("unconditional skip")

    item._evalxfail = MarkEvaluator(item, 'xfail')
    check_xfail_no_run(item)
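
# The marks handled above are applied like this in a test module (a minimal,
# hypothetical example):
#
#   import sys
#   import pytest
#
#   @pytest.mark.skip(reason="not implemented yet")
#   def test_todo():
#       pass
#
#   @pytest.mark.skipif('sys.platform == "win32"')
#   def test_posix_only():
#       pass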


@pytest.mark.hookwrapper
def pytest_pyfunc_call(pyfuncitem):
    check_xfail_no_run(pyfuncitem)
    outcome = yield
    passed = outcome.excinfo is None
    if passed:
        check_strict_xfail(pyfuncitem)


def check_xfail_no_run(item):
    """check xfail(run=False)"""
    if not item.config.option.runxfail:
        evalxfail = item._evalxfail
        if evalxfail.istrue():
            if not evalxfail.get('run', True):
                pytest.xfail("[NOTRUN] " + evalxfail.getexplanation())
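
# Example of the mark this guards against (hypothetical test, not part of
# this module): with run=False the test body is never executed and the test
# is reported as "[NOTRUN] ..." xfail, unless --runxfail is given.
#
#   @pytest.mark.xfail(run=False, reason="crashes the interpreter")
#   def test_crasher():
#       pass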


def check_strict_xfail(pyfuncitem):
    """check xfail(strict=True) for the given PASSING test"""
    evalxfail = pyfuncitem._evalxfail
    if evalxfail.istrue():
        strict_default = pyfuncitem.config.getini('xfail_strict')
        is_strict_xfail = evalxfail.get('strict', strict_default)
        if is_strict_xfail:
            del pyfuncitem._evalxfail
            explanation = evalxfail.getexplanation()
            pytest.fail('[XPASS(strict)] ' + explanation, pytrace=False)
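
# Sketch of the strict behaviour (hypothetical test): if this test passes,
# it is reported as a failure ("[XPASS(strict)]") instead of an xpass. The
# per-mark strict= kwarg overrides the xfail_strict ini default.
#
#   @pytest.mark.xfail(strict=True, reason="known bug")
#   def test_known_bug():
#       assert broken_function() == 42   # hypothetical function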


@pytest.hookimpl(hookwrapper=True)
def pytest_runtest_makereport(item, call):
    outcome = yield
    rep = outcome.get_result()
    evalxfail = getattr(item, '_evalxfail', None)
    evalskip = getattr(item, '_evalskip', None)
    # unittest special case, see setting of _unexpectedsuccess
    if hasattr(item, '_unexpectedsuccess') and rep.when == "call":
        # we need to translate into how pytest encodes xpass
        rep.wasxfail = "reason: " + repr(item._unexpectedsuccess)
        rep.outcome = "failed"
    elif item.config.option.runxfail:
        pass   # don't interfere
    elif call.excinfo and call.excinfo.errisinstance(pytest.xfail.Exception):
        rep.wasxfail = "reason: " + call.excinfo.value.msg
        rep.outcome = "skipped"
    elif evalxfail and not rep.skipped and evalxfail.wasvalid() and \
            evalxfail.istrue():
        if call.excinfo:
            if evalxfail.invalidraise(call.excinfo.value):
                rep.outcome = "failed"
            else:
                rep.outcome = "skipped"
                rep.wasxfail = evalxfail.getexplanation()
        elif call.when == "call":
            rep.outcome = "failed"   # xpass outcome
            rep.wasxfail = evalxfail.getexplanation()
    elif evalskip is not None and rep.skipped and type(rep.longrepr) is tuple:
        # skipped by mark.skipif; change the location of the failure
        # to point to the item definition, otherwise it will display
        # the location of where the skip exception was raised within pytest
        filename, line, reason = rep.longrepr
        filename, line = item.location[:2]
        rep.longrepr = filename, line, reason
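
# Outcome encoding used above, summarized (descriptive note):
#   - expected failure (xfail that failed) -> outcome "skipped" + rep.wasxfail
#   - unexpected pass (xfail that passed)  -> outcome "failed"  + rep.wasxfail
# pytest_report_teststatus below maps these back to "xfailed"/"xpassed".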


# called by terminalreporter progress reporting
def pytest_report_teststatus(report):
    if hasattr(report, "wasxfail"):
        if report.skipped:
            return "xfailed", "x", "xfail"
        elif report.failed:
            return "xpassed", "X", ("XPASS", {'yellow': True})


# called by the terminalreporter instance/plugin
def pytest_terminal_summary(terminalreporter):
    tr = terminalreporter
    if not tr.reportchars:
        #for name in "xfailed skipped failed xpassed":
        #    if not tr.stats.get(name, 0):
        #        tr.write_line("HINT: use '-r' option to see extra "
        #              "summary info about tests")
        #        break
        return

    lines = []
    for char in tr.reportchars:
        if char == "x":
            show_xfailed(terminalreporter, lines)
        elif char == "X":
            show_xpassed(terminalreporter, lines)
        elif char in "fF":
            show_simple(terminalreporter, lines, 'failed', "FAIL %s")
        elif char in "sS":
            show_skipped(terminalreporter, lines)
        elif char == "E":
            show_simple(terminalreporter, lines, 'error', "ERROR %s")
        elif char == 'p':
            show_simple(terminalreporter, lines, 'passed', "PASSED %s")

    if lines:
        tr._tw.sep("=", "short test summary info")
        for line in lines:
            tr._tw.line(line)
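
# The report characters consumed above come from the -r command line option,
# e.g. (hypothetical invocation):
#
#   $ py.test -rxXs   # extra summary lines for xfailed, xpassed, skipped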


def show_simple(terminalreporter, lines, stat, format):
    failed = terminalreporter.stats.get(stat)
    if failed:
        for rep in failed:
            pos = terminalreporter.config.cwd_relative_nodeid(rep.nodeid)
            lines.append(format % (pos,))


def show_xfailed(terminalreporter, lines):
    xfailed = terminalreporter.stats.get("xfailed")
    if xfailed:
        for rep in xfailed:
            pos = terminalreporter.config.cwd_relative_nodeid(rep.nodeid)
            reason = rep.wasxfail
            lines.append("XFAIL %s" % (pos,))
            if reason:
                lines.append("  " + str(reason))


def show_xpassed(terminalreporter, lines):
    xpassed = terminalreporter.stats.get("xpassed")
    if xpassed:
        for rep in xpassed:
            pos = terminalreporter.config.cwd_relative_nodeid(rep.nodeid)
            reason = rep.wasxfail
            lines.append("XPASS %s %s" % (pos, reason))


def cached_eval(config, expr, d):
    if not hasattr(config, '_evalcache'):
        config._evalcache = {}
    try:
        return config._evalcache[expr]
    except KeyError:
        import _pytest._code
        exprcode = _pytest._code.compile(expr, mode="eval")
        config._evalcache[expr] = x = eval(exprcode, d)
        return x
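
# Note (descriptive, not from the original source): the cache is keyed on the
# expression string alone, so a given condition string is evaluated at most
# once per session even if later calls supply different globals, e.g.:
#
#   cached_eval(config, 'sys.maxsize > 2**32', {'sys': sys})   # evaluated
#   cached_eval(config, 'sys.maxsize > 2**32', other_globals)  # cache hit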


def folded_skips(skipped):
    d = {}
    for event in skipped:
        key = event.longrepr
        assert len(key) == 3, (event, key)
        d.setdefault(key, []).append(event)
    l = []
    for key, events in d.items():
        l.append((len(events),) + key)
    return l
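
# Illustration (hypothetical values): skips sharing the same (fspath, lineno,
# reason) triple are folded into one summary entry with a count, so ten
# parametrized skips at one location render in show_skipped below as:
#
#   SKIP [10] test_example.py:7: POSIX only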


def show_skipped(terminalreporter, lines):
    tr = terminalreporter
    skipped = tr.stats.get('skipped', [])
    if skipped:
        #if not tr.hasopt('skipped'):
        #    tr.write_line(
        #        "%d skipped tests, specify -rs for more info" %
        #        len(skipped))
        #    return
        fskips = folded_skips(skipped)
        if fskips:
            #tr.write_sep("_", "skipped test summary")
            for num, fspath, lineno, reason in fskips:
                if reason.startswith("Skipped: "):
                    reason = reason[9:]
                lines.append("SKIP [%d] %s:%d: %s" %
                             (num, fspath, lineno, reason))