1""" support for skip/xfail functions and markers. """ 

2import os 

3import platform 

4import sys 

5import traceback 

6from typing import Generator 

7from typing import Optional 

8from typing import Tuple 

9 

10import attr 

11 

12from _pytest.compat import TYPE_CHECKING 

13from _pytest.config import Config 

14from _pytest.config import hookimpl 

15from _pytest.config.argparsing import Parser 

16from _pytest.mark.structures import Mark 

17from _pytest.nodes import Item 

18from _pytest.outcomes import fail 

19from _pytest.outcomes import skip 

20from _pytest.outcomes import xfail 

21from _pytest.reports import BaseReport 

22from _pytest.runner import CallInfo 

23from _pytest.store import StoreKey 

24 

25if TYPE_CHECKING: 

26 from typing import Type 

27 

28 

29def pytest_addoption(parser: Parser) -> None: 

30 group = parser.getgroup("general") 

31 group.addoption( 

32 "--runxfail", 

33 action="store_true", 

34 dest="runxfail", 

35 default=False, 

36 help="report the results of xfail tests as if they were not marked", 

37 ) 

38 

39 parser.addini( 

40 "xfail_strict", 

41 "default for the strict parameter of xfail " 

42 "markers when not given explicitly (default: False)", 

43 default=False, 

44 type="bool", 

45 ) 

46 
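
# Illustrative usage sketch, not part of the original module: the command-line
# option and ini value registered above would typically be exercised like this,
# assuming a project-local pytest.ini:
#
#   # pytest.ini
#   [pytest]
#   xfail_strict = true
#
#   # run xfail-marked tests as if they were not marked at all
#   pytest --runxfail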


def pytest_configure(config: Config) -> None:
    if config.option.runxfail:
        # yay a hack: replace pytest.xfail with a no-op so that imperative
        # xfail() calls inside tests are ignored when --runxfail is given.
        import pytest

        old = pytest.xfail
        config._cleanup.append(lambda: setattr(pytest, "xfail", old))

        def nop(*args, **kwargs):
            pass

        nop.Exception = xfail.Exception  # type: ignore[attr-defined]
        setattr(pytest, "xfail", nop)

    config.addinivalue_line(
        "markers",
        "skip(reason=None): skip the given test function with an optional reason. "
        'Example: skip(reason="no way of currently testing this") skips the '
        "test.",
    )
    config.addinivalue_line(
        "markers",
        "skipif(condition, ..., *, reason=...): "
        "skip the given test function if any of the conditions evaluate to True. "
        "Example: skipif(sys.platform == 'win32') skips the test if we are on the win32 platform. "
        "See https://docs.pytest.org/en/stable/reference.html#pytest-mark-skipif",
    )
    config.addinivalue_line(
        "markers",
        "xfail(condition, ..., *, reason=..., run=True, raises=None, strict=xfail_strict): "
        "mark the test function as an expected failure if any of the conditions "
        "evaluate to True. Optionally specify a reason for better reporting "
        "and run=False if you don't even want to execute the test function. "
        "If only specific exception(s) are expected, you can list them in "
        "raises, and if the test fails in other ways, it will be reported as "
        "a true failure. See https://docs.pytest.org/en/stable/reference.html#pytest-mark-xfail",
    )
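
# Illustrative sketch, not part of the original module: under --runxfail the
# nop() replacement installed above turns an imperative pytest.xfail() call
# inside a test into a no-op, so the test keeps running and reports its real
# outcome, e.g.:
#
#   def test_imperative_xfail():
#       pytest.xfail("known bug")  # no-op with --runxfail, raises otherwise
#       assert compute() == 42     # compute() is a hypothetical helper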



def evaluate_condition(item: Item, mark: Mark, condition: object) -> Tuple[bool, str]:
    """Evaluate a single skipif/xfail condition.

    If an old-style string condition is given, it is eval()'d, otherwise the
    condition is bool()'d. If this fails, an appropriately formatted pytest.fail
    is raised.

    Returns (result, reason). The reason is only relevant if the result is True.
    """
    # String condition.
    if isinstance(condition, str):
        globals_ = {
            "os": os,
            "sys": sys,
            "platform": platform,
            "config": item.config,
        }
        if hasattr(item, "obj"):
            globals_.update(item.obj.__globals__)  # type: ignore[attr-defined]
        try:
            filename = "<{} condition>".format(mark.name)
            condition_code = compile(condition, filename, "eval")
            result = eval(condition_code, globals_)
        except SyntaxError as exc:
            msglines = [
                "Error evaluating %r condition" % mark.name,
                "    " + condition,
                "    " + " " * (exc.offset or 0) + "^",
                "SyntaxError: invalid syntax",
            ]
            fail("\n".join(msglines), pytrace=False)
        except Exception as exc:
            msglines = [
                "Error evaluating %r condition" % mark.name,
                "    " + condition,
                *traceback.format_exception_only(type(exc), exc),
            ]
            fail("\n".join(msglines), pytrace=False)

    # Boolean condition.
    else:
        try:
            result = bool(condition)
        except Exception as exc:
            msglines = [
                "Error evaluating %r condition as a boolean" % mark.name,
                *traceback.format_exception_only(type(exc), exc),
            ]
            fail("\n".join(msglines), pytrace=False)

    reason = mark.kwargs.get("reason", None)
    if reason is None:
        if isinstance(condition, str):
            reason = "condition: " + condition
        else:
            # XXX better be checked at collection time
            msg = (
                "Error evaluating %r: " % mark.name
                + "you need to specify reason=STRING when using booleans as conditions."
            )
            fail(msg, pytrace=False)

    return result, reason
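
# Illustrative sketch, not part of the original module, of the two condition
# styles handled above, on hypothetical tests:
#
#   # string condition: compiled and eval()'d against os/sys/platform/config
#   # plus the test module's globals; the reason defaults to "condition: ..."
#   @pytest.mark.skipif("sys.platform == 'win32'")
#   def test_string_condition(): ...
#
#   # non-string condition: simply bool()'d; an explicit reason= is required
#   @pytest.mark.skipif(sys.platform == "win32", reason="fails on win32")
#   def test_bool_condition(): ...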



@attr.s(slots=True, frozen=True)
class Skip:
    """The result of evaluate_skip_marks()."""

    reason = attr.ib(type=str)


def evaluate_skip_marks(item: Item) -> Optional[Skip]:
    """Evaluate skip and skipif marks on item, returning Skip if triggered."""
    for mark in item.iter_markers(name="skipif"):
        if "condition" not in mark.kwargs:
            conditions = mark.args
        else:
            conditions = (mark.kwargs["condition"],)

        # Unconditional.
        if not conditions:
            reason = mark.kwargs.get("reason", "")
            return Skip(reason)

        # If any of the conditions are true.
        for condition in conditions:
            result, reason = evaluate_condition(item, mark, condition)
            if result:
                return Skip(reason)

    for mark in item.iter_markers(name="skip"):
        if "reason" in mark.kwargs:
            reason = mark.kwargs["reason"]
        elif mark.args:
            reason = mark.args[0]
        else:
            reason = "unconditional skip"
        return Skip(reason)

    return None
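
# Illustrative sketch, not part of the original module: marks that the
# function above turns into a Skip result, on a hypothetical test:
#
#   @pytest.mark.skip(reason="not implemented yet")
#   @pytest.mark.skipif(sys.version_info < (3, 8), reason="requires Python 3.8")
#   def test_something(): ...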



@attr.s(slots=True, frozen=True)
class Xfail:
    """The result of evaluate_xfail_marks()."""

    reason = attr.ib(type=str)
    run = attr.ib(type=bool)
    strict = attr.ib(type=bool)
    raises = attr.ib(type=Optional[Tuple["Type[BaseException]", ...]])


def evaluate_xfail_marks(item: Item) -> Optional[Xfail]:
    """Evaluate xfail marks on item, returning Xfail if triggered."""
    for mark in item.iter_markers(name="xfail"):
        run = mark.kwargs.get("run", True)
        strict = mark.kwargs.get("strict", item.config.getini("xfail_strict"))
        raises = mark.kwargs.get("raises", None)
        if "condition" not in mark.kwargs:
            conditions = mark.args
        else:
            conditions = (mark.kwargs["condition"],)

        # Unconditional.
        if not conditions:
            reason = mark.kwargs.get("reason", "")
            return Xfail(reason, run, strict, raises)

        # If any of the conditions are true.
        for condition in conditions:
            result, reason = evaluate_condition(item, mark, condition)
            if result:
                return Xfail(reason, run, strict, raises)

    return None
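
# Illustrative sketch, not part of the original module: marks that the
# function above turns into an Xfail result, on a hypothetical test:
#
#   @pytest.mark.xfail(reason="known flaky")  # run=True, strict from xfail_strict ini
#   @pytest.mark.xfail(
#       sys.platform == "win32", reason="win32 bug",
#       raises=OSError, run=False, strict=True,
#   )
#   def test_something(): ...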



# Whether skipped due to skip or skipif marks.
skipped_by_mark_key = StoreKey[bool]()
# Saves the xfail mark evaluation. Can be refreshed during call if None.
xfailed_key = StoreKey[Optional[Xfail]]()
unexpectedsuccess_key = StoreKey[str]()


@hookimpl(tryfirst=True)
def pytest_runtest_setup(item: Item) -> None:
    skipped = evaluate_skip_marks(item)
    item._store[skipped_by_mark_key] = skipped is not None
    if skipped:
        skip(skipped.reason)

    item._store[xfailed_key] = xfailed = evaluate_xfail_marks(item)
    if xfailed and not item.config.option.runxfail and not xfailed.run:
        xfail("[NOTRUN] " + xfailed.reason)


@hookimpl(hookwrapper=True)
def pytest_runtest_call(item: Item) -> Generator[None, None, None]:
    xfailed = item._store.get(xfailed_key, None)
    if xfailed is None:
        item._store[xfailed_key] = xfailed = evaluate_xfail_marks(item)

    if xfailed and not item.config.option.runxfail and not xfailed.run:
        xfail("[NOTRUN] " + xfailed.reason)

    yield

    # The test run may have added an xfail mark dynamically.
    xfailed = item._store.get(xfailed_key, None)
    if xfailed is None:
        item._store[xfailed_key] = xfailed = evaluate_xfail_marks(item)


@hookimpl(hookwrapper=True)
def pytest_runtest_makereport(item: Item, call: CallInfo[None]):
    outcome = yield
    rep = outcome.get_result()
    xfailed = item._store.get(xfailed_key, None)
    # unittest special case, see setting of unexpectedsuccess_key
    if unexpectedsuccess_key in item._store and rep.when == "call":
        reason = item._store[unexpectedsuccess_key]
        if reason:
            rep.longrepr = "Unexpected success: {}".format(reason)
        else:
            rep.longrepr = "Unexpected success"
        rep.outcome = "failed"
    elif item.config.option.runxfail:
        pass  # don't interfere
    elif call.excinfo and isinstance(call.excinfo.value, xfail.Exception):
        assert call.excinfo.value.msg is not None
        rep.wasxfail = "reason: " + call.excinfo.value.msg
        rep.outcome = "skipped"
    elif not rep.skipped and xfailed:
        if call.excinfo:
            raises = xfailed.raises
            if raises is not None and not isinstance(call.excinfo.value, raises):
                rep.outcome = "failed"
            else:
                rep.outcome = "skipped"
                rep.wasxfail = xfailed.reason
        elif call.when == "call":
            if xfailed.strict:
                rep.outcome = "failed"
                rep.longrepr = "[XPASS(strict)] " + xfailed.reason
            else:
                rep.outcome = "passed"
                rep.wasxfail = xfailed.reason

    if (
        item._store.get(skipped_by_mark_key, True)
        and rep.skipped
        and type(rep.longrepr) is tuple
    ):
        # skipped by mark.skipif; change the location of the failure
        # to point to the item definition, otherwise it will display
        # the location of where the skip exception was raised within pytest
        _, _, reason = rep.longrepr
        filename, line = item.reportinfo()[:2]
        assert line is not None
        rep.longrepr = str(filename), line + 1, reason
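
# Illustrative sketch, not part of the original module, of two report outcomes
# produced by the hook above, on hypothetical tests:
#
#   @pytest.mark.xfail(strict=True, reason="should fail")
#   def test_unexpectedly_passes():
#       assert True  # -> outcome "failed", longrepr "[XPASS(strict)] should fail"
#
#   @pytest.mark.xfail(raises=ZeroDivisionError, reason="div bug")
#   def test_raises_wrong_exception():
#       raise ValueError()  # -> outcome "failed": not the exception listed in raises=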


def pytest_report_teststatus(report: BaseReport) -> Optional[Tuple[str, str, str]]:
    if hasattr(report, "wasxfail"):
        if report.skipped:
            return "xfailed", "x", "XFAIL"
        elif report.passed:
            return "xpassed", "X", "XPASS"
    return None
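
# Illustrative note, not part of the original module: the tuples returned above
# drive the terminal output, e.g. "x"/"XFAIL" in the progress line and verbose
# listing for an expected failure, and "X"/"XPASS" for an unexpected pass.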