fix: Missing logging in report (#603)
parent 6013279574
commit 34ff60fbd4
@@ -66,11 +66,19 @@ class BaseReport:
             "collectedItems": 0,
             "runningState": "not_started",
             "environment": {},
-            "tests": [],
+            "tests": defaultdict(list),
             "resultsTableHeader": {},
             "additionalSummary": defaultdict(list),
         }
 
+    @property
+    def title(self):
+        return self._data["title"]
+
+    @title.setter
+    def title(self, title):
+        self._data["title"] = title
+
     @property
     def config(self):
         return self._config
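The switch from a flat list to defaultdict(list) keyed by nodeid is what makes the later teardown-log merge possible: each phase of the same test can find the entries already stored for that nodeid. A minimal standalone sketch of that idea (illustrative names and data only, not the plugin's API):

from collections import defaultdict

# One list of result entries per test nodeid (hypothetical data).
tests = defaultdict(list)

# The "call" phase stores the row that will appear in the HTML...
tests["test_module.py::test_logging"].append(
    {"testId": "test_module.py::test_logging", "log": "this is test"}
)

# ...and the "teardown" phase can later find that same row by nodeid
# and append its captured log, which a flat list made awkward.
for entry in tests["test_module.py::test_logging"]:
    entry["log"] += "\nthis is teardown"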
@@ -79,19 +87,33 @@ class BaseReport:
     def data(self):
         return self._data
 
-    def add_test(self, test):
-        self._data["tests"].append(test)
-
     def set_data(self, key, value):
         self._data[key] = value
 
-    @property
-    def title(self):
-        return self._data["title"]
-
-    @title.setter
-    def title(self, title):
-        self._data["title"] = title
+    def add_test(self, test_data, report):
+        # regardless of pass or fail we must add teardown logging to "call"
+        if report.when == "teardown":
+            self.update_test_log(report)
+
+        # passed "setup" and "teardown" are not added to the html
+        if report.when == "call" or _is_error(report):
+            processed_logs = _process_logs(report)
+            test_data["log"] = _handle_ansi(processed_logs)
+            self._data["tests"][report.nodeid].append(test_data)
+            return True
+
+        return False
+
+    def update_test_log(self, report):
+        log = []
+        for test in self._data["tests"][report.nodeid]:
+            if test["testId"] == report.nodeid:
+                for section in report.sections:
+                    header, content = section
+                    if "teardown" in header:
+                        log.append(f" \n{header:-^80} ")
+                        log.append(content)
+                test["log"] += _handle_ansi("\n".join(log))
 
     def __init__(self, report_path, config, default_css="style.css"):
         self._report_path = Path(os.path.expandvars(report_path)).expanduser()
@@ -269,7 +291,6 @@ class BaseReport:
 
         data = {
             "duration": report.duration,
-            "when": report.when,
         }
 
         test_id = report.nodeid
@@ -291,14 +312,11 @@ class BaseReport:
             test_id += f"::{report.when}"
         data["testId"] = test_id
 
-        # Order here matters!
-        log = report.longreprtext or report.capstdout or "No log output captured."
-        data["log"] = _handle_ansi(log)
         data["result"] = _process_outcome(report)
         data["extras"] = self._process_extras(report, test_id)
 
-        self._report.add_test(data)
-        self._generate_report()
+        if self._report.add_test(data, report):
+            self._generate_report()
 
 
 class NextGenReport(BaseReport):
@@ -313,8 +331,6 @@ class NextGenReport(BaseReport):
 
     @property
     def css(self):
-        # print("woot", Path(self._assets_path.name, "style.css"))
-        # print("waat", self._css_path.relative_to(self._report_path.parent))
         return Path(self._assets_path.name, "style.css")
 
     def _data_content(self, content, asset_name, *args, **kwargs):
@@ -392,8 +408,25 @@ def _process_css(default_css, extra_css):
     return css
 
 
+def _is_error(report):
+    return report.when in ["setup", "teardown"] and report.outcome == "failed"
+
+
+def _process_logs(report):
+    log = []
+    if report.longreprtext:
+        log.append(report.longreprtext)
+    for section in report.sections:
+        header, content = section
+        log.append(f" \n{header:-^80} ")
+        log.append(content)
+    if not log:
+        log.append("No log output captured.")
+    return "\n".join(log)
+
+
 def _process_outcome(report):
-    if report.when in ["setup", "teardown"] and report.outcome == "failed":
+    if _is_error(report):
         return "Error"
     if hasattr(report, "wasxfail"):
         if report.outcome in ["passed", "failed"]:
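For reference, the {header:-^80} format spec centers the section header in a row of dashes. Re-running the _process_logs body by hand with made-up values (fabricated traceback and section text, not real plugin output) shows how the pieces are joined:

# Standalone re-run of the _process_logs logic with fabricated inputs.
longreprtext = "def test_logging(setup):\n>       assert False\nE       assert False"
sections = [("Captured log teardown", "INFO     root:test.py:9 this is teardown")]

log = [longreprtext]
for header, content in sections:
    log.append(f" \n{header:-^80} ")  # header centered in a line of dashes
    log.append(content)
print("\n".join(log))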
@@ -3,7 +3,7 @@ const { getCollapsedCategory } = require('./storage.js')
 class DataManager {
     setManager(data) {
         const collapsedCategories = [...getCollapsedCategory(), 'passed']
-        const dataBlob = { ...data, tests: data.tests.map((test, index) => ({
+        const dataBlob = { ...data, tests: Object.values(data.tests).flat().map((test, index) => ({
             ...test,
             id: `test_${index}`,
             collapsed: collapsedCategories.includes(test.result.toLowerCase()),
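With tests keyed by nodeid on the Python side, the serialized blob the JS receives is now an object of lists rather than a flat array, and Object.values(data.tests).flat() restores the flat view the rest of the UI expects. The same flattening, sketched in Python against a hypothetical payload:

# Hypothetical payload shape after this change: tests keyed by nodeid, each
# holding one or more row entries (the "call" row plus any error rows).
data = {
    "tests": {
        "test_a.py::test_ok": [
            {"testId": "test_a.py::test_ok", "result": "Passed"},
        ],
        "test_a.py::test_broken_teardown": [
            {"testId": "test_a.py::test_broken_teardown", "result": "Passed"},
            {"testId": "test_a.py::test_broken_teardown::teardown", "result": "Error"},
        ],
    }
}

# Python equivalent of Object.values(data.tests).flat() above.
flattened = [row for rows in data["tests"].values() for row in rows]
assert len(flattened) == 3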
@@ -84,6 +84,7 @@ const dom = {
         formattedDuration = formatDuration < 1 ? formattedDuration.ms : formattedDuration.formatted
         const resultBody = templateResult.content.cloneNode(true)
         resultBody.querySelector('tbody').classList.add(resultLower)
+        resultBody.querySelector('tbody').id = testId
         resultBody.querySelector('.col-result').innerText = result
         resultBody.querySelector('.col-result').classList.add(`${collapsed ? 'expander' : 'collapser'}`)
         resultBody.querySelector('.col-result').dataset.id = id
@@ -29,8 +29,7 @@ const renderStatic = () => {
 }
 
 const renderContent = (tests) => {
-    const renderSet = tests.filter(({ when, result }) => when === 'call' || result === 'Error' )
-    const rows = renderSet.map(dom.getResultTBody)
+    const rows = tests.map(dom.getResultTBody)
     const table = document.querySelector('#results-table')
     removeChildren(table)
     const tableHeader = dom.getListHeader(manager.renderData)
@@ -62,8 +61,6 @@ const renderContent = (tests) => {
 }
 
 const renderDerived = (tests, collectedItems, isFinished) => {
-    const renderSet = tests.filter(({ when, result }) => when === 'call' || result === 'Error')
-
     const possibleResults = [
         { result: 'passed', label: 'Passed' },
         { result: 'skipped', label: 'Skipped' },
@@ -76,7 +73,7 @@ const renderDerived = (tests, collectedItems, isFinished) => {
 
     const currentFilter = getVisible()
     possibleResults.forEach(({ result, label }) => {
-        const count = renderSet.filter((test) => test.result.toLowerCase() === result).length
+        const count = tests.filter((test) => test.result.toLowerCase() === result).length
         const input = document.querySelector(`input[data-test-result="${result}"]`)
         document.querySelector(`.${result}`).innerText = `${count} ${label}`
@@ -84,7 +81,7 @@ const renderDerived = (tests, collectedItems, isFinished) => {
         input.checked = currentFilter.includes(result)
     })
 
-    const numberOfTests = renderSet.filter(({ result }) =>
+    const numberOfTests = tests.filter(({ result }) =>
         ['Passed', 'Failed', 'XPassed', 'XFailed'].includes(result)).length
 
     if (isFinished) {
@@ -31,7 +31,8 @@ def run(pytester, path="report.html", *args):
     pytester.runpytest("-s", "--html", path, *args)
 
     chrome_options = webdriver.ChromeOptions()
-    chrome_options.add_argument("--headless")
+    if os.environ.get("CI", False):
+        chrome_options.add_argument("--headless")
     chrome_options.add_argument("--window-size=1920x1080")
     driver = webdriver.Remote(
         command_executor="http://127.0.0.1:4444", options=chrome_options
@@ -90,9 +91,12 @@ def get_text(page, selector):
     return get_element(page, selector).string
 
 
-def get_log(page):
+def get_log(page, test_id=None):
     # TODO(jim) move to get_text (use .contents)
-    log = get_element(page, ".summary div[class='log']")
+    if test_id:
+        log = get_element(page, f".summary tbody[id$='{test_id}'] div[class='log']")
+    else:
+        log = get_element(page, ".summary div[class='log']")
     all_text = ""
     for text in log.strings:
         all_text += text
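The optional test_id narrows the lookup to a single result row via a CSS "ends with" attribute selector against the tbody id that dom.js now sets. A minimal sketch of that selector with BeautifulSoup, assuming get_element() is a thin wrapper over soup.select_one (an assumption; the helper itself is not shown in this diff):

from bs4 import BeautifulSoup

# Hand-written HTML fragment shaped like one rendered result row.
html = """
<div class="summary">
  <table>
    <tbody id="test_logging::teardown">
      <tr><td><div class="log">this is teardown</div></td></tr>
    </tbody>
  </table>
</div>
"""
soup = BeautifulSoup(html, "html.parser")
# [id$='...'] matches elements whose id ends with the given test id.
row_log = soup.select_one(".summary tbody[id$='test_logging::teardown'] div[class='log']")
print(row_log.get_text())  # -> this is teardown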
@@ -527,3 +531,96 @@ class TestHTML:
         )
         page = run(pytester)
         assert_results(page, passed=1)
+
+
+class TestLogCapturing:
+    LOG_LINE_REGEX = r"\s+this is {}"
+
+    @pytest.fixture
+    def log_cli(self, pytester):
+        pytester.makeini(
+            """
+            [pytest]
+            log_cli = 1
+            log_cli_level = INFO
+            log_cli_date_format = %Y-%m-%d %H:%M:%S
+            log_cli_format = %(asctime)s %(levelname)s: %(message)s
+            """
+        )
+
+    @pytest.fixture
+    def test_file(self):
+        return """
+            import pytest
+            import logging
+
+            @pytest.fixture
+            def setup():
+                logging.info("this is setup")
+                {setup}
+                yield
+                logging.info("this is teardown")
+                {teardown}
+
+            def test_logging(setup):
+                logging.info("this is test")
+                assert {assertion}
+        """
+
+    @pytest.mark.usefixtures("log_cli")
+    def test_all_pass(self, test_file, pytester):
+        pytester.makepyfile(test_file.format(setup="", teardown="", assertion=True))
+        page = run(pytester)
+        assert_results(page, passed=1)
+
+        log = get_log(page)
+        for when in ["setup", "test", "teardown"]:
+            assert_that(log).matches(self.LOG_LINE_REGEX.format(when))
+
+    @pytest.mark.usefixtures("log_cli")
+    def test_setup_error(self, test_file, pytester):
+        pytester.makepyfile(
+            test_file.format(setup="error", teardown="", assertion=True)
+        )
+        page = run(pytester)
+        assert_results(page, error=1)
+
+        log = get_log(page)
+        assert_that(log).matches(self.LOG_LINE_REGEX.format("setup"))
+        assert_that(log).does_not_match(self.LOG_LINE_REGEX.format("test"))
+        assert_that(log).does_not_match(self.LOG_LINE_REGEX.format("teardown"))
+
+    @pytest.mark.usefixtures("log_cli")
+    def test_test_fails(self, test_file, pytester):
+        pytester.makepyfile(test_file.format(setup="", teardown="", assertion=False))
+        page = run(pytester)
+        assert_results(page, failed=1)
+
+        log = get_log(page)
+        for when in ["setup", "test", "teardown"]:
+            assert_that(log).matches(self.LOG_LINE_REGEX.format(when))
+
+    @pytest.mark.usefixtures("log_cli")
+    @pytest.mark.parametrize(
+        "assertion, result", [(True, {"passed": 1}), (False, {"failed": 1})]
+    )
+    def test_teardown_error(self, test_file, pytester, assertion, result):
+        pytester.makepyfile(
+            test_file.format(setup="", teardown="error", assertion=assertion)
+        )
+        page = run(pytester)
+        assert_results(page, error=1, **result)
+
+        for test_name in ["test_logging", "test_logging::teardown"]:
+            log = get_log(page, test_name)
+            for when in ["setup", "test", "teardown"]:
+                assert_that(log).matches(self.LOG_LINE_REGEX.format(when))
+
+    def test_no_log(self, test_file, pytester):
+        pytester.makepyfile(test_file.format(setup="", teardown="", assertion=True))
+        page = run(pytester)
+        assert_results(page, passed=1)
+
+        log = get_log(page, "test_logging")
+        assert_that(log).contains("No log output captured.")
+        for when in ["setup", "test", "teardown"]:
+            assert_that(log).does_not_match(self.LOG_LINE_REGEX.format(when))
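For orientation, this is roughly what the test_file template above expands to in the teardown-error case (setup="", teardown="error", assertion=True): the bare name error raises a NameError after yield, so the test itself passes but its teardown is reported as an error, which is exactly the case whose logging the report previously lost.

import pytest
import logging

@pytest.fixture
def setup():
    logging.info("this is setup")

    yield
    logging.info("this is teardown")
    error  # undefined name -> NameError during teardown -> teardown error

def test_logging(setup):
    logging.info("this is test")
    assert True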