Mastering PyTest: Custom Plugins, Fixtures and JSON Test Reporting
Setting up the project and goals
This tutorial walks through building a small but complete PyTest-powered project that demonstrates how to extend the framework with custom configuration, plugins, fixtures, markers, parametrization and automated JSON reporting. The goal is to show how PyTest can evolve from a simple test runner into an extensible system suitable for real projects.
Project bootstrap script
The example starts by preparing the environment, installing PyTest, and creating a clean project layout with folders for the calculation package, application utilities, and tests:
import sys, subprocess, os, textwrap, pathlib, json
subprocess.run([sys.executable, "-m", "pip", "install", "-q", "pytest>=8.0"], check=True)
root = pathlib.Path("pytest_advanced_tutorial").absolute()
if root.exists():
    import shutil; shutil.rmtree(root)
(root / "calc").mkdir(parents=True)
(root / "app").mkdir()
(root / "tests").mkdir()
Configuring PyTest and adding a custom plugin
We define pytest.ini to set default options, test discovery paths and markers. The project also includes a conftest.py that acts as a lightweight local plugin: it adds a --runslow option, tracks passed/failed/skipped test counts, skips slow tests by default, and provides reusable fixtures such as settings, event_log, temp_json_file and fake_clock.
(root / "pytest.ini").write_text(textwrap.dedent("""
[pytest]
addopts = -q -ra --maxfail=1
testpaths = tests
markers =
    slow: slow tests (use --runslow to run)
    io: tests hitting the file system
    api: tests patching external calls
""").strip()+"\n")
(root / "conftest.py").write_text(textwrap.dedent(r'''
import os, time, pytest, json

# Module-level summary dict: TestReport objects carry no config reference,
# so the hooks below share state through this module instead.
_SUMMARY = {"passed": 0, "failed": 0, "skipped": 0, "slow_ran": 0}

def pytest_addoption(parser):
    parser.addoption("--runslow", action="store_true", help="run slow tests")

def pytest_configure(config):
    config.addinivalue_line("markers", "slow: slow tests")

def pytest_collection_modifyitems(config, items):
    if config.getoption("--runslow"):
        return
    skip = pytest.mark.skip(reason="need --runslow to run")
    for item in items:
        if "slow" in item.keywords:
            item.add_marker(skip)

def pytest_runtest_logreport(report):
    if report.when == "call":
        if report.passed:
            _SUMMARY["passed"] += 1
            if "slow" in report.keywords:
                _SUMMARY["slow_ran"] += 1
        elif report.failed:
            _SUMMARY["failed"] += 1
    # marker-based skips are reported during the setup phase, not the call phase
    if report.skipped:
        _SUMMARY["skipped"] += 1

def pytest_terminal_summary(terminalreporter, exitstatus, config):
    s = _SUMMARY
    terminalreporter.write_sep("=", "SESSION SUMMARY (custom plugin)")
    terminalreporter.write_line(f"Passed: {s['passed']} | Failed: {s['failed']} | Skipped: {s['skipped']}")
    terminalreporter.write_line(f"Slow tests run: {s['slow_ran']}")
    terminalreporter.write_line("PyTest finished successfully" if s["failed"] == 0 else "Some tests failed")

@pytest.fixture(scope="session")
def settings(): return {"env": "prod", "max_retries": 2}

@pytest.fixture(scope="function")
def event_log():
    logs = []
    yield logs
    print("\nEVENT LOG:", logs)

@pytest.fixture
def temp_json_file(tmp_path):
    p = tmp_path / "data.json"; p.write_text('{"msg":"hi"}'); return p

@pytest.fixture
def fake_clock(monkeypatch):
    t = {"now": 1000.0}; monkeypatch.setattr(time, "time", lambda: t["now"]); return t
'''))
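Because the markers are registered in pytest.ini and the --runslow option in conftest.py, selective runs become possible once the project directory exists. Below is a minimal sketch (an aside, not part of the build script; it assumes the generated pytest_advanced_tutorial directory is in the current working directory):

# Aside: selective runs using the markers and option declared above
import subprocess, sys
# run only tests marked @pytest.mark.io
subprocess.run([sys.executable, "-m", "pytest", "pytest_advanced_tutorial", "-m", "io", "-q"])
# include @slow tests via the custom option registered in conftest.py
subprocess.run([sys.executable, "-m", "pytest", "pytest_advanced_tutorial", "--runslow", "-q"])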
Core calculation package (calc)
The calc package contains small math utilities and a Vector class to demonstrate arithmetic operations and comparisons. These simple functions are ideal targets for unit tests and parametrized assertions.
(root/"calc"/"__init__.py").write_text(textwrap.dedent('''
from .vector import Vector
def add(a,b): return a+b
def div(a,b):
    if b==0: raise ZeroDivisionError("division by zero")
    return a/b
def moving_avg(xs,k):
    if k<=0 or k>len(xs): raise ValueError("bad window")
    out=[]; s=sum(xs[:k]); out.append(s/k)
    for i in range(k,len(xs)):
        s+=xs[i]-xs[i-k]; out.append(s/k)
    return out
'''))
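To make the sliding-window logic of moving_avg concrete, here is a short worked example (illustrative only, assuming the calc package is importable, e.g. when run from inside the generated project directory): each output is the mean of k consecutive inputs, and the running sum is updated by adding the newest element and subtracting the one that leaves the window.

# Worked example of moving_avg with k=3 (not part of the build script)
# xs = [1, 2, 3, 4, 5]
#   window [1,2,3] -> sum 6  -> 6/3 = 2.0
#   slide: +4 -1   -> sum 9  -> 9/3 = 3.0
#   slide: +5 -2   -> sum 12 -> 12/3 = 4.0
from calc import moving_avg
assert moving_avg([1, 2, 3, 4, 5], 3) == [2.0, 3.0, 4.0]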
(root/"calc"/"vector.py").write_text(textwrap.dedent('''
class Vector:
    __slots__ = ("x","y","z")
    def __init__(self,x=0,y=0,z=0): self.x,self.y,self.z=float(x),float(y),float(z)
    def __add__(self,o): return Vector(self.x+o.x,self.y+o.y,self.z+o.z)
    def __sub__(self,o): return Vector(self.x-o.x,self.y-o.y,self.z-o.z)
    def __mul__(self,s): return Vector(self.x*s,self.y*s,self.z*s)
    __rmul__=__mul__
    def norm(self): return (self.x**2+self.y**2+self.z**2)**0.5
    def __eq__(self,o): return abs(self.x-o.x)<1e-9 and abs(self.y-o.y)<1e-9 and abs(self.z-o.z)<1e-9
    def __repr__(self): return f"Vector({self.x:.2f},{self.y:.2f},{self.z:.2f})"
'''))
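A quick interactive sanity check of the Vector class (again an aside, assuming the package is importable) shows the operator overloads and the tolerance-based equality in action:

# Illustrative usage of Vector (not part of the build script)
from calc.vector import Vector

v = Vector(1, 2, 3) + Vector(4, 5, 6)            # __add__
assert v == Vector(5, 7, 9)                      # __eq__ with 1e-9 tolerance
assert 2 * Vector(1, 0, 0) == Vector(2, 0, 0)    # __rmul__ delegates to __mul__
assert abs(Vector(3, 4, 0).norm() - 5.0) < 1e-9  # Euclidean norm
print(v)                                         # Vector(5.00,7.00,9.00)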
Application utilities and a mocked API
The app package provides JSON I/O helpers and a simple API function that can switch to an offline mode for deterministic testing.
(root/"app"/"io_utils.py").write_text(textwrap.dedent('''
import json, pathlib, time
def save_json(path,obj):
    path=pathlib.Path(path); path.write_text(json.dumps(obj)); return path
def load_json(path): return json.loads(pathlib.Path(path).read_text())
def timed_operation(fn,*a,**kw):
    t0=time.time(); out=fn(*a,**kw); t1=time.time(); return out,t1-t0
'''))
(root/"app"/"api.py").write_text(textwrap.dedent('''
import os, time, random
def fetch_username(uid):
    if os.environ.get("API_MODE")=="offline": return f"cached_{uid}"
    time.sleep(0.001); return f"user_{uid}_{random.randint(100,999)}"
'''))
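The API_MODE environment variable is what makes fetch_username testable: offline mode is fully deterministic, while the default path sleeps and appends a random suffix. A small sketch of the two behaviors (illustrative only, assuming the app package is importable):

# Comparing offline vs. default behavior of fetch_username (not part of the build script)
import os
from app.api import fetch_username

os.environ["API_MODE"] = "offline"
assert fetch_username(9) == "cached_9"   # deterministic: no sleep, no randomness

del os.environ["API_MODE"]
print(fetch_username(9))                 # e.g. "user_9_123": random suffix, not directly assertable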
Test suite examples
The tests illustrate parametrization, xfail expectations, the custom markers (io, api, slow), the built-in tmp_path, capsys and monkeypatch fixtures, and the project's own temp_json_file, event_log and fake_clock fixtures.
(root/"tests"/"test_calc.py").write_text(textwrap.dedent('''
import pytest, math
from calc import add,div,moving_avg
from calc.vector import Vector
@pytest.mark.parametrize("a,b,exp",[(1,2,3),(0,0,0),(-1,1,0)])
def test_add(a,b,exp): assert add(a,b)==exp
@pytest.mark.parametrize("a,b,exp",[(6,3,2),(8,2,4)])
def test_div(a,b,exp): assert div(a,b)==exp
@pytest.mark.xfail(raises=ZeroDivisionError)
def test_div_zero(): div(1,0)
def test_avg(): assert moving_avg([1,2,3,4,5],3)==[2,3,4]
def test_vector_ops(): v=Vector(1,2,3)+Vector(4,5,6); assert v==Vector(5,7,9)
'''))
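The auto-generated parametrize IDs are derived from the argument values; when more readable names are wanted, pytest.param can attach an explicit id to each case. A small sketch below shows a hypothetical variant of test_add (it is not written into the generated suite):

# Sketch: explicit test IDs with pytest.param (alternative to the parametrization above)
import pytest
from calc import add

@pytest.mark.parametrize("a,b,exp", [
    pytest.param(1, 2, 3, id="positive"),
    pytest.param(0, 0, 0, id="zeros"),
    pytest.param(-1, 1, 0, id="mixed-signs"),
])
def test_add_named(a, b, exp):
    assert add(a, b) == exp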
(root/"tests"/"test_io_api.py").write_text(textwrap.dedent('''
import pytest, os
from app.io_utils import save_json,load_json,timed_operation
from app.api import fetch_username
@pytest.mark.io
def test_io(temp_json_file,tmp_path):
    d={"x":5}; p=tmp_path/"a.json"; save_json(p,d); assert load_json(p)==d
    assert load_json(temp_json_file)=={"msg":"hi"}
def test_timed(capsys):
    val,dt=timed_operation(lambda x:x*3,7); print("dt=",dt); out=capsys.readouterr().out
    assert "dt=" in out and val==21
@pytest.mark.api
def test_api(monkeypatch):
    monkeypatch.setenv("API_MODE","offline")
    assert fetch_username(9)=="cached_9"
'''))
(root/"tests"/"test_slow.py").write_text(textwrap.dedent('''
import time, pytest
@pytest.mark.slow
def test_slow(event_log,fake_clock):
    event_log.append(f"start@{fake_clock['now']}")
    fake_clock["now"]+=3.0
    event_log.append(f"end@{fake_clock['now']}")
    assert len(event_log)==2
'''))
Running tests and producing a JSON summary
The tutorial runs the suite twice: once with the default options, which skip slow tests, and once including slow tests via --runslow. After both runs a summary JSON is written with the result of each run, the number of test files and an example event log.
print("Project created at:", root)
print("\nRUN #1 (default, skips @slow)\n")
r1=subprocess.run([sys.executable,"-m","pytest",str(root)],text=True)
print("\nRUN #2 (--runslow)\n")
r2=subprocess.run([sys.executable,"-m","pytest",str(root),"--runslow"],text=True)
summary_file=root/"summary.json"
summary={
    "test_files": len(list(root.rglob("test_*.py"))),
    "runs": ["default","--runslow"],
    "results": ["success" if r1.returncode==0 else "fail",
                "success" if r2.returncode==0 else "fail"],
    "contains_slow_tests": True,
    "example_event_log": ["start@1000.0","end@1003.0"]
}
summary_file.write_text(json.dumps(summary,indent=2))
print("\nFINAL SUMMARY")
print(json.dumps(summary,indent=2))
print("\nTutorial completed: all tests run and summary.json generated.")
Why this structure is useful
The arrangement shows how to combine configuration, lightweight plugins implemented in conftest.py, reusable fixtures, and deterministic testing techniques such as monkeypatching and fake clocks. The JSON summary illustrates how test runs can feed analytics or CI systems. The same patterns can be expanded with coverage, benchmarking or parallel execution for larger projects.
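For instance, a CI step could consume summary.json and fail the build when either run was unsuccessful. A minimal sketch follows (it assumes the file layout and keys produced by the script above; everything else is up to your pipeline):

# Minimal sketch of a CI gate that consumes summary.json
import json, pathlib, sys

summary = json.loads(pathlib.Path("pytest_advanced_tutorial/summary.json").read_text())
failed = [run for run, result in zip(summary["runs"], summary["results"]) if result != "success"]
if failed:
    print("Failing runs:", ", ".join(failed))
    sys.exit(1)
print("All pytest runs succeeded:", ", ".join(summary["runs"]))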