This is just meant as a reference for the structlog / stdlib `logging` integration, as I couldn't find a good tutorial.
The code and the readme were generated by Cursor. I claim no copyright or liability on the content.
`structlog_stdlib_demo.py`:

```python
#!/usr/bin/env python
"""structlog & stdlib logging integration demo.

This script demonstrates the minimum amount of code required to:

1. Configure *structlog* so that all of its log entries are rendered as JSON.
2. Capture **standard library** logging calls and render them using the **same**
   structured format – without changing code that relies on the ``logging``
   module.
3. Offer a small CLI using *argparse* to choose the desired log-level.

Run it like so::

    python structlog_stdlib_demo.py --log-level DEBUG

You should see both structlog and stdlib log calls rendered as JSON.
"""
from __future__ import annotations

import argparse
import logging
import sys
from typing import Final

import structlog


# ---------------------------------------------------------------------------
# Logging configuration helpers
# ---------------------------------------------------------------------------
def _configure_logging(log_level: int) -> None:
    """Configure *structlog* and the root stdlib logger.

    Args:
        log_level: Numeric log-level (e.g. ``logging.INFO``).
    """
    # --- Common processors --------------------------------------------------
    timestamper: Final = structlog.processors.TimeStamper(fmt="iso")
    pre_chain: Final[list] = [
        structlog.stdlib.add_log_level,
        timestamper,
    ]

    # --- Stdlib logging setup ----------------------------------------------
    # We send everything to stdout so it plays nicely with most CLIs.
    handler = logging.StreamHandler(sys.stdout)
    handler.setLevel(log_level)
    # The *ProcessorFormatter* ensures that *structlog* renders stdlib records
    # using the same shared processors (``pre_chain``).  ``ExtraAdder`` copies
    # the ``extra={...}`` fields of stdlib records into the event dict so they
    # show up in the JSON output (requires structlog >= 21.2).
    handler.setFormatter(
        structlog.stdlib.ProcessorFormatter(
            processor=structlog.processors.JSONRenderer(),
            foreign_pre_chain=[*pre_chain, structlog.stdlib.ExtraAdder()],
        )
    )

    root_logger = logging.getLogger()
    root_logger.handlers.clear()  # Remove any default handlers (incl. basicConfig).
    root_logger.addHandler(handler)
    root_logger.setLevel(log_level)

    # --- structlog setup ----------------------------------------------------
    structlog.configure(
        processors=[
            *pre_chain,
            # This wrapper passes the event dictionary to the ProcessorFormatter
            # so we don't double-render JSON.
            structlog.stdlib.ProcessorFormatter.wrap_for_formatter,
        ],
        context_class=dict,
        logger_factory=structlog.stdlib.LoggerFactory(),
        wrapper_class=structlog.make_filtering_bound_logger(log_level),
        cache_logger_on_first_use=True,
    )


# ---------------------------------------------------------------------------
# Demo helpers
# ---------------------------------------------------------------------------
def some_library_function() -> None:
    """Pretend we're inside a third-party library using stdlib *logging*."""
    lib_logger = logging.getLogger("some_library")
    lib_logger.info("doing something in a library", extra={"library_event": "compute"})
    lib_logger.error("library encountered an error", extra={"error_code": 1234})


# ---------------------------------------------------------------------------
# Main CLI entry-point
# ---------------------------------------------------------------------------
def main() -> None:
    parser = argparse.ArgumentParser(
        description="Demo: structlog capturing and rendering stdlib logging calls.",
    )
    parser.add_argument(
        "--log-level",
        default="INFO",
        choices=["CRITICAL", "ERROR", "WARNING", "INFO", "DEBUG", "NOTSET"],
        help="Set the root log-level (default: %(default)s)",
    )
    args = parser.parse_args()
    numeric_level = getattr(logging, args.log_level.upper())

    _configure_logging(numeric_level)

    # ------------------------------------------------------------------
    # structlog usage – this is how you'd normally create a logger.
    # ------------------------------------------------------------------
    log = structlog.get_logger()
    log.info("started", cli_args=vars(args))

    # Standard library logging call – will be rendered in JSON too.
    logging.info("this is stdlib logging", extra={"foo": "bar"})

    # Simulate a function from another dependency that also uses stdlib logging.
    some_library_function()

    log.info("finished")


if __name__ == "__main__":
    main()
```
This walkthrough accompanies `structlog_stdlib_demo.py`.

If you already use structlog for structured (JSON) logging but aren't sure how to capture plain `logging` calls (from your own code or third-party libraries), this guide is for you.

```bash
# Install the required dependency
pip install structlog

# Run the demo with DEBUG log-level
python structlog_stdlib_demo.py --log-level DEBUG
```

You should see all messages – whether produced via structlog or the standard library's `logging` module – emitted in the same JSON format.
Even if your application code has fully migrated to structlog, most third-party
packages (and sometimes your own legacy code) still call
logging.getLogger(...).info(...) and friends.
Without extra work you end up with two parallel logging systems and lose the
benefits of structured logs for anything that doesn’t speak structlog.
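
For illustration, here's roughly what that split looks like with no integration at all (a minimal, deliberately naive sketch; the logger names are made up for the example):

```python
import logging

import structlog

logging.basicConfig(level=logging.INFO)  # classic plain-text stdlib setup
structlog.configure(processors=[structlog.processors.JSONRenderer()])

structlog.get_logger("app").info("user signed up", user_id=42)
# -> {"user_id": 42, "event": "user signed up"}      (JSON on stdout)

logging.getLogger("third_party").info("cache miss")
# -> INFO:third_party:cache miss                     (plain text on stderr)
```
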
The solution is to teach the stdlib logging module to hand off its records
to structlog for rendering.
The CLI part is plain argparse:

```python
parser.add_argument("--log-level", choices=[...], default="INFO")
```

Nothing fancy – we just capture the desired root log-level.
```python
timestamper = structlog.processors.TimeStamper(fmt="iso")
pre_chain = [structlog.stdlib.add_log_level, timestamper]
```

`pre_chain` contains processors that should always run – regardless of whether the log originates from structlog or stdlib. Here we:

- add the log-level name (`INFO`, `ERROR`, …),
- add an ISO-8601 timestamp.

```python
handler = logging.StreamHandler(sys.stdout)
handler.setFormatter(
    structlog.stdlib.ProcessorFormatter(
        processor=structlog.processors.JSONRenderer(),
        foreign_pre_chain=[*pre_chain, structlog.stdlib.ExtraAdder()],
    )
)
```

- We create a plain `logging` handler that writes to stdout.
- We replace its default `logging.Formatter` with `structlog.stdlib.ProcessorFormatter`.
- We pass `pre_chain` as the `foreign_pre_chain` so stdlib records get identical processing (timestamp, level, …) before the final renderer runs; `ExtraAdder()` additionally copies `extra={...}` fields from stdlib records into the event dict.

Finally we wipe any handlers that `basicConfig()` may have installed and attach our custom handler:

```python
root = logging.getLogger()
root.handlers.clear()
root.addHandler(handler)
root.setLevel(log_level)
```

With the stdlib side done, we configure structlog itself:

```python
structlog.configure(
    processors=[
        *pre_chain,
        structlog.stdlib.ProcessorFormatter.wrap_for_formatter,
    ],
    wrapper_class=structlog.make_filtering_bound_logger(log_level),
    logger_factory=structlog.stdlib.LoggerFactory(),
    cache_logger_on_first_use=True,
)
```

Key point: `wrap_for_formatter` places the event dictionary on the `LogRecord` so `ProcessorFormatter` (installed on the stdlib handler above) can pick it up and render it once. Without this wrapper you'd end up with double-encoded JSON.
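
To make that failure mode concrete, here's a sketch of what *not* to do (it reuses `pre_chain` from the snippets above; the exact output shape may vary slightly by structlog version):

```python
# Anti-pattern: rendering JSON inside structlog's own chain while the handler's
# ProcessorFormatter renders again.
structlog.configure(
    processors=[*pre_chain, structlog.processors.JSONRenderer()],  # renders too early
    logger_factory=structlog.stdlib.LoggerFactory(),
)

# The pre-rendered string now reaches ProcessorFormatter as the plain message of
# a "foreign" record and gets wrapped a second time, roughly:
#   {"event": "{\"level\": \"info\", \"event\": \"started\", ...}", "level": "info", ...}
```
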
Your application code keeps calling `structlog.get_logger()`, while third-party code keeps calling `logging.getLogger()` – both end up in the same JSON stream:

```python
log = structlog.get_logger()

log.info("started")                              # structlog call
logging.info("plain log", extra={"foo": "bar"})  # stdlib call
some_library_function()                          # stdlib inside another module
```

Output:

```json
{
  "event": "started",
  "level": "info",
  "timestamp": "2024-05-29T14:42:16.932Z"
}
{
  "event": "plain log",
  "level": "info",
  "foo": "bar",
  "timestamp": "2024-05-29T14:42:16.933Z"
}
{
  "event": "library encountered an error",
  "level": "error",
  "error_code": 1234,
  "timestamp": "2024-05-29T14:42:16.934Z"
}
```

A few ways to adapt this to your own project:

- Swap `JSONRenderer()` for `KeyValueRenderer()` or your own processor.
- Add a processor to `pre_chain` that binds `request_id`/`trace_id`, etc. (a sketch follows at the end).
- `ProcessorFormatter` is itself a `logging.Formatter` subclass, so you can attach it to any other stdlib handler (files, syslog, …) as well.

Happy logging! 🎉
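
P.S. For the `request_id`/`trace_id` idea above: a structlog processor is just a callable taking `(logger, method_name, event_dict)` and returning the dict. A minimal sketch – `request_id_var` is a hypothetical `ContextVar` your framework or middleware would set; it is not part of the demo:

```python
import contextvars

# Hypothetical context variable – set it per request in your middleware.
request_id_var = contextvars.ContextVar("request_id", default=None)


def add_request_id(logger, method_name, event_dict):
    """Copy the current request id (if any) into every event dict."""
    request_id = request_id_var.get()
    if request_id is not None:
        event_dict["request_id"] = request_id
    return event_dict


# Append it to the shared chain so both structlog and stdlib records get it:
# pre_chain = [structlog.stdlib.add_log_level, timestamper, add_request_id]
```

structlog also ships `structlog.contextvars.bind_contextvars()` and the `merge_contextvars` processor if you'd rather use the built-in mechanism.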