How can I retrieve loguru descriptive traceback?
I want to retrieve the loguru descriptive traceback when doing the serialized logging.
Is there a function like traceback.format_exception(*error), but from loguru, that I can use to get the descriptive traceback?
Here is my current setup:
import json
import sys
import traceback
import loguru
from loguru import logger
PIPELINE_NAME = "test"
SERIALIZED_LOGGING = True
def serialize(record):
    """Serialize to our custom needs for structured logging.

    With `logger.exception(msg)` all fields of the `exception` key are
    populated.  With `logger.error(msg)` the `traceback` field remains
    unpopulated.  Anything passed as extra kwargs will also be appended
    to the log record.  Additionally, `logger.x("@message",
    show_exception_value=False)` will not serialize the exception's
    original value and will make it None.  Useful in case of
    BulkWriteError, which throws the whole duplicate document.

    Args:
        record: a dict record of loguru

    Returns:
        a JSON encoded str.
    """
    # record["exception"] (loguru.RecordException) is set only by
    # logger.exception(); sys.exc_info() also catches logger.error()
    # calls made inside an `except` block.
    error = record["exception"]
    error_by_default = sys.exc_info()  # logger.error
    pipeline: str | None = record["extra"].get("pipeline", None)
    show_exception_value: bool = record["extra"].get("show_exception_value", True)
    level: str = record["level"].name
    # Copy so we never mutate the live record["extra"] that loguru and
    # any other handlers keep using after this call.
    extra = record["extra"].copy()
    if error:  # only set when exception.
        exception = {
            "type": error.type.__name__,
            "value": str(error.value) if show_exception_value else None,
            # RecordException is a (type, value, traceback) namedtuple,
            # so it unpacks straight into format_exception.
            "traceback": "".join(traceback.format_exception(*error)),
        }
    elif error_by_default[0]:  # whenever error occurs
        _type, _value, _ = error_by_default  # reuse; no second exc_info() call
        exception = {
            "type": _type.__name__,
            "value": str(_value) if show_exception_value else None,
            "traceback": None,
        }
    else:
        exception = None
    to_serialize = {
        "level": level,
        # "time": record["time"].strftime("%d-%m-%YT%H:%M:%SZ"),
        "message": record["message"],
        "pipeline": pipeline,
        "exception": exception,
    }
    # Append user-supplied extras, skipping internal control keys so
    # show_exception_value / serialized never leak into the output.
    for key, value in extra.items():
        if key not in ("pipeline", "serialized", "show_exception_value"):
            to_serialize[key] = value
    return json.dumps(to_serialize)
def patching(record):
    """Patch hook: stash the custom-serialized line on the record.

    The sink's format string "{extra[serialized]}" then emits exactly
    this pre-rendered text.
    """
    serialized = serialize(record)
    record["extra"]["serialized"] = serialized
def get_contextualized_logger(
    pipeline_name: str = PIPELINE_NAME, default_logger=logger
):
    """Generates a contextualized logger with pipeline_name (P1/P2/P3).

    Args:
        pipeline_name: value bound into every record's extra["pipeline"].
        default_logger: the loguru logger to reconfigure.

    Returns:
        The patched logger bound to pipeline_name, or the untouched
        default logger when serialized logging is disabled.
    """
    if not SERIALIZED_LOGGING:
        return default_logger
    default_logger.remove()
    default_logger = default_logger.patch(patching)
    default_logger.add(
        sink=sys.stderr,
        serialize=False,  # custom serialization requires this to be False
        backtrace=True,
        diagnose=False,  # enable for debugging; shows runtime vals of call func
        level="INFO",
        format="{extra[serialized]}",
    )
    # Fix: bind the pipeline name; previously the parameter was unused,
    # so serialize() never saw extra["pipeline"].
    return default_logger.bind(pipeline=pipeline_name)
There is currently no way to retrieve the formatted traceback. :/
Maybe you can use better_exceptions instead, from which Loguru's formatting is derived?
There is currently no way to retrieve the formatted traceback. :/
Maybe you can use
`better_exceptions` instead, from which Loguru's formatting is derived?
I'll check it out soon and update it. Thanks!
Using loguru functions, I found this way to retrieve the formatted traceback message as a string:
from loguru._better_exceptions import ExceptionFormatter
f = ExceptionFormatter(backtrace=True, diagnose=True)
formatted_output = '\n'.join(f.format_exception(type(e), e, e.__traceback__))
I got it working, and with a little bit of improvement, Colored JSON logs.
import json
import sys
import traceback
import os
import re
import loguru
from loguru import logger
from loguru._better_exceptions import ExceptionFormatter
PIPELINE_NAME = "P1"
def colorize_json_string(json_str):
    """
    Apply ANSI colorization to a JSON string after it's been serialized.

    The timestamp, path, and exception fields get fixed colors; the
    level and the message are colored according to the log level.

    Args:
        json_str: a JSON-encoded log record as a string.

    Returns:
        The same string with ANSI escape sequences injected into the
        field values (no longer parseable as strict JSON).
    """
    # Define color codes
    reset = "\033[0m"
    green = "\033[32m"  # For timestamp and success level
    cyan = "\033[36m"  # For DEBUG level and paths
    white = "\033[37m"  # For INFO level
    yellow = "\033[33m"  # For WARNING level
    red = "\033[31m"  # For ERROR level
    white_on_red = "\033[37;41m"  # For CRITICAL level
    # One mapping shared by the level and message colorization; the
    # previous if/elif chain plus six per-level substitutions duplicated
    # this mapping twice.
    level_colors = {
        "DEBUG": cyan,
        "INFO": white,
        "WARNING": yellow,
        "ERROR": red,
        "SUCCESS": green,
        "CRITICAL": white_on_red,
    }
    # Find and colorize the timestamp
    json_str = re.sub(r'("time": ")([^"]+)(")',
                      rf'\1{green}\2{reset}\3', json_str)
    # Extract the level to determine the level/message color.
    level_match = re.search(r'"level": "([^"]+)"', json_str)
    level_color = white  # Default color for unknown levels
    if level_match:
        level = level_match.group(1)
        level_color = level_colors.get(level, white)
        if level in level_colors:
            # Colorize the level itself; unknown levels stay uncolored,
            # matching the original per-level substitutions.
            json_str = re.sub(
                r'("level": ")' + re.escape(level) + r'(")',
                rf'\1{level_color}{level}{reset}\2',
                json_str,
            )
    # Find and colorize the message using the level color
    json_str = re.sub(r'("message": ")(.*?)(")', rf'\1{level_color}\2{reset}\3', json_str)
    # Find and colorize the path
    json_str = re.sub(r'("path": ")(.*?)(")', rf'\1{cyan}\2{reset}\3', json_str)
    # Find and colorize exceptions
    json_str = re.sub(r'("type": ")(.*?)(")', rf'\1{red}\2{reset}\3', json_str)
    json_str = re.sub(r'("value": ")(.*?)(")', rf'\1{red}\2{reset}\3', json_str)
    return json_str
def serialize(record):
    """Serialize a loguru record to a colorized JSON string.

    Adds an ISO-8601 timestamp and a ``module:function:line`` path,
    builds the custom exception payload (using loguru's own
    ExceptionFormatter for the descriptive traceback), then injects
    ANSI colors into the resulting JSON string.

    Args:
        record: a dict record of loguru.

    Returns:
        A JSON-encoded str with ANSI color codes applied.
    """
    # Millisecond-precision ISO timestamp for the "time" field.
    timestamp = record["time"].isoformat(timespec='milliseconds')
    # Call-site information used to build the "path" field.
    file_path = record["file"].path
    module_name = record["module"]
    function_name = record["function"]
    line_number = record["line"]
    # Special handling for Jupyter notebooks, whose module names are
    # numeric cell ids or "ipython-input-..." pseudo-files.
    if module_name.isdigit() or "ipython-input" in str(file_path).lower():
        # Check if we're in a Jupyter notebook
        try:
            # Try to derive a readable name from the kernel connection
            # file.  NOTE(review): relies on IPython internals
            # (kernel.session.config) — confirm against the IPython
            # version in use.
            import IPython
            notebook_path = IPython.get_ipython().kernel.session.config.get('IPKernelApp', {}).get('connection_file', '')
            if notebook_path:
                notebook_name = os.path.basename(notebook_path).split('.', 1)[0]
                module_name = f"jupyter.{notebook_name}"
            else:
                module_name = "__main__"
        except (ImportError, AttributeError):
            module_name = "__main__"  # Fallback name for Jupyter environments
    path_info = f"{module_name}:{function_name}:{line_number}"
    # Get log level
    level = record["level"].name
    # record["exception"] is populated only by logger.exception();
    # sys.exc_info() also catches logger.error() inside `except` blocks.
    error: loguru.RecordException = record["exception"]
    error_by_default = sys.exc_info()  # logger.error
    pipeline: str | None = record["extra"].get("pipeline", None)
    show_exception_value: bool = record["extra"].get("show_exception_value", True)
    # Copy so the live record["extra"] is never mutated.
    extra = record["extra"].copy()
    extra.update({"pipeline": pipeline})
    # Process exception info
    if error:  # only set when exception.
        exc_type, exc_value, exc_tb = error.type, error.value, error.traceback
        # Use loguru's own formatter to get the descriptive
        # (backtrace/diagnose) traceback instead of the stdlib one.
        formatter = ExceptionFormatter(backtrace=True, diagnose=True, colorize=True)
        formatted_traceback = formatter.format_exception(exc_type, exc_value, exc_tb)
        exception = {
            "type": exc_type.__name__,
            "value": str(exc_value) if show_exception_value else None,
            "traceback": "".join(formatted_traceback),
        }
    elif error_by_default[0]:  # whenever error occurs
        _type, _value, _ = sys.exc_info()
        exception = {
            "type": _type.__name__,
            "value": str(_value) if show_exception_value else None,
            "traceback": None,
        }
    else:
        exception = None
    # Prepare data for serialization
    to_serialize = {
        "time": timestamp,
        "level": level,
        "path": path_info,
        "message": record["message"],
        "pipeline": pipeline,
        "exception": exception,
    }
    # Append user extras, skipping internal control keys handled above.
    for key, value in extra.items():
        if key not in ("pipeline", "serialized", "show_exception_value"):
            to_serialize[key] = value
    # Convert to JSON string
    json_str = json.dumps(to_serialize)
    # Colorize the JSON string
    return colorize_json_string(json_str)
def patching(record):
    """Patch hook: stash the serialized, colorized line on the record.

    The sink's format string "{extra[serialized]}" then emits exactly
    this pre-rendered text.
    """
    record["extra"]["serialized"] = serialize(record)
def get_contextualized_logger(
    pipeline_name: str = PIPELINE_NAME, default_logger=logger
):
    """Generates a contextualized logger with pipeline_name.

    Args:
        pipeline_name: value bound into every record's extra["pipeline"].
        default_logger: the loguru logger to reconfigure.

    Returns:
        The patched logger bound to pipeline_name, or the untouched
        default logger when serialized logging is disabled.
    """
    # SERIALIZED_LOGGING is not defined in this module; look it up
    # safely and default to enabled so a missing flag does not raise
    # NameError at call time.
    if not globals().get("SERIALIZED_LOGGING", True):
        return default_logger
    default_logger.remove()
    default_logger = default_logger.patch(patching)
    default_logger.add(
        sink=sys.stderr,
        colorize=False,
        serialize=False,  # custom serialization requires this to be False
        backtrace=True,
        diagnose=True,
        level="INFO",
        format="{extra[serialized]}",
    )
    return default_logger.bind(pipeline=pipeline_name)