"""Custom logging handler for observatory operations with database storage.
This module provides a specialized logging handler that extends Python's standard
logging.Handler to provide dual-output logging: console display and database storage.
It's designed specifically for observatory automation systems where logging events
need to be both immediately visible and persistently stored for analysis.
Key features:
- Dual logging output (console and database)
- Error state tracking for the parent instance
- Automatic timestamp formatting with microsecond precision
- Exception and stack trace capture
- SQL injection protection through quote escaping
- UTC timezone standardization
The handler is particularly useful for long-running observatory operations where:
- Real-time monitoring of system status is required
- Historical log analysis is needed for debugging
- Error states need to be tracked at the instance level
- Database queries on log data are necessary
Typical usage:
>>> from astra.logger import ObservatoryLogger, DatabaseLoggingHandler
>>> from astra.database_manager import DatabaseManager
>>> observatory_name = 'MyObservatory'
>>> db_manager = DatabaseManager(observatory_name)
>>> logger = ObservatoryLogger(observatory_name)
>>> logger.addHandler(DatabaseLoggingHandler(db_manager))
Note:
The handler expects a database manager object exposing an ``execute(query)`` method
(see ``DatabaseManagerProtocol``).
"""
import logging
import sys
import traceback
from datetime import UTC, datetime
from pathlib import Path
from typing import Any, Literal, Optional, Protocol
class DatabaseManagerProtocol(Protocol):
    """Structural type for objects that can execute SQL statements.

    Any object exposing ``execute(query)`` satisfies this protocol; it is
    used by :class:`DatabaseLoggingHandler` to run INSERT statements.
    """

    def execute(self, query: str) -> Any: ...
[docs]
class ObservatoryLogger(logging.Logger):
"""Custom logger for observatory operations with error tracking.
Attributes:
error_source (list): List to track sources of errors.
error_free (bool): Flag indicating if the logger has encountered errors.
"""
def __init__(
self,
name: str,
error_source: list | None = None,
error_free: bool = True,
level=logging.INFO,
) -> None:
super().__init__(name, level=level)
self.error_source = [] if error_source is None else error_source
self.error_free = error_free
[docs]
def error(self, msg, *args, **kwargs):
"""Overrides logging.Logger.error to set error_free to False."""
self.error_free = False
super().error(msg, *args, **kwargs)
[docs]
def critical(self, msg, *args, **kwargs):
"""Overrides logging.Logger.critical to set error_free to False."""
self.error_free = False
super().critical(msg, *args, **kwargs)
[docs]
def report_device_issue(
self,
device_type: str,
device_name: str,
message: str,
exception: Optional[Exception] = None,
exc_info: bool = True,
level: Literal["error", "warning"] = "error",
) -> None:
"""Logs device-specific issues and tracks error sources."""
error = f"{device_type} {device_name}: {message}" + (
f". Exception: {str(exception)}" if exception is not None else ""
)
self.error_source.append(
{"device_type": device_type, "device_name": device_name, "error": error}
)
if level == "warning":
self.warning(error, exc_info=exc_info)
else:
self.error(error, exc_info=exc_info)
[docs]
class DatabaseLoggingHandler(logging.Handler):
"""Custom logging handler for dual-output to console and database.
Extends Python's standard logging.Handler to provide specialized logging
for observatory automation systems. Simultaneously outputs log messages
to console for real-time monitoring and stores them in database for
persistent storage and analysis.
Attributes:
database_manager (DatabaseManager): Instance managing database operations,
specifically database_manager.execute.
"""
def __init__(self, database_manager: DatabaseManagerProtocol) -> None:
logging.Handler.__init__(self)
self.database_manager = database_manager
[docs]
def emit(self, record: logging.LogRecord) -> None:
"""Process and emit a log record to console and database.
This method is called automatically by the logging framework when a log
message is generated. It formats the record for console output, tracks
error states in the parent instance, and stores the record in the database.
Args:
record: The log record to be processed and emitted.
Note:
If the log level is ERROR or higher, sets instance.error_free to False.
All log records are stored in the 'log' database table with timestamp,
level, module, function, line number, and message.
"""
dt_str = datetime.now(UTC).strftime("%Y-%m-%d %H:%M:%S.%f")[:-3]
level = record.levelname.lower()
message = record.msg if isinstance(record.msg, str) else str(record.msg)
if record.exc_info:
message += "\n" + "".join(traceback.format_exception(*record.exc_info))
if record.stack_info:
message += "\n" + record.stack_info
# make message safe for sql
message = message.replace("'", "''")
try:
self.database_manager.execute(
f"INSERT INTO log VALUES ('{dt_str}', '{level}', '{message}')"
)
except Exception as e:
print(f"Failed to log to database: {e}")
class ConsoleStreamHandler(logging.StreamHandler):
    """Stream handler using ``CustomFormatter`` that tolerates a closed stream.

    Attributes:
        log_traceback (bool): Stored flag; not read in this class's visible
            code. NOTE(review): confirm whether a caller or the formatter
            consults it.
    """

    def __init__(self, log_traceback: bool = True, **kwargs) -> None:
        """Create the handler and install the project's custom formatter."""
        super().__init__(**kwargs)
        self.setFormatter(CustomFormatter())
        self.log_traceback = log_traceback

    def emit(self, record: logging.LogRecord) -> None:
        """Write the formatted record to the stream.

        If the stream is missing or already closed, the message is printed
        instead, so late log records are still visible.

        Args:
            record: The log record to emit.
        """
        try:
            msg = self.format(record)
            if self.stream is not None and not self.stream.closed:
                self.stream.write(msg + self.terminator)
                self.flush()
            else:
                print(f"Stream closed. Log: {msg}")
        except Exception:
            self.handleError(record)

    @classmethod
    def attach(
        cls,
        logger: logging.Logger,
        level: int = logging.INFO,
        propagate: bool = False,
        remove_other_handlers: bool = False,
    ) -> None:
        """Ensure a `ConsoleStreamHandler` is attached to ``logger``.

        Configures ``logger`` with a `ConsoleStreamHandler` at the requested
        ``level``, adding one only if none is present.

        Parameters:
        - logger: Logger to configure.
        - level: Logging level to set on the logger and handler (default
          ``logging.INFO``).
        - propagate: Whether log records should propagate to ancestor loggers.
        - remove_other_handlers: If True, remove non-console handlers from
          ``logger`` before adding the console handler.

        Note:
            This method intentionally does not attach handlers to the root
            logger to avoid interfering with other frameworks (for example,
            Uvicorn's logging configuration).
        """
        if remove_other_handlers:
            # Iterate over a copy: removeHandler() mutates logger.handlers,
            # and removing while iterating the live list skips entries.
            for handler in list(logger.handlers):
                if not isinstance(handler, ConsoleStreamHandler):
                    logger.removeHandler(handler)
        if not any(isinstance(h, ConsoleStreamHandler) for h in logger.handlers):
            logger.setLevel(level)
            console_handler = cls()
            console_handler.setLevel(level)
            logger.addHandler(console_handler)
        logger.propagate = propagate
[docs]
class FileHandler(logging.FileHandler):
FORMAT = "%(levelname)s,%(asctime)s.%(msecs)03d,%(process)d,%(name)s,(%(filename)s:%(lineno)d),%(message)s"
DATEFMT = "%Y-%m-%d %H:%M:%S"
def __init__(
self, filename: str | Path, log_traceback: bool = True, **kwargs
) -> None:
super().__init__(filename, **kwargs)
self.log_traceback = log_traceback
self.setFormatter(logging.Formatter(self.FORMAT, self.DATEFMT))
self.setLevel(logging.ERROR)
[docs]
def emit(self, record: logging.LogRecord) -> None:
try:
msg = self.format(record)
if self.stream is not None and not self.stream.closed:
self.stream.write(msg + self.terminator)
self.flush()
else:
print(f"Stream closed. Log: {msg}")
except Exception:
self.handleError(record)