import os
import warnings
import logging
import time
from typing import Optional, Tuple

logging_level = os.getenv("LOGGING_LEVEL", "INFO").upper()


class RelativePathFormatter(logging.Formatter):
    def __init__(
        self,
        fmt: Optional[str] = None,
        datefmt: Optional[str] = None,
        remove_prefix: Optional[str] = None,
    ) -> None:
        super().__init__(fmt, datefmt)
        self.remove_prefix = remove_prefix or os.getcwd()
        # Ensure the prefix ends with a separator
        if not self.remove_prefix.endswith(os.sep):
            self.remove_prefix += os.sep

    def format(self, record: logging.LogRecord) -> str:
        """Create a shallow copy of the record and rewrite the pathname to be
        relative to the configured prefix.

        Defensive checks are used to satisfy static type checkers.
        """
        # Make a copy of the record dict so we don't mutate the caller's record
        record_dict = record.__dict__.copy()
        new_record = logging.makeLogRecord(record_dict)
        # Remove the prefix from pathname if present
        pathname = getattr(new_record, "pathname", "")
        if pathname.startswith(self.remove_prefix):
            new_record.pathname = pathname[len(self.remove_prefix) :]
        return super().format(new_record)


def _setup_logging(level: str = logging_level) -> logging.Logger:
    os.environ["TORCH_CPP_LOG_LEVEL"] = "ERROR"
    warnings.filterwarnings(
        "ignore", message="Overriding a previously registered kernel"
    )
    warnings.filterwarnings("ignore", message="Warning only once for all operators")
    warnings.filterwarnings("ignore", message=".*Couldn't find ffmpeg or avconv.*")
    warnings.filterwarnings("ignore", message="'force_all_finite' was renamed to")
    warnings.filterwarnings("ignore", message="n_jobs value 1 overridden")
    warnings.filterwarnings("ignore", message=".*websocket.*is deprecated")

    logging.getLogger("aiortc").setLevel(logging.INFO)
    logging.getLogger("aioice").setLevel(logging.INFO)
    logging.getLogger("asyncio").setLevel(logging.INFO)

    numeric_level = getattr(logging, level.upper(), None)
    if not isinstance(numeric_level, int):
        raise ValueError(f"Invalid log level: {level}")

    # Create a custom formatter
    formatter = RelativePathFormatter(
        fmt="%(levelname)s - %(pathname)s:%(lineno)d - %(message)s",
        datefmt="%Y-%m-%d %H:%M:%S",
    )

    # Create a handler (e.g., StreamHandler for console output)
    handler = logging.StreamHandler()
    handler.setFormatter(formatter)

    # Simple repeat-suppression filter: if the exact same message (level+text)
    # appears repeatedly within a short window, drop duplicates. This keeps
    # the first occurrence for diagnostics but avoids log flooding from
    # recurring asyncio/aioice stack traces.
    class _RepeatFilter(logging.Filter):
        def __init__(self, interval: float = 5.0) -> None:
            super().__init__()
            self._interval = interval
            self._last: Optional[Tuple[int, str]] = None
            self._last_time: float = 0.0

        def filter(self, record: logging.LogRecord) -> bool:
            try:
                msg = record.getMessage()
            except Exception:
                # Fall back to a string representation if getMessage fails
                msg = str(record)
            key: Tuple[int, str] = (getattr(record, "levelno", 0), msg)
            now = time.time()
            if self._last == key and (now - self._last_time) < self._interval:
                return False
            self._last = key
            self._last_time = now
            return True

    handler.addFilter(_RepeatFilter())

    # Configure the root logger to use only this handler
    logging.basicConfig(
        level=numeric_level,
        handlers=[handler],
        force=True,
    )

    # Quiet noisy third-party loggers and route them through the same handler
    for noisy_logger in (
        "uvicorn",
        "uvicorn.error",
        "uvicorn.access",
        "fastapi",
        "starlette",
    ):
        logger = logging.getLogger(noisy_logger)
        logger.setLevel(logging.WARNING)
        logger.handlers = []  # Remove default handlers
        logger.addHandler(handler)  # Attach the custom handler

    logger = logging.getLogger(__name__)
    return logger


logger = _setup_logging(level=logging_level)
logger.debug(f"Logging initialized with level: {logging_level}")
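

# Hypothetical usage sketch (an assumption, not part of the setup above):
# importing this module is enough to configure the root logger, so callers can
# simply call logging.getLogger(__name__) afterwards. Running the file directly
# exercises the relative-path formatter and the repeat filter; the logger name
# "logging_demo" and the messages below are illustrative only.
if __name__ == "__main__":
    demo_logger = logging.getLogger("logging_demo")
    demo_logger.info("Relative-path formatting is active")
    # The same WARNING emitted twice within the filter's 5-second window
    # should appear only once, because _RepeatFilter drops the duplicate
    # at the shared handler.
    demo_logger.warning("duplicate message")
    demo_logger.warning("duplicate message")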