From 122caa61c278a75100d7c23fc601588cc0c60308 Mon Sep 17 00:00:00 2001
From: Brett Langdon
Date: Fri, 7 Feb 2025 10:26:52 -0500
Subject: [PATCH] fix(internal): move logging rate limiter to log filter
 (#12243)

PGB-61

Fixes #4856

Improve and fix the internal `ddtrace.internal.logger.get_logger` implementation.

Today we force all internal `ddtrace` loggers to use a custom subclass of
`logging.Logger` so that we can apply custom logic, such as forwarding internal
error logs to telemetry and rate limiting the volume of `ddtrace` logs emitted.

However, the current implementation depends on internal details of
`logging.getLogger` and tries to replicate some of its behavior. This caused us
to miss a lock that `logging` expects to be held around a shared resource.

This change applies our logic via a log filter instead of a custom logger
class. A filter provides the same behavior with a much simpler implementation
that is easier to reason about and maintain, and it avoids the bug/race
condition described above.

The downside to this approach is that loggers can have multiple filters, and
the first filter that decides a record should not be logged prevents the
remaining filters from running. This means that if a user applies their own
filter to one of our `ddtrace.*` loggers, we may miss collecting and forwarding
internal errors to telemetry. On the other hand, if a user filter rejects a
record, it does not count toward our rate limit (which is acceptable/correct).

## Checklist

- [x] PR author has checked that all the criteria below are met
  - The PR description includes an overview of the change
  - The PR description articulates the motivation for the change
  - The change includes tests OR the PR description describes a testing strategy
  - The PR description notes risks associated with the change, if any
  - Newly-added code is easy to change
  - The change follows the [library release note guidelines](https://ddtrace.readthedocs.io/en/stable/releasenotes.html)
  - The change includes or references documentation updates if necessary
  - Backport labels are set (if [applicable](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting))

## Reviewer Checklist

- [ ] Reviewer has checked that all the criteria below are met
  - Title is accurate
  - All changes are related to the pull request's stated goal
  - Avoids breaking [API](https://ddtrace.readthedocs.io/en/stable/versioning.html#interfaces) changes
  - Testing strategy adequately addresses listed risks
  - Newly-added code is easy to change
  - Release note makes sense to a user of the library
  - If necessary, author has acknowledged and discussed the performance implications of this PR as reported in the benchmarks PR comment
  - Backport labels are set in a manner that is consistent with the [release branch maintenance policy](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting)

---------

Co-authored-by: Yun Kim <35776586+Yun-Kim@users.noreply.github.com>
Co-authored-by: Nicole Cybul
Co-authored-by: Nick Ripley
Co-authored-by: William Conti <58711692+wconti27@users.noreply.github.com>
Co-authored-by: Christophe Papazian <114495376+christophe-papazian@users.noreply.github.com>
Co-authored-by: Munir Abdinur
Co-authored-by: Laplie Anderson
---
 ddtrace/internal/logger.py                    | 216 +++++-----------
 ...nternal-logging-init-6058c02b527cdf77.yaml |   4 +
 tests/tracer/test_logger.py                   | 244 +++++-------------
 3 files changed, 126 insertions(+), 338 deletions(-)
 create mode 100644 releasenotes/notes/fix-internal-logging-init-6058c02b527cdf77.yaml
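For readers unfamiliar with the pattern, here is a minimal, self-contained sketch of the filter-based approach this PR adopts (illustrative only, not the `ddtrace` code; the limiter is deliberately simplified). It relies on two stdlib behaviors the change depends on: a plain callable can act as a filter, and `Logger.addFilter` will not attach the same filter object twice:

```python
import logging

# Hypothetical stand-in for the real filter in ddtrace/internal/logger.py:
# allow one record per (name, level, pathname, lineno) per 60-second bucket.
_seen = {}


def simple_rate_limit_filter(record: logging.LogRecord) -> bool:
    key = (record.name, record.levelno, record.pathname, record.lineno)
    bucket = int(record.created / 60)
    if _seen.get(key) == bucket:
        return False  # already logged in this time bucket: skip
    _seen[key] = bucket
    return True  # output this record


logger = logging.getLogger("example")
logger.addFilter(simple_rate_limit_filter)
# addFilter only adds a filter that is not already present, so repeated
# get_logger-style calls do not stack duplicate filters:
logger.addFilter(simple_rate_limit_filter)
assert logger.filters.count(simple_rate_limit_filter) == 1

for _ in range(3):
    logger.warning("hello")  # same source line: only the first record passes per bucket
```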
diff --git a/ddtrace/internal/logger.py b/ddtrace/internal/logger.py
index 0592afa0a74..6f7b853c3dc 100644
--- a/ddtrace/internal/logger.py
+++ b/ddtrace/internal/logger.py
@@ -1,180 +1,80 @@
 import collections
 import logging
 import os
-import typing
-from typing import Optional  # noqa:F401
-from typing import cast  # noqa:F401
+from typing import DefaultDict
+from typing import Tuple
 
 
-if typing.TYPE_CHECKING:
-    from typing import Any  # noqa:F401
-    from typing import DefaultDict  # noqa:F401
-    from typing import Tuple  # noqa:F401
-
-
-def get_logger(name):
-    # type: (str) -> DDLogger
+def get_logger(name: str) -> logging.Logger:
     """
-    Retrieve or create a ``DDLogger`` instance.
-
-    This function mirrors the behavior of `logging.getLogger`.
+    Retrieve or create a ``Logger`` instance with consistent behavior for internal use.
 
-    If no logger with the provided name has been fetched before then
-    a new one is created.
+    Configure all loggers with a rate limiter filter to prevent excessive logging.
 
-    If a previous logger has been created then it is returned.
-
-    DEV: We do not want to mess with `logging.setLoggerClass()`
-         That will totally mess with the user's loggers, we want
-         just our own, selective loggers to be DDLoggers
-
-    :param name: The name of the logger to fetch or create
-    :type name: str
-    :return: The logger instance
-    :rtype: ``DDLogger``
     """
-    # DEV: `logging.Logger.manager` refers to the single root `logging.Manager` instance
-    #      https://github.com/python/cpython/blob/48769a28ad6ef4183508951fa6a378531ace26a4/Lib/logging/__init__.py#L1824-L1826  # noqa:E501
-    manager = logging.Logger.manager
+    logger = logging.getLogger(name)
+    # addFilter will only add the filter if it is not already present
+    logger.addFilter(log_filter)
+    return logger
 
-    # If the logger does not exist yet, create it
-    # DEV: `Manager.loggerDict` is a dict mapping logger name to logger
-    # DEV: This is a simplified version of `logging.Manager.getLogger`
-    #      https://github.com/python/cpython/blob/48769a28ad6ef4183508951fa6a378531ace26a4/Lib/logging/__init__.py#L1221-L1253  # noqa:E501
-    # DEV: _fixupParents could be adding a placeholder, we want to replace it if that's the case
-    if name in manager.loggerDict:
-        logger = manager.loggerDict[name]
-        if isinstance(manager.loggerDict[name], logging.PlaceHolder):
-            placeholder = logger
-            logger = DDLogger(name=name)
-            manager.loggerDict[name] = logger
-            # DEV: `_fixupChildren` and `_fixupParents` have been around for awhile,
-            # DEV: but add the `hasattr` guard... just in case.
-            if hasattr(manager, "_fixupChildren"):
-                manager._fixupChildren(placeholder, logger)
-            if hasattr(manager, "_fixupParents"):
-                manager._fixupParents(logger)
-    else:
-        logger = DDLogger(name=name)
-        manager.loggerDict[name] = logger
-        if hasattr(manager, "_fixupParents"):
-            manager._fixupParents(logger)
 
-    # Return our logger
-    return cast(DDLogger, logger)
+# Named tuple used for keeping track of a log lines current time bucket and the number of log lines skipped
+LoggingBucket = collections.namedtuple("LoggingBucket", ("bucket", "skipped"))
+# Dict to keep track of the current time bucket per name/level/pathname/lineno
+_buckets: DefaultDict[Tuple[str, int, str, int], LoggingBucket] = collections.defaultdict(lambda: LoggingBucket(0, 0))
+# Allow 1 log record per name/level/pathname/lineno every 60 seconds by default
+# Allow configuring via `DD_TRACE_LOGGING_RATE`
+# DEV: `DD_TRACE_LOGGING_RATE=0` means to disable all rate limiting
+_rate_limit = int(os.getenv("DD_TRACE_LOGGING_RATE", default=60))
 
 
-def hasHandlers(self):
-    # type: (DDLogger) -> bool
-    """
-    See if this logger has any handlers configured.
-    Loop through all handlers for this logger and its parents in the
-    logger hierarchy. Return True if a handler was found, else False.
-    Stop searching up the hierarchy whenever a logger with the "propagate"
-    attribute set to zero is found - that will be the last logger which
-    is checked for the existence of handlers.
-    https://github.com/python/cpython/blob/8f192d12af82c4dc40730bf59814f6a68f68f950/Lib/logging/__init__.py#L1629
+def log_filter(record: logging.LogRecord) -> bool:
     """
-    c = self
-    rv = False
-    while c:
-        if c.handlers:
-            rv = True
-            break
-        if not c.propagate:
-            break
-        else:
-            c = c.parent  # type: ignore
-    return rv
+    Function used to determine if a log record should be outputted or not (True = output, False = skip).
 
-
-class DDLogger(logging.Logger):
-    """
-    Custom rate limited logger used by ``ddtrace``
-
-    This logger class is used to rate limit the output of
-    log messages from within the ``ddtrace`` package.
+    This function will:
+    - Log all records with a level of ERROR or higher with telemetry
+    - Rate limit log records based on the logger name, record level, filename, and line number
     """
+    if record.levelno >= logging.ERROR:
+        # avoid circular import
+        from ddtrace.internal import telemetry
 
-    # Named tuple used for keeping track of a log lines current time bucket and the number of log lines skipped
-    LoggingBucket = collections.namedtuple("LoggingBucket", ("bucket", "skipped"))
-
-    def __init__(self, *args, **kwargs):
-        # type: (*Any, **Any) -> None
-        """Constructor for ``DDLogger``"""
-        super(DDLogger, self).__init__(*args, **kwargs)
-
-        # Dict to keep track of the current time bucket per name/level/pathname/lineno
-        self.buckets = collections.defaultdict(
-            lambda: DDLogger.LoggingBucket(0, 0)
-        )  # type: DefaultDict[Tuple[str, int, str, int], DDLogger.LoggingBucket]
+        # currently we only have one error code
+        full_file_name = os.path.join(record.pathname, record.filename)
+        telemetry.telemetry_writer.add_error(1, record.msg % record.args, full_file_name, record.lineno)
 
-        # Allow 1 log record per name/level/pathname/lineno every 60 seconds by default
-        # Allow configuring via `DD_TRACE_LOGGING_RATE`
-        # DEV: `DD_TRACE_LOGGING_RATE=0` means to disable all rate limiting
-        rate_limit = os.getenv("DD_TRACE_LOGGING_RATE", default=None)
-
-        if rate_limit is not None:
-            self.rate_limit = int(rate_limit)
-        else:
-            self.rate_limit = 60
-
-    def handle(self, record):
-        # type: (logging.LogRecord) -> None
-        """
-        Function used to call the handlers for a log line.
-
-        This implementation will first determine if this log line should
-        be logged or rate limited, and then call the base ``logging.Logger.handle``
-        function if it should be logged
-
-        DEV: This method has all of it's code inlined to reduce on functions calls
-
-        :param record: The log record being logged
-        :type record: ``logging.LogRecord``
-        """
-        if record.levelno >= logging.ERROR:
-            # avoid circular import
-            from ddtrace.internal import telemetry
-
-            # currently we only have one error code
-            full_file_name = os.path.join(record.pathname, record.filename)
-            telemetry.telemetry_writer.add_error(1, record.msg % record.args, full_file_name, record.lineno)
-
-        # If rate limiting has been disabled (`DD_TRACE_LOGGING_RATE=0`) then apply no rate limit
-        # If the logging is in debug, then do not apply any limits to any log
-        if not self.rate_limit or self.getEffectiveLevel() == logging.DEBUG:
-            super(DDLogger, self).handle(record)
-            return
+    logger = logging.getLogger(record.name)
+    # If rate limiting has been disabled (`DD_TRACE_LOGGING_RATE=0`) then apply no rate limit
+    # If the logger is set to debug, then do not apply any limits to any log
+    if not _rate_limit or logger.getEffectiveLevel() == logging.DEBUG:
+        return True
 
-        # Allow 1 log record by name/level/pathname/lineno every X seconds
-        # DEV: current unix time / rate (e.g. 300 seconds) = time bucket
-        #      int(1546615098.8404942 / 300) = 515538
-        # DEV: LogRecord `created` is a unix timestamp/float
-        # DEV: LogRecord has `levelname` and `levelno`, we want `levelno` e.g. `logging.DEBUG = 10`
-        current_bucket = int(record.created / self.rate_limit)
-
-        # Limit based on logger name, record level, filename, and line number
-        #   ('ddtrace.writer', 'DEBUG', '../site-packages/ddtrace/writer.py', 137)
-        # This way each unique log message can get logged at least once per time period
-        # DEV: LogRecord has `levelname` and `levelno`, we want `levelno` e.g. `logging.DEBUG = 10`
-        key = (record.name, record.levelno, record.pathname, record.lineno)
-
-        # Only log this message if the time bucket has changed from the previous time we ran
-        logging_bucket = self.buckets[key]
-        if logging_bucket.bucket != current_bucket:
-            # Append count of skipped messages if we have skipped some since our last logging
-            if logging_bucket.skipped:
-                record.msg = "{}, %s additional messages skipped".format(record.msg)
-                record.args = record.args + (logging_bucket.skipped,)  # type: ignore
-
+    # Allow 1 log record by name/level/pathname/lineno every X seconds
+    # DEV: current unix time / rate (e.g. 300 seconds) = time bucket
+    #      int(1546615098.8404942 / 300) = 515538
+    # DEV: LogRecord `created` is a unix timestamp/float
+    # DEV: LogRecord has `levelname` and `levelno`, we want `levelno` e.g. `logging.DEBUG = 10`
+    current_bucket = int(record.created / _rate_limit)
+    # Limit based on logger name, record level, filename, and line number
+    #   ('ddtrace.writer', 'DEBUG', '../site-packages/ddtrace/writer.py', 137)
+    # This way each unique log message can get logged at least once per time period
+    # DEV: LogRecord has `levelname` and `levelno`, we want `levelno` e.g. `logging.DEBUG = 10`
+    key = (record.name, record.levelno, record.pathname, record.lineno)
+    # Only log this message if the time bucket has changed from the previous time we ran
+    logging_bucket = _buckets[key]
+    if logging_bucket.bucket != current_bucket:
+        # Append count of skipped messages if we have skipped some since our last logging
+        if logging_bucket.skipped:
+            record.msg = "{}, %s additional messages skipped".format(record.msg)
+            record.args = record.args + (logging_bucket.skipped,)  # type: ignore
         # Reset our bucket
-            self.buckets[key] = DDLogger.LoggingBucket(current_bucket, 0)
-
-            # Call the base handle to actually log this record
-            super(DDLogger, self).handle(record)
-        else:
-            # Increment the count of records we have skipped
-            # DEV: `self.buckets[key]` is a tuple which is immutable so recreate instead
-            self.buckets[key] = DDLogger.LoggingBucket(logging_bucket.bucket, logging_bucket.skipped + 1)
+        _buckets[key] = LoggingBucket(current_bucket, 0)
+        # Actually log this record
+        return True
+    # Increment the count of records we have skipped
+    # DEV: `buckets[key]` is a tuple which is immutable so recreate instead
+    _buckets[key] = LoggingBucket(logging_bucket.bucket, logging_bucket.skipped + 1)
+    # Skip this log message
+    return False
diff --git a/releasenotes/notes/fix-internal-logging-init-6058c02b527cdf77.yaml b/releasenotes/notes/fix-internal-logging-init-6058c02b527cdf77.yaml
new file mode 100644
index 00000000000..132e7efaab5
--- /dev/null
+++ b/releasenotes/notes/fix-internal-logging-init-6058c02b527cdf77.yaml
@@ -0,0 +1,4 @@
+---
+fixes:
+  - |
+    internal: Fix ``ddtrace`` internal logger initialization mutating an unlocked shared resource.
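To make the time-bucket arithmetic in `log_filter` concrete, here is a small worked example using the default rate of 60 seconds (the DEV comment in the code divides by a 300-second rate; timestamps below are illustrative, and `LogRecord.created` is a unix timestamp float):

```python
rate_limit = 60  # seconds; the default, configurable via DD_TRACE_LOGGING_RATE

# Records created 30 seconds apart land in the same bucket, so the second is skipped:
assert int(1546615098.84 / rate_limit) == 25776918
assert int(1546615128.84 / rate_limit) == 25776918  # same bucket -> skipped

# A record created past the bucket boundary gets a new bucket id, so it is
# logged again, with an ", N additional messages skipped" suffix appended:
assert int(1546615158.84 / rate_limit) == 25776919  # new bucket -> logged
```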
diff --git a/tests/tracer/test_logger.py b/tests/tracer/test_logger.py
index 00edeeb18b2..55a712d1ac0 100644
--- a/tests/tracer/test_logger.py
+++ b/tests/tracer/test_logger.py
@@ -3,7 +3,8 @@
 import mock
 import pytest
 
-from ddtrace.internal.logger import DDLogger
+import ddtrace.internal.logger
+from ddtrace.internal.logger import LoggingBucket
 from ddtrace.internal.logger import get_logger
 from tests.utils import BaseTestCase
 
@@ -11,22 +12,28 @@
 ALL_LEVEL_NAMES = ("debug", "info", "warning", "error", "exception", "critical", "fatal")
 
 
-class DDLoggerTestCase(BaseTestCase):
+class LoggerTestCase(BaseTestCase):
     def setUp(self):
-        super(DDLoggerTestCase, self).setUp()
+        super(LoggerTestCase, self).setUp()
 
-        self.root = logging.root
-        self.manager = self.root.manager
+        self.manager = logging.root.manager
+
+        # Reset to default values
+        ddtrace.internal.logger._buckets.clear()
+        ddtrace.internal.logger._rate_limit = 60
 
     def tearDown(self):
         # Weeee, forget all existing loggers
         logging.Logger.manager.loggerDict.clear()
         self.assertEqual(logging.Logger.manager.loggerDict, dict())
 
-        self.root = None
         self.manager = None
 
-        super(DDLoggerTestCase, self).tearDown()
+        # Reset to default values
+        ddtrace.internal.logger._buckets.clear()
+        ddtrace.internal.logger._rate_limit = 60
+
+        super(LoggerTestCase, self).tearDown()
 
     def _make_record(
         self,
@@ -42,42 +49,31 @@ def _make_record(
     ):
         return logger.makeRecord(logger.name, level, fn, lno, msg, args, exc_info, func, extra)
 
-    @mock.patch("ddtrace.internal.logger.DDLogger.handle")
-    def assert_log_records(self, log, expected_levels, handle):
-        for name in ALL_LEVEL_NAMES:
-            method = getattr(log, name)
-            method("test")
-
-        records = [args[0][0] for args in handle.call_args_list]
-        for record in records:
-            self.assertIsInstance(record, logging.LogRecord)
-            self.assertTrue("test.logger" in record.name or "ddtrace" in record.name)
-
-        levels = [r.levelname for r in records]
-        self.assertEqual(levels, expected_levels)
-
     def test_get_logger(self):
         """
         When using `get_logger` to get a logger
             When the logger does not exist
-                We create a new DDLogger
+                We create a new logging.Logger
             When the logger exists
                 We return the expected logger
            When a different logger is requested
-                We return a new DDLogger
+                We return a new logging.Logger
            When a Placeholder exists
-                We return DDLogger
+                We return logging.Logger
        """
+        assert self.manager is not None
+
        # Assert the logger doesn't already exist
        self.assertNotIn("test.logger", self.manager.loggerDict)

        # Fetch a new logger
        log = get_logger("test.logger")
+        assert ddtrace.internal.logger.log_filter in log.filters
        self.assertEqual(log.name, "test.logger")
        self.assertEqual(log.level, logging.NOTSET)

-        # Ensure it is a DDLogger
-        self.assertIsInstance(log, DDLogger)
+        # Ensure it is a logging.Logger
+        self.assertIsInstance(log, logging.Logger)

        # Make sure it is stored in all the places we expect
        self.assertEqual(self.manager.getLogger("test.logger"), log)
        self.assertEqual(self.manager.loggerDict["test.logger"], log)
@@ -93,163 +89,49 @@ def test_get_logger(self):
        self.assertNotEqual(log, new_log)

        # If a PlaceHolder is in place of the logger
-        # We should return the DDLogger
+        # We should return the logging.Logger
        self.assertIsInstance(self.manager.loggerDict["new.test"], logging.PlaceHolder)
        log = get_logger("new.test")
        self.assertEqual(log.name, "new.test")
-        self.assertIsInstance(log, DDLogger)
-
-    def test_get_logger_children(self):
-        """
-        When using `get_logger` to get a logger
-            We appropriately assign children loggers
-
-        DEV: This test case is to ensure we are calling `manager._fixupChildren(logger)`
-        """
-        root = get_logger("test")
-        root.setLevel(logging.WARNING)
-
-        child_logger = get_logger("test.newplaceholder.long.component")
-        self.assertEqual(child_logger.parent, root)
-
-        parent_logger = get_logger("test.newplaceholder")
-        self.assertEqual(child_logger.parent, parent_logger)
-
-        parent_logger.setLevel(logging.INFO)
-        # Because the child logger's level remains unset, it should inherit
-        # the level of its closest parent, which is INFO.
-        # If we did not properly maintain the logger tree, this would fail
-        # because child_logger would be set to the default when it was created
-        # which was logging.WARNING.
-        self.assertEqual(child_logger.getEffectiveLevel(), logging.INFO)
-
-        # Clean up for future tests.
-        root.setLevel(logging.NOTSET)
-
-    def test_get_logger_parents(self):
-        """
-        When using `get_logger` to get a logger
-            We appropriately assign parent loggers
-
-        DEV: This test case is to ensure we are calling `manager._fixupParents(logger)`
-        """
-        # Fetch a new logger
-        test_log = get_logger("test")
-        self.assertEqual(test_log.parent, self.root)
-
-        # Fetch a new child log
-        # Auto-associate with parent `test` logger
-        child_log = get_logger("test.child")
-        self.assertEqual(child_log.parent, test_log)
-
-        # Deep child
-        deep_log = get_logger("test.child.logger.from.test.case")
-        self.assertEqual(deep_log.parent, child_log)
-
-    def test_logger_init(self):
-        """
-        When creating a new DDLogger
-            Has the same interface as logging.Logger
-            Configures a defaultdict for buckets
-            Properly configures the rate limit
-        """
-        # Create a logger
-        log = DDLogger("test.logger")
-
-        # Ensure we set the name and use default log level
-        self.assertEqual(log.name, "test.logger")
-        self.assertEqual(log.level, logging.NOTSET)
-
-        # Assert DDLogger default properties
-        self.assertIsInstance(log.buckets, dict)
-        self.assertEqual(log.rate_limit, 60)
-
-        # Assert manager and parent
-        # DEV: Parent is `None` because `manager._findParents()` doesn't get called
-        #      unless we use `get_logger` (this is the same behavior as `logging.getLogger` and `Logger('name')`)
-        self.assertEqual(log.manager, self.manager)
-        self.assertIsNone(log.parent)
-
-        # Override rate limit from environment variable
-        with self.override_env(dict(DD_TRACE_LOGGING_RATE="10")):
-            log = DDLogger("test.logger")
-            self.assertEqual(log.rate_limit, 10)
-
-        # Set specific log level
-        log = DDLogger("test.logger", level=logging.DEBUG)
-        self.assertEqual(log.level, logging.DEBUG)
-
-    def test_logger_log(self):
-        """
-        When calling `DDLogger` log methods
-            We call `DDLogger.handle` with the expected log record
-        """
-        log = get_logger("test.logger")
-
-        # -- NOTSET
-        # By default no level is set so we only get warn, error, and critical messages
-        self.assertEqual(log.level, logging.NOTSET)
-        # `log.warning`, `log.error`, `log.exception`, `log.critical`, `log.fatal`
-        self.assert_log_records(log, ["WARNING", "ERROR", "ERROR", "CRITICAL", "CRITICAL"])
-
-        # -- CRITICAL
-        log.setLevel(logging.CRITICAL)
-        # `log.critical`, `log.fatal`
-        self.assert_log_records(log, ["CRITICAL", "CRITICAL"])
-
-        # -- ERROR
-        log.setLevel(logging.ERROR)
-        # `log.error`, `log.exception`, `log.critical`, `log.fatal`
-        self.assert_log_records(log, ["ERROR", "ERROR", "CRITICAL", "CRITICAL"])
-
-        # -- WARN
-        log.setLevel(logging.WARN)
-        # `log.warning`, `log.error`, `log.exception`, `log.critical`, `log.fatal`
-        self.assert_log_records(log, ["WARNING", "ERROR", "ERROR", "CRITICAL", "CRITICAL"])
+        self.assertIsInstance(log, logging.Logger)
 
-        # -- INFO
-        log.setLevel(logging.INFO)
-        # `log.info`, `log.warning`, `log.error`, `log.exception`, `log.critical`, `log.fatal`
-        self.assert_log_records(log, ["INFO", "WARNING", "ERROR", "ERROR", "CRITICAL", "CRITICAL"])
-
-        # -- DEBUG
-        log.setLevel(logging.DEBUG)
-        # `log.debug`, `log.info`, `log.warning`, `log.error`, `log.exception`, `log.critical`, `log.fatal`
-        self.assert_log_records(log, ["DEBUG", "INFO", "WARNING", "ERROR", "ERROR", "CRITICAL", "CRITICAL"])
-
-    @mock.patch("logging.Logger.handle")
-    def test_logger_handle_no_limit(self, base_handle):
+    @mock.patch("logging.Logger.callHandlers")
+    def test_logger_handle_no_limit(self, call_handlers):
         """
-        Calling `DDLogger.handle`
+        Calling `logging.Logger.handle`
             When no rate limit is set
                 Always calls the base `Logger.handle`
         """
         # Configure an INFO logger with no rate limit
         log = get_logger("test.logger")
         log.setLevel(logging.INFO)
-        log.rate_limit = 0
+        ddtrace.internal.logger._rate_limit = 0
 
         # Log a bunch of times very quickly (this is fast)
         for _ in range(1000):
             log.info("test")
 
         # Assert that we did not perform any rate limiting
-        self.assertEqual(base_handle.call_count, 1000)
+        self.assertEqual(call_handlers.call_count, 1000)
 
         # Our buckets are empty
-        self.assertEqual(log.buckets, dict())
+        self.assertEqual(ddtrace.internal.logger._buckets, dict())
 
-    @mock.patch("logging.Logger.handle")
-    def test_logger_handle_debug(self, base_handle):
+    @mock.patch("logging.Logger.callHandlers")
+    def test_logger_handle_debug(self, call_handlers):
         """
-        Calling `DDLogger.handle`
+        Calling `logging.Logger.handle`
             When effective level is DEBUG
                 Always calls the base `Logger.handle`
         """
+        # Our buckets are empty
+        self.assertEqual(ddtrace.internal.logger._buckets, dict())
+
         # Configure an INFO logger with no rate limit
         log = get_logger("test.logger")
         log.setLevel(logging.DEBUG)
-        assert log.rate_limit > 0
+        assert log.getEffectiveLevel() == logging.DEBUG
+        assert ddtrace.internal.logger._rate_limit > 0
 
         # Log a bunch of times very quickly (this is fast)
         for level in ALL_LEVEL_NAMES:
@@ -259,15 +141,15 @@ def test_logger_handle_debug(self, base_handle):
 
         # Assert that we did not perform any rate limiting
         total = 1000 * len(ALL_LEVEL_NAMES)
-        self.assertTrue(total <= base_handle.call_count <= total + 1)
+        self.assertTrue(total <= call_handlers.call_count <= total + 1)
 
         # Our buckets are empty
-        self.assertEqual(log.buckets, dict())
+        self.assertEqual(ddtrace.internal.logger._buckets, dict())
 
-    @mock.patch("logging.Logger.handle")
-    def test_logger_handle_bucket(self, base_handle):
+    @mock.patch("logging.Logger.callHandlers")
+    def test_logger_handle_bucket(self, call_handlers):
         """
-        When calling `DDLogger.handle`
+        When calling `logging.Logger.handle`
             With a record
                 We pass it to the base `Logger.handle`
                 We create a bucket for tracking
@@ -279,22 +161,22 @@
         log.handle(record)
 
         # We passed to base Logger.handle
-        base_handle.assert_called_once_with(record)
+        call_handlers.assert_called_once_with(record)
 
         # We added an bucket entry for this record
         key = (record.name, record.levelno, record.pathname, record.lineno)
-        logging_bucket = log.buckets.get(key)
-        self.assertIsInstance(logging_bucket, DDLogger.LoggingBucket)
+        logging_bucket = ddtrace.internal.logger._buckets.get(key)
+        self.assertIsInstance(logging_bucket, LoggingBucket)
 
         # The bucket entry is correct
-        expected_bucket = int(record.created / log.rate_limit)
+        expected_bucket = int(record.created / ddtrace.internal.logger._rate_limit)
         self.assertEqual(logging_bucket.bucket, expected_bucket)
         self.assertEqual(logging_bucket.skipped, 0)
 
-    @mock.patch("logging.Logger.handle")
-    def test_logger_handle_bucket_limited(self, base_handle):
+    @mock.patch("logging.Logger.callHandlers")
+    def test_logger_handle_bucket_limited(self, call_handlers):
         """
-        When calling `DDLogger.handle`
+        When calling `logging.Logger.handle`
             With multiple records in a single time frame
                 We pass only the first to the base `Logger.handle`
                 We keep track of the number skipped
@@ -302,7 +184,8 @@
         log = get_logger("test.logger")
 
         # Create log record and handle it
-        first_record = self._make_record(log, msg="first")
+        record = self._make_record(log, msg="first")
+        first_record = record
         log.handle(first_record)
 
         for _ in range(100):
@@ -312,21 +195,22 @@
             log.handle(record)
 
         # We passed to base Logger.handle
-        base_handle.assert_called_once_with(first_record)
+        call_handlers.assert_called_once_with(first_record)
 
         # We added an bucket entry for these records
         key = (record.name, record.levelno, record.pathname, record.lineno)
-        logging_bucket = log.buckets.get(key)
+        logging_bucket = ddtrace.internal.logger._buckets.get(key)
+        assert logging_bucket is not None
 
         # The bucket entry is correct
-        expected_bucket = int(first_record.created / log.rate_limit)
+        expected_bucket = int(first_record.created / ddtrace.internal.logger._rate_limit)
         self.assertEqual(logging_bucket.bucket, expected_bucket)
         self.assertEqual(logging_bucket.skipped, 100)
 
-    @mock.patch("logging.Logger.handle")
-    def test_logger_handle_bucket_skipped_msg(self, base_handle):
+    @mock.patch("logging.Logger.callHandlers")
+    def test_logger_handle_bucket_skipped_msg(self, call_handlers):
         """
-        When calling `DDLogger.handle`
+        When calling `logging.Logger.handle`
             When a bucket exists for a previous time frame
                 We pass only the record to the base `Logger.handle`
                 We update the record message to include the number of skipped messages
@@ -340,15 +224,15 @@
 
         # Create a bucket entry for this record
         key = (record.name, record.levelno, record.pathname, record.lineno)
-        bucket = int(record.created / log.rate_limit)
+        bucket = int(record.created / ddtrace.internal.logger._rate_limit)
         # We want the time bucket to be for an older bucket
-        log.buckets[key] = DDLogger.LoggingBucket(bucket=bucket - 1, skipped=20)
+        ddtrace.internal.logger._buckets[key] = LoggingBucket(bucket=bucket - 1, skipped=20)
 
         # Handle our record
         log.handle(record)
 
         # We passed to base Logger.handle
-        base_handle.assert_called_once_with(record)
+        call_handlers.assert_called_once_with(record)
 
         self.assertEqual(record.msg, original_msg + ", %s additional messages skipped")
         self.assertEqual(record.args, original_args + (20,))
 
@@ -356,7 +240,7 @@
     def test_logger_handle_bucket_key(self):
         """
-        When calling `DDLogger.handle`
+        When calling `logging.Logger.handle`
             With different log messages
                 We use different buckets to limit them
         """
@@ -388,7 +272,7 @@ def get_key(record):
         all_records = (record1, record2, record3, record4, record5, record6)
         [log.handle(record) for record in all_records]
 
-        buckets = log.buckets
+        buckets = ddtrace.internal.logger._buckets
         # We have 6 records but only end up with 5 buckets
         self.assertEqual(len(buckets), 5)
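The filter-ordering caveat called out in the description comes straight from the stdlib: `logging.Filterer.filter` stops at the first filter that rejects a record, so a user filter attached before ours can keep `log_filter` from ever seeing an error record. A minimal illustration (the filter names are hypothetical, and `ddtrace` itself is not imported here):

```python
import logging

calls = []


def user_filter(record: logging.LogRecord) -> bool:
    calls.append("user")
    return False  # the user filter rejects the record


def dd_filter(record: logging.LogRecord) -> bool:
    calls.append("dd")
    return True


logger = logging.getLogger("ddtrace.example")
logger.addFilter(user_filter)  # attached first, runs first
logger.addFilter(dd_filter)

logger.error("boom")
# dd_filter never ran, so a real error would not be forwarded to telemetry:
assert calls == ["user"]
```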