diff --git a/.github/COMMIT_TEMPLATE.txt b/.github/COMMIT_TEMPLATE.txt
index f6e418cc2e8..55dc46d5378 100644
--- a/.github/COMMIT_TEMPLATE.txt
+++ b/.github/COMMIT_TEMPLATE.txt
@@ -29,5 +29,5 @@ feat/fix/docs/refactor/ci(xxx): commit title here
 # mysqlpython, openai, opentelemetry, opentracer, profile, psycopg, pylibmc, pymemcache,
 # pymongo, pymysql, pynamodb, pyodbc, pyramid, pytest, redis, rediscluster, requests, rq,
 # sanic, snowflake, sourcecode, sqlalchemy, starlette, stdlib, structlog, subprocess,
-# telemetry, test_logging, tornado, tracer, unittest, urllib3, vendor, vertica, wsgi,
+# telemetry, test_logging, tornado, tracer, unittest, urllib3, valkey, vendor, vertica, wsgi,
 # yaaredis
diff --git a/.github/workflows/build_deploy.yml b/.github/workflows/build_deploy.yml
index 179ed367141..47584911df4 100644
--- a/.github/workflows/build_deploy.yml
+++ b/.github/workflows/build_deploy.yml
@@ -27,7 +27,7 @@ jobs:
   build_wheels:
     uses: ./.github/workflows/build_python_3.yml
     with:
-      cibw_build: 'cp37* cp38* cp39* cp310* cp311* cp312* cp313*'
+      cibw_build: 'cp38* cp39* cp310* cp311* cp312* cp313*'

   build_sdist:
     name: Build source distribution
diff --git a/.gitlab/prepare-oci-package.sh b/.gitlab/prepare-oci-package.sh
index 5958c31e731..7ee7b7d6e77 100755
--- a/.gitlab/prepare-oci-package.sh
+++ b/.gitlab/prepare-oci-package.sh
@@ -1,6 +1,11 @@
 #!/bin/bash
 set -eo pipefail

+if [ "$OS" != "linux" ]; then
+  echo "Only linux packages are supported. Exiting"
+  exit 0
+fi
+
 if [ -n "$CI_COMMIT_TAG" ] && [ -z "$PYTHON_PACKAGE_VERSION" ]; then
   PYTHON_PACKAGE_VERSION=${CI_COMMIT_TAG##v}
 fi
diff --git a/.gitlab/services.yml b/.gitlab/services.yml
index 0282dd9e9d0..3eb29af107a 100644
--- a/.gitlab/services.yml
+++ b/.gitlab/services.yml
@@ -28,6 +28,9 @@
   redis:
     name: registry.ddbuild.io/redis:7.0.7
     alias: redis
+  valkey:
+    name: registry.ddbuild.io/images/mirror/valkey:8.0-alpine
+    alias: valkey
   kafka:
     name: registry.ddbuild.io/images/mirror/apache/kafka:3.8.0
     alias: kafka
@@ -54,6 +57,9 @@
   rediscluster:
     name: registry.ddbuild.io/images/mirror/grokzen/redis-cluster:6.2.0
     alias: rediscluster
+  valkeycluster:
+    name: registry.ddbuild.io/images/mirror/grokzen/redis-cluster:6.2.0
+    alias: valkeycluster
   elasticsearch:
     name: registry.ddbuild.io/images/mirror/library/elasticsearch:7.17.23
     alias: elasticsearch
diff --git a/.riot/requirements/11ac941.txt b/.riot/requirements/11ac941.txt
new file mode 100644
index 00000000000..92df617ba6e
--- /dev/null
+++ b/.riot/requirements/11ac941.txt
@@ -0,0 +1,26 @@
+#
+# This file is autogenerated by pip-compile with Python 3.8
+# by the following command:
+#
+#    pip-compile --allow-unsafe --no-annotate .riot/requirements/11ac941.in
+#
+async-timeout==5.0.1
+attrs==24.3.0
+coverage[toml]==7.6.1
+exceptiongroup==1.2.2
+hypothesis==6.45.0
+importlib-metadata==8.5.0
+iniconfig==2.0.0
+mock==5.1.0
+opentracing==2.4.0
+packaging==24.2
+pluggy==1.5.0
+pytest==8.3.4
+pytest-asyncio==0.23.7
+pytest-cov==5.0.0
+pytest-mock==3.14.0
+pytest-randomly==3.15.0
+sortedcontainers==2.4.0
+tomli==2.2.1
+valkey==6.0.2
+zipp==3.20.2
diff --git a/.riot/requirements/1761702.txt b/.riot/requirements/1761702.txt
deleted file mode 100644
index e6ca01ea565..00000000000
--- a/.riot/requirements/1761702.txt
+++ /dev/null
@@ -1,23 +0,0 @@
-#
-# This file is autogenerated by pip-compile with Python 3.10
-# by the following command:
-#
-#    pip-compile --no-annotate .riot/requirements/1761702.in
-#
-attrs==23.1.0
-coverage[toml]==7.3.4
-exceptiongroup==1.2.0
-hypothesis==6.45.0
-iniconfig==2.0.0
-mock==5.1.0
-opentracing==2.4.0
-packaging==23.2
-pluggy==1.3.0
-pytest==7.4.3
-pytest-asyncio==0.21.1
-pytest-cov==4.1.0
-pytest-mock==3.12.0
-pytest-randomly==3.15.0
-sortedcontainers==2.4.0
-tomli==2.0.1
-yaaredis==3.0.0
diff --git a/.riot/requirements/1cc7b0e.txt b/.riot/requirements/1cc7b0e.txt
deleted file mode 100644
index adb8f71e30b..00000000000
--- a/.riot/requirements/1cc7b0e.txt
+++ /dev/null
@@ -1,25 +0,0 @@
-#
-# This file is autogenerated by pip-compile with Python 3.8
-# by the following command:
-#
-#    pip-compile --no-annotate .riot/requirements/1cc7b0e.in
-#
-attrs==23.1.0
-coverage[toml]==7.3.4
-exceptiongroup==1.2.0
-hypothesis==6.45.0
-importlib-metadata==7.0.0
-iniconfig==2.0.0
-mock==5.1.0
-opentracing==2.4.0
-packaging==23.2
-pluggy==1.3.0
-pytest==7.4.3
-pytest-asyncio==0.21.1
-pytest-cov==4.1.0
-pytest-mock==3.12.0
-pytest-randomly==3.15.0
-sortedcontainers==2.4.0
-tomli==2.0.1
-yaaredis==2.0.4
-zipp==3.17.0
diff --git a/.riot/requirements/1e98e9b.txt b/.riot/requirements/1e98e9b.txt
new file mode 100644
index 00000000000..6e2d11413c3
--- /dev/null
+++ b/.riot/requirements/1e98e9b.txt
@@ -0,0 +1,26 @@
+#
+# This file is autogenerated by pip-compile with Python 3.9
+# by the following command:
+#
+#    pip-compile --allow-unsafe --no-annotate .riot/requirements/1e98e9b.in
+#
+async-timeout==5.0.1
+attrs==24.3.0
+coverage[toml]==7.6.10
+exceptiongroup==1.2.2
+hypothesis==6.45.0
+importlib-metadata==8.5.0
+iniconfig==2.0.0
+mock==5.1.0
+opentracing==2.4.0
+packaging==24.2
+pluggy==1.5.0
+pytest==8.3.4
+pytest-asyncio==0.23.7
+pytest-cov==6.0.0
+pytest-mock==3.14.0
+pytest-randomly==3.16.0
+sortedcontainers==2.4.0
+tomli==2.2.1
+valkey==6.0.2
+zipp==3.21.0
diff --git a/.riot/requirements/1f1e9b4.txt b/.riot/requirements/1f1e9b4.txt
deleted file mode 100644
index d9b9189f22a..00000000000
--- a/.riot/requirements/1f1e9b4.txt
+++ /dev/null
@@ -1,25 +0,0 @@
-#
-# This file is autogenerated by pip-compile with Python 3.9
-# by the following command:
-#
-#    pip-compile --no-annotate .riot/requirements/1f1e9b4.in
-#
-attrs==23.1.0
-coverage[toml]==7.3.4
-exceptiongroup==1.2.0
-hypothesis==6.45.0
-importlib-metadata==7.0.0
-iniconfig==2.0.0
-mock==5.1.0
-opentracing==2.4.0
-packaging==23.2
-pluggy==1.3.0
-pytest==7.4.3
-pytest-asyncio==0.21.1
-pytest-cov==4.1.0
-pytest-mock==3.12.0
-pytest-randomly==3.15.0
-sortedcontainers==2.4.0
-tomli==2.0.1
-yaaredis==3.0.0
-zipp==3.17.0
diff --git a/.riot/requirements/4aa2a2a.txt b/.riot/requirements/4aa2a2a.txt
new file mode 100644
index 00000000000..6bc72515b3f
--- /dev/null
+++ b/.riot/requirements/4aa2a2a.txt
@@ -0,0 +1,22 @@
+#
+# This file is autogenerated by pip-compile with Python 3.11
+# by the following command:
+#
+#    pip-compile --allow-unsafe --no-annotate .riot/requirements/4aa2a2a.in
+#
+async-timeout==5.0.1
+attrs==24.3.0
+coverage[toml]==7.6.10
+hypothesis==6.45.0
+iniconfig==2.0.0
+mock==5.1.0
+opentracing==2.4.0
+packaging==24.2
+pluggy==1.5.0
+pytest==8.3.4
+pytest-asyncio==0.23.7
+pytest-cov==6.0.0
+pytest-mock==3.14.0
+pytest-randomly==3.16.0
+sortedcontainers==2.4.0
+valkey==6.0.2
diff --git a/.riot/requirements/7219cf4.txt b/.riot/requirements/7219cf4.txt
new file mode 100644
index 00000000000..ffb631b7bcb
--- /dev/null
+++ b/.riot/requirements/7219cf4.txt
@@ -0,0 +1,21 @@
+#
+# This file is autogenerated by pip-compile with Python 3.13
+# by the following command:
+#
+#    pip-compile --allow-unsafe --no-annotate .riot/requirements/7219cf4.in
+#
+attrs==24.3.0
+coverage[toml]==7.6.10
+hypothesis==6.45.0
+iniconfig==2.0.0
+mock==5.1.0
+opentracing==2.4.0
+packaging==24.2
+pluggy==1.5.0
+pytest==8.3.4
+pytest-asyncio==0.23.7
+pytest-cov==6.0.0
+pytest-mock==3.14.0
+pytest-randomly==3.16.0
+sortedcontainers==2.4.0
+valkey==6.0.2
diff --git a/.riot/requirements/9b8251b.txt b/.riot/requirements/9b8251b.txt
deleted file mode 100644
index c6c4004b105..00000000000
--- a/.riot/requirements/9b8251b.txt
+++ /dev/null
@@ -1,25 +0,0 @@
-#
-# This file is autogenerated by pip-compile with Python 3.8
-# by the following command:
-#
-#    pip-compile --no-annotate .riot/requirements/9b8251b.in
-#
-attrs==23.1.0
-coverage[toml]==7.3.4
-exceptiongroup==1.2.0
-hypothesis==6.45.0
-importlib-metadata==7.0.0
-iniconfig==2.0.0
-mock==5.1.0
-opentracing==2.4.0
-packaging==23.2
-pluggy==1.3.0
-pytest==7.4.3
-pytest-asyncio==0.21.1
-pytest-cov==4.1.0
-pytest-mock==3.12.0
-pytest-randomly==3.15.0
-sortedcontainers==2.4.0
-tomli==2.0.1
-yaaredis==3.0.0
-zipp==3.17.0
diff --git a/.riot/requirements/b96b665.txt b/.riot/requirements/b96b665.txt
new file mode 100644
index 00000000000..8b14d5cb8ec
--- /dev/null
+++ b/.riot/requirements/b96b665.txt
@@ -0,0 +1,21 @@
+#
+# This file is autogenerated by pip-compile with Python 3.12
+# by the following command:
+#
+#    pip-compile --allow-unsafe --no-annotate .riot/requirements/b96b665.in
+#
+attrs==24.3.0
+coverage[toml]==7.6.10
+hypothesis==6.45.0
+iniconfig==2.0.0
+mock==5.1.0
+opentracing==2.4.0
+packaging==24.2
+pluggy==1.5.0
+pytest==8.3.4
+pytest-asyncio==0.23.7
+pytest-cov==6.0.0
+pytest-mock==3.14.0
+pytest-randomly==3.16.0
+sortedcontainers==2.4.0
+valkey==6.0.2
diff --git a/.riot/requirements/dd68acc.txt b/.riot/requirements/dd68acc.txt
new file mode 100644
index 00000000000..8eda9971324
--- /dev/null
+++ b/.riot/requirements/dd68acc.txt
@@ -0,0 +1,24 @@
+#
+# This file is autogenerated by pip-compile with Python 3.10
+# by the following command:
+#
+#    pip-compile --allow-unsafe --no-annotate .riot/requirements/dd68acc.in
+#
+async-timeout==5.0.1
+attrs==24.3.0
+coverage[toml]==7.6.10
+exceptiongroup==1.2.2
+hypothesis==6.45.0
+iniconfig==2.0.0
+mock==5.1.0
+opentracing==2.4.0
+packaging==24.2
+pluggy==1.5.0
+pytest==8.3.4
+pytest-asyncio==0.23.7
+pytest-cov==6.0.0
+pytest-mock==3.14.0
+pytest-randomly==3.16.0
+sortedcontainers==2.4.0
+tomli==2.2.1
+valkey==6.0.2
diff --git a/.riot/requirements/fda8aa6.txt b/.riot/requirements/fda8aa6.txt
deleted file mode 100644
index efa619edb1e..00000000000
--- a/.riot/requirements/fda8aa6.txt
+++ /dev/null
@@ -1,25 +0,0 @@
-#
-# This file is autogenerated by pip-compile with Python 3.9
-# by the following command:
-#
-#    pip-compile --no-annotate .riot/requirements/fda8aa6.in
-#
-attrs==23.1.0
-coverage[toml]==7.3.4
-exceptiongroup==1.2.0
-hypothesis==6.45.0
-importlib-metadata==7.0.0
-iniconfig==2.0.0
-mock==5.1.0
-opentracing==2.4.0
-packaging==23.2
-pluggy==1.3.0
-pytest==7.4.3
-pytest-asyncio==0.21.1
-pytest-cov==4.1.0
-pytest-mock==3.12.0
-pytest-randomly==3.15.0
-sortedcontainers==2.4.0
-tomli==2.0.1
-yaaredis==2.0.4
-zipp==3.17.0
diff --git a/benchmarks/bm/utils.py b/benchmarks/bm/utils.py
index dd7b4991c57..13e99e8be74 100644
--- a/benchmarks/bm/utils.py
+++ b/benchmarks/bm/utils.py
@@ -65,7 +65,7 @@ def process_trace(self, trace):


 def drop_traces(tracer):
-    tracer.configure(settings={"FILTERS": [_DropTraces()]})
+    tracer.configure(trace_processors=[_DropTraces()])


 def drop_telemetry_events():
diff --git a/benchmarks/rate_limiter/scenario.py b/benchmarks/rate_limiter/scenario.py
index 5210647ef89..3388af1cfb8 100644
--- a/benchmarks/rate_limiter/scenario.py
+++ b/benchmarks/rate_limiter/scenario.py
@@ -23,8 +23,8 @@ def _(loops):
             windows = [start + (i * self.time_window) for i in range(self.num_windows)]
             per_window = math.floor(loops / self.num_windows)

-            for window in windows:
+            for _ in windows:
                 for _ in range(per_window):
-                    rate_limiter.is_allowed(window)
+                    rate_limiter.is_allowed()

         yield _
diff --git a/benchmarks/sampling_rule_matches/scenario.py b/benchmarks/sampling_rule_matches/scenario.py
index 70ee5111bf8..d77926f5d65 100644
--- a/benchmarks/sampling_rule_matches/scenario.py
+++ b/benchmarks/sampling_rule_matches/scenario.py
@@ -4,8 +4,8 @@

 import bm

+from ddtrace._trace.sampling_rule import SamplingRule
 from ddtrace._trace.span import Span
-from ddtrace.sampling_rule import SamplingRule


 def rands(size=6, chars=string.ascii_uppercase + string.digits):
diff --git a/ddtrace/__init__.py b/ddtrace/__init__.py
index e480851926f..008e931a482 100644
--- a/ddtrace/__init__.py
+++ b/ddtrace/__init__.py
@@ -27,9 +27,11 @@
 from ._monkey import patch_all  # noqa: E402
 from .internal.compat import PYTHON_VERSION_INFO  # noqa: E402
 from .internal.utils.deprecations import DDTraceDeprecationWarning  # noqa: E402
-from ddtrace._trace.pin import Pin  # noqa: E402
-from ddtrace._trace.span import Span  # noqa: E402
-from ddtrace._trace.tracer import Tracer  # noqa: E402
+
+# TODO(munir): Remove the imports below in v3.0
+from ddtrace._trace import pin as _p  # noqa: E402, F401
+from ddtrace._trace import span as _s  # noqa: E402, F401
+from ddtrace._trace import tracer as _t  # noqa: E402, F401
 from ddtrace.vendor import debtcollector

 from .version import get_version  # noqa: E402
@@ -39,15 +41,6 @@
 _start_mini_agent()

-# DEV: Import deprecated tracer module in order to retain side-effect of package
-# initialization, which added this module to sys.modules. We catch deprecation
-# warnings as this is only to retain a side effect of the package
-# initialization.
-# TODO: Remove this in v3.0 when the ddtrace/tracer.py module is removed
-with warnings.catch_warnings():
-    warnings.simplefilter("ignore")
-    from .tracer import Tracer as _
-
 __version__ = get_version()

 # TODO: Deprecate accessing tracer from ddtrace.__init__ module in v4.0
@@ -57,36 +50,11 @@
 __all__ = [
     "patch",
     "patch_all",
-    "Pin",
-    "Span",
-    "Tracer",
     "config",
     "DDTraceDeprecationWarning",
 ]


-_DEPRECATED_TRACE_ATTRIBUTES = [
-    "Span",
-    "Tracer",
-    "Pin",
-]
-
-
-def __getattr__(name):
-    if name in _DEPRECATED_TRACE_ATTRIBUTES:
-        debtcollector.deprecate(
-            ("%s.%s is deprecated" % (__name__, name)),
-            message="Import from ddtrace.trace instead.",
-            category=DDTraceDeprecationWarning,
-            removal_version="3.0.0",
-        )
-
-    if name in globals():
-        return globals()[name]
-
-    raise AttributeError("%s has no attribute %s", __name__, name)
-
-
 def check_supported_python_version():
     if PYTHON_VERSION_INFO < (3, 8):
         deprecation_message = (
diff --git a/ddtrace/_monkey.py b/ddtrace/_monkey.py
index fad8c8f4d2b..0b9f2972885 100644
--- a/ddtrace/_monkey.py
+++ b/ddtrace/_monkey.py
@@ -93,7 +93,6 @@
     "pyodbc": True,
     "fastapi": True,
     "dogpile_cache": True,
-    "yaaredis": True,
     "asyncpg": True,
     "aws_lambda": True,  # patch only in AWS Lambda environments
     "azure_functions": True,
@@ -105,6 +104,7 @@
     "unittest": True,
     "coverage": False,
     "selenium": True,
+    "valkey": True,
 }
diff --git a/ddtrace/_trace/pin.py b/ddtrace/_trace/pin.py
index dd41a1040a1..e27640a993e 100644
--- a/ddtrace/_trace/pin.py
+++ b/ddtrace/_trace/pin.py
@@ -6,7 +6,6 @@
 import wrapt

 import ddtrace
-from ddtrace.vendor.debtcollector import deprecate

 from ..internal.logger import get_logger

@@ -32,25 +31,17 @@ class Pin(object):
         >>> conn = sqlite.connect('/tmp/image.db')
     """

-    __slots__ = ["tags", "tracer", "_target", "_config", "_initialized"]
+    __slots__ = ["tags", "_tracer", "_target", "_config", "_initialized"]

     def __init__(
         self,
         service=None,  # type: Optional[str]
         tags=None,  # type: Optional[Dict[str, str]]
-        tracer=None,
         _config=None,  # type: Optional[Dict[str, Any]]
     ):
         # type: (...) -> None
-        if tracer is not None and tracer is not ddtrace.tracer:
-            deprecate(
-                "Initializing ddtrace.trace.Pin with `tracer` argument is deprecated",
-                message="All Pin instances should use the global tracer instance",
-                removal_version="3.0.0",
-            )
-        tracer = tracer or ddtrace.tracer
         self.tags = tags
-        self.tracer = tracer
+        self._tracer = ddtrace.tracer
         self._target = None  # type: Optional[int]
         # keep the configuration attribute internal because the
         # public API to access it is not the Pin class
@@ -68,10 +59,14 @@ def service(self):
         return self._config["service_name"]

     def __setattr__(self, name, value):
-        if getattr(self, "_initialized", False) and name != "_target":
+        if getattr(self, "_initialized", False) and name not in ("_target", "_tracer"):
             raise AttributeError("can't mutate a pin, use override() or clone() instead")
         super(Pin, self).__setattr__(name, value)

+    @property
+    def tracer(self):
+        return self._tracer
+
     def __repr__(self):
         return "Pin(service=%s, tags=%s, tracer=%s)" % (self.service, self.tags, self.tracer)

@@ -127,7 +122,6 @@ def override(
         obj,  # type: Any
         service=None,  # type: Optional[str]
         tags=None,  # type: Optional[Dict[str, str]]
-        tracer=None,
     ):
         # type: (...) -> None
         """Override an object with the given attributes.
@@ -139,20 +133,32 @@
         >>> # Override a pin for a specific connection
         >>> Pin.override(conn, service='user-db')
         """
-        if tracer is not None:
-            deprecate(
-                "Calling ddtrace.trace.Pin.override(...) with the `tracer` argument is deprecated",
-                message="All Pin instances should use the global tracer instance",
-                removal_version="3.0.0",
-            )
+        Pin._override(obj, service=service, tags=tags)
+
+    @classmethod
+    def _override(
+        cls,
+        obj,  # type: Any
+        service=None,  # type: Optional[str]
+        tags=None,  # type: Optional[Dict[str, str]]
+        tracer=None,
+    ):
+        # type: (...) -> None
+        """
+        Internal method that allows overriding the global tracer in tests
+        """
         if not obj:
             return

         pin = cls.get_from(obj)
         if pin is None:
-            Pin(service=service, tags=tags, tracer=tracer).onto(obj)
+            pin = Pin(service=service, tags=tags)
         else:
-            pin.clone(service=service, tags=tags, tracer=tracer).onto(obj)
+            pin = pin.clone(service=service, tags=tags)
+
+        if tracer:
+            pin._tracer = tracer
+        pin.onto(obj)

     def enabled(self):
         # type: () -> bool
@@ -198,21 +204,22 @@ def clone(
         self,
         service=None,  # type: Optional[str]
         tags=None,  # type: Optional[Dict[str, str]]
-        tracer=None,
     ):
         # type: (...) -> Pin
         """Return a clone of the pin with the given attributes replaced."""
+        return self._clone(service=service, tags=tags)
+
+    def _clone(
+        self,
+        service=None,  # type: Optional[str]
+        tags=None,  # type: Optional[Dict[str, str]]
+        tracer=None,
+    ):
+        """Internal method that can clone the tracer from an existing Pin. This is used in tests"""
         # do a shallow copy of Pin dicts
         if not tags and self.tags:
             tags = self.tags.copy()

-        if tracer is not None:
-            deprecate(
-                "Initializing ddtrace.trace.Pin with `tracer` argument is deprecated",
-                message="All Pin instances should use the global tracer instance",
-                removal_version="3.0.0",
-            )
-
         # we use a copy instead of a deepcopy because we expect configurations
         # to have only a root level dictionary without nested objects. Using
         # deepcopy introduces a big overhead:
@@ -221,9 +228,10 @@
         # deepcopy: 0.2787208557128906
         config = self._config.copy()

-        return Pin(
+        pin = Pin(
             service=service or self.service,
             tags=tags,
-            tracer=tracer or self.tracer,  # do not clone the Tracer
             _config=config,
         )
+        pin._tracer = tracer or self.tracer
+        return pin
diff --git a/ddtrace/_trace/sampling_rule.py b/ddtrace/_trace/sampling_rule.py
index 532a0b71f51..482a95d403a 100644
--- a/ddtrace/_trace/sampling_rule.py
+++ b/ddtrace/_trace/sampling_rule.py
@@ -8,8 +8,6 @@
 from ddtrace.internal.glob_matching import GlobMatcher
 from ddtrace.internal.logger import get_logger
 from ddtrace.internal.utils.cache import cachedmethod
-from ddtrace.internal.utils.deprecations import DDTraceDeprecationWarning
-from ddtrace.vendor.debtcollector import deprecate


 if TYPE_CHECKING:  # pragma: no cover
@@ -210,14 +208,12 @@ def choose_matcher(self, prop):
         # We currently support the ability to pass in a function, a regular expression, or a string
         # If a string is passed in we create a GlobMatcher to handle the matching
         if callable(prop) or isinstance(prop, pattern_type):
-            # deprecated: passing a function or a regular expression'
-            deprecate(
-                "Using methods or regular expressions for SamplingRule matching is deprecated. ",
-                message="Please move to passing in a string for Glob matching.",
-                removal_version="3.0.0",
-                category=DDTraceDeprecationWarning,
+            log.error(
+                "Using methods or regular expressions for SamplingRule matching is not supported: %s. "
+                "Please move to passing in a string for Glob matching.",
+                str(prop),
             )
-            return prop
+            return "None"
         # Name and Resource will never be None, but service can be, since we str()
         # whatever we pass into the GlobMatcher, we can just use its matching
         elif prop is None:
diff --git a/ddtrace/_trace/span.py b/ddtrace/_trace/span.py
index 446239a8091..c6eb4d4b72a 100644
--- a/ddtrace/_trace/span.py
+++ b/ddtrace/_trace/span.py
@@ -52,8 +52,6 @@
 from ddtrace.internal.logger import get_logger
 from ddtrace.internal.sampling import SamplingMechanism
 from ddtrace.internal.sampling import set_sampling_decision_maker
-from ddtrace.internal.utils.deprecations import DDTraceDeprecationWarning
-from ddtrace.vendor.debtcollector import deprecate


 _NUMERIC_TAGS = (_ANALYTICS_SAMPLE_RATE_KEY,)
@@ -279,29 +277,6 @@ def duration(self) -> Optional[float]:
     def duration(self, value: float) -> None:
         self.duration_ns = int(value * 1e9)

-    @property
-    def sampled(self) -> Optional[bool]:
-        deprecate(
-            "span.sampled is deprecated and will be removed in a future version of the tracer.",
-            message="""span.sampled references the state of span.context.sampling_priority.
-            Please use span.context.sampling_priority instead to check if a span is sampled.""",
-            category=DDTraceDeprecationWarning,
-        )
-        if self.context.sampling_priority is None:
-            # this maintains original span.sampled behavior, where all spans would start
-            # with span.sampled = True until sampling runs
-            return True
-        return self.context.sampling_priority > 0
-
-    @sampled.setter
-    def sampled(self, value: bool) -> None:
-        deprecate(
-            "span.sampled is deprecated and will be removed in a future version of the tracer.",
-            message="""span.sampled has a no-op setter.
-            Please use span.set_tag('manual.keep'/'manual.drop') to keep or drop spans.""",
-            category=DDTraceDeprecationWarning,
-        )
-
     def finish(self, finish_time: Optional[float] = None) -> None:
         """Mark the end time of the span and submit it to the tracer.
         If the span has already been finished don't do anything.
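Note: the hunk above drops the deprecated ``span.sampled`` property and its no-op setter. A minimal migration sketch, following the removed deprecation messages (``my_span`` is a hypothetical span obtained from the global tracer):

    from ddtrace.trace import tracer

    with tracer.trace("example.operation") as my_span:
        # A span counts as sampled when its context carries a positive priority;
        # an unset priority means sampling has not run yet (the removed property
        # returned True in that case).
        priority = my_span.context.sampling_priority
        sampled = True if priority is None else priority > 0

        # Instead of assigning to span.sampled, force a decision with tags:
        my_span.set_tag("manual.keep")  # or my_span.set_tag("manual.drop")
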
diff --git a/ddtrace/_trace/trace_handlers.py b/ddtrace/_trace/trace_handlers.py
index e0d99c0d020..d636a89c187 100644
--- a/ddtrace/_trace/trace_handlers.py
+++ b/ddtrace/_trace/trace_handlers.py
@@ -688,6 +688,11 @@ def _on_redis_command_post(ctx: core.ExecutionContext, rowcount):
         ctx.span.set_metric(db.ROWCOUNT, rowcount)


+def _on_valkey_command_post(ctx: core.ExecutionContext, rowcount):
+    if rowcount is not None:
+        ctx.span.set_metric(db.ROWCOUNT, rowcount)
+
+
 def _on_test_visibility_enable(config) -> None:
     from ddtrace.internal.ci_visibility import CIVisibility

@@ -797,6 +802,8 @@ def listen():
     core.on("botocore.kinesis.GetRecords.post", _on_botocore_kinesis_getrecords_post)
     core.on("redis.async_command.post", _on_redis_command_post)
     core.on("redis.command.post", _on_redis_command_post)
+    core.on("valkey.async_command.post", _on_valkey_command_post)
+    core.on("valkey.command.post", _on_valkey_command_post)
     core.on("azure.functions.request_call_modifier", _on_azure_functions_request_span_modifier)
     core.on("azure.functions.start_response", _on_azure_functions_start_response)

@@ -838,6 +845,7 @@
         "botocore.patched_stepfunctions_api_call",
         "botocore.patched_bedrock_api_call",
         "redis.command",
+        "valkey.command",
         "rq.queue.enqueue_job",
         "rq.traced_queue_fetch_job",
         "rq.worker.perform_job",
diff --git a/ddtrace/_trace/tracer.py b/ddtrace/_trace/tracer.py
index 87f312bb18c..9dac36c175e 100644
--- a/ddtrace/_trace/tracer.py
+++ b/ddtrace/_trace/tracer.py
@@ -24,6 +24,7 @@
 from ddtrace._trace.processor import TraceProcessor
 from ddtrace._trace.processor import TraceSamplingProcessor
 from ddtrace._trace.processor import TraceTagsProcessor
+from ddtrace._trace.provider import BaseContextProvider
 from ddtrace._trace.provider import DefaultContextProvider
 from ddtrace._trace.sampler import BasePrioritySampler
 from ddtrace._trace.sampler import BaseSampler
@@ -58,7 +59,6 @@
 from ddtrace.internal.serverless import in_gcp_function
 from ddtrace.internal.service import ServiceStatusError
 from ddtrace.internal.utils import _get_metas_to_propagate
-from ddtrace.internal.utils.deprecations import DDTraceDeprecationWarning
 from ddtrace.internal.utils.formats import format_trace_id
 from ddtrace.internal.utils.http import verify_url
 from ddtrace.internal.writer import AgentResponse
@@ -68,7 +68,6 @@
 from ddtrace.settings import Config
 from ddtrace.settings.asm import config as asm_config
 from ddtrace.settings.peer_service import _ps_config
-from ddtrace.vendor.debtcollector import deprecate


 log = get_logger(__name__)
@@ -200,7 +199,7 @@ def __init__(
         self,
         url: Optional[str] = None,
         dogstatsd_url: Optional[str] = None,
-        context_provider: Optional[DefaultContextProvider] = None,
+        context_provider: Optional[BaseContextProvider] = None,
     ) -> None:
         """
         Create a new ``Tracer`` instance. A global tracer is already initialized
@@ -216,16 +215,8 @@
         if Tracer._instance is None:
             Tracer._instance = self
         else:
-            # ddtrace library does not support context propagation for multiple tracers.
-            # All instances of ddtrace ContextProviders share the same ContextVars. This means that
-            # if you create multiple instances of Tracer, spans will be shared between them creating a
-            # broken experience.
-            # TODO(mabdinur): Convert this warning to an ValueError in 3.0.0
-            deprecate(
-                "Support for multiple Tracer instances is deprecated",
-                ". Use ddtrace.tracer instead.",
-                category=DDTraceDeprecationWarning,
-                removal_version="3.0.0",
+            log.error(
+                "Multiple Tracer instances can not be initialized. Use ``ddtrace.trace.tracer`` instead.",
             )

         self._user_trace_processors: List[TraceProcessor] = []
@@ -328,28 +319,6 @@ def sample(self, span):
         else:
             log.error("No sampler available to sample span")

-    @property
-    def sampler(self):
-        deprecate(
-            "tracer.sampler is deprecated and will be removed.",
-            message="To manually sample call tracer.sample(span) instead.",
-            category=DDTraceDeprecationWarning,
-        )
-        return self._sampler
-
-    @sampler.setter
-    def sampler(self, value):
-        deprecate(
-            "Setting a custom sampler is deprecated and will be removed.",
-            message="""Please use DD_TRACE_SAMPLING_RULES to configure the sampler instead:
-            https://ddtrace.readthedocs.io/en/stable/configuration.html#DD_TRACE_SAMPLING_RULES""",
-            category=DDTraceDeprecationWarning,
-        )
-        if asm_config._apm_opt_out:
-            log.warning("Cannot set a custom sampler with Standalone ASM mode")
-            return
-        self._sampler = value
-
     def on_start_span(self, func: Callable) -> Callable:
         """Register a function to execute when a span start.

@@ -441,21 +410,7 @@ def get_log_correlation_context(self, active: Optional[Union[Context, Span]] = N

     def configure(
         self,
-        enabled: Optional[bool] = None,
-        hostname: Optional[str] = None,
-        port: Optional[int] = None,
-        uds_path: Optional[str] = None,
-        https: Optional[bool] = None,
-        sampler: Optional[BaseSampler] = None,
-        context_provider: Optional[DefaultContextProvider] = None,
-        wrap_executor: Optional[Callable] = None,
-        priority_sampling: Optional[bool] = None,
-        settings: Optional[Dict[str, Any]] = None,
-        dogstatsd_url: Optional[str] = None,
-        writer: Optional[TraceWriter] = None,
-        partial_flush_enabled: Optional[bool] = None,
-        partial_flush_min_spans: Optional[int] = None,
-        api_version: Optional[str] = None,
+        context_provider: Optional[BaseContextProvider] = None,
         compute_stats_enabled: Optional[bool] = None,
         appsec_enabled: Optional[bool] = None,
         iast_enabled: Optional[bool] = None,
@@ -472,58 +427,14 @@
         :param bool appsec_standalone_enabled: When tracing is disabled ensures ASM support is still enabled.
         :param List[TraceProcessor] trace_processors: This parameter sets TraceProcessor (ex: TraceFilters).
            Trace processors are used to modify and filter traces based on certain criteria.
-
-        :param bool enabled: If True, finished traces will be submitted to the API, else they'll be dropped.
-            This parameter is deprecated and will be removed.
-        :param str hostname: Hostname running the Trace Agent. This parameter is deprecated and will be removed.
-        :param int port: Port of the Trace Agent. This parameter is deprecated and will be removed.
-        :param str uds_path: The Unix Domain Socket path of the agent. This parameter is deprecated and will be removed.
-        :param bool https: Whether to use HTTPS or HTTP. This parameter is deprecated and will be removed.
-        :param object sampler: A custom Sampler instance, locally deciding to totally drop the trace or not.
-            This parameter is deprecated and will be removed.
-        :param object wrap_executor: callable that is used when a function is decorated with
-            ``Tracer.wrap()``. This is an advanced option that usually doesn't need to be changed
-            from the default value. This parameter is deprecated and will be removed.
-        :param priority_sampling: This parameter is deprecated and will be removed in a future version.
-        :param bool settings: This parameter is deprecated and will be removed.
-        :param str dogstatsd_url: URL for UDP or Unix socket connection to DogStatsD
-            This parameter is deprecated and will be removed.
-        :param TraceWriter writer: This parameter is deprecated and will be removed.
-        :param bool partial_flush_enabled: This parameter is deprecated and will be removed.
-        :param bool partial_flush_min_spans: This parameter is deprecated and will be removed.
-        :param str api_version: This parameter is deprecated and will be removed.
-        :param bool compute_stats_enabled: This parameter is deprecated and will be removed.
         """
-        if settings is not None:
-            deprecate(
-                "Support for ``tracer.configure(...)`` with the settings parameter is deprecated",
-                message="Please use the trace_processors parameter instead of settings['FILTERS'].",
-                version="3.0.0",
-                category=DDTraceDeprecationWarning,
-            )
-            trace_processors = (trace_processors or []) + (settings.get("FILTERS") or [])
-
         return self._configure(
-            enabled,
-            hostname,
-            port,
-            uds_path,
-            https,
-            sampler,
-            context_provider,
-            wrap_executor,
-            priority_sampling,
-            trace_processors,
-            dogstatsd_url,
-            writer,
-            partial_flush_enabled,
-            partial_flush_min_spans,
-            api_version,
-            compute_stats_enabled,
-            appsec_enabled,
-            iast_enabled,
-            appsec_standalone_enabled,
-            True,
+            context_provider=context_provider,
+            trace_processors=trace_processors,
+            compute_stats_enabled=compute_stats_enabled,
+            appsec_enabled=appsec_enabled,
+            iast_enabled=iast_enabled,
+            appsec_standalone_enabled=appsec_standalone_enabled,
         )

     def _configure(
@@ -534,7 +445,7 @@
         uds_path: Optional[str] = None,
         https: Optional[bool] = None,
         sampler: Optional[BaseSampler] = None,
-        context_provider: Optional[DefaultContextProvider] = None,
+        context_provider: Optional[BaseContextProvider] = None,
         wrap_executor: Optional[Callable] = None,
         priority_sampling: Optional[bool] = None,
         trace_processors: Optional[List[TraceProcessor]] = None,
@@ -547,48 +458,18 @@
         appsec_enabled: Optional[bool] = None,
         iast_enabled: Optional[bool] = None,
         appsec_standalone_enabled: Optional[bool] = None,
-        log_deprecations: bool = False,
     ) -> None:
         if enabled is not None:
             self.enabled = enabled
-            if log_deprecations:
-                deprecate(
-                    "Enabling/Disabling tracing after application start is deprecated",
-                    message="Please use DD_TRACE_ENABLED instead.",
-                    version="3.0.0",
-                    category=DDTraceDeprecationWarning,
-                )
-
-        if priority_sampling is not None and log_deprecations:
-            deprecate(
-                "Disabling priority sampling is deprecated",
-                message="Calling `tracer.configure(priority_sampling=....) has no effect",
-                version="3.0.0",
-                category=DDTraceDeprecationWarning,
-            )

         if trace_processors is not None:
             self._user_trace_processors = trace_processors

         if partial_flush_enabled is not None:
             self._partial_flush_enabled = partial_flush_enabled
-            if log_deprecations:
-                deprecate(
-                    "Configuring partial flushing after application start is deprecated",
-                    message="Please use DD_TRACE_PARTIAL_FLUSH_ENABLED to enable/disable the partial flushing instead.",
-                    version="3.0.0",
-                    category=DDTraceDeprecationWarning,
-                )

         if partial_flush_min_spans is not None:
             self._partial_flush_min_spans = partial_flush_min_spans
-            if log_deprecations:
-                deprecate(
-                    "Configuring partial flushing after application start is deprecated",
-                    message="Please use DD_TRACE_PARTIAL_FLUSH_MIN_SPANS to set the flushing threshold instead.",
-                    version="3.0.0",
-                    category=DDTraceDeprecationWarning,
-                )

         if appsec_enabled is not None:
             asm_config._asm_enabled = appsec_enabled
@@ -620,33 +501,11 @@
         if sampler is not None:
             self._sampler = sampler
             self._user_sampler = self._sampler
-            if log_deprecations:
-                deprecate(
-                    "Configuring custom samplers is deprecated",
-                    message="Please use DD_TRACE_SAMPLING_RULES to configure the sample rates instead",
-                    category=DDTraceDeprecationWarning,
-                    removal_version="3.0.0",
-                )

         if dogstatsd_url is not None:
-            if log_deprecations:
-                deprecate(
-                    "Configuring dogstatsd_url after application start is deprecated",
-                    message="Please use DD_DOGSTATSD_URL instead.",
-                    version="3.0.0",
-                    category=DDTraceDeprecationWarning,
-                )
             self._dogstatsd_url = dogstatsd_url

         if any(x is not None for x in [hostname, port, uds_path, https]):
-            if log_deprecations:
-                deprecate(
-                    "Configuring tracer agent connection after application start is deprecated",
-                    message="Please use DD_TRACE_AGENT_URL instead.",
-                    version="3.0.0",
-                    category=DDTraceDeprecationWarning,
-                )
-
             # If any of the parts of the URL have updated, merge them with
             # the previous writer values.
             prev_url_parsed = compat.parse.urlparse(self._agent_url)
@@ -670,13 +529,6 @@
             new_url = None

         if compute_stats_enabled is not None:
-            if log_deprecations:
-                deprecate(
-                    "Configuring tracer stats computation after application start is deprecated",
-                    message="Please use DD_TRACE_STATS_COMPUTATION_ENABLED instead.",
-                    version="3.0.0",
-                    category=DDTraceDeprecationWarning,
-                )
             self._compute_stats = compute_stats_enabled

         try:
@@ -685,14 +537,6 @@
             # It's possible the writer never got started
             pass

-        if api_version is not None and log_deprecations:
-            deprecate(
-                "Configuring Tracer API version after application start is deprecated",
-                message="Please use DD_TRACE_API_VERSION instead.",
-                version="3.0.0",
-                category=DDTraceDeprecationWarning,
-            )
-
         if writer is not None:
             self._writer = writer
         elif any(x is not None for x in [new_url, api_version, sampler, dogstatsd_url, appsec_enabled]):
@@ -754,12 +598,6 @@

         if wrap_executor is not None:
             self._wrap_executor = wrap_executor
-            if log_deprecations:
-                deprecate(
-                    "Support for tracer.configure(...) with the wrap_executor parameter is deprecated",
-                    version="3.0.0",
-                    category=DDTraceDeprecationWarning,
-                )

         self._generate_diagnostic_logs()

@@ -1344,7 +1182,7 @@ def _handle_sampler_update(self, cfg: Config) -> None:
             and self._user_sampler
         ):
             # if we get empty configs from rc for both sample rate and rules, we should revert to the user sampler
-            self.sampler = self._user_sampler
+            self._sampler = self._user_sampler
             return

         if cfg._get_source("_trace_sample_rate") != "remote_config" and self._user_sampler:
diff --git a/ddtrace/_trace/utils_valkey.py b/ddtrace/_trace/utils_valkey.py
new file mode 100644
index 00000000000..ed996e885a3
--- /dev/null
+++ b/ddtrace/_trace/utils_valkey.py
@@ -0,0 +1,96 @@
+"""
+Some utils used by the ddtrace valkey integration
+"""
+
+from contextlib import contextmanager
+from typing import List
+from typing import Optional
+
+from ddtrace.constants import _ANALYTICS_SAMPLE_RATE_KEY
+from ddtrace.constants import _SPAN_MEASURED_KEY
+from ddtrace.constants import SPAN_KIND
+from ddtrace.contrib import trace_utils
+from ddtrace.contrib.internal.valkey_utils import _extract_conn_tags
+from ddtrace.ext import SpanKind
+from ddtrace.ext import SpanTypes
+from ddtrace.ext import db
+from ddtrace.ext import valkey as valkeyx
+from ddtrace.internal import core
+from ddtrace.internal.constants import COMPONENT
+from ddtrace.internal.schema import schematize_cache_operation
+from ddtrace.internal.utils.formats import stringify_cache_args
+
+
+format_command_args = stringify_cache_args
+
+
+def _set_span_tags(
+    span, pin, config_integration, args: Optional[List], instance, query: Optional[List], is_cluster: bool = False
+):
+    span.set_tag_str(SPAN_KIND, SpanKind.CLIENT)
+    span.set_tag_str(COMPONENT, config_integration.integration_name)
+    span.set_tag_str(db.SYSTEM, valkeyx.APP)
+    span.set_tag(_SPAN_MEASURED_KEY)
+    if query is not None:
+        span_name = schematize_cache_operation(valkeyx.RAWCMD, cache_provider=valkeyx.APP)  # type: ignore[operator]
+        span.set_tag_str(span_name, query)
+    if pin.tags:
+        span.set_tags(pin.tags)
+    # some valkey clients do not have a connection_pool attribute (ex. aiovalkey v1.3)
+    if not is_cluster and hasattr(instance, "connection_pool"):
+        span.set_tags(_extract_conn_tags(instance.connection_pool.connection_kwargs))
+    if args is not None:
+        span.set_metric(valkeyx.ARGS_LEN, len(args))
+    else:
+        for attr in ("command_stack", "_command_stack"):
+            if hasattr(instance, attr):
+                span.set_metric(valkeyx.PIPELINE_LEN, len(getattr(instance, attr)))
+    # set analytics sample rate if enabled
+    span.set_tag(_ANALYTICS_SAMPLE_RATE_KEY, config_integration.get_analytics_sample_rate())
+
+
+@contextmanager
+def _instrument_valkey_cmd(pin, config_integration, instance, args):
+    query = stringify_cache_args(args, cmd_max_len=config_integration.cmd_max_length)
+    with core.context_with_data(
+        "valkey.command",
+        span_name=schematize_cache_operation(valkeyx.CMD, cache_provider=valkeyx.APP),
+        pin=pin,
+        service=trace_utils.ext_service(pin, config_integration),
+        span_type=SpanTypes.VALKEY,
+        resource=query.split(" ")[0] if config_integration.resource_only_command else query,
+    ) as ctx, ctx.span as span:
+        _set_span_tags(span, pin, config_integration, args, instance, query)
+        yield ctx
+
+
+@contextmanager
+def _instrument_valkey_execute_pipeline(pin, config_integration, cmds, instance, is_cluster=False):
+    cmd_string = resource = "\n".join(cmds)
+    if config_integration.resource_only_command:
+        resource = "\n".join([cmd.split(" ")[0] for cmd in cmds])
+
+    with pin.tracer.trace(
+        schematize_cache_operation(valkeyx.CMD, cache_provider=valkeyx.APP),
+        resource=resource,
+        service=trace_utils.ext_service(pin, config_integration),
+        span_type=SpanTypes.VALKEY,
+    ) as span:
+        _set_span_tags(span, pin, config_integration, None, instance, cmd_string)
+        yield span
+
+
+@contextmanager
+def _instrument_valkey_execute_async_cluster_pipeline(pin, config_integration, cmds, instance):
+    cmd_string = resource = "\n".join(cmds)
+    if config_integration.resource_only_command:
+        resource = "\n".join([cmd.split(" ")[0] for cmd in cmds])
+
+    with pin.tracer.trace(
+        schematize_cache_operation(valkeyx.CMD, cache_provider=valkeyx.APP),
+        resource=resource,
+        service=trace_utils.ext_service(pin, config_integration),
+        span_type=SpanTypes.VALKEY,
+    ) as span:
+        _set_span_tags(span, pin, config_integration, None, instance, cmd_string)
+        yield span
diff --git a/ddtrace/appsec/__init__.py b/ddtrace/appsec/__init__.py
index 05d1a852710..6b5758a95c2 100644
--- a/ddtrace/appsec/__init__.py
+++ b/ddtrace/appsec/__init__.py
@@ -1,5 +1,6 @@
+# this module must not load any other unsafe appsec module directly
+
 from ddtrace.internal import core
-from ddtrace.settings.asm import config as asm_config


 _APPSEC_TO_BE_LOADED = True
@@ -28,7 +29,9 @@ def load_iast():

 def load_common_appsec_modules():
     """Lazily load the common module patches."""
-    if (asm_config._ep_enabled and asm_config._asm_enabled) or asm_config._iast_enabled:
+    from ddtrace.settings.asm import config as asm_config
+
+    if asm_config._load_modules:
         from ddtrace.appsec._common_module_patches import patch_common_modules

         patch_common_modules()
diff --git a/ddtrace/appsec/_asm_request_context.py b/ddtrace/appsec/_asm_request_context.py
index d8f258d43a7..bd6c8b21a2a 100644
--- a/ddtrace/appsec/_asm_request_context.py
+++ b/ddtrace/appsec/_asm_request_context.py
@@ -15,9 +15,6 @@
 from ddtrace.appsec._constants import APPSEC
 from ddtrace.appsec._constants import EXPLOIT_PREVENTION
 from ddtrace.appsec._constants import SPAN_DATA_NAMES
-from ddtrace.appsec._iast._iast_request_context import is_iast_request_enabled
-from ddtrace.appsec._iast._taint_tracking import OriginType
-from ddtrace.appsec._iast._taint_tracking._taint_objects import taint_pyobject
 from ddtrace.appsec._utils import add_context_log
 from ddtrace.appsec._utils import get_triggers
 from ddtrace.internal import core
@@ -28,6 +25,16 @@
 from ddtrace.trace import Span


+if asm_config._iast_enabled:
+    from ddtrace.appsec._iast._iast_request_context import is_iast_request_enabled
+    from ddtrace.appsec._iast._taint_tracking import OriginType
+    from ddtrace.appsec._iast._taint_tracking._taint_objects import taint_pyobject
+else:
+
+    def is_iast_request_enabled() -> bool:
+        return False
+
+
 if TYPE_CHECKING:
     from ddtrace.appsec._ddwaf import DDWaf_info
     from ddtrace.appsec._ddwaf import DDWaf_result
diff --git a/ddtrace/appsec/_common_module_patches.py b/ddtrace/appsec/_common_module_patches.py
index 8c834b80e6f..ac3c2c4e775 100644
--- a/ddtrace/appsec/_common_module_patches.py
+++ b/ddtrace/appsec/_common_module_patches.py
@@ -44,6 +44,10 @@ def is_iast_request_enabled() -> bool:

 def patch_common_modules():
     global _is_patched
+    # ensure that the subprocess patch is applied even after one click activation
+    subprocess_patch.patch()
+    subprocess_patch.add_str_callback(_RASP_SYSTEM, wrapped_system_5542593D237084A7)
+    subprocess_patch.add_lst_callback(_RASP_POPEN, popen_FD233052260D8B4D)
     if _is_patched:
         return
     # for testing purposes, we need to update is_iast_request_enabled
@@ -60,10 +64,6 @@ def patch_common_modules():
     try_wrap_function_wrapper("urllib.request", "OpenerDirector.open", wrapped_open_ED4CF71136E15EBF)
     try_wrap_function_wrapper("_io", "BytesIO.read", wrapped_read_F3E51D71B4EC16EF)
     try_wrap_function_wrapper("_io", "StringIO.read", wrapped_read_F3E51D71B4EC16EF)
-    # ensure that the subprocess patch is applied even after one click activation
-    subprocess_patch.patch()
-    subprocess_patch.add_str_callback(_RASP_SYSTEM, wrapped_system_5542593D237084A7)
-    subprocess_patch.add_lst_callback(_RASP_POPEN, popen_FD233052260D8B4D)
     core.on("asm.block.dbapi.execute", execute_4C9BAC8E228EB347)
     if asm_config._iast_enabled:
         from ddtrace.appsec._iast._metrics import _set_metric_iast_instrumented_sink
diff --git a/ddtrace/appsec/_constants.py b/ddtrace/appsec/_constants.py
index 2172548205b..454483fcf17 100644
--- a/ddtrace/appsec/_constants.py
+++ b/ddtrace/appsec/_constants.py
@@ -1,3 +1,5 @@
+# this module must not load any other unsafe appsec module directly
+
 import os
 from re import Match
 import sys
diff --git a/ddtrace/appsec/_iast/_evidence_redaction/_sensitive_handler.py b/ddtrace/appsec/_iast/_evidence_redaction/_sensitive_handler.py
index a10455dee42..dccc18a39b6 100644
--- a/ddtrace/appsec/_iast/_evidence_redaction/_sensitive_handler.py
+++ b/ddtrace/appsec/_iast/_evidence_redaction/_sensitive_handler.py
@@ -10,6 +10,7 @@
 from ..constants import VULN_HEADER_INJECTION
 from ..constants import VULN_SQL_INJECTION
 from ..constants import VULN_SSRF
+from ..constants import VULN_XSS
 from .command_injection_sensitive_analyzer import command_injection_sensitive_analyzer
 from .default_sensitive_analyzer import default_sensitive_analyzer
 from .header_injection_sensitive_analyzer import header_injection_sensitive_analyzer
@@ -45,6 +46,7 @@ def __init__(self):
             VULN_SQL_INJECTION: sql_sensitive_analyzer,
             VULN_SSRF: url_sensitive_analyzer,
             VULN_HEADER_INJECTION: header_injection_sensitive_analyzer,
+            VULN_XSS: default_sensitive_analyzer,
             VULN_CODE_INJECTION: default_sensitive_analyzer,
         }
diff --git a/ddtrace/appsec/_iast/_handlers.py b/ddtrace/appsec/_iast/_handlers.py
index cf60fc610be..bcd913085f4 100644
--- a/ddtrace/appsec/_iast/_handlers.py
+++ b/ddtrace/appsec/_iast/_handlers.py
@@ -82,23 +82,28 @@ def _on_flask_patch(flask_version):
         "Headers.items",
         functools.partial(if_iast_taint_yield_tuple_for, (OriginType.HEADER_NAME, OriginType.HEADER)),
     )
-    _set_metric_iast_instrumented_source(OriginType.HEADER_NAME)
-    _set_metric_iast_instrumented_source(OriginType.HEADER)

     try_wrap_function_wrapper(
         "werkzeug.datastructures",
-        "ImmutableMultiDict.__getitem__",
-        functools.partial(if_iast_taint_returned_object_for, OriginType.PARAMETER),
+        "EnvironHeaders.__getitem__",
+        functools.partial(if_iast_taint_returned_object_for, OriginType.HEADER),
     )
-    _set_metric_iast_instrumented_source(OriginType.PARAMETER)
-
+    # Since werkzeug 3.1.0 get doesn't call to __getitem__
     try_wrap_function_wrapper(
         "werkzeug.datastructures",
-        "EnvironHeaders.__getitem__",
+        "EnvironHeaders.get",
         functools.partial(if_iast_taint_returned_object_for, OriginType.HEADER),
     )
+    _set_metric_iast_instrumented_source(OriginType.HEADER_NAME)
     _set_metric_iast_instrumented_source(OriginType.HEADER)

+    try_wrap_function_wrapper(
+        "werkzeug.datastructures",
+        "ImmutableMultiDict.__getitem__",
+        functools.partial(if_iast_taint_returned_object_for, OriginType.PARAMETER),
+    )
+    _set_metric_iast_instrumented_source(OriginType.PARAMETER)
+
     if flask_version >= (2, 0, 0):
         # instance.query_string: raising an error on werkzeug/_internal.py "AttributeError: read only property"
         try_wrap_function_wrapper("werkzeug.wrappers.request", "Request.__init__", _on_request_init)
diff --git a/ddtrace/appsec/_iast/_patch_modules.py b/ddtrace/appsec/_iast/_patch_modules.py
index e91438ebd49..634cd6399c5 100644
--- a/ddtrace/appsec/_iast/_patch_modules.py
+++ b/ddtrace/appsec/_iast/_patch_modules.py
@@ -7,6 +7,7 @@
     "header_injection": True,
     "weak_cipher": True,
     "weak_hash": True,
+    "xss": True,
 }
diff --git a/ddtrace/appsec/_iast/constants.py b/ddtrace/appsec/_iast/constants.py
index 9ac6edb0ab1..3d0edc31b83 100644
--- a/ddtrace/appsec/_iast/constants.py
+++ b/ddtrace/appsec/_iast/constants.py
@@ -14,6 +14,7 @@
 VULN_CMDI = "COMMAND_INJECTION"
 VULN_HEADER_INJECTION = "HEADER_INJECTION"
 VULN_CODE_INJECTION = "CODE_INJECTION"
+VULN_XSS = "XSS"
 VULN_SSRF = "SSRF"
 VULN_STACKTRACE_LEAK = "STACKTRACE_LEAK"
diff --git a/ddtrace/appsec/_iast/taint_sinks/xss.py b/ddtrace/appsec/_iast/taint_sinks/xss.py
new file mode 100644
index 00000000000..73350faac44
--- /dev/null
+++ b/ddtrace/appsec/_iast/taint_sinks/xss.py
@@ -0,0 +1,104 @@
+from typing import Text
+
+from ddtrace.appsec._common_module_patches import try_unwrap
+from ddtrace.appsec._constants import IAST_SPAN_TAGS
+from ddtrace.appsec._iast import oce
+from ddtrace.appsec._iast._iast_request_context import is_iast_request_enabled
+from ddtrace.appsec._iast._metrics import _set_metric_iast_executed_sink
+from ddtrace.appsec._iast._metrics import _set_metric_iast_instrumented_sink
+from ddtrace.appsec._iast._metrics import increment_iast_span_metric
+from ddtrace.appsec._iast._patch import set_and_check_module_is_patched
+from ddtrace.appsec._iast._patch import set_module_unpatched
+from ddtrace.appsec._iast._patch import try_wrap_function_wrapper
+from ddtrace.appsec._iast._taint_tracking._taint_objects import is_pyobject_tainted
+from ddtrace.appsec._iast.constants import VULN_XSS
+from ddtrace.appsec._iast.taint_sinks._base import VulnerabilityBase
+from ddtrace.internal.logger import get_logger
+from ddtrace.settings.asm import config as asm_config
+
+
+log = get_logger(__name__)
+
+
+@oce.register
+class XSS(VulnerabilityBase):
+    vulnerability_type = VULN_XSS
+
+
+def get_version() -> Text:
+    return ""
+
+
+def patch():
+    if not asm_config._iast_enabled:
+        return
+
+    if not set_and_check_module_is_patched("flask", default_attr="_datadog_xss_patch"):
+        return
+    if not set_and_check_module_is_patched("django", default_attr="_datadog_xss_patch"):
+        return
+    if not set_and_check_module_is_patched("fastapi", default_attr="_datadog_xss_patch"):
+        return
+
+    try_wrap_function_wrapper(
+        "django.utils.safestring",
+        "mark_safe",
+        _iast_django_xss,
+    )
+
+    try_wrap_function_wrapper(
+        "django.template.defaultfilters",
+        "mark_safe",
+        _iast_django_xss,
+    )
+
+    try_wrap_function_wrapper(
+        "jinja2.filters",
+        "do_mark_safe",
+        _iast_jinja2_xss,
+    )
+    try_wrap_function_wrapper(
+        "flask",
+        "render_template_string",
+        _iast_jinja2_xss,
+    )
+
+    _set_metric_iast_instrumented_sink(VULN_XSS)
+    # Even when starting the application with `ddtrace-run ddtrace-run`, `jinja2.FILTERS` is created before this patch
+    # function executes. Therefore, we update the in-memory object with the newly patched version.
+    try:
+        from jinja2.filters import FILTERS
+        from jinja2.filters import do_mark_safe
+
+        FILTERS["safe"] = do_mark_safe
+    except (ImportError, KeyError):
+        pass
+
+
+def unpatch():
+    try_unwrap("django.utils.safestring", "mark_safe")
+    try_unwrap("django.template.defaultfilters", "mark_safe")
+
+    set_module_unpatched("flask", default_attr="_datadog_xss_patch")
+    set_module_unpatched("django", default_attr="_datadog_xss_patch")
+    set_module_unpatched("fastapi", default_attr="_datadog_xss_patch")
+
+
+def _iast_django_xss(wrapped, instance, args, kwargs):
+    if args and len(args) >= 1:
+        _iast_report_xss(args[0])
+    return wrapped(*args, **kwargs)
+
+
+def _iast_jinja2_xss(wrapped, instance, args, kwargs):
+    if args and len(args) >= 1:
+        _iast_report_xss(args[0])
+    return wrapped(*args, **kwargs)
+
+
+def _iast_report_xss(code_string: Text):
+    increment_iast_span_metric(IAST_SPAN_TAGS.TELEMETRY_EXECUTED_SINK, XSS.vulnerability_type)
+    _set_metric_iast_executed_sink(XSS.vulnerability_type)
+    if is_iast_request_enabled():
+        if is_pyobject_tainted(code_string):
+            XSS.report(evidence_value=code_string)
diff --git a/ddtrace/appsec/_python_info/stdlib/__init__.py b/ddtrace/appsec/_python_info/stdlib/__init__.py
index e745c392f55..8b220b0af85 100644
--- a/ddtrace/appsec/_python_info/stdlib/__init__.py
+++ b/ddtrace/appsec/_python_info/stdlib/__init__.py
@@ -3,11 +3,7 @@
 from sys import version_info


-if version_info < (3, 7, 0):
-    from .module_names_py36 import STDLIB_MODULE_NAMES
-elif version_info < (3, 8, 0):
-    from .module_names_py37 import STDLIB_MODULE_NAMES
-elif version_info < (3, 9, 0):
+if version_info < (3, 9, 0):
     from .module_names_py38 import STDLIB_MODULE_NAMES
 elif version_info < (3, 10, 0):
     from .module_names_py39 import STDLIB_MODULE_NAMES
diff --git a/ddtrace/appsec/_utils.py b/ddtrace/appsec/_utils.py
index 79f8f8b5311..e4dbae7a27f 100644
--- a/ddtrace/appsec/_utils.py
+++ b/ddtrace/appsec/_utils.py
@@ -1,3 +1,5 @@
+# this module must not load any other unsafe appsec module directly
+
 import logging
 import sys
 from typing import Any
@@ -5,6 +7,7 @@

 from ddtrace.appsec._constants import API_SECURITY
 from ddtrace.appsec._constants import APPSEC
+from ddtrace.appsec._constants import SPAN_DATA_NAMES
 from ddtrace.internal._unpatched import unpatched_json_loads
 from ddtrace.internal.compat import to_unicode
 from ddtrace.internal.logger import get_logger
@@ -21,7 +24,6 @@ def parse_response_body(raw_body):
     import xmltodict

     from ddtrace.appsec import _asm_request_context
-    from ddtrace.appsec._constants import SPAN_DATA_NAMES
     from ddtrace.contrib.internal.trace_utils import _get_header_value_case_insensitive

     if not raw_body:
diff --git a/ddtrace/constants.py b/ddtrace/constants.py
index b4694e24345..829a57a45a7 100644
--- a/ddtrace/constants.py
+++ b/ddtrace/constants.py
@@ -1,39 +1,37 @@
-from ddtrace.internal.utils.deprecations import DDTraceDeprecationWarning as _DDTraceDeprecationWarning
-from ddtrace.vendor import debtcollector as _debtcollector
-
-
-# TODO: Deprecate and remove the SAMPLE_RATE_METRIC_KEY constant.
-# This key enables legacy trace sampling support in the Datadog agent.
-_SAMPLE_RATE_METRIC_KEY = SAMPLE_RATE_METRIC_KEY = "_sample_rate"
-_SAMPLING_PRIORITY_KEY = SAMPLING_PRIORITY_KEY = "_sampling_priority_v1"
-_ANALYTICS_SAMPLE_RATE_KEY = ANALYTICS_SAMPLE_RATE_KEY = "_dd1.sr.eausr"
-_SAMPLING_AGENT_DECISION = SAMPLING_AGENT_DECISION = "_dd.agent_psr"
-_SAMPLING_RULE_DECISION = SAMPLING_RULE_DECISION = "_dd.rule_psr"
-_SAMPLING_LIMIT_DECISION = SAMPLING_LIMIT_DECISION = "_dd.limit_psr"
+"""
+This module contains constants used across ddtrace products.
+
+Constants that should NOT be referenced by ddtrace users are marked with a leading underscore.
+"""
+_SAMPLING_PRIORITY_KEY = "_sampling_priority_v1"
+_ANALYTICS_SAMPLE_RATE_KEY = "_dd1.sr.eausr"
+_SAMPLING_AGENT_DECISION = "_dd.agent_psr"
+_SAMPLING_RULE_DECISION = "_dd.rule_psr"
+_SAMPLING_LIMIT_DECISION = "_dd.limit_psr"
 _SINGLE_SPAN_SAMPLING_MECHANISM = "_dd.span_sampling.mechanism"
 _SINGLE_SPAN_SAMPLING_RATE = "_dd.span_sampling.rule_rate"
 _SINGLE_SPAN_SAMPLING_MAX_PER_SEC = "_dd.span_sampling.max_per_second"
 _SINGLE_SPAN_SAMPLING_MAX_PER_SEC_NO_LIMIT = -1
 _APM_ENABLED_METRIC_KEY = "_dd.apm.enabled"

-_ORIGIN_KEY = ORIGIN_KEY = "_dd.origin"
-_USER_ID_KEY = USER_ID_KEY = "_dd.p.usr.id"
-_HOSTNAME_KEY = HOSTNAME_KEY = "_dd.hostname"
-_RUNTIME_FAMILY = RUNTIME_FAMILY = "_dd.runtime_family"
+_ORIGIN_KEY = "_dd.origin"
+_USER_ID_KEY = "_dd.p.usr.id"
+_HOSTNAME_KEY = "_dd.hostname"
+_RUNTIME_FAMILY = "_dd.runtime_family"
 ENV_KEY = "env"
 VERSION_KEY = "version"
 SERVICE_KEY = "service.name"
-_BASE_SERVICE_KEY = BASE_SERVICE_KEY = "_dd.base_service"
+_BASE_SERVICE_KEY = "_dd.base_service"
 SERVICE_VERSION_KEY = "service.version"
 SPAN_KIND = "span.kind"
-_SPAN_MEASURED_KEY = SPAN_MEASURED_KEY = "_dd.measured"
-_KEEP_SPANS_RATE_KEY = KEEP_SPANS_RATE_KEY = "_dd.tracer_kr"
-_MULTIPLE_IP_HEADERS = MULTIPLE_IP_HEADERS = "_dd.multiple-ip-headers"
+_SPAN_MEASURED_KEY = "_dd.measured"
+_KEEP_SPANS_RATE_KEY = "_dd.tracer_kr"
+_MULTIPLE_IP_HEADERS = "_dd.multiple-ip-headers"
 APPSEC_ENV = "DD_APPSEC_ENABLED"
-_CONFIG_ENDPOINT_ENV = CONFIG_ENDPOINT_ENV = "_DD_CONFIG_ENDPOINT"
-_CONFIG_ENDPOINT_RETRIES_ENV = CONFIG_ENDPOINT_RETRIES_ENV = "_DD_CONFIG_ENDPOINT_RETRIES"
-_CONFIG_ENDPOINT_TIMEOUT_ENV = CONFIG_ENDPOINT_TIMEOUT_ENV = "_DD_CONFIG_ENDPOINT_TIMEOUT"
+_CONFIG_ENDPOINT_ENV = "_DD_CONFIG_ENDPOINT"
+_CONFIG_ENDPOINT_RETRIES_ENV = "_DD_CONFIG_ENDPOINT_RETRIES"
+_CONFIG_ENDPOINT_TIMEOUT_ENV = "_DD_CONFIG_ENDPOINT_TIMEOUT"
 IAST_ENV = "DD_IAST_ENABLED"

 MANUAL_DROP_KEY = "manual.drop"
@@ -53,38 +51,3 @@
 AUTO_KEEP = 1
 # Use this to explicitly inform the backend that a trace should be kept and stored.
 USER_KEEP = 2
-
-
-_DEPRECATED_MODULE_ATTRIBUTES = [
-    "ANALYTICS_SAMPLE_RATE_KEY",
-    "SAMPLE_RATE_METRIC_KEY",
-    "SAMPLING_PRIORITY_KEY",
-    "SAMPLING_AGENT_DECISION",
-    "SAMPLING_RULE_DECISION",
-    "SAMPLING_LIMIT_DECISION",
-    "USER_ID_KEY",
-    "ORIGIN_KEY",
-    "HOSTNAME_KEY",
-    "RUNTIME_FAMILY",
-    "BASE_SERVICE_KEY",
-    "SPAN_MEASURED_KEY",
-    "KEEP_SPANS_RATE_KEY",
-    "MULTIPLE_IP_HEADERS",
-    "CONFIG_ENDPOINT_ENV",
-    "CONFIG_ENDPOINT_RETRIES_ENV",
-    "CONFIG_ENDPOINT_TIMEOUT_ENV",
-]
-
-
-def __getattr__(name):
-    if name in _DEPRECATED_MODULE_ATTRIBUTES:
-        _debtcollector.deprecate(
-            ("%s.%s is deprecated" % (__name__, name)),
-            category=_DDTraceDeprecationWarning,
-            removal_version="3.0.0",
-        )
-
-    if name in globals():
-        return globals()[name]
-
-    raise AttributeError("%s has no attribute %s", __name__, name)
diff --git a/ddtrace/context.py b/ddtrace/context.py
deleted file mode 100644
index 843ef510c38..00000000000
--- a/ddtrace/context.py
+++ /dev/null
@@ -1,10 +0,0 @@
-from ddtrace.internal.utils.deprecations import DDTraceDeprecationWarning
-from ddtrace.trace import Context  # noqa: F401
-from ddtrace.vendor.debtcollector import deprecate
-
-
-deprecate(
-    "The ddtrace.context module is deprecated and will be removed from the public API.",
-    message="Context should be imported from the ddtrace.trace package",
-    category=DDTraceDeprecationWarning,
-)
diff --git a/ddtrace/contrib/_langchain.py b/ddtrace/contrib/_langchain.py
index d36cd76f3f1..4d419cc5d5c 100644
--- a/ddtrace/contrib/_langchain.py
+++ b/ddtrace/contrib/_langchain.py
@@ -1,9 +1,8 @@
 """
-The LangChain integration instruments the LangChain Python library to emit metrics,
-traces, and logs (logs are disabled by default) for requests made to the LLMs,
+The LangChain integration instruments the LangChain Python library to emit traces for requests made to the LLMs,
 chat models, embeddings, chains, and vector store interfaces.

-All metrics, logs, and traces submitted from the LangChain integration are tagged by:
+All traces submitted from the LangChain integration are tagged by:

 - ``service``, ``env``, ``version``: see the `Unified Service Tagging docs `_.
 - ``langchain.request.provider``: LLM provider used in the request.
@@ -26,58 +25,6 @@
 - Total cost metrics for OpenAI requests


-Metrics
-~~~~~~~
-
-The following metrics are collected by default by the LangChain integration.
-
-.. important::
-    If the Agent is configured to use a non-default Statsd hostname or port, use ``DD_DOGSTATSD_URL`` to configure
-    ``ddtrace`` to use it.
-
-
-.. py:data:: langchain.request.duration
-
-   The duration of the LangChain request in seconds.
-
-   Type: ``distribution``
-
-
-.. py:data:: langchain.request.error
-
-   The number of errors from requests made with LangChain.
-
-   Type: ``count``
-
-
-.. py:data:: langchain.tokens.prompt
-
-   The number of tokens used in the prompt of a LangChain request.
-
-   Type: ``distribution``
-
-
-.. py:data:: langchain.tokens.completion
-
-   The number of tokens used in the completion of a LangChain response.
-
-   Type: ``distribution``
-
-
-.. py:data:: langchain.tokens.total
-
-   The total number of tokens used in the prompt and completion of a LangChain request/response.
-
-   Type: ``distribution``
-
-
-.. py:data:: langchain.tokens.total_cost
-
-   The estimated cost in USD based on token usage.
-
-   Type: ``count``
-
-
 (beta) Prompt and Completion Sampling
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -89,18 +36,6 @@
 - Prompt inputs, chain inputs, and outputs for the ``Chain`` interface.
 - Query inputs and document outputs for the ``VectorStore`` interface.

-Prompt and message inputs and completions can also be emitted as log data.
-Logs are **not** emitted by default. When logs are enabled they are sampled at ``0.1``.
-
-Read the **Global Configuration** section for information about enabling logs and configuring sampling
-rates.
-
-.. important::
-
-    To submit logs, you must set the ``DD_API_KEY`` environment variable.
-
-    Set ``DD_SITE`` to send logs to a Datadog site such as ``datadoghq.eu``. The default is ``datadoghq.com``.
-

 Enabling
 ~~~~~~~~
@@ -143,32 +78,6 @@

     Default: ``DD_SERVICE``


-.. py:data:: ddtrace.config.langchain["logs_enabled"]
-
-   Enable collection of prompts and completions as logs. You can adjust the rate of prompts and completions collected
-   using the sample rate configuration described below.
-
-   Alternatively, you can set this option with the ``DD_LANGCHAIN_LOGS_ENABLED`` environment
-   variable.
-
-   Note that you must set the ``DD_API_KEY`` environment variable to enable sending logs.
-
-   Default: ``False``
-
-
-.. py:data:: ddtrace.config.langchain["metrics_enabled"]
-
-   Enable collection of LangChain metrics.
-
-   If the Datadog Agent is configured to use a non-default Statsd hostname
-   or port, use ``DD_DOGSTATSD_URL`` to configure ``ddtrace`` to use it.
-
-   Alternatively, you can set this option with the ``DD_LANGCHAIN_METRICS_ENABLED`` environment
-   variable.
-
-   Default: ``True``
-
-
 .. py:data:: (beta) ddtrace.config.langchain["span_char_limit"]

    Configure the maximum number of characters for the following data within span tags:
@@ -195,14 +104,4 @@

    Default: ``1.0``

-
-.. py:data:: (beta) ddtrace.config.langchain["log_prompt_completion_sample_rate"]
-
-   Configure the sample rate for the collection of prompts and completions as logs.
-
-   Alternatively, you can set this option with the ``DD_LANGCHAIN_LOG_PROMPT_COMPLETION_SAMPLE_RATE`` environment
-   variable.
-
-   Default: ``0.1``
-
 """  # noqa: E501
diff --git a/ddtrace/contrib/_openai.py b/ddtrace/contrib/_openai.py
index 8e2eb87aeb5..0642bbb0881 100644
--- a/ddtrace/contrib/_openai.py
+++ b/ddtrace/contrib/_openai.py
@@ -1,10 +1,8 @@
 """
-The OpenAI integration instruments the OpenAI Python library to emit metrics,
-traces, and logs (logs are disabled by default) for requests made to the models,
-completions, chat completions, edits, images, embeddings, audio, files, fine-tunes,
-and moderations endpoints.
+The OpenAI integration instruments the OpenAI Python library to emit traces for requests made to the models,
+completions, chat completions, images, embeddings, audio, files, and moderations endpoints.

-All metrics, logs, and traces submitted from the OpenAI integration are tagged by:
+All traces submitted from the OpenAI integration are tagged by:

 - ``service``, ``env``, ``version``: see the `Unified Service Tagging docs `_.
 - ``openai.request.endpoint``: OpenAI API endpoint used in the request.
@@ -15,84 +13,6 @@
 - ``openai.user.api_key``: OpenAI API key used to make the request (obfuscated to match the OpenAI UI representation
   ``sk-...XXXX`` where ``XXXX`` is the last 4 digits of the key).

-Metrics
-~~~~~~~
-
-The following metrics are collected by default by the OpenAI integration.
-
-.. important::
-    If the Agent is configured to use a non-default Statsd hostname or port, use ``DD_DOGSTATSD_URL`` to configure
-    ``ddtrace`` to use it.
-
-
-.. important::
-    Ratelimit and token metrics only reflect usage of the supported completions, chat completions, and embedding
-    endpoints. Usage of other OpenAI endpoints will not be recorded as they are not provided.
-
-
-.. py:data:: openai.request.duration
-
-   The duration of the OpenAI request in seconds.
-
-   Type: ``distribution``
-
-
-.. py:data:: openai.request.error
-
-   The number of errors from requests made to OpenAI.
-
-   Type: ``count``
-
-
-.. py:data:: openai.ratelimit.requests
-
-   The maximum number of OpenAI requests permitted before exhausting the rate limit.
-
-   Type: ``gauge``
-
-
-.. py:data:: openai.ratelimit.tokens
-
-   The maximum number of OpenAI tokens permitted before exhausting the rate limit.
-
-   Type: ``gauge``
-
-
-.. py:data:: openai.ratelimit.remaining.requests
-
-   The remaining number of OpenAI requests permitted before exhausting the rate limit.
-
-   Type: ``gauge``
-
-
-.. py:data:: openai.ratelimit.remaining.tokens
-
-   The remaining number of OpenAI tokens permitted before exhausting the rate limit.
-
-   Type: ``gauge``
-
-
-.. py:data:: openai.tokens.prompt
-
-   The number of tokens used in the prompt of an OpenAI request.
-
-   Type: ``distribution``
-
-
-.. py:data:: openai.tokens.completion
-
-   The number of tokens used in the completion of a OpenAI response.
-
-   Type: ``distribution``
-
-
-.. py:data:: openai.tokens.total
-
-   The total number of tokens used in the prompt and completion of a OpenAI request/response.
-
-   Type: ``distribution``
-
-
 (beta) Prompt and Completion Sampling
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -101,22 +21,9 @@
 - Prompt inputs and completions for the ``completions`` endpoint.
 - Message inputs and completions for the ``chat.completions`` endpoint.
 - Embedding inputs for the ``embeddings`` endpoint.
-- Edit inputs, instructions, and completions for the ``edits`` endpoint.
 - Image input filenames and completion URLs for the ``images`` endpoint.
 - Audio input filenames and completions for the ``audio`` endpoint.

-Prompt and message inputs and completions can also be emitted as log data.
-Logs are **not** emitted by default. When logs are enabled they are sampled at ``0.1``.
-
-Read the **Global Configuration** section for information about enabling logs and configuring sampling
-rates.
-
-.. important::
-
-    To submit logs, you must set the ``DD_API_KEY`` environment variable.
-
-    Set ``DD_SITE`` to send logs to a Datadog site such as ``datadoghq.eu``. The default is ``datadoghq.com``.
-

 (beta) Streamed Responses Support
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -172,32 +79,6 @@

     Default: ``DD_SERVICE``


-.. py:data:: ddtrace.config.openai["logs_enabled"]
-
-   Enable collection of prompts and completions as logs. You can adjust the rate of prompts and completions collected
-   using the sample rate configuration described below.
-
-   Alternatively, you can set this option with the ``DD_OPENAI_LOGS_ENABLED`` environment
-   variable.
-
-   Note that you must set the ``DD_API_KEY`` environment variable to enable sending logs.
-
-   Default: ``False``
-
-
-.. py:data:: ddtrace.config.openai["metrics_enabled"]
-
-   Enable collection of OpenAI metrics.
-
-   If the Datadog Agent is configured to use a non-default Statsd hostname
-   or port, use ``DD_DOGSTATSD_URL`` to configure ``ddtrace`` to use it.
-
-   Alternatively, you can set this option with the ``DD_OPENAI_METRICS_ENABLED`` environment
-   variable.
-
-   Default: ``True``
-
-
 .. py:data:: (beta) ddtrace.config.openai["span_char_limit"]

    Configure the maximum number of characters for the following data within span tags:
@@ -225,16 +106,6 @@

    Default: ``1.0``

-
..
py:data:: (beta) ddtrace.config.openai["log_prompt_completion_sample_rate"]
-
-    Configure the sample rate for the collection of prompts and completions as logs.
-
-    Alternatively, you can set this option with the ``DD_OPENAI_LOG_PROMPT_COMPLETION_SAMPLE_RATE`` environment
-    variable.
-
-    Default: ``0.1``
-
-
 Instance Configuration
 ~~~~~~~~~~~~~~~~~~~~~~
diff --git a/ddtrace/contrib/_yaaredis.py b/ddtrace/contrib/_yaaredis.py
deleted file mode 100644
index 65917b03c29..00000000000
--- a/ddtrace/contrib/_yaaredis.py
+++ /dev/null
@@ -1,67 +0,0 @@
-"""
-The yaaredis integration traces yaaredis requests.
-
-
-Enabling
-~~~~~~~~
-
-The yaaredis integration is enabled automatically when using
-:ref:`ddtrace-run` or :ref:`import ddtrace.auto`.
-
-Or use :func:`patch()` to manually enable the integration::
-
-    from ddtrace import patch
-    patch(yaaredis=True)
-
-
-Global Configuration
-~~~~~~~~~~~~~~~~~~~~
-
-.. py:data:: ddtrace.config.yaaredis["service"]
-
-    The service name reported by default for yaaredis traces.
-
-    This option can also be set with the ``DD_YAAREDIS_SERVICE`` environment
-    variable.
-
-    Default: ``"redis"``
-
-.. py:data:: ddtrace.config.yaaredis["cmd_max_length"]
-
-    Max allowable size for the yaaredis command span tag.
-    Anything beyond the max length will be replaced with ``"..."``.
-
-    This option can also be set with the ``DD_YAAREDIS_CMD_MAX_LENGTH`` environment
-    variable.
-
-    Default: ``1000``
-
-.. py:data:: ddtrace.config.aredis["resource_only_command"]
-
-    The span resource will only include the command executed. To include all
-    arguments in the span resource, set this value to ``False``.
-
-    This option can also be set with the ``DD_REDIS_RESOURCE_ONLY_COMMAND`` environment
-    variable.
-
-    Default: ``True``
-
-
-Instance Configuration
-~~~~~~~~~~~~~~~~~~~~~~
-
-To configure particular yaaredis instances use the :class:`Pin ` API::
-
-    import yaaredis
-    from ddtrace.trace import Pin
-
-    client = yaaredis.StrictRedis(host="localhost", port=6379)
-
-    # Override service name for this instance
-    Pin.override(client, service="my-custom-queue")
-
-    # Traces reported for this client will now have "my-custom-queue"
-    # as the service name.
-    async def example():
-        await client.get("my-key")
-"""
diff --git a/ddtrace/contrib/aiohttp.py b/ddtrace/contrib/aiohttp.py
index dbb5def90d1..d001139dde8 100644
--- a/ddtrace/contrib/aiohttp.py
+++ b/ddtrace/contrib/aiohttp.py
@@ -36,6 +36,13 @@
     Default: ``False``
+.. py:data:: ddtrace.config.aiohttp['disable_stream_timing_for_mem_leak']
+
+    Whether or not to address a potential memory leak in the aiohttp integration.
+    When set to ``True``, this flag may cause streamed response span timing to be inaccurate.
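A minimal sketch of opting in to this flag; the environment variable name comes from the ``ddtrace/contrib/internal/aiohttp/patch.py`` hunk below, while the programmatic toggle is an assumption based on the middleware reading ``config.aiohttp`` at request time::

    # Sketch: set the variable before the application starts
    #   DD_AIOHTTP_CLIENT_DISABLE_STREAM_TIMING_FOR_MEM_LEAK=true ddtrace-run python app.py
    #
    # or toggle the option in code (assumed equivalent):
    from ddtrace import config

    config.aiohttp["disable_stream_timing_for_mem_leak"] = True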
+ + Default: ``False`` + Server ****** diff --git a/ddtrace/contrib/internal/aiohttp/middlewares.py b/ddtrace/contrib/internal/aiohttp/middlewares.py index b3dde240d44..c1a5b8e4f3b 100644 --- a/ddtrace/contrib/internal/aiohttp/middlewares.py +++ b/ddtrace/contrib/internal/aiohttp/middlewares.py @@ -59,8 +59,9 @@ async def attach_context(request): request[REQUEST_CONFIG_KEY] = app[CONFIG_KEY] try: response = await handler(request) - if isinstance(response, web.StreamResponse): - request.task.add_done_callback(lambda _: finish_request_span(request, response)) + if not config.aiohttp["disable_stream_timing_for_mem_leak"]: + if isinstance(response, web.StreamResponse): + request.task.add_done_callback(lambda _: finish_request_span(request, response)) return response except Exception: req_span.set_traceback() @@ -134,9 +135,13 @@ async def on_prepare(request, response): the trace middleware execution. """ # NB isinstance is not appropriate here because StreamResponse is a parent of the other - # aiohttp response types - if type(response) is web.StreamResponse and not response.task.done(): - return + # aiohttp response types. However in some cases this can also lead to missing the closing of + # spans, leading to a memory leak, which is why we have this flag. + # todo: this is a temporary fix for a memory leak in aiohttp. We should find a way to + # consistently close spans with the correct timing. + if not config.aiohttp["disable_stream_timing_for_mem_leak"]: + if type(response) is web.StreamResponse and not response.task.done(): + return finish_request_span(request, response) diff --git a/ddtrace/contrib/internal/aiohttp/patch.py b/ddtrace/contrib/internal/aiohttp/patch.py index 900a8d26e41..4643ba2ae43 100644 --- a/ddtrace/contrib/internal/aiohttp/patch.py +++ b/ddtrace/contrib/internal/aiohttp/patch.py @@ -22,6 +22,7 @@ from ddtrace.internal.utils import get_argument_value from ddtrace.internal.utils.formats import asbool from ddtrace.propagation.http import HTTPPropagator +from ddtrace.settings._core import get_config as _get_config from ddtrace.trace import Pin @@ -31,7 +32,12 @@ # Server config config._add( "aiohttp", - dict(distributed_tracing=True), + dict( + distributed_tracing=True, + disable_stream_timing_for_mem_leak=asbool( + _get_config("DD_AIOHTTP_CLIENT_DISABLE_STREAM_TIMING_FOR_MEM_LEAK", default=False) + ), + ), ) config._add( diff --git a/ddtrace/contrib/internal/asgi/middleware.py b/ddtrace/contrib/internal/asgi/middleware.py index 7da2b11b929..70b284c8882 100644 --- a/ddtrace/contrib/internal/asgi/middleware.py +++ b/ddtrace/contrib/internal/asgi/middleware.py @@ -150,12 +150,8 @@ async def __call__(self, scope, receive, send): if scope["type"] == "http": operation_name = schematize_url_operation(operation_name, direction=SpanDirection.INBOUND, protocol="http") - # Calling ddtrace.trace.Pin(...) 
with the `tracer` argument is deprecated - # Remove this if statement when the `tracer` argument is removed - if self.tracer is ddtrace.tracer: - pin = ddtrace.trace.Pin(service="asgi") - else: - pin = ddtrace.trace.Pin(service="asgi", tracer=self.tracer) + pin = ddtrace.trace.Pin(service="asgi") + pin._tracer = self.tracer with core.context_with_data( "asgi.__call__", diff --git a/ddtrace/contrib/internal/asyncio/compat.py b/ddtrace/contrib/internal/asyncio/compat.py deleted file mode 100644 index 95be608a3cc..00000000000 --- a/ddtrace/contrib/internal/asyncio/compat.py +++ /dev/null @@ -1,29 +0,0 @@ -import asyncio - -from ddtrace.vendor.debtcollector import deprecate - - -if hasattr(asyncio, "current_task"): - - def asyncio_current_task(): - deprecate( - "ddtrace.contrib.internal.asyncio.create_task(..) is deprecated. " - "The ddtrace library fully supports propagating " - "trace contextes to async tasks. No additional configurations are required.", - version="3.0.0", - ) - try: - return asyncio.current_task() - except RuntimeError: - return None - -else: - - def asyncio_current_task(): - deprecate( - "ddtrace.contrib.internal.asyncio.create_task(..) is deprecated. " - "The ddtrace library fully supports propagating " - "trace contextes to async tasks. No additional configurations are required.", - version="3.0.0", - ) - return asyncio.Task.current_task() diff --git a/ddtrace/contrib/internal/asyncio/helpers.py b/ddtrace/contrib/internal/asyncio/helpers.py deleted file mode 100644 index e5d56705aa5..00000000000 --- a/ddtrace/contrib/internal/asyncio/helpers.py +++ /dev/null @@ -1,105 +0,0 @@ -""" -This module includes a list of convenience methods that -can be used to simplify some operations while handling -Context and Spans in instrumented ``asyncio`` code. -""" -import asyncio - -import ddtrace -from ddtrace.contrib.internal.asyncio.provider import AsyncioContextProvider -from ddtrace.contrib.internal.asyncio.wrappers import wrapped_create_task -from ddtrace.vendor.debtcollector import deprecate - - -def set_call_context(task, ctx): - """ - Updates the ``Context`` for the given Task. Useful when you need to - pass the context among different tasks. - - This method is available for backward-compatibility. Use the - ``AsyncioContextProvider`` API to set the current active ``Context``. - """ - deprecate( - "ddtrace.contrib.internal.asyncio.set_call_context(..) is deprecated. " - "The ddtrace library fully supports propagating " - "trace contextes to async tasks. No additional configurations are required.", - version="3.0.0", - ) - setattr(task, AsyncioContextProvider._CONTEXT_ATTR, ctx) - - -def ensure_future(coro_or_future, *, loop=None, tracer=None): - """Wrapper that sets a context to the newly created Task. - - If the current task already has a Context, it will be attached to the new Task so the Trace list will be preserved. - """ - deprecate( - "ddtrace.contrib.internal.asyncio.ensure_future(..) is deprecated. " - "The ddtrace library fully supports propagating " - "trace contextes to async tasks. No additional configurations are required.", - version="3.0.0", - ) - tracer = tracer or ddtrace.tracer - current_ctx = tracer.current_trace_context() - task = asyncio.ensure_future(coro_or_future, loop=loop) - set_call_context(task, current_ctx) - return task - - -def run_in_executor(loop, executor, func, *args, tracer=None): - """Wrapper function that sets a context to the newly created Thread. 
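The asgi hunk above illustrates the Pin replacement this diff repeats in the django, mongoengine, pylibmc, sqlalchemy, and tornado integrations below: build the ``Pin`` without the deprecated ``tracer`` argument, then attach the tracer through the private ``_tracer`` attribute. A minimal standalone sketch, where ``client`` and ``custom_tracer`` are placeholder names::

    import ddtrace

    def attach_pin(client, custom_tracer, service="my-service"):
        # Construct the Pin without the removed `tracer` keyword...
        pin = ddtrace.trace.Pin(service=service)
        # ...then set the tracer on the private attribute, mirroring the hunks in this diff.
        pin._tracer = custom_tracer
        pin.onto(client)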
- - If the current task has a Context, it will be attached as an empty Context with the current_span activated to - inherit the ``trace_id`` and the ``parent_id``. - - Because the Executor can run the Thread immediately or after the - coroutine is executed, we may have two different scenarios: - * the Context is copied in the new Thread and the trace is sent twice - * the coroutine flushes the Context and when the Thread copies the - Context it is already empty (so it will be a root Span) - - To support both situations, we create a new Context that knows only what was - the latest active Span when the new thread was created. In this new thread, - we fallback to the thread-local ``Context`` storage. - - """ - deprecate( - "ddtrace.contrib.internal.asyncio.run_in_executor(..) is deprecated. " - "The ddtrace library fully supports propagating " - "trace contexts to async tasks. No additional configurations are required.", - version="3.0.0", - ) - tracer = tracer or ddtrace.tracer - current_ctx = tracer.current_trace_context() - - # prepare the future using an executor wrapper - future = loop.run_in_executor(executor, _wrap_executor, func, args, tracer, current_ctx) - return future - - -def _wrap_executor(fn, args, tracer, ctx): - """ - This function is executed in the newly created Thread so the right - ``Context`` can be set in the thread-local storage. This operation - is safe because the ``Context`` class is thread-safe and can be - updated concurrently. - """ - # the AsyncioContextProvider knows that this is a new thread - # so it is legit to pass the Context in the thread-local storage; - # fn() will be executed outside the asyncio loop as a synchronous code - tracer.context_provider.activate(ctx) - return fn(*args) - - -def create_task(*args, **kwargs): - """This function spawns a task with a Context that inherits the - `trace_id` and the `parent_id` from the current active one if available. - """ - deprecate( - "ddtrace.contrib.internal.asyncio.create_task(..) is deprecated. " - "The ddtrace library fully supports propagating " - "trace contexts to async tasks. No additional configurations are required.", - version="3.0.0", - ) - loop = asyncio.get_event_loop() - return wrapped_create_task(loop.create_task, None, args, kwargs) diff --git a/ddtrace/contrib/internal/asyncio/provider.py b/ddtrace/contrib/internal/asyncio/provider.py deleted file mode 100644 index fac41470740..00000000000 --- a/ddtrace/contrib/internal/asyncio/provider.py +++ /dev/null @@ -1,83 +0,0 @@ -import asyncio - -from ddtrace._trace.provider import BaseContextProvider -from ddtrace._trace.provider import DatadogContextMixin -from ddtrace.trace import Span -from ddtrace.vendor.debtcollector import deprecate - - -class AsyncioContextProvider(BaseContextProvider, DatadogContextMixin): - """Manages the active context for asyncio execution. Framework - instrumentation that is built on top of the ``asyncio`` library, should - use this provider when contextvars are not available (Python versions - less than 3.7). - - This Context Provider inherits from ``DefaultContextProvider`` because - it uses a thread-local storage when the ``Context`` is propagated to - a different thread, than the one that is running the async loop. - """ - - # Task attribute used to set/get the context - _CONTEXT_ATTR = "__datadog_context" - - def __init__(self) -> None: - deprecate( - "The `ddtrace.contrib.internal.asyncio.AsyncioContextProvider` class is deprecated." 
- " Use `ddtrace.DefaultContextProvider` instead.", - version="3.0.0", - ) - super().__init__() - - def activate(self, context, loop=None): - """Sets the scoped ``Context`` for the current running ``Task``.""" - loop = self._get_loop(loop) - if not loop: - super(AsyncioContextProvider, self).activate(context) - return context - - # the current unit of work (if tasks are used) - task = asyncio.Task.current_task(loop=loop) - if task: - setattr(task, self._CONTEXT_ATTR, context) - return context - - def _get_loop(self, loop=None): - """Helper to try and resolve the current loop""" - try: - return loop or asyncio.get_event_loop() - except RuntimeError: - # Detects if a loop is available in the current thread; - # DEV: This happens when a new thread is created from the out that is running the async loop - # DEV: It's possible that a different Executor is handling a different Thread that - # works with blocking code. In that case, we fallback to a thread-local Context. - pass - return None - - def _has_active_context(self, loop=None): - """Helper to determine if we have a currently active context""" - loop = self._get_loop(loop=loop) - if loop is None: - return super(AsyncioContextProvider, self)._has_active_context() - - # the current unit of work (if tasks are used) - task = asyncio.Task.current_task(loop=loop) - if task is None: - return False - - ctx = getattr(task, self._CONTEXT_ATTR, None) - return ctx is not None - - def active(self, loop=None): - """Returns the active context for the execution.""" - loop = self._get_loop(loop=loop) - if not loop: - return super(AsyncioContextProvider, self).active() - - # the current unit of work (if tasks are used) - task = asyncio.Task.current_task(loop=loop) - if task is None: - return None - ctx = getattr(task, self._CONTEXT_ATTR, None) - if isinstance(ctx, Span): - return self._update_active(ctx) - return ctx diff --git a/ddtrace/contrib/internal/asyncio/wrappers.py b/ddtrace/contrib/internal/asyncio/wrappers.py deleted file mode 100644 index 1166fed96c3..00000000000 --- a/ddtrace/contrib/internal/asyncio/wrappers.py +++ /dev/null @@ -1,25 +0,0 @@ -from ddtrace.contrib.internal.asyncio.compat import asyncio_current_task -from ddtrace.contrib.internal.asyncio.provider import AsyncioContextProvider - - -def wrapped_create_task(wrapped, instance, args, kwargs): - """Wrapper for ``create_task(coro)`` that propagates the current active - ``Context`` to the new ``Task``. This function is useful to connect traces - of detached executions. 
- - Note: we can't just link the task contexts due to the following scenario: - * begin task A - * task A starts task B1..B10 - * finish task B1-B9 (B10 still on trace stack) - * task A starts task C - * now task C gets parented to task B10 since it's still on the stack, - however was not actually triggered by B10 - """ - new_task = wrapped(*args, **kwargs) - current_task = asyncio_current_task() - - ctx = getattr(current_task, AsyncioContextProvider._CONTEXT_ATTR, None) - if ctx: - setattr(new_task, AsyncioContextProvider._CONTEXT_ATTR, ctx) - - return new_task diff --git a/ddtrace/contrib/internal/django/patch.py b/ddtrace/contrib/internal/django/patch.py index ba96675fef3..3fea1e9b6a0 100644 --- a/ddtrace/contrib/internal/django/patch.py +++ b/ddtrace/contrib/internal/django/patch.py @@ -17,7 +17,6 @@ import wrapt from wrapt.importer import when_imported -import ddtrace from ddtrace import config from ddtrace.appsec._utils import _UserInfoRetriever from ddtrace.constants import SPAN_KIND @@ -149,12 +148,9 @@ def cursor(django, pin, func, instance, args, kwargs): tags = {"django.db.vendor": vendor, "django.db.alias": alias} tags.update(getattr(conn, "_datadog_tags", {})) - # Calling ddtrace.pin.Pin(...) with the `tracer` argument generates a deprecation warning. - # Remove this if statement when the `tracer` argument is removed - if pin.tracer is ddtrace.tracer: - pin = Pin(service, tags=tags) - else: - pin = Pin(service, tags=tags, tracer=pin.tracer) + tracer = pin.tracer + pin = Pin(service, tags=tags) + pin._tracer = tracer cursor = func(*args, **kwargs) diff --git a/ddtrace/contrib/internal/gevent/provider.py b/ddtrace/contrib/internal/gevent/provider.py deleted file mode 100644 index c07b0512d6d..00000000000 --- a/ddtrace/contrib/internal/gevent/provider.py +++ /dev/null @@ -1,47 +0,0 @@ -import gevent - -from ddtrace._trace.provider import BaseContextProvider -from ddtrace._trace.provider import DatadogContextMixin -from ddtrace.trace import Span -from ddtrace.vendor.debtcollector import deprecate - - -class GeventContextProvider(BaseContextProvider, DatadogContextMixin): - """Manages the active context for gevent execution. - - This provider depends on corresponding monkey patches to copy the active - context from one greenlet to another. 
- """ - - # Greenlet attribute used to set/get the context - _CONTEXT_ATTR = "__datadog_context" - - def __init__(self) -> None: - deprecate("GeventContextProvider is deprecated and will be removed in a future version.", "3.0.0") - super().__init__() - - def _get_current_context(self): - """Helper to get the active context from the current greenlet.""" - current_g = gevent.getcurrent() - if current_g is not None: - return getattr(current_g, self._CONTEXT_ATTR, None) - return None - - def _has_active_context(self): - """Helper to determine if there is an active context.""" - return self._get_current_context() is not None - - def activate(self, context): - """Sets the active context for the current running ``Greenlet``.""" - current_g = gevent.getcurrent() - if current_g is not None: - setattr(current_g, self._CONTEXT_ATTR, context) - super(GeventContextProvider, self).activate(context) - return context - - def active(self): - """Returns the active context for this execution flow.""" - ctx = self._get_current_context() - if isinstance(ctx, Span): - return self._update_active(ctx) - return ctx diff --git a/ddtrace/contrib/internal/httplib/patch.py b/ddtrace/contrib/internal/httplib/patch.py index 79a8ea2816f..7db5d59d31c 100644 --- a/ddtrace/contrib/internal/httplib/patch.py +++ b/ddtrace/contrib/internal/httplib/patch.py @@ -5,7 +5,6 @@ import wrapt from ddtrace import config -from ddtrace.appsec._common_module_patches import wrapped_request_D8CB81E472AF98A2 as _wrap_request_asm from ddtrace.constants import _ANALYTICS_SAMPLE_RATE_KEY from ddtrace.constants import SPAN_KIND from ddtrace.contrib import trace_utils @@ -77,12 +76,14 @@ def _wrap_getresponse(func, instance, args, kwargs): def _call_asm_wrap(func, instance, *args, **kwargs): + from ddtrace.appsec._common_module_patches import wrapped_request_D8CB81E472AF98A2 as _wrap_request_asm + _wrap_request_asm(func, instance, args, kwargs) def _wrap_request(func, instance, args, kwargs): # Use any attached tracer if available, otherwise use the global tracer - if asm_config._iast_enabled or asm_config._asm_enabled: + if asm_config._iast_enabled or (asm_config._asm_enabled and asm_config._ep_enabled): func_to_call = functools.partial(_call_asm_wrap, func, instance) else: func_to_call = func diff --git a/ddtrace/contrib/internal/langchain/constants.py b/ddtrace/contrib/internal/langchain/constants.py index cdc0fc47cc2..40ea9e7a993 100644 --- a/ddtrace/contrib/internal/langchain/constants.py +++ b/ddtrace/contrib/internal/langchain/constants.py @@ -80,7 +80,3 @@ } API_KEY = "langchain.request.api_key" -MODEL = "langchain.request.model" -COMPLETION_TOKENS = "langchain.tokens.completion_tokens" -PROMPT_TOKENS = "langchain.tokens.prompt_tokens" -TOTAL_COST = "langchain.tokens.total_cost" diff --git a/ddtrace/contrib/internal/langchain/patch.py b/ddtrace/contrib/internal/langchain/patch.py index 9badbf22d87..8b5b25a581e 100644 --- a/ddtrace/contrib/internal/langchain/patch.py +++ b/ddtrace/contrib/internal/langchain/patch.py @@ -41,10 +41,6 @@ from ddtrace import config from ddtrace.contrib.internal.langchain.constants import API_KEY -from ddtrace.contrib.internal.langchain.constants import COMPLETION_TOKENS -from ddtrace.contrib.internal.langchain.constants import MODEL -from ddtrace.contrib.internal.langchain.constants import PROMPT_TOKENS -from ddtrace.contrib.internal.langchain.constants import TOTAL_COST from ddtrace.contrib.internal.langchain.constants import agent_output_parser_classes from ddtrace.contrib.internal.langchain.constants import 
text_embedding_models from ddtrace.contrib.internal.langchain.constants import vectorstore_classes @@ -56,7 +52,6 @@ from ddtrace.internal.logger import get_logger from ddtrace.internal.utils import ArgumentError from ddtrace.internal.utils import get_argument_value -from ddtrace.internal.utils.formats import asbool from ddtrace.internal.utils.formats import deep_getattr from ddtrace.internal.utils.version import parse_version from ddtrace.llmobs._integrations import LangChainIntegration @@ -76,10 +71,7 @@ def get_version(): config._add( "langchain", { - "logs_enabled": asbool(os.getenv("DD_LANGCHAIN_LOGS_ENABLED", False)), - "metrics_enabled": asbool(os.getenv("DD_LANGCHAIN_METRICS_ENABLED", True)), "span_prompt_completion_sample_rate": float(os.getenv("DD_LANGCHAIN_SPAN_PROMPT_COMPLETION_SAMPLE_RATE", 1.0)), - "log_prompt_completion_sample_rate": float(os.getenv("DD_LANGCHAIN_LOG_PROMPT_COMPLETION_SAMPLE_RATE", 0.1)), "span_char_limit": int(os.getenv("DD_LANGCHAIN_SPAN_CHAR_LIMIT", 128)), }, ) @@ -118,9 +110,7 @@ def _extract_api_key(instance: Any) -> str: return "" -def _tag_openai_token_usage( - span: Span, llm_output: Dict[str, Any], propagated_cost: int = 0, propagate: bool = False -) -> None: +def _tag_openai_token_usage(span: Span, llm_output: Dict[str, Any]) -> None: """ Extract token usage from llm_output, tag on span. Calculate the total cost for each LLM/chat_model, then propagate those values up the trace so that @@ -130,23 +120,6 @@ def _tag_openai_token_usage( current_metric_value = span.get_metric("langchain.tokens.%s_tokens" % token_type) or 0 metric_value = llm_output["token_usage"].get("%s_tokens" % token_type, 0) span.set_metric("langchain.tokens.%s_tokens" % token_type, current_metric_value + metric_value) - total_cost = span.get_metric(TOTAL_COST) or 0 - if not propagate and get_openai_token_cost_for_model: - try: - completion_cost = get_openai_token_cost_for_model( - span.get_tag(MODEL), - span.get_metric(COMPLETION_TOKENS), - is_completion=True, - ) - prompt_cost = get_openai_token_cost_for_model(span.get_tag(MODEL), span.get_metric(PROMPT_TOKENS)) - total_cost = completion_cost + prompt_cost - except ValueError: - # If not in langchain's openai model catalog, the above helpers will raise a ValueError. 
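With the cost calculation and parent propagation removed, ``_tag_openai_token_usage`` reduces to accumulating token counts onto the span. A sketch reconstructed from the surrounding context lines; the loop header is an assumption, since it sits outside the hunk::

    def _tag_openai_token_usage(span, llm_output):
        # Accumulate prompt/completion/total token counts as span metrics.
        for token_type in ("prompt", "completion", "total"):
            current_metric_value = span.get_metric("langchain.tokens.%s_tokens" % token_type) or 0
            metric_value = llm_output["token_usage"].get("%s_tokens" % token_type, 0)
            span.set_metric("langchain.tokens.%s_tokens" % token_type, current_metric_value + metric_value)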
- log.debug("Cannot calculate token/cost as the model is not in LangChain's OpenAI model catalog.") - if get_openai_token_cost_for_model: - span.set_metric(TOTAL_COST, propagated_cost + total_cost) - if span._parent is not None: - _tag_openai_token_usage(span._parent, llm_output, propagated_cost=propagated_cost + total_cost, propagate=True) def _is_openai_llm_instance(instance): @@ -221,7 +194,6 @@ def traced_llm_generate(langchain, pin, func, instance, args, kwargs): completions = func(*args, **kwargs) if _is_openai_llm_instance(instance): _tag_openai_token_usage(span, completions.llm_output) - integration.record_usage(span, completions.llm_output) for idx, completion in enumerate(completions.generations): if integration.is_pc_sampled_span(span): @@ -237,28 +209,10 @@ def traced_llm_generate(langchain, pin, func, instance, args, kwargs): ) except Exception: span.set_exc_info(*sys.exc_info()) - integration.metric(span, "incr", "request.error", 1) raise finally: integration.llmobs_set_tags(span, args=args, kwargs=kwargs, response=completions, operation="llm") span.finish() - integration.metric(span, "dist", "request.duration", span.duration_ns) - if integration.is_pc_sampled_log(span): - if completions is None: - log_completions = [] - else: - log_completions = [ - [{"text": completion.text} for completion in completions] for completions in completions.generations - ] - integration.log( - span, - "info" if span.error == 0 else "error", - "sampled %s.%s" % (instance.__module__, instance.__class__.__name__), - attrs={ - "prompts": prompts, - "choices": log_completions, - }, - ) return completions @@ -292,7 +246,6 @@ async def traced_llm_agenerate(langchain, pin, func, instance, args, kwargs): completions = await func(*args, **kwargs) if _is_openai_llm_instance(instance): _tag_openai_token_usage(span, completions.llm_output) - integration.record_usage(span, completions.llm_output) for idx, completion in enumerate(completions.generations): if integration.is_pc_sampled_span(span): @@ -308,28 +261,10 @@ async def traced_llm_agenerate(langchain, pin, func, instance, args, kwargs): ) except Exception: span.set_exc_info(*sys.exc_info()) - integration.metric(span, "incr", "request.error", 1) raise finally: integration.llmobs_set_tags(span, args=args, kwargs=kwargs, response=completions, operation="llm") span.finish() - integration.metric(span, "dist", "request.duration", span.duration_ns) - if integration.is_pc_sampled_log(span): - if completions is None: - log_completions = [] - else: - log_completions = [ - [{"text": completion.text} for completion in completions] for completions in completions.generations - ] - integration.log( - span, - "info" if span.error == 0 else "error", - "sampled %s.%s" % (instance.__module__, instance.__class__.__name__), - attrs={ - "prompts": prompts, - "choices": log_completions, - }, - ) return completions @@ -376,7 +311,6 @@ def traced_chat_model_generate(langchain, pin, func, instance, args, kwargs): chat_completions = func(*args, **kwargs) if _is_openai_chat_instance(instance): _tag_openai_token_usage(span, chat_completions.llm_output) - integration.record_usage(span, chat_completions.llm_output) for message_set_idx, message_set in enumerate(chat_completions.generations): for idx, chat_completion in enumerate(message_set): @@ -417,45 +351,10 @@ def traced_chat_model_generate(langchain, pin, func, instance, args, kwargs): ) except Exception: span.set_exc_info(*sys.exc_info()) - integration.metric(span, "incr", "request.error", 1) raise finally: 
integration.llmobs_set_tags(span, args=args, kwargs=kwargs, response=chat_completions, operation="chat") span.finish() - integration.metric(span, "dist", "request.duration", span.duration_ns) - if integration.is_pc_sampled_log(span): - if chat_completions is None: - log_chat_completions = [] - else: - log_chat_completions = [ - [ - {"content": message.text, "message_type": message.message.__class__.__name__} - for message in messages - ] - for messages in chat_completions.generations - ] - integration.log( - span, - "info" if span.error == 0 else "error", - "sampled %s.%s" % (instance.__module__, instance.__class__.__name__), - attrs={ - "messages": [ - [ - { - "content": ( - message.get("content", "") - if isinstance(message, dict) - else str(getattr(message, "content", "")) - ), - "message_type": message.__class__.__name__, - } - for message in messages - ] - for messages in chat_messages - ], - "choices": log_chat_completions, - }, - ) return chat_completions @@ -502,7 +401,6 @@ async def traced_chat_model_agenerate(langchain, pin, func, instance, args, kwar chat_completions = await func(*args, **kwargs) if _is_openai_chat_instance(instance): _tag_openai_token_usage(span, chat_completions.llm_output) - integration.record_usage(span, chat_completions.llm_output) for message_set_idx, message_set in enumerate(chat_completions.generations): for idx, chat_completion in enumerate(message_set): @@ -542,45 +440,10 @@ async def traced_chat_model_agenerate(langchain, pin, func, instance, args, kwar ) except Exception: span.set_exc_info(*sys.exc_info()) - integration.metric(span, "incr", "request.error", 1) raise finally: integration.llmobs_set_tags(span, args=args, kwargs=kwargs, response=chat_completions, operation="chat") span.finish() - integration.metric(span, "dist", "request.duration", span.duration_ns) - if integration.is_pc_sampled_log(span): - if chat_completions is None: - log_chat_completions = [] - else: - log_chat_completions = [ - [ - {"content": message.text, "message_type": message.message.__class__.__name__} - for message in messages - ] - for messages in chat_completions.generations - ] - integration.log( - span, - "info" if span.error == 0 else "error", - "sampled %s.%s" % (instance.__module__, instance.__class__.__name__), - attrs={ - "messages": [ - [ - { - "content": ( - message.get("content", "") - if isinstance(message, dict) - else str(getattr(message, "content", "")) - ), - "message_type": message.__class__.__name__, - } - for message in messages - ] - for messages in chat_messages - ], - "choices": log_chat_completions, - }, - ) return chat_completions @@ -627,19 +490,10 @@ def traced_embedding(langchain, pin, func, instance, args, kwargs): span.set_metric("langchain.response.outputs.embedding_length", len(embeddings)) except Exception: span.set_exc_info(*sys.exc_info()) - integration.metric(span, "incr", "request.error", 1) raise finally: integration.llmobs_set_tags(span, args=args, kwargs=kwargs, response=embeddings, operation="embedding") span.finish() - integration.metric(span, "dist", "request.duration", span.duration_ns) - if integration.is_pc_sampled_log(span): - integration.log( - span, - "info" if span.error == 0 else "error", - "sampled %s.%s" % (instance.__module__, instance.__class__.__name__), - attrs={"inputs": [input_texts] if isinstance(input_texts, str) else input_texts}, - ) return embeddings @@ -689,12 +543,10 @@ def traced_lcel_runnable_sequence(langchain, pin, func, instance, args, kwargs): span.set_tag_str("langchain.response.outputs.%d" % idx, 
integration.trunc(str(output))) except Exception: span.set_exc_info(*sys.exc_info()) - integration.metric(span, "incr", "request.error", 1) raise finally: integration.llmobs_set_tags(span, args=[], kwargs=inputs, response=final_output, operation="chain") span.finish() - integration.metric(span, "dist", "request.duration", span.duration_ns) return final_output @@ -735,12 +587,10 @@ async def traced_lcel_runnable_sequence_async(langchain, pin, func, instance, ar span.set_tag_str("langchain.response.outputs.%d" % idx, integration.trunc(str(output))) except Exception: span.set_exc_info(*sys.exc_info()) - integration.metric(span, "incr", "request.error", 1) raise finally: integration.llmobs_set_tags(span, args=[], kwargs=inputs, response=final_output, operation="chain") span.finish() - integration.metric(span, "dist", "request.duration", span.duration_ns) return final_output @@ -793,25 +643,10 @@ def traced_similarity_search(langchain, pin, func, instance, args, kwargs): ) except Exception: span.set_exc_info(*sys.exc_info()) - integration.metric(span, "incr", "request.error", 1) raise finally: integration.llmobs_set_tags(span, args=args, kwargs=kwargs, response=documents, operation="retrieval") span.finish() - integration.metric(span, "dist", "request.duration", span.duration_ns) - if integration.is_pc_sampled_log(span): - integration.log( - span, - "info" if span.error == 0 else "error", - "sampled %s.%s" % (instance.__module__, instance.__class__.__name__), - attrs={ - "query": query, - "k": k or "", - "documents": [ - {"page_content": document.page_content, "metadata": document.metadata} for document in documents - ], - }, - ) return documents @@ -1255,29 +1090,6 @@ def unpatch(): delattr(langchain, "_datadog_integration") -def taint_outputs(instance, inputs, outputs): - from ddtrace.appsec._iast._metrics import _set_iast_error_metric - from ddtrace.appsec._iast._taint_tracking._taint_objects import get_tainted_ranges - from ddtrace.appsec._iast._taint_tracking._taint_objects import taint_pyobject - - try: - ranges = None - for key in filter(lambda x: x in inputs, instance.input_keys): - input_val = inputs.get(key) - if input_val: - ranges = get_tainted_ranges(input_val) - if ranges: - break - - if ranges: - source = ranges[0].source - for key in filter(lambda x: x in outputs, instance.output_keys): - output_value = outputs[key] - outputs[key] = taint_pyobject(output_value, source.name, source.value, source.origin) - except Exception as e: - _set_iast_error_metric("IAST propagation error. langchain taint_outputs. {}".format(e)) - - def taint_parser_output(func, instance, args, kwargs): from ddtrace.appsec._iast._metrics import _set_iast_error_metric from ddtrace.appsec._iast._taint_tracking._taint_objects import get_tainted_ranges diff --git a/ddtrace/contrib/internal/mongoengine/trace.py b/ddtrace/contrib/internal/mongoengine/trace.py index 5539cf1d2e4..49ed5ee2590 100644 --- a/ddtrace/contrib/internal/mongoengine/trace.py +++ b/ddtrace/contrib/internal/mongoengine/trace.py @@ -29,11 +29,10 @@ def __call__(self, *args, **kwargs): client = self.__wrapped__(*args, **kwargs) pin = ddtrace.trace.Pin.get_from(self) if pin: - # Calling ddtrace.trace.Pin(...) with the `tracer` argument generates a deprecation warning. 
- # Remove this if statement when the `tracer` argument is removed - if pin.tracer is ddtrace.tracer: - ddtrace.trace.Pin(service=pin.service).onto(client) - else: - ddtrace.trace.Pin(service=pin.service, tracer=pin.tracer).onto(client) + tracer = pin.tracer + pp = ddtrace.trace.Pin(service=pin.service) + if tracer is not None: + pp._tracer = tracer + pp.onto(client) return client diff --git a/ddtrace/contrib/internal/mysql/patch.py b/ddtrace/contrib/internal/mysql/patch.py index 6425bd33766..0aad999e546 100644 --- a/ddtrace/contrib/internal/mysql/patch.py +++ b/ddtrace/contrib/internal/mysql/patch.py @@ -4,8 +4,6 @@ import wrapt from ddtrace import config -from ddtrace.appsec._iast._metrics import _set_metric_iast_instrumented_sink -from ddtrace.appsec._iast.constants import VULN_SQL_INJECTION from ddtrace.contrib.dbapi import TracedConnection from ddtrace.contrib.internal.trace_utils import _convert_to_string from ddtrace.ext import db @@ -51,6 +49,9 @@ def patch(): mysql.connector.Connect = mysql.connector.connect if asm_config._iast_enabled: + from ddtrace.appsec._iast._metrics import _set_metric_iast_instrumented_sink + from ddtrace.appsec._iast.constants import VULN_SQL_INJECTION + _set_metric_iast_instrumented_sink(VULN_SQL_INJECTION) mysql.connector._datadog_patch = True diff --git a/ddtrace/contrib/internal/mysqldb/patch.py b/ddtrace/contrib/internal/mysqldb/patch.py index cde0f58629f..fe9c62bbd5e 100644 --- a/ddtrace/contrib/internal/mysqldb/patch.py +++ b/ddtrace/contrib/internal/mysqldb/patch.py @@ -4,8 +4,6 @@ from wrapt import wrap_function_wrapper as _w from ddtrace import config -from ddtrace.appsec._iast._metrics import _set_metric_iast_instrumented_sink -from ddtrace.appsec._iast.constants import VULN_SQL_INJECTION from ddtrace.constants import _SPAN_MEASURED_KEY from ddtrace.constants import SPAN_KIND from ddtrace.contrib.dbapi import TracedConnection @@ -67,6 +65,9 @@ def patch(): _w("MySQLdb", "connect", _connect) if asm_config._iast_enabled: + from ddtrace.appsec._iast._metrics import _set_metric_iast_instrumented_sink + from ddtrace.appsec._iast.constants import VULN_SQL_INJECTION + _set_metric_iast_instrumented_sink(VULN_SQL_INJECTION) diff --git a/ddtrace/contrib/internal/openai/_endpoint_hooks.py b/ddtrace/contrib/internal/openai/_endpoint_hooks.py index 00ee44aef4b..786bb67f919 100644 --- a/ddtrace/contrib/internal/openai/_endpoint_hooks.py +++ b/ddtrace/contrib/internal/openai/_endpoint_hooks.py @@ -112,7 +112,6 @@ def shared_gen(): _process_finished_stream(integration, span, kwargs, streamed_chunks, is_completion=is_completion) finally: span.finish() - integration.metric(span, "dist", "request.duration", span.duration_ns) if _is_async_generator(resp): @@ -199,16 +198,6 @@ def _record_response(self, pin, integration, span, args, kwargs, resp, error): resp = super()._record_response(pin, integration, span, args, kwargs, resp, error) if kwargs.get("stream") and error is None: return self._handle_streamed_response(integration, span, kwargs, resp, is_completion=True) - if integration.is_pc_sampled_log(span): - attrs_dict = {"prompt": kwargs.get("prompt", "")} - if error is None: - log_choices = resp.choices - if hasattr(resp.choices[0], "model_dump"): - log_choices = [choice.model_dump() for choice in resp.choices] - attrs_dict.update({"choices": log_choices}) - integration.log( - span, "info" if error is None else "error", "sampled %s" % self.OPERATION_ID, attrs=attrs_dict - ) integration.llmobs_set_tags(span, args=[], kwargs=kwargs, response=resp, 
operation="completion") if not resp: return @@ -268,14 +257,6 @@ def _record_response(self, pin, integration, span, args, kwargs, resp, error): resp = super()._record_response(pin, integration, span, args, kwargs, resp, error) if kwargs.get("stream") and error is None: return self._handle_streamed_response(integration, span, kwargs, resp, is_completion=False) - if integration.is_pc_sampled_log(span): - log_choices = resp.choices - if hasattr(resp.choices[0], "model_dump"): - log_choices = [choice.model_dump() for choice in resp.choices] - attrs_dict = {"messages": kwargs.get("messages", []), "completion": log_choices} - integration.log( - span, "info" if error is None else "error", "sampled %s" % self.OPERATION_ID, attrs=attrs_dict - ) integration.llmobs_set_tags(span, args=[], kwargs=kwargs, response=resp, operation="chat") if not resp: return @@ -518,26 +499,6 @@ def _record_request(self, pin, integration, instance, span, args, kwargs): def _record_response(self, pin, integration, span, args, kwargs, resp, error): resp = super()._record_response(pin, integration, span, args, kwargs, resp, error) - if integration.is_pc_sampled_log(span): - attrs_dict = {} - if kwargs.get("response_format", "") == "b64_json": - attrs_dict.update({"choices": [{"b64_json": "returned"} for _ in resp.data]}) - else: - log_choices = resp.data - if hasattr(resp.data[0], "model_dump"): - log_choices = [choice.model_dump() for choice in resp.data] - attrs_dict.update({"choices": log_choices}) - if "prompt" in self._request_kwarg_params: - attrs_dict.update({"prompt": kwargs.get("prompt", "")}) - if "image" in self._request_kwarg_params: - image = args[0] if len(args) >= 1 else kwargs.get("image", "") - attrs_dict.update({"image": image.name.split("/")[-1]}) - if "mask" in self._request_kwarg_params: - mask = args[1] if len(args) >= 2 else kwargs.get("mask", "") - attrs_dict.update({"mask": mask.name.split("/")[-1]}) - integration.log( - span, "info" if error is None else "error", "sampled %s" % self.OPERATION_ID, attrs=attrs_dict - ) if not resp: return choices = resp.data @@ -629,19 +590,6 @@ def _record_response(self, pin, integration, span, args, kwargs, resp, error): span.set_metric("openai.response.segments_count", len(resp_to_tag.get("segments"))) if integration.is_pc_sampled_span(span): span.set_tag_str("openai.response.text", integration.trunc(text)) - if integration.is_pc_sampled_log(span): - file_input = args[1] if len(args) >= 2 else kwargs.get("file", "") - integration.log( - span, - "info" if error is None else "error", - "sampled %s" % self.OPERATION_ID, - attrs={ - "file": getattr(file_input, "name", "").split("/")[-1], - "prompt": kwargs.get("prompt", ""), - "language": kwargs.get("language", ""), - "text": text, - }, - ) return resp diff --git a/ddtrace/contrib/internal/openai/patch.py b/ddtrace/contrib/internal/openai/patch.py index 3696314acc4..812c786dfc4 100644 --- a/ddtrace/contrib/internal/openai/patch.py +++ b/ddtrace/contrib/internal/openai/patch.py @@ -10,7 +10,6 @@ from ddtrace.contrib.trace_utils import with_traced_module from ddtrace.contrib.trace_utils import wrap from ddtrace.internal.logger import get_logger -from ddtrace.internal.utils.formats import asbool from ddtrace.internal.utils.formats import deep_getattr from ddtrace.internal.utils.version import parse_version from ddtrace.llmobs._integrations import OpenAIIntegration @@ -23,10 +22,7 @@ config._add( "openai", { - "logs_enabled": asbool(os.getenv("DD_OPENAI_LOGS_ENABLED", False)), - "metrics_enabled": 
asbool(os.getenv("DD_OPENAI_METRICS_ENABLED", True)), "span_prompt_completion_sample_rate": float(os.getenv("DD_OPENAI_SPAN_PROMPT_COMPLETION_SAMPLE_RATE", 1.0)), - "log_prompt_completion_sample_rate": float(os.getenv("DD_OPENAI_LOG_PROMPT_COMPLETION_SAMPLE_RATE", 0.1)), "span_char_limit": int(os.getenv("DD_OPENAI_SPAN_CHAR_LIMIT", 128)), }, ) @@ -183,7 +179,6 @@ def _traced_endpoint(endpoint_hook, integration, instance, pin, args, kwargs): # Record any error information if err is not None: span.set_exc_info(*sys.exc_info()) - integration.metric(span, "incr", "request.error", 1) # Pass the response and the error to the hook try: @@ -196,7 +191,6 @@ def _traced_endpoint(endpoint_hook, integration, instance, pin, args, kwargs): # Streamed responses with error will need to be finished manually as well. if not kwargs.get("stream") or err is not None: span.finish() - integration.metric(span, "dist", "request.duration", span.duration_ns) def _patched_endpoint(openai, patch_hook): @@ -256,7 +250,6 @@ async def patched_endpoint(openai, pin, func, instance, args, kwargs): @with_traced_module def patched_convert(openai, pin, func, instance, args, kwargs): """Patch convert captures header information in the openai response""" - integration = openai._datadog_integration span = pin.tracer.current_span() if not span: return func(*args, **kwargs) @@ -281,23 +274,19 @@ def patched_convert(openai, pin, func, instance, args, kwargs): if headers.get("x-ratelimit-limit-requests"): v = headers.get("x-ratelimit-limit-requests") if v is not None: - integration.metric(span, "gauge", "ratelimit.requests", int(v)) span.set_metric("openai.organization.ratelimit.requests.limit", int(v)) if headers.get("x-ratelimit-limit-tokens"): v = headers.get("x-ratelimit-limit-tokens") if v is not None: - integration.metric(span, "gauge", "ratelimit.tokens", int(v)) span.set_metric("openai.organization.ratelimit.tokens.limit", int(v)) # Gauge and set span info for remaining requests and tokens if headers.get("x-ratelimit-remaining-requests"): v = headers.get("x-ratelimit-remaining-requests") if v is not None: - integration.metric(span, "gauge", "ratelimit.remaining.requests", int(v)) span.set_metric("openai.organization.ratelimit.requests.remaining", int(v)) if headers.get("x-ratelimit-remaining-tokens"): v = headers.get("x-ratelimit-remaining-tokens") if v is not None: - integration.metric(span, "gauge", "ratelimit.remaining.tokens", int(v)) span.set_metric("openai.organization.ratelimit.tokens.remaining", int(v)) return func(*args, **kwargs) diff --git a/ddtrace/contrib/internal/pylibmc/client.py b/ddtrace/contrib/internal/pylibmc/client.py index 5c48e8465f8..e6b367b243b 100644 --- a/ddtrace/contrib/internal/pylibmc/client.py +++ b/ddtrace/contrib/internal/pylibmc/client.py @@ -51,12 +51,8 @@ def __init__(self, client=None, service=memcached.SERVICE, tracer=None, *args, * super(TracedClient, self).__init__(client) schematized_service = schematize_service_name(service) - # Calling ddtrace.trace.Pin(...) with the `tracer` argument generates a deprecation warning. 
- # Remove this if statement when the `tracer` argument is removed - if tracer is ddtrace.tracer: - pin = ddtrace.trace.Pin(service=schematized_service) - else: - pin = ddtrace.trace.Pin(service=schematized_service, tracer=tracer) + pin = ddtrace.trace.Pin(service=schematized_service) + pin._tracer = tracer pin.onto(self) # attempt to collect the pool of urls this client talks to diff --git a/ddtrace/contrib/internal/pytest/_plugin_v2.py b/ddtrace/contrib/internal/pytest/_plugin_v2.py index 79435f94576..a9736374114 100644 --- a/ddtrace/contrib/internal/pytest/_plugin_v2.py +++ b/ddtrace/contrib/internal/pytest/_plugin_v2.py @@ -60,6 +60,7 @@ from ddtrace.internal.test_visibility.api import InternalTestSession from ddtrace.internal.test_visibility.api import InternalTestSuite from ddtrace.internal.test_visibility.coverage_lines import CoverageLines +from ddtrace.settings.asm import config as asm_config from ddtrace.vendor.debtcollector import deprecate @@ -574,9 +575,10 @@ def _pytest_terminal_summary_post_yield(terminalreporter, failed_reports_initial def pytest_terminal_summary(terminalreporter, exitstatus, config): """Report flaky or failed tests""" try: - from ddtrace.appsec._iast._pytest_plugin import print_iast_report + if asm_config._iast_enabled: + from ddtrace.appsec._iast._pytest_plugin import print_iast_report - print_iast_report(terminalreporter) + print_iast_report(terminalreporter) except Exception: # noqa: E722 log.debug("Encountered error during code security summary", exc_info=True) diff --git a/ddtrace/contrib/internal/pytest/plugin.py b/ddtrace/contrib/internal/pytest/plugin.py index 23bf58d7dcb..cee6d13ce6b 100644 --- a/ddtrace/contrib/internal/pytest/plugin.py +++ b/ddtrace/contrib/internal/pytest/plugin.py @@ -17,13 +17,16 @@ import pytest from ddtrace import config -from ddtrace.appsec._iast._pytest_plugin import ddtrace_iast # noqa:F401 from ddtrace.contrib.internal.pytest._utils import _USE_PLUGIN_V2 from ddtrace.contrib.internal.pytest._utils import _extract_span from ddtrace.contrib.internal.pytest._utils import _pytest_version_supports_itr from ddtrace.settings.asm import config as asm_config +if asm_config._iast_enabled: + from ddtrace.appsec._iast._pytest_plugin import ddtrace_iast # noqa:F401 + + # pytest default settings config._add( "pytest", diff --git a/ddtrace/contrib/internal/requests/patch.py b/ddtrace/contrib/internal/requests/patch.py index eab51c2c0a4..a885e5575de 100644 --- a/ddtrace/contrib/internal/requests/patch.py +++ b/ddtrace/contrib/internal/requests/patch.py @@ -4,9 +4,6 @@ from wrapt import wrap_function_wrapper as _w from ddtrace import config -from ddtrace.appsec._common_module_patches import wrapped_request_D8CB81E472AF98A2 as _wrap_request -from ddtrace.appsec._iast._metrics import _set_metric_iast_instrumented_sink -from ddtrace.appsec._iast.constants import VULN_SSRF from ddtrace.contrib.internal.trace_utils import unwrap as _u from ddtrace.internal.schema import schematize_service_name from ddtrace.internal.utils.formats import asbool @@ -46,10 +43,16 @@ def patch(): _w("requests", "Session.send", _wrap_send) # IAST needs to wrap this function because `Session.send` is too late - _w("requests", "Session.request", _wrap_request) + if asm_config._load_modules: + from ddtrace.appsec._common_module_patches import wrapped_request_D8CB81E472AF98A2 as _wrap_request + + _w("requests", "Session.request", _wrap_request) Pin(_config=config.requests).onto(requests.Session) if asm_config._iast_enabled: + from ddtrace.appsec._iast._metrics import 
_set_metric_iast_instrumented_sink + from ddtrace.appsec._iast.constants import VULN_SSRF + _set_metric_iast_instrumented_sink(VULN_SSRF) @@ -59,5 +62,13 @@ def unpatch(): return requests.__datadog_patch = False - _u(requests.Session, "request") - _u(requests.Session, "send") + try: + _u(requests.Session, "request") + except AttributeError: + # It was not patched + pass + try: + _u(requests.Session, "send") + except AttributeError: + # It was not patched + pass diff --git a/ddtrace/contrib/internal/sqlalchemy/engine.py b/ddtrace/contrib/internal/sqlalchemy/engine.py index a3dcb324700..a20199dbcc2 100644 --- a/ddtrace/contrib/internal/sqlalchemy/engine.py +++ b/ddtrace/contrib/internal/sqlalchemy/engine.py @@ -67,12 +67,9 @@ def __init__(self, tracer, service, engine): self.name = schematize_database_operation("%s.query" % self.vendor, database_provider=self.vendor) # attach the PIN - # Calling ddtrace.trace.Pin(...) with the `tracer` argument generates a deprecation warning. - # Remove this if statement when the `tracer` argument is removed - if self.tracer is ddtrace.tracer: - Pin(service=self.service).onto(engine) - else: - Pin(tracer=tracer, service=self.service).onto(engine) + pin = Pin(service=self.service) + pin._tracer = self.tracer + pin.onto(engine) listen(engine, "before_cursor_execute", self._before_cur_exec) listen(engine, "after_cursor_execute", self._after_cur_exec) diff --git a/ddtrace/contrib/internal/sqlalchemy/patch.py b/ddtrace/contrib/internal/sqlalchemy/patch.py index 916cc53daa4..c6d6df476c1 100644 --- a/ddtrace/contrib/internal/sqlalchemy/patch.py +++ b/ddtrace/contrib/internal/sqlalchemy/patch.py @@ -1,8 +1,6 @@ import sqlalchemy from wrapt import wrap_function_wrapper as _w -from ddtrace.appsec._iast._metrics import _set_metric_iast_instrumented_sink -from ddtrace.appsec._iast.constants import VULN_SQL_INJECTION from ddtrace.contrib.internal.trace_utils import unwrap from ddtrace.settings.asm import config as asm_config @@ -24,6 +22,9 @@ def patch(): _w("sqlalchemy.engine", "create_engine", _wrap_create_engine) if asm_config._iast_enabled: + from ddtrace.appsec._iast._metrics import _set_metric_iast_instrumented_sink + from ddtrace.appsec._iast.constants import VULN_SQL_INJECTION + _set_metric_iast_instrumented_sink(VULN_SQL_INJECTION) diff --git a/ddtrace/contrib/internal/sqlite3/patch.py b/ddtrace/contrib/internal/sqlite3/patch.py index 03c79789661..68ee5779983 100644 --- a/ddtrace/contrib/internal/sqlite3/patch.py +++ b/ddtrace/contrib/internal/sqlite3/patch.py @@ -5,8 +5,6 @@ import wrapt from ddtrace import config -from ddtrace.appsec._iast._metrics import _set_metric_iast_instrumented_sink -from ddtrace.appsec._iast.constants import VULN_SQL_INJECTION from ddtrace.contrib.dbapi import FetchTracedCursor from ddtrace.contrib.dbapi import TracedConnection from ddtrace.contrib.dbapi import TracedCursor @@ -47,6 +45,9 @@ def patch(): sqlite3.dbapi2.connect = wrapped if asm_config._iast_enabled: + from ddtrace.appsec._iast._metrics import _set_metric_iast_instrumented_sink + from ddtrace.appsec._iast.constants import VULN_SQL_INJECTION + _set_metric_iast_instrumented_sink(VULN_SQL_INJECTION) diff --git a/ddtrace/contrib/internal/subprocess/patch.py b/ddtrace/contrib/internal/subprocess/patch.py index 2d66edd4737..1ffc1f2d733 100644 --- a/ddtrace/contrib/internal/subprocess/patch.py +++ b/ddtrace/contrib/internal/subprocess/patch.py @@ -58,7 +58,7 @@ def del_lst_callback(name: str): def patch() -> List[str]: - if not (asm_config._asm_enabled or 
asm_config._iast_enabled): + if not asm_config._load_modules: return [] patched: List[str] = [] @@ -66,7 +66,7 @@ def patch() -> List[str]: import subprocess # nosec should_patch_system = not trace_utils.iswrapped(os.system) - should_patch_fork = not trace_utils.iswrapped(os.fork) + should_patch_fork = (not trace_utils.iswrapped(os.fork)) if hasattr(os, "fork") else False spawnvef = getattr(os, "_spawnvef", None) should_patch_spawnvef = spawnvef is not None and not trace_utils.iswrapped(spawnvef) @@ -316,10 +316,11 @@ def unpatch() -> None: import os # nosec import subprocess # nosec - trace_utils.unwrap(os, "system") - trace_utils.unwrap(os, "_spawnvef") - trace_utils.unwrap(subprocess.Popen, "__init__") - trace_utils.unwrap(subprocess.Popen, "wait") + for obj, attr in [(os, "system"), (os, "_spawnvef"), (subprocess.Popen, "__init__"), (subprocess.Popen, "wait")]: + try: + trace_utils.unwrap(obj, attr) + except AttributeError: + pass SubprocessCmdLine._clear_cache() @@ -327,7 +328,7 @@ def unpatch() -> None: @trace_utils.with_traced_module def _traced_ossystem(module, pin, wrapped, instance, args, kwargs): try: - if asm_config._bypass_instrumentation_for_waf: + if asm_config._bypass_instrumentation_for_waf or not (asm_config._asm_enabled or asm_config._iast_enabled): return wrapped(*args, **kwargs) if isinstance(args[0], str): for callback in _STR_CALLBACKS.values(): @@ -351,6 +352,8 @@ def _traced_ossystem(module, pin, wrapped, instance, args, kwargs): @trace_utils.with_traced_module def _traced_fork(module, pin, wrapped, instance, args, kwargs): + if not (asm_config._asm_enabled or asm_config._iast_enabled): + return wrapped(*args, **kwargs) try: with pin.tracer.trace(COMMANDS.SPAN_NAME, resource="fork", span_type=SpanTypes.SYSTEM) as span: span.set_tag(COMMANDS.EXEC, ["os.fork"]) @@ -366,6 +369,8 @@ def _traced_fork(module, pin, wrapped, instance, args, kwargs): @trace_utils.with_traced_module def _traced_osspawn(module, pin, wrapped, instance, args, kwargs): + if not (asm_config._asm_enabled or asm_config._iast_enabled): + return wrapped(*args, **kwargs) try: mode, file, func_args, _, _ = args if isinstance(func_args, (list, tuple, str)): @@ -395,7 +400,7 @@ def _traced_osspawn(module, pin, wrapped, instance, args, kwargs): @trace_utils.with_traced_module def _traced_subprocess_init(module, pin, wrapped, instance, args, kwargs): try: - if asm_config._bypass_instrumentation_for_waf: + if asm_config._bypass_instrumentation_for_waf or not (asm_config._asm_enabled or asm_config._iast_enabled): return wrapped(*args, **kwargs) cmd_args = args[0] if len(args) else kwargs["args"] if isinstance(cmd_args, (list, tuple, str)): @@ -429,7 +434,7 @@ def _traced_subprocess_init(module, pin, wrapped, instance, args, kwargs): @trace_utils.with_traced_module def _traced_subprocess_wait(module, pin, wrapped, instance, args, kwargs): try: - if asm_config._bypass_instrumentation_for_waf: + if asm_config._bypass_instrumentation_for_waf or not (asm_config._asm_enabled or asm_config._iast_enabled): return wrapped(*args, **kwargs) binary = core.get_item("subprocess_popen_binary") diff --git a/ddtrace/contrib/internal/tornado/application.py b/ddtrace/contrib/internal/tornado/application.py index f36857b81b1..587912f603b 100644 --- a/ddtrace/contrib/internal/tornado/application.py +++ b/ddtrace/contrib/internal/tornado/application.py @@ -54,10 +54,6 @@ def tracer_config(__init__, app, args, kwargs): if tags: tracer.set_tags(tags) - # configure the PIN object for template rendering - # Required for backwards 
compatibility. Remove the else clause when - # the `ddtrace.trace.Pin` object no longer accepts the Pin argument. - if tracer is ddtrace.tracer: - ddtrace.trace.Pin(service=service).onto(template) - else: - ddtrace.trace.Pin(service=service, tracer=tracer).onto(template) + pin = ddtrace.trace.Pin(service=service) + pin._tracer = tracer + pin.onto(template) diff --git a/ddtrace/contrib/internal/urllib/patch.py b/ddtrace/contrib/internal/urllib/patch.py index ed5e2891f06..1ba279fb20a 100644 --- a/ddtrace/contrib/internal/urllib/patch.py +++ b/ddtrace/contrib/internal/urllib/patch.py @@ -2,9 +2,6 @@ from wrapt import wrap_function_wrapper as _w -from ddtrace.appsec._common_module_patches import wrapped_request_D8CB81E472AF98A2 as _wrap_open -from ddtrace.appsec._iast._metrics import _set_metric_iast_instrumented_sink -from ddtrace.appsec._iast.constants import VULN_SSRF from ddtrace.contrib.internal.trace_utils import unwrap as _u from ddtrace.settings.asm import config as asm_config @@ -20,8 +17,15 @@ def patch(): return urllib.request.__datadog_patch = True - _w("urllib.request", "urlopen", _wrap_open) + if asm_config._load_modules: + from ddtrace.appsec._common_module_patches import wrapped_request_D8CB81E472AF98A2 as _wrap_open + + _w("urllib.request", "urlopen", _wrap_open) + if asm_config._iast_enabled: + from ddtrace.appsec._iast._metrics import _set_metric_iast_instrumented_sink + from ddtrace.appsec._iast.constants import VULN_SSRF + _set_metric_iast_instrumented_sink(VULN_SSRF) diff --git a/ddtrace/contrib/internal/urllib3/patch.py b/ddtrace/contrib/internal/urllib3/patch.py index 6c10526c125..7c5d6adc28d 100644 --- a/ddtrace/contrib/internal/urllib3/patch.py +++ b/ddtrace/contrib/internal/urllib3/patch.py @@ -4,9 +4,6 @@ from wrapt import wrap_function_wrapper as _w from ddtrace import config -from ddtrace.appsec._common_module_patches import wrapped_request_D8CB81E472AF98A2 as _wrap_request -from ddtrace.appsec._iast._metrics import _set_metric_iast_instrumented_sink -from ddtrace.appsec._iast.constants import VULN_SSRF from ddtrace.constants import _ANALYTICS_SAMPLE_RATE_KEY from ddtrace.constants import SPAN_KIND from ddtrace.contrib import trace_utils @@ -54,14 +51,20 @@ def patch(): urllib3.__datadog_patch = True _w("urllib3", "connectionpool.HTTPConnectionPool.urlopen", _wrap_urlopen) - if hasattr(urllib3, "_request_methods"): - _w("urllib3._request_methods", "RequestMethods.request", _wrap_request) - else: - # Old version before https://github.com/urllib3/urllib3/pull/2398 - _w("urllib3.request", "RequestMethods.request", _wrap_request) + if asm_config._load_modules: + from ddtrace.appsec._common_module_patches import wrapped_request_D8CB81E472AF98A2 as _wrap_request + + if hasattr(urllib3, "_request_methods"): + _w("urllib3._request_methods", "RequestMethods.request", _wrap_request) + else: + # Old version before https://github.com/urllib3/urllib3/pull/2398 + _w("urllib3.request", "RequestMethods.request", _wrap_request) Pin().onto(urllib3.connectionpool.HTTPConnectionPool) if asm_config._iast_enabled: + from ddtrace.appsec._iast._metrics import _set_metric_iast_instrumented_sink + from ddtrace.appsec._iast.constants import VULN_SSRF + _set_metric_iast_instrumented_sink(VULN_SSRF) diff --git a/ddtrace/contrib/internal/valkey/asyncio_patch.py b/ddtrace/contrib/internal/valkey/asyncio_patch.py new file mode 100644 index 00000000000..b8d15a7c603 --- /dev/null +++ b/ddtrace/contrib/internal/valkey/asyncio_patch.py @@ -0,0 +1,36 @@ +from ddtrace import config +from 
ddtrace._trace.utils_valkey import _instrument_valkey_cmd
+from ddtrace._trace.utils_valkey import _instrument_valkey_execute_async_cluster_pipeline
+from ddtrace._trace.utils_valkey import _instrument_valkey_execute_pipeline
+from ddtrace.contrib.internal.valkey_utils import _run_valkey_command_async
+from ddtrace.internal.utils.formats import stringify_cache_args
+from ddtrace.trace import Pin
+
+
+async def instrumented_async_execute_command(func, instance, args, kwargs):
+    pin = Pin.get_from(instance)
+    if not pin or not pin.enabled():
+        return await func(*args, **kwargs)
+
+    with _instrument_valkey_cmd(pin, config.valkey, instance, args) as ctx:
+        return await _run_valkey_command_async(ctx=ctx, func=func, args=args, kwargs=kwargs)
+
+
+async def instrumented_async_execute_pipeline(func, instance, args, kwargs):
+    pin = Pin.get_from(instance)
+    if not pin or not pin.enabled():
+        return await func(*args, **kwargs)
+
+    cmds = [stringify_cache_args(c, cmd_max_len=config.valkey.cmd_max_length) for c, _ in instance.command_stack]
+    with _instrument_valkey_execute_pipeline(pin, config.valkey, cmds, instance):
+        return await func(*args, **kwargs)
+
+
+async def instrumented_async_execute_cluster_pipeline(func, instance, args, kwargs):
+    pin = Pin.get_from(instance)
+    if not pin or not pin.enabled():
+        return await func(*args, **kwargs)
+
+    cmds = [stringify_cache_args(c.args, cmd_max_len=config.valkey.cmd_max_length) for c in instance._command_stack]
+    with _instrument_valkey_execute_async_cluster_pipeline(pin, config.valkey, cmds, instance):
+        return await func(*args, **kwargs)
diff --git a/ddtrace/contrib/internal/valkey/patch.py b/ddtrace/contrib/internal/valkey/patch.py
new file mode 100644
index 00000000000..7de63f947c1
--- /dev/null
+++ b/ddtrace/contrib/internal/valkey/patch.py
@@ -0,0 +1,223 @@
+"""
+The valkey integration traces valkey requests.
+
+
+Enabling
+~~~~~~~~
+
+The valkey integration is enabled automatically when using
+:ref:`ddtrace-run` or :ref:`import ddtrace.auto`.
+
+Or use :func:`patch()` to manually enable the integration::
+
+    from ddtrace import patch
+    patch(valkey=True)
+
+
+Global Configuration
+~~~~~~~~~~~~~~~~~~~~
+
+.. py:data:: ddtrace.config.valkey["service"]
+
+   The service name reported by default for valkey traces.
+
+   This option can also be set with the ``DD_VALKEY_SERVICE`` environment
+   variable.
+
+   Default: ``"valkey"``
+
+
+.. py:data:: ddtrace.config.valkey["cmd_max_length"]
+
+   Max allowable size for the valkey command span tag.
+   Anything beyond the max length will be replaced with ``"..."``.
+
+   This option can also be set with the ``DD_VALKEY_CMD_MAX_LENGTH`` environment
+   variable.
+
+   Default: ``1000``
+
+
+.. py:data:: ddtrace.config.valkey["resource_only_command"]
+
+   The span resource will only include the command executed. To include all
+   arguments in the span resource, set this value to ``False``.
+
+   This option can also be set with the ``DD_VALKEY_RESOURCE_ONLY_COMMAND`` environment
+   variable.
+
+   Default: ``True``
+
+
+Instance Configuration
+~~~~~~~~~~~~~~~~~~~~~~
+
+To configure particular valkey instances use the :class:`Pin <ddtrace.trace.Pin>` API::
+
+    import valkey
+    from ddtrace.trace import Pin
+
+    client = valkey.StrictValkey(host="localhost", port=6379)
+
+    # Override service name for this instance
+    Pin.override(client, service="my-custom-queue")
+
+    # Traces reported for this client will now have "my-custom-queue"
+    # as the service name.
+ client.get("my-key") +""" +import os + +import valkey +import wrapt + +from ddtrace import config +from ddtrace._trace.utils_valkey import _instrument_valkey_cmd +from ddtrace._trace.utils_valkey import _instrument_valkey_execute_pipeline +from ddtrace.contrib.internal.valkey_utils import ROW_RETURNING_COMMANDS +from ddtrace.contrib.internal.valkey_utils import determine_row_count +from ddtrace.contrib.trace_utils import unwrap +from ddtrace.internal import core +from ddtrace.internal.schema import schematize_service_name +from ddtrace.internal.utils.formats import CMD_MAX_LEN +from ddtrace.internal.utils.formats import asbool +from ddtrace.internal.utils.formats import stringify_cache_args +from ddtrace.trace import Pin + + +config._add( + "valkey", + { + "_default_service": schematize_service_name("valkey"), + "cmd_max_length": int(os.getenv("DD_VALKEY_CMD_MAX_LENGTH", CMD_MAX_LEN)), + "resource_only_command": asbool(os.getenv("DD_VALKEY_RESOURCE_ONLY_COMMAND", True)), + }, +) + + +def get_version(): + # type: () -> str + return getattr(valkey, "__version__", "") + + +def patch(): + """Patch the instrumented methods + + This duplicated doesn't look nice. The nicer alternative is to use an ObjectProxy on top + of Valkey and StrictValkey. However, it means that any "import valkey.Valkey" won't be instrumented. + """ + if getattr(valkey, "_datadog_patch", False): + return + valkey._datadog_patch = True + + _w = wrapt.wrap_function_wrapper + + from .asyncio_patch import instrumented_async_execute_cluster_pipeline + from .asyncio_patch import instrumented_async_execute_command + from .asyncio_patch import instrumented_async_execute_pipeline + + _w("valkey", "Valkey.execute_command", instrumented_execute_command(config.valkey)) + _w("valkey", "Valkey.pipeline", instrumented_pipeline) + _w("valkey.client", "Pipeline.execute", instrumented_execute_pipeline(config.valkey, False)) + _w("valkey.client", "Pipeline.immediate_execute_command", instrumented_execute_command(config.valkey)) + _w("valkey.cluster", "ValkeyCluster.execute_command", instrumented_execute_command(config.valkey)) + _w("valkey.cluster", "ValkeyCluster.pipeline", instrumented_pipeline) + _w("valkey.cluster", "ClusterPipeline.execute", instrumented_execute_pipeline(config.valkey, True)) + Pin(service=None).onto(valkey.cluster.ValkeyCluster) + + _w("valkey.asyncio.client", "Valkey.execute_command", instrumented_async_execute_command) + _w("valkey.asyncio.client", "Valkey.pipeline", instrumented_pipeline) + _w("valkey.asyncio.client", "Pipeline.execute", instrumented_async_execute_pipeline) + _w("valkey.asyncio.client", "Pipeline.immediate_execute_command", instrumented_async_execute_command) + Pin(service=None).onto(valkey.asyncio.Valkey) + + _w("valkey.asyncio.cluster", "ValkeyCluster.execute_command", instrumented_async_execute_command) + _w("valkey.asyncio.cluster", "ValkeyCluster.pipeline", instrumented_pipeline) + _w("valkey.asyncio.cluster", "ClusterPipeline.execute", instrumented_async_execute_cluster_pipeline) + Pin(service=None).onto(valkey.asyncio.ValkeyCluster) + + Pin(service=None).onto(valkey.StrictValkey) + + +def unpatch(): + if getattr(valkey, "_datadog_patch", False): + valkey._datadog_patch = False + + unwrap(valkey.Valkey, "execute_command") + unwrap(valkey.Valkey, "pipeline") + unwrap(valkey.client.Pipeline, "execute") + unwrap(valkey.client.Pipeline, "immediate_execute_command") + unwrap(valkey.cluster.ValkeyCluster, "execute_command") + unwrap(valkey.cluster.ValkeyCluster, "pipeline") + 
unwrap(valkey.cluster.ClusterPipeline, "execute") + unwrap(valkey.asyncio.client.Valkey, "execute_command") + unwrap(valkey.asyncio.client.Valkey, "pipeline") + unwrap(valkey.asyncio.client.Pipeline, "execute") + unwrap(valkey.asyncio.client.Pipeline, "immediate_execute_command") + unwrap(valkey.asyncio.cluster.ValkeyCluster, "execute_command") + unwrap(valkey.asyncio.cluster.ValkeyCluster, "pipeline") + unwrap(valkey.asyncio.cluster.ClusterPipeline, "execute") + + +def _run_valkey_command(ctx: core.ExecutionContext, func, args, kwargs): + parsed_command = stringify_cache_args(args) + valkey_command = parsed_command.split(" ")[0] + rowcount = None + result = None + try: + result = func(*args, **kwargs) + return result + except Exception: + rowcount = 0 + raise + finally: + if rowcount is None: + rowcount = determine_row_count(valkey_command=valkey_command, result=result) + if valkey_command not in ROW_RETURNING_COMMANDS: + rowcount = None + core.dispatch("valkey.command.post", [ctx, rowcount]) + + +# +# tracing functions +# +def instrumented_execute_command(integration_config): + def _instrumented_execute_command(func, instance, args, kwargs): + pin = Pin.get_from(instance) + if not pin or not pin.enabled(): + return func(*args, **kwargs) + + with _instrument_valkey_cmd(pin, integration_config, instance, args) as ctx: + return _run_valkey_command(ctx=ctx, func=func, args=args, kwargs=kwargs) + + return _instrumented_execute_command + + +def instrumented_pipeline(func, instance, args, kwargs): + pipeline = func(*args, **kwargs) + pin = Pin.get_from(instance) + if pin: + pin.onto(pipeline) + return pipeline + + +def instrumented_execute_pipeline(integration_config, is_cluster=False): + def _instrumented_execute_pipeline(func, instance, args, kwargs): + pin = Pin.get_from(instance) + if not pin or not pin.enabled(): + return func(*args, **kwargs) + + if is_cluster: + cmds = [ + stringify_cache_args(c.args, cmd_max_len=integration_config.cmd_max_length) + for c in instance.command_stack + ] + else: + cmds = [ + stringify_cache_args(c, cmd_max_len=integration_config.cmd_max_length) + for c, _ in instance.command_stack + ] + with _instrument_valkey_execute_pipeline(pin, integration_config, cmds, instance, is_cluster): + return func(*args, **kwargs) + + return _instrumented_execute_pipeline diff --git a/ddtrace/contrib/internal/valkey_utils.py b/ddtrace/contrib/internal/valkey_utils.py new file mode 100644 index 00000000000..8518dbe648a --- /dev/null +++ b/ddtrace/contrib/internal/valkey_utils.py @@ -0,0 +1,84 @@ +from typing import Dict +from typing import List +from typing import Optional +from typing import Union + +from ddtrace.ext import net +from ddtrace.ext import valkey as valkeyx +from ddtrace.internal import core +from ddtrace.internal.utils.formats import stringify_cache_args + + +SINGLE_KEY_COMMANDS = [ + "GET", + "GETDEL", + "GETEX", + "GETRANGE", + "GETSET", + "LINDEX", + "LRANGE", + "RPOP", + "LPOP", + "HGET", + "HGETALL", + "HKEYS", + "HMGET", + "HRANDFIELD", + "HVALS", +] +MULTI_KEY_COMMANDS = ["MGET"] +ROW_RETURNING_COMMANDS = SINGLE_KEY_COMMANDS + MULTI_KEY_COMMANDS + + +def _extract_conn_tags(conn_kwargs): + """Transform valkey conn info into dogtrace metas""" + try: + conn_tags = { + net.TARGET_HOST: conn_kwargs["host"], + net.TARGET_PORT: conn_kwargs["port"], + net.SERVER_ADDRESS: conn_kwargs["host"], + valkeyx.DB: conn_kwargs.get("db") or 0, + } + client_name = conn_kwargs.get("client_name") + if client_name: + conn_tags[valkeyx.CLIENT_NAME] = client_name + return conn_tags 
+ except Exception: + return {} + + +def determine_row_count(valkey_command: str, result: Optional[Union[List, Dict, str]]) -> int: + empty_results = [b"", [], {}, None] + # result can be an empty list / dict / string + if result not in empty_results: + if valkey_command == "MGET": + # only include valid key results within count + result = [x for x in result if x not in empty_results] + return len(result) + elif valkey_command == "HMGET": + # only include valid key results within count + result = [x for x in result if x not in empty_results] + return 1 if len(result) > 0 else 0 + else: + return 1 + else: + return 0 + + +async def _run_valkey_command_async(ctx: core.ExecutionContext, func, args, kwargs): + parsed_command = stringify_cache_args(args) + valkey_command = parsed_command.split(" ")[0] + rowcount = None + result = None + try: + result = await func(*args, **kwargs) + return result + except BaseException: + rowcount = 0 + raise + finally: + if rowcount is None: + rowcount = determine_row_count(valkey_command=valkey_command, result=result) + if valkey_command not in ROW_RETURNING_COMMANDS: + rowcount = None + core.dispatch("valkey.async_command.post", [ctx, rowcount]) diff --git a/ddtrace/contrib/internal/webbrowser/patch.py b/ddtrace/contrib/internal/webbrowser/patch.py index 1387df37ac9..1f90e9cf9aa 100644 --- a/ddtrace/contrib/internal/webbrowser/patch.py +++ b/ddtrace/contrib/internal/webbrowser/patch.py @@ -2,9 +2,6 @@ from wrapt import wrap_function_wrapper as _w -from ddtrace.appsec._common_module_patches import wrapped_request_D8CB81E472AF98A2 as _wrap_open -from ddtrace.appsec._iast._metrics import _set_metric_iast_instrumented_sink -from ddtrace.appsec._iast.constants import VULN_SSRF from ddtrace.contrib.internal.trace_utils import unwrap as _u from ddtrace.settings.asm import config as asm_config @@ -20,9 +17,15 @@ def patch(): return webbrowser.__datadog_patch = True - _w("webbrowser", "open", _wrap_open) + if asm_config._load_modules: + from ddtrace.appsec._common_module_patches import wrapped_request_D8CB81E472AF98A2 as _wrap_open + + _w("webbrowser", "open", _wrap_open) if asm_config._iast_enabled: + from ddtrace.appsec._iast._metrics import _set_metric_iast_instrumented_sink + from ddtrace.appsec._iast.constants import VULN_SSRF + _set_metric_iast_instrumented_sink(VULN_SSRF) diff --git a/ddtrace/contrib/internal/yaaredis/patch.py b/ddtrace/contrib/internal/yaaredis/patch.py deleted file mode 100644 index f9cba77b5bb..00000000000 --- a/ddtrace/contrib/internal/yaaredis/patch.py +++ /dev/null @@ -1,91 +0,0 @@ -import os - -import wrapt -import yaaredis - -from ddtrace import config -from ddtrace._trace.utils_redis import _instrument_redis_cmd -from ddtrace._trace.utils_redis import _instrument_redis_execute_pipeline -from ddtrace.contrib.internal.redis_utils import _run_redis_command_async -from ddtrace.internal.schema import schematize_service_name -from ddtrace.internal.utils.deprecations import DDTraceDeprecationWarning -from ddtrace.internal.utils.formats import CMD_MAX_LEN -from ddtrace.internal.utils.formats import asbool -from ddtrace.internal.utils.formats import stringify_cache_args -from ddtrace.internal.utils.wrappers import unwrap -from ddtrace.trace import Pin -from ddtrace.vendor.debtcollector import deprecate - - -config._add( - "yaaredis", - dict( - _default_service=schematize_service_name("redis"), - cmd_max_length=int(os.getenv("DD_YAAREDIS_CMD_MAX_LENGTH", CMD_MAX_LEN)), - resource_only_command=asbool(os.getenv("DD_REDIS_RESOURCE_ONLY_COMMAND", 
True)), - ), -) - - -def get_version(): - # type: () -> str - return getattr(yaaredis, "__version__", "") - - -def patch(): - """Patch the instrumented methods""" - deprecate( - prefix="The yaaredis module is deprecated.", - message="The yaaredis module is deprecated and will be deleted.", - category=DDTraceDeprecationWarning, - removal_version="3.0.0", - ) - - if getattr(yaaredis, "_datadog_patch", False): - return - yaaredis._datadog_patch = True - - _w = wrapt.wrap_function_wrapper - - _w("yaaredis.client", "StrictRedis.execute_command", traced_execute_command) - _w("yaaredis.client", "StrictRedis.pipeline", traced_pipeline) - _w("yaaredis.pipeline", "StrictPipeline.execute", traced_execute_pipeline) - _w("yaaredis.pipeline", "StrictPipeline.immediate_execute_command", traced_execute_command) - Pin().onto(yaaredis.StrictRedis) - - -def unpatch(): - if getattr(yaaredis, "_datadog_patch", False): - yaaredis._datadog_patch = False - - unwrap(yaaredis.client.StrictRedis, "execute_command") - unwrap(yaaredis.client.StrictRedis, "pipeline") - unwrap(yaaredis.pipeline.StrictPipeline, "execute") - unwrap(yaaredis.pipeline.StrictPipeline, "immediate_execute_command") - - -async def traced_execute_command(func, instance, args, kwargs): - pin = Pin.get_from(instance) - if not pin or not pin.enabled(): - return await func(*args, **kwargs) - - with _instrument_redis_cmd(pin, config.yaaredis, instance, args) as ctx: - return await _run_redis_command_async(ctx=ctx, func=func, args=args, kwargs=kwargs) - - -async def traced_pipeline(func, instance, args, kwargs): - pipeline = await func(*args, **kwargs) - pin = Pin.get_from(instance) - if pin: - pin.onto(pipeline) - return pipeline - - -async def traced_execute_pipeline(func, instance, args, kwargs): - pin = Pin.get_from(instance) - if not pin or not pin.enabled(): - return await func(*args, **kwargs) - - cmds = [stringify_cache_args(c, cmd_max_len=config.yaaredis.cmd_max_length) for c, _ in instance.command_stack] - with _instrument_redis_execute_pipeline(pin, config.yaaredis, cmds, instance): - return await func(*args, **kwargs) diff --git a/ddtrace/contrib/valkey/__init__.py b/ddtrace/contrib/valkey/__init__.py new file mode 100644 index 00000000000..c898aff012d --- /dev/null +++ b/ddtrace/contrib/valkey/__init__.py @@ -0,0 +1,68 @@ +""" +The valkey integration traces valkey requests. + + +Enabling +~~~~~~~~ + +The valkey integration is enabled automatically when using +:ref:`ddtrace-run` or :ref:`import ddtrace.auto`. + +Or use :func:`patch()` to manually enable the integration:: + + from ddtrace import patch + patch(valkey=True) + + +Global Configuration +~~~~~~~~~~~~~~~~~~~~ + +.. py:data:: ddtrace.config.valkey["service"] + + The service name reported by default for valkey traces. + + This option can also be set with the ``DD_VALKEY_SERVICE`` environment + variable. + + Default: ``"valkey"`` + + +.. py:data:: ddtrace.config.valkey["cmd_max_length"] + + Max allowable size for the valkey command span tag. + Anything beyond the max length will be replaced with ``"..."``. + + This option can also be set with the ``DD_VALKEY_CMD_MAX_LENGTH`` environment + variable. + + Default: ``1000`` + + +.. py:data:: ddtrace.config.valkey["resource_only_command"] + + The span resource will only include the command executed. To include all + arguments in the span resource, set this value to ``False``. + + This option can also be set with the ``DD_VALKEY_RESOURCE_ONLY_COMMAND`` environment + variable. 
+ + Default: ``True`` + + +Instance Configuration +~~~~~~~~~~~~~~~~~~~~~~ + +To configure particular valkey instances use the :class:`Pin ` API:: + + import valkey + from ddtrace.trace import Pin + + client = valkey.StrictValkey(host="localhost", port=6379) + + # Override service name for this instance + Pin.override(client, service="my-custom-queue") + + # Traces reported for this client will now have "my-custom-queue" + # as the service name. + client.get("my-key") +""" diff --git a/ddtrace/debugging/_expressions.py b/ddtrace/debugging/_expressions.py index 32b87017cdf..8de011e5f5e 100644 --- a/ddtrace/debugging/_expressions.py +++ b/ddtrace/debugging/_expressions.py @@ -46,6 +46,7 @@ from ddtrace.debugging._safety import safe_getitem from ddtrace.internal.compat import PYTHON_VERSION_INFO as PY from ddtrace.internal.logger import get_logger +from ddtrace.internal.safety import _isinstance DDASTType = Union[Dict[str, Any], Dict[str, List[Any]], Any] @@ -126,7 +127,7 @@ def _make_function(self, ast: DDASTType, args: Tuple[str, ...], name: str) -> Fu return FunctionType(abstract_code.to_code(), {}, name, (), None) def _make_lambda(self, ast: DDASTType) -> Callable[[Any, Any], Any]: - return self._make_function(ast, ("_dd_it", "_locals"), "") + return self._make_function(ast, ("_dd_it", "_dd_key", "_dd_value", "_locals"), "") def _compile_direct_predicate(self, ast: DDASTType) -> Optional[List[Instr]]: # direct_predicate => {"": } @@ -200,12 +201,12 @@ def _compile_arg_predicate(self, ast: DDASTType) -> Optional[List[Instr]]: if ca is None: raise ValueError("Invalid argument: %r" % a) - return self._call_function( - lambda i, c, _locals: f(c(_, _locals) for _ in i), - ca, - [Instr("LOAD_CONST", fb)], - [Instr("LOAD_FAST", "_locals")], - ) + def coll_iter(it, cond, _locals): + if _isinstance(it, dict): + return f(cond(k, k, v, _locals) for k, v in it.items()) + return f(cond(e, None, None, _locals) for e in it) + + return self._call_function(coll_iter, ca, [Instr("LOAD_CONST", fb)], [Instr("LOAD_FAST", "_locals")]) if _type in {"startsWith", "endsWith"}: a, b = args @@ -245,8 +246,8 @@ def _compile_direct_operation(self, ast: DDASTType) -> Optional[List[Instr]]: if not isinstance(arg, str): return None - if arg == "@it": - return [Instr("LOAD_FAST", "_dd_it")] + if arg in {"@it", "@key", "@value"}: + return [Instr("LOAD_FAST", f"_dd_{arg[1:]}")] return self._call_function( get_local, [Instr("LOAD_FAST", "_locals")], [Instr("LOAD_CONST", self.__ref__(arg))] @@ -297,12 +298,12 @@ def _compile_arg_operation(self, ast: DDASTType) -> Optional[List[Instr]]: if ca is None: raise ValueError("Invalid argument: %r" % a) - return self._call_function( - lambda i, c, _locals: type(i)(_ for _ in i if c(_, _locals)), - ca, - [Instr("LOAD_CONST", fb)], - [Instr("LOAD_FAST", "_locals")], - ) + def coll_filter(it, cond, _locals): + if _isinstance(it, dict): + return type(it)({k: v for k, v in it.items() if cond(k, k, v, _locals)}) + return type(it)(e for e in it if cond(e, None, None, _locals)) + + return self._call_function(coll_filter, ca, [Instr("LOAD_CONST", fb)], [Instr("LOAD_FAST", "_locals")]) if _type == "getmember": v, attr = args diff --git a/ddtrace/ext/__init__.py b/ddtrace/ext/__init__.py index 965dd04f43f..98cc5226100 100644 --- a/ddtrace/ext/__init__.py +++ b/ddtrace/ext/__init__.py @@ -16,6 +16,7 @@ class SpanTypes(object): AUTH = "auth" SYSTEM = "system" LLM = "llm" + VALKEY = "valkey" class SpanKind(object): @@ -35,5 +36,6 @@ class SpanKind(object): SpanTypes.REDIS, SpanTypes.SQL, 
SpanTypes.WORKER, + SpanTypes.VALKEY, } ) diff --git a/ddtrace/ext/valkey.py b/ddtrace/ext/valkey.py new file mode 100644 index 00000000000..3246af841f6 --- /dev/null +++ b/ddtrace/ext/valkey.py @@ -0,0 +1,14 @@ +# defaults +APP = "valkey" +DEFAULT_SERVICE = "valkey" + +# net extension +DB = "out.valkey_db" + +# standard tags +RAWCMD = "valkey.raw_command" +CMD = "valkey.command" +ARGS_LEN = "valkey.args_length" +PIPELINE_LEN = "valkey.pipeline_length" +PIPELINE_AGE = "valkey.pipeline_age" +CLIENT_NAME = "valkey.client_name" diff --git a/ddtrace/filters.py b/ddtrace/filters.py deleted file mode 100644 index bd6367d5635..00000000000 --- a/ddtrace/filters.py +++ /dev/null @@ -1,10 +0,0 @@ -from ddtrace._trace.filters import * # noqa: F403 -from ddtrace.internal.utils.deprecations import DDTraceDeprecationWarning -from ddtrace.vendor.debtcollector import deprecate - - -deprecate( - "The ddtrace.filters module and the ``FilterRequestsOnUrl`` class is deprecated and will be removed.", - message="Import ``TraceFilter`` from the ddtrace.trace package.", - category=DDTraceDeprecationWarning, -) diff --git a/ddtrace/internal/appsec/product.py b/ddtrace/internal/appsec/product.py index 126d6d2a04f..e0854ff2a2a 100644 --- a/ddtrace/internal/appsec/product.py +++ b/ddtrace/internal/appsec/product.py @@ -1,4 +1,3 @@ -from ddtrace import config from ddtrace.settings.asm import config as asm_config @@ -10,14 +9,14 @@ def post_preload(): def start(): - if asm_config._asm_enabled or config._remote_config_enabled: + if asm_config._asm_rc_enabled: from ddtrace.appsec._remoteconfiguration import enable_appsec_rc enable_appsec_rc() def restart(join=False): - if asm_config._asm_enabled or config._remote_config_enabled: + if asm_config._asm_rc_enabled: from ddtrace.appsec._remoteconfiguration import _forksafe_appsec_rc _forksafe_appsec_rc() diff --git a/ddtrace/internal/logger.py b/ddtrace/internal/logger.py index 0592afa0a74..6f7b853c3dc 100644 --- a/ddtrace/internal/logger.py +++ b/ddtrace/internal/logger.py @@ -1,180 +1,80 @@ import collections import logging import os -import typing -from typing import Optional # noqa:F401 -from typing import cast # noqa:F401 +from typing import DefaultDict +from typing import Tuple -if typing.TYPE_CHECKING: - from typing import Any # noqa:F401 - from typing import DefaultDict # noqa:F401 - from typing import Tuple # noqa:F401 - - -def get_logger(name): - # type: (str) -> DDLogger +def get_logger(name: str) -> logging.Logger: """ - Retrieve or create a ``DDLogger`` instance. - - This function mirrors the behavior of `logging.getLogger`. + Retrieve or create a ``Logger`` instance with consistent behavior for internal use. - If no logger with the provided name has been fetched before then - a new one is created. + Configure all loggers with a rate limiter filter to prevent excessive logging. - If a previous logger has been created then it is returned. 
- - DEV: We do not want to mess with `logging.setLoggerClass()` - That will totally mess with the user's loggers, we want - just our own, selective loggers to be DDLoggers - - :param name: The name of the logger to fetch or create - :type name: str - :return: The logger instance - :rtype: ``DDLogger`` """ - # DEV: `logging.Logger.manager` refers to the single root `logging.Manager` instance - # https://github.com/python/cpython/blob/48769a28ad6ef4183508951fa6a378531ace26a4/Lib/logging/__init__.py#L1824-L1826 # noqa:E501 - manager = logging.Logger.manager + logger = logging.getLogger(name) + # addFilter will only add the filter if it is not already present + logger.addFilter(log_filter) + return logger - # If the logger does not exist yet, create it - # DEV: `Manager.loggerDict` is a dict mapping logger name to logger - # DEV: This is a simplified version of `logging.Manager.getLogger` - # https://github.com/python/cpython/blob/48769a28ad6ef4183508951fa6a378531ace26a4/Lib/logging/__init__.py#L1221-L1253 # noqa:E501 - # DEV: _fixupParents could be adding a placeholder, we want to replace it if that's the case - if name in manager.loggerDict: - logger = manager.loggerDict[name] - if isinstance(manager.loggerDict[name], logging.PlaceHolder): - placeholder = logger - logger = DDLogger(name=name) - manager.loggerDict[name] = logger - # DEV: `_fixupChildren` and `_fixupParents` have been around for awhile, - # DEV: but add the `hasattr` guard... just in case. - if hasattr(manager, "_fixupChildren"): - manager._fixupChildren(placeholder, logger) - if hasattr(manager, "_fixupParents"): - manager._fixupParents(logger) - else: - logger = DDLogger(name=name) - manager.loggerDict[name] = logger - if hasattr(manager, "_fixupParents"): - manager._fixupParents(logger) - # Return our logger - return cast(DDLogger, logger) +# Named tuple used for keeping track of a log lines current time bucket and the number of log lines skipped +LoggingBucket = collections.namedtuple("LoggingBucket", ("bucket", "skipped")) +# Dict to keep track of the current time bucket per name/level/pathname/lineno +_buckets: DefaultDict[Tuple[str, int, str, int], LoggingBucket] = collections.defaultdict(lambda: LoggingBucket(0, 0)) +# Allow 1 log record per name/level/pathname/lineno every 60 seconds by default +# Allow configuring via `DD_TRACE_LOGGING_RATE` +# DEV: `DD_TRACE_LOGGING_RATE=0` means to disable all rate limiting +_rate_limit = int(os.getenv("DD_TRACE_LOGGING_RATE", default=60)) -def hasHandlers(self): - # type: (DDLogger) -> bool - """ - See if this logger has any handlers configured. - Loop through all handlers for this logger and its parents in the - logger hierarchy. Return True if a handler was found, else False. - Stop searching up the hierarchy whenever a logger with the "propagate" - attribute set to zero is found - that will be the last logger which - is checked for the existence of handlers. - https://github.com/python/cpython/blob/8f192d12af82c4dc40730bf59814f6a68f68f950/Lib/logging/__init__.py#L1629 +def log_filter(record: logging.LogRecord) -> bool: """ - c = self - rv = False - while c: - if c.handlers: - rv = True - break - if not c.propagate: - break - else: - c = c.parent # type: ignore - return rv + Function used to determine if a log record should be outputted or not (True = output, False = skip). - -class DDLogger(logging.Logger): - """ - Custom rate limited logger used by ``ddtrace`` - - This logger class is used to rate limit the output of - log messages from within the ``ddtrace`` package. 
+ This function will: + - Log all records with a level of ERROR or higher with telemetry + - Rate limit log records based on the logger name, record level, filename, and line number """ + if record.levelno >= logging.ERROR: + # avoid circular import + from ddtrace.internal import telemetry - # Named tuple used for keeping track of a log lines current time bucket and the number of log lines skipped - LoggingBucket = collections.namedtuple("LoggingBucket", ("bucket", "skipped")) - - def __init__(self, *args, **kwargs): - # type: (*Any, **Any) -> None - """Constructor for ``DDLogger``""" - super(DDLogger, self).__init__(*args, **kwargs) - - # Dict to keep track of the current time bucket per name/level/pathname/lineno - self.buckets = collections.defaultdict( - lambda: DDLogger.LoggingBucket(0, 0) - ) # type: DefaultDict[Tuple[str, int, str, int], DDLogger.LoggingBucket] + # currently we only have one error code + full_file_name = os.path.join(record.pathname, record.filename) + telemetry.telemetry_writer.add_error(1, record.msg % record.args, full_file_name, record.lineno) - # Allow 1 log record per name/level/pathname/lineno every 60 seconds by default - # Allow configuring via `DD_TRACE_LOGGING_RATE` - # DEV: `DD_TRACE_LOGGING_RATE=0` means to disable all rate limiting - rate_limit = os.getenv("DD_TRACE_LOGGING_RATE", default=None) - - if rate_limit is not None: - self.rate_limit = int(rate_limit) - else: - self.rate_limit = 60 - - def handle(self, record): - # type: (logging.LogRecord) -> None - """ - Function used to call the handlers for a log line. - - This implementation will first determine if this log line should - be logged or rate limited, and then call the base ``logging.Logger.handle`` - function if it should be logged - - DEV: This method has all of it's code inlined to reduce on functions calls - - :param record: The log record being logged - :type record: ``logging.LogRecord`` - """ - if record.levelno >= logging.ERROR: - # avoid circular import - from ddtrace.internal import telemetry - - # currently we only have one error code - full_file_name = os.path.join(record.pathname, record.filename) - telemetry.telemetry_writer.add_error(1, record.msg % record.args, full_file_name, record.lineno) - - # If rate limiting has been disabled (`DD_TRACE_LOGGING_RATE=0`) then apply no rate limit - # If the logging is in debug, then do not apply any limits to any log - if not self.rate_limit or self.getEffectiveLevel() == logging.DEBUG: - super(DDLogger, self).handle(record) - return + logger = logging.getLogger(record.name) + # If rate limiting has been disabled (`DD_TRACE_LOGGING_RATE=0`) then apply no rate limit + # If the logger is set to debug, then do not apply any limits to any log + if not _rate_limit or logger.getEffectiveLevel() == logging.DEBUG: + return True # Allow 1 log record by name/level/pathname/lineno every X seconds - # DEV: current unix time / rate (e.g. 300 seconds) = time bucket - # int(1546615098.8404942 / 300) = 515538 - # DEV: LogRecord `created` is a unix timestamp/float - # DEV: LogRecord has `levelname` and `levelno`, we want `levelno` e.g. `logging.DEBUG = 10` - current_bucket = int(record.created / self.rate_limit) - - # Limit based on logger name, record level, filename, and line number - # ('ddtrace.writer', 'DEBUG', '../site-packages/ddtrace/writer.py', 137) - # This way each unique log message can get logged at least once per time period - # DEV: LogRecord has `levelname` and `levelno`, we want `levelno` e.g. 
`logging.DEBUG = 10` - key = (record.name, record.levelno, record.pathname, record.lineno) - - # Only log this message if the time bucket has changed from the previous time we ran - logging_bucket = self.buckets[key] - if logging_bucket.bucket != current_bucket: - # Append count of skipped messages if we have skipped some since our last logging - if logging_bucket.skipped: - record.msg = "{}, %s additional messages skipped".format(record.msg) - record.args = record.args + (logging_bucket.skipped,) # type: ignore - + # DEV: current unix time / rate (e.g. 300 seconds) = time bucket + # int(1546615098.8404942 / 300) = 515538 + # DEV: LogRecord `created` is a unix timestamp/float + # DEV: LogRecord has `levelname` and `levelno`, we want `levelno` e.g. `logging.DEBUG = 10` + current_bucket = int(record.created / _rate_limit) + # Limit based on logger name, record level, filename, and line number + # ('ddtrace.writer', 'DEBUG', '../site-packages/ddtrace/writer.py', 137) + # This way each unique log message can get logged at least once per time period + # DEV: LogRecord has `levelname` and `levelno`, we want `levelno` e.g. `logging.DEBUG = 10` + key = (record.name, record.levelno, record.pathname, record.lineno) + # Only log this message if the time bucket has changed from the previous time we ran + logging_bucket = _buckets[key] + if logging_bucket.bucket != current_bucket: + # Append count of skipped messages if we have skipped some since our last logging + if logging_bucket.skipped: + record.msg = "{}, %s additional messages skipped".format(record.msg) + record.args = record.args + (logging_bucket.skipped,) # type: ignore # Reset our bucket - self.buckets[key] = DDLogger.LoggingBucket(current_bucket, 0) - - # Call the base handle to actually log this record - super(DDLogger, self).handle(record) - else: - # Increment the count of records we have skipped - # DEV: `self.buckets[key]` is a tuple which is immutable so recreate instead - self.buckets[key] = DDLogger.LoggingBucket(logging_bucket.bucket, logging_bucket.skipped + 1) + _buckets[key] = LoggingBucket(current_bucket, 0) + # Actually log this record + return True + # Increment the count of records we have skipped + # DEV: `buckets[key]` is a tuple which is immutable so recreate instead + _buckets[key] = LoggingBucket(logging_bucket.bucket, logging_bucket.skipped + 1) + # Skip this log message + return False diff --git a/ddtrace/internal/rate_limiter.py b/ddtrace/internal/rate_limiter.py index 0a97a6a7abc..9b514e5ff32 100644 --- a/ddtrace/internal/rate_limiter.py +++ b/ddtrace/internal/rate_limiter.py @@ -9,9 +9,6 @@ from typing import Callable # noqa:F401 from typing import Optional # noqa:F401 -from ddtrace.internal.utils.deprecations import DDTraceDeprecationWarning -from ddtrace.vendor.debtcollector import deprecate - class RateLimiter(object): """ @@ -57,26 +54,18 @@ def __init__(self, rate_limit: int, time_window: float = 1e9): self._lock = threading.Lock() - def is_allowed(self, timestamp_ns: Optional[int] = None) -> bool: + def is_allowed(self) -> bool: """ Check whether the current request is allowed or not This method will also reduce the number of available tokens by 1 - :param int timestamp_ns: timestamp in nanoseconds for the current request. :returns: Whether the current request is allowed or not :rtype: :obj:`bool` """ - if timestamp_ns is not None: - deprecate( - "The `timestamp_ns` parameter is deprecated and will be removed in a future version." 
- "Ratelimiter will use the current time.", - category=DDTraceDeprecationWarning, - ) - # rate limits are tested and mocked in pytest so we need to compute the timestamp here # (or move the unit tests to rust) - timestamp_ns = timestamp_ns or time.monotonic_ns() + timestamp_ns = time.monotonic_ns() allowed = self._is_allowed(timestamp_ns) # Update counts used to determine effective rate self._update_rate_counts(allowed, timestamp_ns) diff --git a/ddtrace/internal/remoteconfig/worker.py b/ddtrace/internal/remoteconfig/worker.py index 5429e599e74..08650bd8507 100644 --- a/ddtrace/internal/remoteconfig/worker.py +++ b/ddtrace/internal/remoteconfig/worker.py @@ -2,7 +2,6 @@ from typing import List # noqa:F401 from ddtrace.internal import agent -from ddtrace.internal import atexit from ddtrace.internal import forksafe from ddtrace.internal import periodic from ddtrace.internal.logger import get_logger @@ -132,9 +131,6 @@ def disable(self, join=False): if self.status == ServiceStatus.STOPPED: return - forksafe.unregister(self.reset_at_fork) - atexit.unregister(self.disable) - self.stop(join=join) def _stop_service(self, *args, **kwargs): diff --git a/ddtrace/internal/tracemethods.py b/ddtrace/internal/tracemethods.py index 5328797c09f..456cca597e1 100644 --- a/ddtrace/internal/tracemethods.py +++ b/ddtrace/internal/tracemethods.py @@ -4,8 +4,6 @@ import wrapt from ddtrace.internal.logger import get_logger -from ddtrace.internal.utils.deprecations import DDTraceDeprecationWarning -from ddtrace.vendor.debtcollector import deprecate log = get_logger(__name__) @@ -65,102 +63,10 @@ def _parse_trace_methods(raw_dd_trace_methods: str) -> List[Tuple[str, str]]: return dd_trace_methods -def _parse_legacy_trace_methods(raw_dd_trace_methods: str) -> List[str]: - """ - Return a list of method names to trace based on the specification of - DD_TRACE_METHODS. - - Note that support for wildcard methods with [*] is not implemented. - - This square bracket notation will be deprecated in favor of the new ':' notation - TODO: This method can be deleted once the legacy syntax is officially deprecated - """ - if not raw_dd_trace_methods: - return [] - dd_trace_methods = [] - for qualified_methods in raw_dd_trace_methods.split(";"): - # Validate that methods are specified - if "[" not in qualified_methods or "]" not in qualified_methods: - log.warning( - ( - "Invalid DD_TRACE_METHODS: %s. " - "Methods must be specified in square brackets following the fully qualified module or class name." - ), - qualified_methods, - ) - return [] - - # Store the prefix of the qualified method name (eg. for "foo.bar.baz[qux,quux]", this is "foo.bar.baz") - qualified_method_prefix = qualified_methods.split("[")[0] - - if qualified_method_prefix == "__main__": - # __main__ cannot be used since the __main__ that exists now is not the same as the __main__ that the user - # application will have. __main__ when sitecustomize module is run is the builtin __main__. - log.warning( - "Invalid DD_TRACE_METHODS: %s. Methods cannot be traced on the __main__ module.", qualified_methods - ) - return [] - - # Get the class or module name of the method (eg. 
for "foo.bar.baz[qux,quux]", this is "baz[qux,quux]") - class_or_module_with_methods = qualified_methods.split(".")[-1] - - # Strip off the leading 'moduleOrClass[' and trailing ']' - methods = class_or_module_with_methods.split("[")[1] - methods = methods[:-1] - - # Add the methods to the list of methods to trace - for method in methods.split(","): - if not str.isidentifier(method): - log.warning( - "Invalid method name: %r. %s", - method, - ( - "You might have a trailing comma." - if method == "" - else "Method names must be valid Python identifiers." - ), - ) - return [] - dd_trace_methods.append("%s.%s" % (qualified_method_prefix, method)) - return dd_trace_methods - - def _install_trace_methods(raw_dd_trace_methods: str) -> None: """Install tracing on the given methods.""" - if "[" in raw_dd_trace_methods: - deprecate( - "Using DD_TRACE_METHODS with the '[]' notation is deprecated", - message="Please use DD_TRACE_METHODS with the new ':' notation instead", - removal_version="3.0.0", - category=DDTraceDeprecationWarning, - ) - # Using legacy syntax - for qualified_method in _parse_legacy_trace_methods(raw_dd_trace_methods): - # We don't know if the method is a class method or a module method, so we need to assume it's a module - # and if the import fails then go a level up and try again. - base_module_guess = ".".join(qualified_method.split(".")[:-1]) - method_name = qualified_method.split(".")[-1] - module = None - - while base_module_guess: - try: - module = __import__(base_module_guess) - except ImportError: - # Add the class to the method name - method_name = "%s.%s" % (base_module_guess.split(".")[-1], method_name) - base_module_guess = ".".join(base_module_guess.split(".")[:-1]) - else: - break - - if module is None: - log.warning("Could not import module for %r", qualified_method) - continue - - trace_method(base_module_guess, method_name) - else: - # Using updated syntax, no need to try to import - for module_name, method_name in _parse_trace_methods(raw_dd_trace_methods): - trace_method(module_name, method_name) + for module_name, method_name in _parse_trace_methods(raw_dd_trace_methods): + trace_method(module_name, method_name) def trace_method(module, method_name): diff --git a/ddtrace/internal/wrapping/asyncs.py b/ddtrace/internal/wrapping/asyncs.py index 855578f9db2..d0ed131e962 100644 --- a/ddtrace/internal/wrapping/asyncs.py +++ b/ddtrace/internal/wrapping/asyncs.py @@ -537,96 +537,6 @@ """ ) -elif PY >= (3, 7): - COROUTINE_ASSEMBLY.parse( - r""" - get_awaitable - load_const None - yield_from - """ - ) - - ASYNC_GEN_ASSEMBLY.parse( - r""" - setup_except @stopiter - dup_top - store_fast $__ddgen - load_attr $asend - store_fast $__ddgensend - load_fast $__ddgen - load_attr $__anext__ - call_function 0 - - loop: - get_awaitable - load_const None - yield_from - - yield: - setup_except @genexit - yield_value - pop_block - load_fast $__ddgensend - rot_two - call_function 1 - jump_absolute @loop - - genexit: - dup_top - load_const GeneratorExit - compare_op asm.Compare.EXC_MATCH - pop_jump_if_false @exc - pop_top - pop_top - pop_top - pop_top - load_fast $__ddgen - load_attr $aclose - call_function 0 - get_awaitable - load_const None - yield_from - pop_except - return_value - - exc: - pop_top - pop_top - pop_top - pop_top - load_fast $__ddgen - load_attr $athrow - load_const sys.exc_info - call_function 0 - call_function_ex 0 - get_awaitable - load_const None - yield_from - store_fast $__value - pop_except - load_fast $__value - jump_absolute @yield - - stopiter: - dup_top - 
load_const StopAsyncIteration - compare_op asm.Compare.EXC_MATCH - pop_jump_if_false @propagate - pop_top - pop_top - pop_top - pop_except - load_const None - return_value - - propagate: - end_finally - load_const None - return_value - """ - ) - - else: msg = "No async wrapping support for Python %d.%d" % PY[:2] raise RuntimeError(msg) diff --git a/ddtrace/internal/wrapping/context.py b/ddtrace/internal/wrapping/context.py index cf36a93011b..393bd097da5 100644 --- a/ddtrace/internal/wrapping/context.py +++ b/ddtrace/internal/wrapping/context.py @@ -274,7 +274,7 @@ ) -elif sys.version_info >= (3, 7): +elif sys.version_info >= (3, 8): CONTEXT_HEAD.parse( r""" load_const {context} diff --git a/ddtrace/internal/wrapping/generators.py b/ddtrace/internal/wrapping/generators.py index f2a98b42a18..9ec5a654556 100644 --- a/ddtrace/internal/wrapping/generators.py +++ b/ddtrace/internal/wrapping/generators.py @@ -383,77 +383,6 @@ """ ) - -elif PY >= (3, 7): - GENERATOR_ASSEMBLY.parse( - r""" - setup_except @stopiter - dup_top - store_fast $__ddgen - load_attr $send - store_fast $__ddgensend - load_const next - load_fast $__ddgen - - loop: - call_function 1 - - yield: - setup_except @genexit - yield_value - pop_block - load_fast $__ddgensend - rot_two - jump_absolute @loop - - genexit: - dup_top - load_const GeneratorExit - compare_op asm.Compare.EXC_MATCH - pop_jump_if_false @exc - pop_top - pop_top - pop_top - pop_top - load_fast $__ddgen - load_attr $close - call_function 0 - return_value - - exc: - pop_top - pop_top - pop_top - pop_top - load_fast $__ddgen - load_attr $throw - load_const sys.exc_info - call_function 0 - call_function_ex 0 - store_fast $__value - pop_except - load_fast $__value - jump_absolute @yield - - stopiter: - dup_top - load_const StopIteration - compare_op asm.Compare.EXC_MATCH - pop_jump_if_false @propagate - pop_top - pop_top - pop_top - pop_except - load_const None - return_value - - propagate: - end_finally - load_const None - return_value - """ - ) - else: msg = "No generator wrapping support for Python %d.%d" % PY[:2] raise RuntimeError(msg) diff --git a/ddtrace/internal/writer/writer.py b/ddtrace/internal/writer/writer.py index da3f09a99b1..301d0400c7c 100644 --- a/ddtrace/internal/writer/writer.py +++ b/ddtrace/internal/writer/writer.py @@ -578,9 +578,7 @@ def start(self): try: # appsec remote config should be enabled/started after the global tracer and configs # are initialized - if os.getenv("AWS_LAMBDA_FUNCTION_NAME") is None and ( - asm_config._asm_enabled or config._remote_config_enabled - ): + if asm_config._asm_rc_enabled: from ddtrace.appsec._remoteconfiguration import enable_appsec_rc enable_appsec_rc() diff --git a/ddtrace/llmobs/_integrations/anthropic.py b/ddtrace/llmobs/_integrations/anthropic.py index a3224a083cd..bb4f96e7814 100644 --- a/ddtrace/llmobs/_integrations/anthropic.py +++ b/ddtrace/llmobs/_integrations/anthropic.py @@ -7,16 +7,14 @@ from ddtrace.internal.logger import get_logger from ddtrace.llmobs._constants import INPUT_MESSAGES -from ddtrace.llmobs._constants import INPUT_TOKENS_METRIC_KEY from ddtrace.llmobs._constants import METADATA from ddtrace.llmobs._constants import METRICS from ddtrace.llmobs._constants import MODEL_NAME from ddtrace.llmobs._constants import MODEL_PROVIDER from ddtrace.llmobs._constants import OUTPUT_MESSAGES -from ddtrace.llmobs._constants import OUTPUT_TOKENS_METRIC_KEY from ddtrace.llmobs._constants import SPAN_KIND -from ddtrace.llmobs._constants import TOTAL_TOKENS_METRIC_KEY from 
ddtrace.llmobs._integrations.base import BaseLLMIntegration +from ddtrace.llmobs._integrations.utils import get_llmobs_metrics_tags from ddtrace.llmobs._utils import _get_attr from ddtrace.trace import Span @@ -77,7 +75,7 @@ def _llmobs_set_tags( INPUT_MESSAGES: input_messages, METADATA: parameters, OUTPUT_MESSAGES: output_messages, - METRICS: self._get_llmobs_metrics_tags(span), + METRICS: get_llmobs_metrics_tags("anthropic", span), } ) @@ -188,18 +186,3 @@ def record_usage(self, span: Span, usage: Dict[str, Any]) -> None: span.set_metric("anthropic.response.usage.output_tokens", output_tokens) if input_tokens is not None and output_tokens is not None: span.set_metric("anthropic.response.usage.total_tokens", input_tokens + output_tokens) - - @staticmethod - def _get_llmobs_metrics_tags(span): - usage = {} - input_tokens = span.get_metric("anthropic.response.usage.input_tokens") - output_tokens = span.get_metric("anthropic.response.usage.output_tokens") - total_tokens = span.get_metric("anthropic.response.usage.total_tokens") - - if input_tokens is not None: - usage[INPUT_TOKENS_METRIC_KEY] = input_tokens - if output_tokens is not None: - usage[OUTPUT_TOKENS_METRIC_KEY] = output_tokens - if total_tokens is not None: - usage[TOTAL_TOKENS_METRIC_KEY] = total_tokens - return usage diff --git a/ddtrace/llmobs/_integrations/bedrock.py b/ddtrace/llmobs/_integrations/bedrock.py index ac6092cbe1a..cbc1456fc24 100644 --- a/ddtrace/llmobs/_integrations/bedrock.py +++ b/ddtrace/llmobs/_integrations/bedrock.py @@ -5,18 +5,16 @@ from ddtrace.internal.logger import get_logger from ddtrace.llmobs._constants import INPUT_MESSAGES -from ddtrace.llmobs._constants import INPUT_TOKENS_METRIC_KEY from ddtrace.llmobs._constants import METADATA from ddtrace.llmobs._constants import METRICS from ddtrace.llmobs._constants import MODEL_NAME from ddtrace.llmobs._constants import MODEL_PROVIDER from ddtrace.llmobs._constants import OUTPUT_MESSAGES -from ddtrace.llmobs._constants import OUTPUT_TOKENS_METRIC_KEY from ddtrace.llmobs._constants import PARENT_ID_KEY from ddtrace.llmobs._constants import PROPAGATED_PARENT_ID_KEY from ddtrace.llmobs._constants import SPAN_KIND -from ddtrace.llmobs._constants import TOTAL_TOKENS_METRIC_KEY from ddtrace.llmobs._integrations import BaseLLMIntegration +from ddtrace.llmobs._integrations.utils import get_llmobs_metrics_tags from ddtrace.llmobs._utils import _get_llmobs_parent_id from ddtrace.trace import Span @@ -57,22 +55,11 @@ def _llmobs_set_tags( MODEL_PROVIDER: span.get_tag("bedrock.request.model_provider") or "", INPUT_MESSAGES: input_messages, METADATA: parameters, - METRICS: self._llmobs_metrics(span, response), + METRICS: get_llmobs_metrics_tags("bedrock", span), OUTPUT_MESSAGES: output_messages, } ) - @staticmethod - def _llmobs_metrics(span: Span, response: Optional[Dict[str, Any]]) -> Dict[str, Any]: - metrics = {} - if response and response.get("text"): - prompt_tokens = int(span.get_tag("bedrock.usage.prompt_tokens") or 0) - completion_tokens = int(span.get_tag("bedrock.usage.completion_tokens") or 0) - metrics[INPUT_TOKENS_METRIC_KEY] = prompt_tokens - metrics[OUTPUT_TOKENS_METRIC_KEY] = completion_tokens - metrics[TOTAL_TOKENS_METRIC_KEY] = prompt_tokens + completion_tokens - return metrics - @staticmethod def _extract_input_message(prompt): """Extract input messages from the stored prompt. 
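
The anthropic.py and bedrock.py hunks above, like the gemini.py and vertexai.py hunks below, route token accounting through the shared `get_llmobs_metrics_tags` helper whose new body appears in the `ddtrace/llmobs/_integrations/utils.py` hunk further down. For illustration, a minimal sketch of the contract that helper is expected to satisfy: `_StubSpan` is a hypothetical stand-in for the real `Span` (only the `get_tag`/`get_metric` accessors are exercised), and the `*_TOKENS_METRIC_KEY` constants are assumed to resolve to the literal strings `"input_tokens"`, `"output_tokens"`, and `"total_tokens"`::

    from ddtrace.llmobs._integrations.utils import get_llmobs_metrics_tags


    class _StubSpan:
        """Hypothetical stand-in exposing just the span accessors the helper reads."""

        def __init__(self, tags=None, metrics=None):
            self._tags = tags or {}
            self._metrics = metrics or {}

        def get_tag(self, key):
            return self._tags.get(key)

        def get_metric(self, key):
            return self._metrics.get(key)


    # Anthropic records usage as span metrics under input/output naming, which
    # exercises the input_tokens/output_tokens fallback branch of the helper.
    anthropic_span = _StubSpan(
        metrics={
            "anthropic.response.usage.input_tokens": 10,
            "anthropic.response.usage.output_tokens": 25,
            "anthropic.response.usage.total_tokens": 35,
        }
    )

    # Bedrock records usage as span *tags* (see the utils.py hunk below), so the
    # helper reads them with get_tag and derives total_tokens as input + output.
    bedrock_span = _StubSpan(
        tags={
            "bedrock.usage.prompt_tokens": "10",
            "bedrock.usage.completion_tokens": "25",
        }
    )

    expected = {"input_tokens": 10, "output_tokens": 25, "total_tokens": 35}
    assert get_llmobs_metrics_tags("anthropic", anthropic_span) == expected
    assert get_llmobs_metrics_tags("bedrock", bedrock_span) == expected

Centralizing this logic keeps the prompt/completion vs. input/output naming fallback and bedrock's tag-based usage in one place instead of four integration-specific copies.
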
diff --git a/ddtrace/llmobs/_integrations/gemini.py b/ddtrace/llmobs/_integrations/gemini.py index ecec71e0645..0407ec7188b 100644 --- a/ddtrace/llmobs/_integrations/gemini.py +++ b/ddtrace/llmobs/_integrations/gemini.py @@ -14,7 +14,7 @@ from ddtrace.llmobs._constants import SPAN_KIND from ddtrace.llmobs._integrations.base import BaseLLMIntegration from ddtrace.llmobs._integrations.utils import extract_message_from_part_google -from ddtrace.llmobs._integrations.utils import get_llmobs_metrics_tags_google +from ddtrace.llmobs._integrations.utils import get_llmobs_metrics_tags from ddtrace.llmobs._integrations.utils import get_system_instructions_from_google_model from ddtrace.llmobs._integrations.utils import llmobs_get_metadata_google from ddtrace.llmobs._utils import _get_attr @@ -59,7 +59,7 @@ def _llmobs_set_tags( METADATA: metadata, INPUT_MESSAGES: input_messages, OUTPUT_MESSAGES: output_messages, - METRICS: get_llmobs_metrics_tags_google("google_generativeai", span), + METRICS: get_llmobs_metrics_tags("google_generativeai", span), } ) diff --git a/ddtrace/llmobs/_integrations/langchain.py b/ddtrace/llmobs/_integrations/langchain.py index c6a77fad3bc..d380c6ab7a8 100644 --- a/ddtrace/llmobs/_integrations/langchain.py +++ b/ddtrace/llmobs/_integrations/langchain.py @@ -6,8 +6,6 @@ from typing import Optional from typing import Union -from ddtrace import config -from ddtrace.constants import ERROR_TYPE from ddtrace.internal.logger import get_logger from ddtrace.internal.utils import ArgumentError from ddtrace.internal.utils import get_argument_value @@ -454,54 +452,6 @@ def _set_base_span_tags( # type: ignore[override] else: span.set_tag_str(API_KEY, api_key) - @classmethod - def _logs_tags(cls, span: Span) -> str: - api_key = span.get_tag(API_KEY) or "" - tags = "env:%s,version:%s,%s:%s,%s:%s,%s:%s,%s:%s" % ( # noqa: E501 - (config.env or ""), - (config.version or ""), - PROVIDER, - (span.get_tag(PROVIDER) or ""), - MODEL, - (span.get_tag(MODEL) or ""), - TYPE, - (span.get_tag(TYPE) or ""), - API_KEY, - api_key, - ) - return tags - - @classmethod - def _metrics_tags(cls, span: Span) -> List[str]: - provider = span.get_tag(PROVIDER) or "" - api_key = span.get_tag(API_KEY) or "" - tags = [ - "version:%s" % (config.version or ""), - "env:%s" % (config.env or ""), - "service:%s" % (span.service or ""), - "%s:%s" % (PROVIDER, provider), - "%s:%s" % (MODEL, span.get_tag(MODEL) or ""), - "%s:%s" % (TYPE, span.get_tag(TYPE) or ""), - "%s:%s" % (API_KEY, api_key), - "error:%d" % span.error, - ] - err_type = span.get_tag(ERROR_TYPE) - if err_type: - tags.append("%s:%s" % (ERROR_TYPE, err_type)) - return tags - - def record_usage(self, span: Span, usage: Dict[str, Any]) -> None: - if not usage or self.metrics_enabled is False: - return - for token_type in ("prompt", "completion", "total"): - num_tokens = usage.get("token_usage", {}).get(token_type + "_tokens") - if not num_tokens: - continue - self.metric(span, "dist", "tokens.%s" % token_type, num_tokens) - total_cost = span.get_metric(TOTAL_COST) - if total_cost: - self.metric(span, "incr", "tokens.total_cost", total_cost) - def check_token_usage_chat_or_llm_result(self, result): """Checks for token usage on the top-level ChatResult or LLMResult object""" llm_output = getattr(result, "llm_output", {}) diff --git a/ddtrace/llmobs/_integrations/openai.py b/ddtrace/llmobs/_integrations/openai.py index 7ed3aace08a..eb01a679191 100644 --- a/ddtrace/llmobs/_integrations/openai.py +++ b/ddtrace/llmobs/_integrations/openai.py @@ -5,7 +5,6 @@ from 
typing import Optional from typing import Tuple -from ddtrace import config from ddtrace.internal.constants import COMPONENT from ddtrace.internal.utils.version import parse_version from ddtrace.llmobs._constants import INPUT_DOCUMENTS @@ -21,6 +20,7 @@ from ddtrace.llmobs._constants import SPAN_KIND from ddtrace.llmobs._constants import TOTAL_TOKENS_METRIC_KEY from ddtrace.llmobs._integrations.base import BaseLLMIntegration +from ddtrace.llmobs._integrations.utils import get_llmobs_metrics_tags from ddtrace.llmobs._utils import _get_attr from ddtrace.llmobs.utils import Document from ddtrace.trace import Pin @@ -88,54 +88,14 @@ def _is_azure_openai(span): return False return "azure" in base_url.lower() - @classmethod - def _logs_tags(cls, span: Span) -> str: - tags = ( - "env:%s,version:%s,openai.request.endpoint:%s,openai.request.method:%s,openai.request.model:%s,openai.organization.name:%s," - "openai.user.api_key:%s" - % ( # noqa: E501 - (config.env or ""), - (config.version or ""), - (span.get_tag("openai.request.endpoint") or ""), - (span.get_tag("openai.request.method") or ""), - (span.get_tag("openai.request.model") or ""), - (span.get_tag("openai.organization.name") or ""), - (span.get_tag("openai.user.api_key") or ""), - ) - ) - return tags - - @classmethod - def _metrics_tags(cls, span: Span) -> List[str]: - model_name = span.get_tag("openai.request.model") or "" - tags = [ - "version:%s" % (config.version or ""), - "env:%s" % (config.env or ""), - "service:%s" % (span.service or ""), - "openai.request.model:%s" % model_name, - "model:%s" % model_name, - "openai.request.endpoint:%s" % (span.get_tag("openai.request.endpoint") or ""), - "openai.request.method:%s" % (span.get_tag("openai.request.method") or ""), - "openai.organization.id:%s" % (span.get_tag("openai.organization.id") or ""), - "openai.organization.name:%s" % (span.get_tag("openai.organization.name") or ""), - "openai.user.api_key:%s" % (span.get_tag("openai.user.api_key") or ""), - "error:%d" % span.error, - ] - err_type = span.get_tag("error.type") - if err_type: - tags.append("error_type:%s" % err_type) - return tags - def record_usage(self, span: Span, usage: Dict[str, Any]) -> None: - if not usage or not self.metrics_enabled: + if not usage: return - tags = ["openai.estimated:false"] for token_type in ("prompt", "completion", "total"): num_tokens = getattr(usage, token_type + "_tokens", None) if not num_tokens: continue span.set_metric("openai.response.usage.%s_tokens" % token_type, num_tokens) - self.metric(span, "dist", "tokens.%s" % token_type, num_tokens, tags=tags) def _llmobs_set_tags( self, @@ -275,12 +235,4 @@ def _extract_llmobs_metrics_tags(span: Span, resp: Any) -> Dict[str, Any]: OUTPUT_TOKENS_METRIC_KEY: completion_tokens, TOTAL_TOKENS_METRIC_KEY: prompt_tokens + completion_tokens, } - prompt_tokens = span.get_metric("openai.response.usage.prompt_tokens") - completion_tokens = span.get_metric("openai.response.usage.completion_tokens") - if prompt_tokens is None or completion_tokens is None: - return {} - return { - INPUT_TOKENS_METRIC_KEY: prompt_tokens, - OUTPUT_TOKENS_METRIC_KEY: completion_tokens, - TOTAL_TOKENS_METRIC_KEY: prompt_tokens + completion_tokens, - } + return get_llmobs_metrics_tags("openai", span) diff --git a/ddtrace/llmobs/_integrations/utils.py b/ddtrace/llmobs/_integrations/utils.py index f180e0c1820..331b4e3062d 100644 --- a/ddtrace/llmobs/_integrations/utils.py +++ b/ddtrace/llmobs/_integrations/utils.py @@ -118,10 +118,29 @@ def extract_message_from_part_google(part, 
role=None): return message -def get_llmobs_metrics_tags_google(integration_name, span): +def get_llmobs_metrics_tags(integration_name, span): usage = {} - input_tokens = span.get_metric("%s.response.usage.prompt_tokens" % integration_name) - output_tokens = span.get_metric("%s.response.usage.completion_tokens" % integration_name) + + # bedrock integration tags usage under meta instead of metrics + if integration_name == "bedrock": + input_tokens = int(span.get_tag("bedrock.usage.prompt_tokens") or 0) + output_tokens = int(span.get_tag("bedrock.usage.completion_tokens") or 0) + total_tokens = input_tokens + output_tokens + if input_tokens: + usage[INPUT_TOKENS_METRIC_KEY] = input_tokens + if output_tokens: + usage[OUTPUT_TOKENS_METRIC_KEY] = output_tokens + if total_tokens: + usage[TOTAL_TOKENS_METRIC_KEY] = total_tokens + return usage + + # check for both prompt / completion or input / output tokens + input_tokens = span.get_metric("%s.response.usage.prompt_tokens" % integration_name) or span.get_metric( + "%s.response.usage.input_tokens" % integration_name + ) + output_tokens = span.get_metric("%s.response.usage.completion_tokens" % integration_name) or span.get_metric( + "%s.response.usage.output_tokens" % integration_name + ) total_tokens = span.get_metric("%s.response.usage.total_tokens" % integration_name) if input_tokens is not None: diff --git a/ddtrace/llmobs/_integrations/vertexai.py b/ddtrace/llmobs/_integrations/vertexai.py index 88d38f1975e..db40ac15b19 100644 --- a/ddtrace/llmobs/_integrations/vertexai.py +++ b/ddtrace/llmobs/_integrations/vertexai.py @@ -15,7 +15,7 @@ from ddtrace.llmobs._constants import SPAN_KIND from ddtrace.llmobs._integrations.base import BaseLLMIntegration from ddtrace.llmobs._integrations.utils import extract_message_from_part_google -from ddtrace.llmobs._integrations.utils import get_llmobs_metrics_tags_google +from ddtrace.llmobs._integrations.utils import get_llmobs_metrics_tags from ddtrace.llmobs._integrations.utils import get_system_instructions_from_google_model from ddtrace.llmobs._integrations.utils import llmobs_get_metadata_google from ddtrace.llmobs._utils import _get_attr @@ -65,7 +65,7 @@ def _llmobs_set_tags( METADATA: metadata, INPUT_MESSAGES: input_messages, OUTPUT_MESSAGES: output_messages, - METRICS: get_llmobs_metrics_tags_google("vertexai", span), + METRICS: get_llmobs_metrics_tags("vertexai", span), } ) diff --git a/ddtrace/opentracer/tracer.py b/ddtrace/opentracer/tracer.py index ca10cb8125a..65d1b95b314 100644 --- a/ddtrace/opentracer/tracer.py +++ b/ddtrace/opentracer/tracer.py @@ -18,7 +18,6 @@ from ddtrace.trace import Context as DatadogContext # noqa:F401 from ddtrace.trace import Span as DatadogSpan from ddtrace.trace import Tracer as DatadogTracer -from ddtrace.vendor.debtcollector import deprecate from ..internal.logger import get_logger from .propagation import HTTPPropagator @@ -55,7 +54,7 @@ def __init__( service_name: Optional[str] = None, config: Optional[Dict[str, Any]] = None, scope_manager: Optional[ScopeManager] = None, - dd_tracer: Optional[DatadogTracer] = None, + _dd_tracer: Optional[DatadogTracer] = None, ) -> None: """Initialize a new Datadog opentracer. @@ -70,9 +69,6 @@ def __init__( here: https://github.com/opentracing/opentracing-python#scope-managers. If ``None`` is provided, defaults to :class:`opentracing.scope_managers.ThreadLocalScopeManager`. - :param dd_tracer: (optional) the Datadog tracer for this tracer to use. This - parameter is deprecated and will be removed in v3.0.0. 
The - to the global tracer (``ddtrace.tracer``) should always be used. """ # Merge the given config with the default into a new dict self._config = DEFAULT_CONFIG.copy() @@ -100,14 +96,7 @@ def __init__( self._scope_manager = scope_manager or ThreadLocalScopeManager() dd_context_provider = get_context_provider_for_scope_manager(self._scope_manager) - if dd_tracer is not None: - deprecate( - "The ``dd_tracer`` parameter is deprecated", - message="The global tracer (``ddtrace.tracer``) will be used instead.", - removal_version="3.0.0", - ) - - self._dd_tracer = dd_tracer or ddtrace.tracer + self._dd_tracer = _dd_tracer or ddtrace.tracer self._dd_tracer.set_tags(self._config.get(keys.GLOBAL_TAGS)) # type: ignore[arg-type] trace_processors = None if keys.SETTINGS in self._config: @@ -121,7 +110,7 @@ def __init__( trace_processors=trace_processors, priority_sampling=self._config.get(keys.PRIORITY_SAMPLING), uds_path=self._config.get(keys.UDS_PATH), - context_provider=dd_context_provider, # type: ignore[arg-type] + context_provider=dd_context_provider, ) self._propagators = { Format.HTTP_HEADERS: HTTPPropagator, diff --git a/ddtrace/pin.py b/ddtrace/pin.py deleted file mode 100644 index 0e683b3b22e..00000000000 --- a/ddtrace/pin.py +++ /dev/null @@ -1,10 +0,0 @@ -from ddtrace._trace.pin import * # noqa: F403 -from ddtrace.internal.utils.deprecations import DDTraceDeprecationWarning -from ddtrace.vendor.debtcollector import deprecate - - -deprecate( - "The ddtrace.trace.Pin module is deprecated and will be removed.", - message="Import ``Pin`` from the ddtrace.trace package.", - category=DDTraceDeprecationWarning, -) diff --git a/ddtrace/provider.py b/ddtrace/provider.py deleted file mode 100644 index 7b9867de01a..00000000000 --- a/ddtrace/provider.py +++ /dev/null @@ -1,14 +0,0 @@ -from ddtrace._trace.provider import BaseContextProvider # noqa: F401 -from ddtrace._trace.provider import DatadogContextMixin # noqa: F401 -from ddtrace._trace.provider import DefaultContextProvider # noqa: F401 -from ddtrace.internal.ci_visibility.context import CIContextProvider # noqa: F401 -from ddtrace.internal.utils.deprecations import DDTraceDeprecationWarning -from ddtrace.vendor.debtcollector import deprecate - - -deprecate( - "The context provider interface is deprecated", - message="Import BaseContextProvider from `ddtrace.trace` instead.", - category=DDTraceDeprecationWarning, - removal_version="3.0.0", -) diff --git a/ddtrace/sampler.py b/ddtrace/sampler.py deleted file mode 100644 index c7f4b9d499a..00000000000 --- a/ddtrace/sampler.py +++ /dev/null @@ -1,10 +0,0 @@ -from ddtrace._trace.sampler import * # noqa: F403 -from ddtrace.internal.utils.deprecations import DDTraceDeprecationWarning -from ddtrace.vendor.debtcollector import deprecate - - -deprecate( - "The ddtrace.sampler module is deprecated and will be removed.", - message="Use DD_TRACE_SAMPLING_RULES to configure sampling rates.", - category=DDTraceDeprecationWarning, -) diff --git a/ddtrace/sampling_rule.py b/ddtrace/sampling_rule.py deleted file mode 100644 index 244cebddd31..00000000000 --- a/ddtrace/sampling_rule.py +++ /dev/null @@ -1,10 +0,0 @@ -from ddtrace._trace.sampling_rule import * # noqa: F403 -from ddtrace.internal.utils.deprecations import DDTraceDeprecationWarning -from ddtrace.vendor.debtcollector import deprecate - - -deprecate( - "The ddtrace.sample_rule module is deprecated and will be removed.", - message="Use DD_TRACE_SAMPLING_RULES to set sampling rules.", - category=DDTraceDeprecationWarning, -) diff --git 
a/ddtrace/settings/_config.py b/ddtrace/settings/_config.py index 35d2849884d..0072986286e 100644 --- a/ddtrace/settings/_config.py +++ b/ddtrace/settings/_config.py @@ -16,8 +16,6 @@ from ddtrace.internal.serverless import in_gcp_function from ddtrace.internal.telemetry import telemetry_writer from ddtrace.internal.utils.cache import cachedmethod -from ddtrace.internal.utils.deprecations import DDTraceDeprecationWarning -from ddtrace.vendor.debtcollector import deprecate from .._trace.pin import Pin from ..internal import gitmetadata @@ -264,9 +262,11 @@ def _parse_global_tags(s): def _default_config() -> Dict[str, _ConfigItem]: return { + # Remove the _trace_sample_rate property; _trace_sampling_rules should be the source of truth "_trace_sample_rate": _ConfigItem( default=1.0, - envs=[("DD_TRACE_SAMPLE_RATE", float)], + # trace_sample_rate is a placeholder; this code will be removed after v3.0 + envs=[("trace_sample_rate", float)], ), "_trace_sampling_rules": _ConfigItem( default=lambda: "", @@ -352,14 +352,6 @@ def __init__(self): self._from_endpoint = ENDPOINT_FETCHED_CONFIG self._config = _default_config() - sample_rate = os.getenv("DD_TRACE_SAMPLE_RATE") - if sample_rate is not None: - deprecate( - "DD_TRACE_SAMPLE_RATE is deprecated", - message="Please use DD_TRACE_SAMPLING_RULES instead.", - removal_version="3.0.0", - ) - # Use a dict as underlying storing mechanism for integration configs self._integration_configs = {} @@ -368,9 +360,6 @@ def __init__(self): rate_limit = os.getenv("DD_TRACE_RATE_LIMIT") if rate_limit is not None and self._trace_sampling_rules in ("", "[]"): - # This warning will be logged when DD_TRACE_SAMPLE_RATE is set. This is intentional. - # Even though DD_TRACE_SAMPLE_RATE is treated as a global trace sampling rule, this configuration - # is deprecated. We should always encourage users to set DD_TRACE_SAMPLING_RULES instead. log.warning( "DD_TRACE_RATE_LIMIT is set to %s and DD_TRACE_SAMPLING_RULES is not set. " "Tracer rate limiting is only applied to spans that match tracer sampling rules. " @@ -388,13 +377,9 @@ def __init__(self): ) self._trace_api = _get_config("DD_TRACE_API_VERSION") if self._trace_api == "v0.3": - deprecate( - "DD_TRACE_API_VERSION=v0.3 is deprecated", - message="Traces will be submitted to the v0.4/traces agent endpoint instead.", - removal_version="3.0.0", - category=DDTraceDeprecationWarning, + log.error( + "Setting DD_TRACE_API_VERSION to ``v0.3`` is not supported. The default ``v0.5`` format will be used.", ) - self._trace_api = "v0.4" self._trace_writer_buffer_size = _get_config("DD_TRACE_WRITER_BUFFER_SIZE_BYTES", DEFAULT_BUFFER_SIZE, int) self._trace_writer_payload_size = _get_config( "DD_TRACE_WRITER_MAX_PAYLOAD_SIZE_BYTES", DEFAULT_MAX_PAYLOAD_SIZE, int @@ -418,18 +403,8 @@ def __init__(self): self._span_traceback_max_size = _get_config("DD_TRACE_SPAN_TRACEBACK_MAX_SIZE", 30, int) - # Master switch for turning on and off trace search by default - # this weird invocation of getenv is meant to read the DD_ANALYTICS_ENABLED - # legacy environment variable. It should be removed in the future - self._analytics_enabled = _get_config(["DD_TRACE_ANALYTICS_ENABLED", "DD_ANALYTICS_ENABLED"], False, asbool) - if self._analytics_enabled: - deprecate( - "Datadog App Analytics is deprecated and will be removed in a future version. " - "App Analytics can be enabled via DD_TRACE_ANALYTICS_ENABLED and DD_ANALYTICS_ENABLED " - "environment variables and ddtrace.config.analytics_enabled configuration. 
" - "These configurations will also be removed.", - category=DDTraceDeprecationWarning, - ) + # DD_ANALYTICS_ENABLED is not longer supported, remove this functionatiy from all integrations in the future + self._analytics_enabled = False self._client_ip_header = _get_config("DD_TRACE_CLIENT_IP_HEADER") self._retrieve_client_ip = _get_config("DD_TRACE_CLIENT_IP_ENABLED", False, asbool) @@ -477,14 +452,6 @@ def __init__(self): self._128_bit_trace_id_enabled = _get_config("DD_TRACE_128_BIT_TRACEID_GENERATION_ENABLED", True, asbool) self._128_bit_trace_id_logging_enabled = _get_config("DD_TRACE_128_BIT_TRACEID_LOGGING_ENABLED", False, asbool) - if self._128_bit_trace_id_logging_enabled: - deprecate( - "Using DD_TRACE_128_BIT_TRACEID_LOGGING_ENABLED is deprecated.", - message="Log injection format is now configured automatically.", - removal_version="3.0.0", - category=DDTraceDeprecationWarning, - ) - self._sampling_rules = _get_config("DD_SPAN_SAMPLING_RULES") self._sampling_rules_file = _get_config("DD_SPAN_SAMPLING_RULES_FILE") @@ -536,18 +503,7 @@ def __init__(self): ["DD_TRACE_COMPUTE_STATS", "DD_TRACE_STATS_COMPUTATION_ENABLED"], trace_compute_stats_default, asbool ) self._data_streams_enabled = _get_config("DD_DATA_STREAMS_ENABLED", False, asbool) - - legacy_client_tag_enabled = _get_config("DD_HTTP_CLIENT_TAG_QUERY_STRING") - if legacy_client_tag_enabled is None: - self._http_client_tag_query_string = _get_config("DD_TRACE_HTTP_CLIENT_TAG_QUERY_STRING", "true") - else: - deprecate( - "DD_HTTP_CLIENT_TAG_QUERY_STRING is deprecated", - message="Please use DD_TRACE_HTTP_CLIENT_TAG_QUERY_STRING instead.", - removal_version="3.0.0", - category=DDTraceDeprecationWarning, - ) - self._http_client_tag_query_string = legacy_client_tag_enabled.lower() + self._http_client_tag_query_string = _get_config("DD_TRACE_HTTP_CLIENT_TAG_QUERY_STRING", "true") dd_trace_obfuscation_query_string_regexp = _get_config( "DD_TRACE_OBFUSCATION_QUERY_STRING_REGEXP", DD_TRACE_OBFUSCATION_QUERY_STRING_REGEXP_DEFAULT @@ -577,15 +533,8 @@ def __init__(self): # https://github.com/open-telemetry/opentelemetry-python/blob/v1.16.0/opentelemetry-api/src/opentelemetry/context/__init__.py#L53 os.environ["OTEL_PYTHON_CONTEXT"] = "ddcontextvars_context" self._subscriptions = [] # type: List[Tuple[List[str], Callable[[Config, List[str]], None]]] - self._span_aggregator_rlock = _get_config("DD_TRACE_SPAN_AGGREGATOR_RLOCK", True, asbool) - if self._span_aggregator_rlock is False: - deprecate( - "DD_TRACE_SPAN_AGGREGATOR_RLOCK is deprecated", - message="Soon the ddtrace library will only support using threading.Rlock to " - "aggregate and encode span data. If you need to disable the re-entrant lock and " - "revert to using threading.Lock, please contact Datadog support.", - removal_version="3.0.0", - ) + # Disabled Span Aggregator Rlock is not supported. 
Remove this configuration in the future + self._span_aggregator_rlock = True self._trace_methods = _get_config("DD_TRACE_METHODS") diff --git a/ddtrace/settings/_otel_remapper.py b/ddtrace/settings/_otel_remapper.py index ec238e8a3cb..e495f783cd3 100644 --- a/ddtrace/settings/_otel_remapper.py +++ b/ddtrace/settings/_otel_remapper.py @@ -52,12 +52,16 @@ def _remap_traces_sampler(otel_value: str) -> Optional[str]: otel_value, ) otel_value = f"parentbased_{otel_value}" + rate = None if otel_value == "parentbased_always_on": - return "1.0" + rate = "1.0" elif otel_value == "parentbased_always_off": - return "0.0" + rate = "0.0" elif otel_value == "parentbased_traceidratio": - return os.environ.get("OTEL_TRACES_SAMPLER_ARG", "1") + rate = os.environ.get("OTEL_TRACES_SAMPLER_ARG", "1") + + if rate is not None: + return f'[{{"sample_rate":{rate}}}]' return None @@ -130,7 +134,7 @@ def _remap_default(otel_value: str) -> Optional[str]: "OTEL_SERVICE_NAME": ("DD_SERVICE", _remap_default), "OTEL_LOG_LEVEL": ("DD_TRACE_DEBUG", _remap_otel_log_level), "OTEL_PROPAGATORS": ("DD_TRACE_PROPAGATION_STYLE", _remap_otel_propagators), - "OTEL_TRACES_SAMPLER": ("DD_TRACE_SAMPLE_RATE", _remap_traces_sampler), + "OTEL_TRACES_SAMPLER": ("DD_TRACE_SAMPLING_RULES", _remap_traces_sampler), "OTEL_TRACES_EXPORTER": ("DD_TRACE_ENABLED", _remap_traces_exporter), "OTEL_METRICS_EXPORTER": ("DD_RUNTIME_METRICS_ENABLED", _remap_metrics_exporter), "OTEL_LOGS_EXPORTER": ("", _validate_logs_exporter), # Does not set a DDTRACE environment variable. diff --git a/ddtrace/settings/asm.py b/ddtrace/settings/asm.py index 4024c13f982..8cb35132d47 100644 --- a/ddtrace/settings/asm.py +++ b/ddtrace/settings/asm.py @@ -17,6 +17,7 @@ from ddtrace.appsec._constants import LOGIN_EVENTS_MODE from ddtrace.appsec._constants import TELEMETRY_INFORMATION_NAME from ddtrace.constants import APPSEC_ENV +from ddtrace.internal.serverless import in_aws_lambda from ddtrace.settings._core import report_telemetry as _report_telemetry @@ -224,15 +225,21 @@ class ASMConfig(Env): def __init__(self): super().__init__() - # Is one click available? - self._eval_asm_can_be_enabled() - if not self._asm_libddwaf_available: + if not self._iast_supported: + self._iast_enabled = False + if not self._asm_libddwaf_available or in_aws_lambda(): self._asm_enabled = False self._asm_can_be_enabled = False self._iast_enabled = False self._api_security_enabled = False - if not self._iast_supported: - self._iast_enabled = False + self._ep_enabled = False + self._auto_user_instrumentation_enabled = False + self._auto_user_instrumentation_local_mode = LOGIN_EVENTS_MODE.DISABLED + self._load_modules = False + self._asm_rc_enabled = False + else: + # Is one click available? 
+ self._eval_asm_can_be_enabled() def reset(self): """For testing purposes, reset the configuration to its default values given current environment variables.""" @@ -240,6 +247,10 @@ def reset(self): def _eval_asm_can_be_enabled(self): self._asm_can_be_enabled = APPSEC_ENV not in os.environ and tracer_config._remote_config_enabled + self._load_modules: bool = bool( + self._iast_enabled or (self._ep_enabled and (self._asm_enabled or self._asm_can_be_enabled)) + ) + self._asm_rc_enabled = (self._asm_enabled and tracer_config._remote_config_enabled) or self._asm_can_be_enabled @property def _api_security_feature_active(self) -> bool: diff --git a/ddtrace/settings/config.py b/ddtrace/settings/config.py deleted file mode 100644 index 00c0ee9917c..00000000000 --- a/ddtrace/settings/config.py +++ /dev/null @@ -1,11 +0,0 @@ -from ddtrace.internal.utils.deprecations import DDTraceDeprecationWarning -from ddtrace.settings._config import * # noqa: F403 -from ddtrace.vendor.debtcollector import deprecate - - -deprecate( - "The ddtrace.settings.config module is deprecated", - message="Access the global configuration using ``ddtrace.config``.", - category=DDTraceDeprecationWarning, - removal_version="3.0.0", -) diff --git a/ddtrace/settings/integration.py b/ddtrace/settings/integration.py index 354e99f7625..eef7f5c81c6 100644 --- a/ddtrace/settings/integration.py +++ b/ddtrace/settings/integration.py @@ -1,13 +1,8 @@ import os from typing import Optional # noqa:F401 -from typing import Tuple # noqa:F401 - -from ddtrace.internal.utils.deprecations import DDTraceDeprecationWarning -from ddtrace.vendor.debtcollector import deprecate from .._hooks import Hooks from ..internal.utils.attrdict import AttrDict -from ..internal.utils.formats import asbool from .http import HttpConfig @@ -43,9 +38,10 @@ def __init__(self, global_config, name, *args, **kwargs): object.__setattr__(self, "hooks", Hooks()) object.__setattr__(self, "http", HttpConfig()) - analytics_enabled, analytics_sample_rate = self._get_analytics_settings() - self.setdefault("analytics_enabled", analytics_enabled) - self.setdefault("analytics_sample_rate", float(analytics_sample_rate)) + # Trace Analytics was removed in v3.0.0 + # TODO(munir): Remove all references to analytics_enabled and analytics_sample_rate + self.setdefault("analytics_enabled", False) + self.setdefault("analytics_sample_rate", 1.0) service = os.getenv( "DD_%s_SERVICE" % name.upper(), default=os.getenv( @@ -65,33 +61,6 @@ def __init__(self, global_config, name, *args, **kwargs): self.get_http_tag_query_string(getattr(self, "default_http_tag_query_string", None)), ) - def _get_analytics_settings(self): - # type: () -> Tuple[Optional[bool], float] - # Set default analytics configuration, default is disabled - # DEV: Default to `None` which means do not set this key - # Inject environment variables for integration - env = "DD_TRACE_%s_ANALYTICS_ENABLED" % self.integration_name.upper() - legacy_env = "DD_%s_ANALYTICS_ENABLED" % self.integration_name.upper() - analytics_enabled = asbool(os.getenv(env, os.getenv(legacy_env, default=None))) - - if analytics_enabled: - deprecate( - "Datadog App Analytics is deprecated. " - f"App Analytics can be enabled via {env} and {legacy_env} " - f"environment variables and the ddtrace.config.{self.integration_name}.analytics_enabled configuration." 
- " This feature and its associated configurations will be removed in a future release.", - category=DDTraceDeprecationWarning, - ) - - analytics_sample_rate = float( - os.getenv( - "DD_TRACE_%s_ANALYTICS_SAMPLE_RATE" % self.integration_name.upper(), - os.getenv("DD_%s_ANALYTICS_SAMPLE_RATE" % self.integration_name.upper(), default=1.0), - ) - ) - - return analytics_enabled, analytics_sample_rate - def get_http_tag_query_string(self, value): if self.global_config._http_tag_query_string: dd_http_server_tag_query_string = value if value else os.getenv("DD_HTTP_SERVER_TAG_QUERY_STRING", "true") diff --git a/ddtrace/settings/symbol_db.py b/ddtrace/settings/symbol_db.py index 2203b6f3f75..9637b97abd1 100644 --- a/ddtrace/settings/symbol_db.py +++ b/ddtrace/settings/symbol_db.py @@ -11,7 +11,7 @@ class SymbolDatabaseConfig(En): enabled = En.v( bool, "upload_enabled", - default=False, + default=True, help_type="Boolean", help="Whether to upload source code symbols to the Datadog backend", ) diff --git a/ddtrace/span.py b/ddtrace/span.py deleted file mode 100644 index 48f1835262c..00000000000 --- a/ddtrace/span.py +++ /dev/null @@ -1,10 +0,0 @@ -from ddtrace.internal.utils.deprecations import DDTraceDeprecationWarning -from ddtrace.trace import Span # noqa: F401 -from ddtrace.vendor.debtcollector import deprecate - - -deprecate( - "The span module is deprecated and will be moved.", - message="A new span interface will be provided by the trace sub-package.", - category=DDTraceDeprecationWarning, -) diff --git a/ddtrace/tracer.py b/ddtrace/tracer.py deleted file mode 100644 index afb4e05492d..00000000000 --- a/ddtrace/tracer.py +++ /dev/null @@ -1,10 +0,0 @@ -from ddtrace._trace.tracer import Tracer # noqa: F401 -from ddtrace.internal.utils.deprecations import DDTraceDeprecationWarning -from ddtrace.vendor.debtcollector import deprecate - - -deprecate( - "The ddtrace.tracer module is deprecated and will be removed.", - message="A new interface will be provided by the trace sub-package.", - category=DDTraceDeprecationWarning, -) diff --git a/ddtrace/tracing/__init__.py b/ddtrace/tracing/__init__.py deleted file mode 100644 index c66bb230093..00000000000 --- a/ddtrace/tracing/__init__.py +++ /dev/null @@ -1,11 +0,0 @@ -from ddtrace._trace import trace_handlers # noqa: F401 -from ddtrace._trace._span_link import SpanLink # noqa: F401 -from ddtrace.internal.utils.deprecations import DDTraceDeprecationWarning -from ddtrace.vendor.debtcollector import deprecate - - -deprecate( - "The tracing module is deprecated and will be moved.", - message="A new interface will be provided by the _trace sub-package.", - category=DDTraceDeprecationWarning, -) diff --git a/docker-compose.yml b/docker-compose.yml index 701b5a7d0f0..642575a19d7 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -198,7 +198,6 @@ services: ports: - "127.0.0.1:8321:8321" environment: - - DD_APPSEC_ENABLED=true - DD_IAST_ENABLED=true - DD_IAST_REQUEST_SAMPLING=100 - DD_IAST_VULNERABILITIES_PER_REQUEST=100 @@ -207,5 +206,23 @@ services: - DD_TRACE_AGENT_URL=http://testagent:8126 - DD_IAST_DEDUPLICATION_ENABLED=false + valkey: + image: valkey/valkey:8.0-alpine + ports: + - "127.0.0.1:6379:6379" + + valkeycluster: + platform: linux/amd64 + image: grokzen/redis-cluster:6.2.0 + environment: + - IP=0.0.0.0 + ports: + - "127.0.0.1:7000:7000" + - "127.0.0.1:7001:7001" + - "127.0.0.1:7002:7002" + - "127.0.0.1:7003:7003" + - "127.0.0.1:7004:7004" + - "127.0.0.1:7005:7005" + volumes: ddagent: diff --git a/docs/advanced_usage.rst 
b/docs/advanced_usage.rst index 5fe21321680..e6ead60c1f5 100644 --- a/docs/advanced_usage.rst +++ b/docs/advanced_usage.rst @@ -1,28 +1,6 @@ Advanced Usage ============== -.. _agentconfiguration: - -Agent Configuration -------------------- - -If the Datadog Agent is on a separate host from your application, you can modify -the default ``ddtrace.tracer`` object to utilize another hostname and port. Here -is a small example showcasing this:: - - from ddtrace.trace import tracer - - tracer.configure(hostname=, port=, https=) - -By default, these will be set to ``localhost``, ``8126``, and ``False`` respectively. - -You can also use a Unix Domain Socket to connect to the agent:: - - from ddtrace.trace import tracer - - tracer.configure(uds_path="/path/to/socket") - - .. _context: @@ -223,7 +201,7 @@ provider can be used. It must implement the :class:`ddtrace.trace.BaseContextProvider` interface and can be configured with:: - tracer.configure(context_provider=MyContextProvider) + tracer.configure(context_provider=MyContextProvider()) .. _disttracing: diff --git a/docs/configuration.rst b/docs/configuration.rst index f45ac992582..6f5c87a945e 100644 --- a/docs/configuration.rst +++ b/docs/configuration.rst @@ -817,6 +817,7 @@ Sampling version_added: v0.33.0: v2.15.0: Only applied when DD_TRACE_SAMPLE_RATE, DD_TRACE_SAMPLING_RULES, or DD_SPAN_SAMPLING_RULE are set. + v3.0.0: Only applied when DD_TRACE_SAMPLING_RULES or DD_SPAN_SAMPLING_RULE are set. DD_TRACE_SAMPLING_RULES: type: JSON array diff --git a/docs/index.rst b/docs/index.rst index dbe0d61bb7d..3526d6a6f18 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -174,14 +174,14 @@ contacting support. +--------------------------------------------------+---------------+----------------+ | :ref:`urllib3` | >= 1.25.8 | No | +--------------------------------------------------+---------------+----------------+ +| :ref:`valkey` | >= 6.0.0 | Yes | ++--------------------------------------------------+---------------+----------------+ | :ref:`vertexai` | >= 1.71.1 | Yes | +--------------------------------------------------+---------------+----------------+ | :ref:`vertica` | >= 0.6 | Yes | +--------------------------------------------------+---------------+----------------+ | :ref:`wsgi` | \* | No | +--------------------------------------------------+---------------+----------------+ -| :ref:`yaaredis` | >= 2.0.0 | Yes | -+--------------------------------------------------+---------------+----------------+ .. [1] Libraries that are automatically instrumented when the diff --git a/docs/integrations.rst b/docs/integrations.rst index 0566ab7fa9c..f9601e3aeb0 100644 --- a/docs/integrations.rst +++ b/docs/integrations.rst @@ -485,6 +485,13 @@ urllib3 .. automodule:: ddtrace.contrib._urllib3 +.. _valkey: + +valkey +^^^^^^ +.. automodule:: ddtrace.contrib.valkey + + .. _vertexai: vertexai @@ -498,12 +505,6 @@ Vertica ^^^^^^^ .. automodule:: ddtrace.contrib._vertica -.. _yaaredis: - -yaaredis -^^^^^^^^ -.. automodule:: ddtrace.contrib._yaaredis - .. 
_wsgi: WSGI diff --git a/docs/spelling_wordlist.txt b/docs/spelling_wordlist.txt index ff2cfc09c6d..cc2969bd5c9 100644 --- a/docs/spelling_wordlist.txt +++ b/docs/spelling_wordlist.txt @@ -301,6 +301,7 @@ urls username uvicorn uWSGI +valkey vendored versioned vertexai @@ -314,4 +315,3 @@ workflow Wrapt wsgi xfail -yaaredis diff --git a/hatch.toml b/hatch.toml index 6dcd32e6794..f5d3b99dd28 100644 --- a/hatch.toml +++ b/hatch.toml @@ -374,7 +374,7 @@ dependencies = [ test = [ "uname -a", "pip freeze", - "DD_TRACE_AGENT_URL=\"http://testagent:9126\" DD_CIVISIBILITY_ITR_ENABLED=0 DD_IAST_REQUEST_SAMPLING=100 DD_IAST_DEDUPLICATION_ENABLED=false python -m pytest -vvv {args:tests/appsec/integrations/flask_tests/}", + "DD_TRACE_AGENT_URL=\"http://testagent:9126\" DD_CIVISIBILITY_ITR_ENABLED=0 DD_IAST_ENABLED=true DD_IAST_REQUEST_SAMPLING=100 DD_IAST_DEDUPLICATION_ENABLED=false python -m pytest -vvv {args:tests/appsec/integrations/flask_tests/}", ] [[envs.appsec_integrations_flask.matrix]] @@ -399,6 +399,52 @@ flask = ["~=2.2"] python = ["3.8", "3.9", "3.10", "3.11", "3.12", "3.13"] flask = ["~=3.0"] +[[envs.appsec_integrations_flask.matrix]] +# werkzeug 3.1 drops support for py3.8 +python = ["3.11", "3.12", "3.13"] +flask = ["~=3.1"] +werkzeug = ["~=3.1"] + +## ASM appsec_integrations_fastapi + +[envs.appsec_integrations_fastapi] +template = "appsec_integrations_fastapi" +dependencies = [ + "pytest", + "pytest-cov", + "requests", + "hypothesis", + "jinja2", + "httpx<0.28.0", + "anyio{matrix:anyio:}", + "fastapi{matrix:fastapi}" +] + +[envs.appsec_integrations_fastapi.env-vars] +CMAKE_BUILD_PARALLEL_LEVEL = "12" + +[envs.appsec_integrations_fastapi.scripts] +test = [ + "uname -a", + "pip freeze", + "DD_TRACE_AGENT_URL=\"http://testagent:9126\" DD_CIVISIBILITY_ITR_ENABLED=0 DD_IAST_REQUEST_SAMPLING=100 DD_IAST_DEDUPLICATION_ENABLED=false python -m pytest -vvv {args:tests/appsec/integrations/fastapi_tests/}", +] + + +# if you add or remove a version here, please also update the parallelism parameter +# in .circleci/config.templ.yml +[[envs.appsec_integrations_fastapi.matrix]] +python = ["3.8", "3.10", "3.13"] +fastapi = ["==0.86.0"] +anyio = ["==3.7.1"] + +[[envs.appsec_integrations_fastapi.matrix]] +python = ["3.8", "3.10", "3.13"] +fastapi = ["==0.94.1"] + +[[envs.appsec_integrations_fastapi.matrix]] +python = ["3.8", "3.10", "3.13"] +fastapi = ["~=0.114.2"] ## ASM FastAPI @@ -466,7 +512,8 @@ _DD_IAST_PATCH_MODULES = "scripts.iast" test = [ "uname -a", "pip freeze", - "python -m pytest tests/appsec/iast_aggregated_memcheck/test_aggregated_memleaks.py", + # We use --no-cov due to a pytest-cov problem with eval https://github.com/pytest-dev/pytest-cov/issues/676 + "python -m pytest --no-cov tests/appsec/iast_aggregated_memcheck/test_aggregated_memleaks.py", ] [[envs.iast_aggregated_leak_testing.matrix]] diff --git a/lib-injection/sources/min_compatible_versions.csv b/lib-injection/sources/min_compatible_versions.csv index c7366036a89..0c61576da1a 100644 --- a/lib-injection/sources/min_compatible_versions.csv +++ b/lib-injection/sources/min_compatible_versions.csv @@ -191,6 +191,5 @@ webtest,0 werkzeug,<1.0 wheel,0 xmltodict,>=0.12 -yaaredis,~=2.0.0 yarl,~=1.0 zeep,0 diff --git a/lib-injection/sources/sitecustomize.py b/lib-injection/sources/sitecustomize.py index 32ab1c31ff3..0daa9c2413a 100644 --- a/lib-injection/sources/sitecustomize.py +++ b/lib-injection/sources/sitecustomize.py @@ -35,7 +35,7 @@ def parse_version(version): SCRIPT_DIR = os.path.dirname(__file__) RUNTIMES_ALLOW_LIST = { 
"cpython": { - "min": Version(version=(3, 7), constraint=""), + "min": Version(version=(3, 8), constraint=""), "max": Version(version=(3, 13), constraint=""), } } diff --git a/min_compatible_versions.csv b/min_compatible_versions.csv index c7366036a89..dd91aedc2b4 100644 --- a/min_compatible_versions.csv +++ b/min_compatible_versions.csv @@ -182,6 +182,7 @@ typing-extensions,0 typing_extensions,0 urllib3,~=1.0 uwsgi,0 +valkey,~=6.0.0 vcrpy,==4.2.1 vertexai,0 vertica-python,>=0.6.0 @@ -191,6 +192,5 @@ webtest,0 werkzeug,<1.0 wheel,0 xmltodict,>=0.12 -yaaredis,~=2.0.0 yarl,~=1.0 zeep,0 diff --git a/releasenotes/notes/add-valkey-support-6cc9f41351dc0cd9.yaml b/releasenotes/notes/add-valkey-support-6cc9f41351dc0cd9.yaml new file mode 100644 index 00000000000..945f8eb6ec9 --- /dev/null +++ b/releasenotes/notes/add-valkey-support-6cc9f41351dc0cd9.yaml @@ -0,0 +1,3 @@ +features: + - | + valkey: adds automatic instrumentation of the Valkey package. Thank you [AhmadMasry](https://github.com/AhmadMasry)! \ No newline at end of file diff --git a/releasenotes/notes/add_aiohttp_memory_leak_flag-66005f987dbfbd47.yaml b/releasenotes/notes/add_aiohttp_memory_leak_flag-66005f987dbfbd47.yaml new file mode 100644 index 00000000000..67ef6980a36 --- /dev/null +++ b/releasenotes/notes/add_aiohttp_memory_leak_flag-66005f987dbfbd47.yaml @@ -0,0 +1,5 @@ +--- + +fixes: + - | + aiohttp: Adds the environment variable ``DD_AIOHTTP_CLIENT_DISABLE_STREAM_TIMING_FOR_MEM_LEAK`` to address a potential memory leak in the aiohttp integration. When set to true, this flag may cause streamed response span timing to be inaccurate. The flag defaults to false. \ No newline at end of file diff --git a/releasenotes/notes/deprecate-langchain-openai-cost-042f3a04cda9d23b.yaml b/releasenotes/notes/deprecate-langchain-openai-cost-042f3a04cda9d23b.yaml new file mode 100644 index 00000000000..c2c589693ab --- /dev/null +++ b/releasenotes/notes/deprecate-langchain-openai-cost-042f3a04cda9d23b.yaml @@ -0,0 +1,5 @@ +--- +upgrade: + - | + langchain: Removes the `langchain.tokens.total_cost` span metric for OpenAI calls. + For continued cost estimation of OpenAI calls, enable `LLM Observability `_. diff --git a/releasenotes/notes/drop-multitracer-support-ec109486f71c9c62.yaml b/releasenotes/notes/drop-multitracer-support-ec109486f71c9c62.yaml new file mode 100644 index 00000000000..79a86b59d05 --- /dev/null +++ b/releasenotes/notes/drop-multitracer-support-ec109486f71c9c62.yaml @@ -0,0 +1,4 @@ +--- +upgrade: + - | + tracing: Drops support for multiple Tracer instances in the same process. Use ``ddtrace.trace.tracer`` to access the global tracer instance. \ No newline at end of file diff --git a/releasenotes/notes/ensure_no_appsec_loading-8ce46c58d6ecf81f.yaml b/releasenotes/notes/ensure_no_appsec_loading-8ce46c58d6ecf81f.yaml new file mode 100644 index 00000000000..b7c34f83779 --- /dev/null +++ b/releasenotes/notes/ensure_no_appsec_loading-8ce46c58d6ecf81f.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - | + ASM: This ensures that no module from ASM are loaded when ASM is disabled or unavailable. + SCA: This ensures that no module from IAST are loaded when IAST is disabled or unavailable. 
\ No newline at end of file diff --git a/releasenotes/notes/feat-symdb-enabled-by-default-25eebb43fc8c5a0d.yaml b/releasenotes/notes/feat-symdb-enabled-by-default-25eebb43fc8c5a0d.yaml new file mode 100644 index 00000000000..566990162c9 --- /dev/null +++ b/releasenotes/notes/feat-symdb-enabled-by-default-25eebb43fc8c5a0d.yaml @@ -0,0 +1,4 @@ +--- +features: + - | + Symbol Database is now enabled by default. diff --git a/releasenotes/notes/fix-internal-logging-init-6058c02b527cdf77.yaml b/releasenotes/notes/fix-internal-logging-init-6058c02b527cdf77.yaml new file mode 100644 index 00000000000..132e7efaab5 --- /dev/null +++ b/releasenotes/notes/fix-internal-logging-init-6058c02b527cdf77.yaml @@ -0,0 +1,4 @@ +--- +fixes: + - | + internal: Fix ``ddtrace`` internal logger initialization mutating an unlocked shared resource. diff --git a/releasenotes/notes/iast-feat-xss-django-6781a8b9a4092832.yaml b/releasenotes/notes/iast-feat-xss-django-6781a8b9a4092832.yaml new file mode 100644 index 00000000000..c29e9f49936 --- /dev/null +++ b/releasenotes/notes/iast-feat-xss-django-6781a8b9a4092832.yaml @@ -0,0 +1,6 @@ +--- +features: + - | + Code Security (IAST): XSS detection for Django applications and Jinja2 (Flask and FastAPI applications), + which will be displayed on your Datadog Vulnerability Explorer dashboard. + See the `Application Vulnerability Management `_ documentation for more information about this feature. diff --git a/releasenotes/notes/langchain-drop-logs-metrics-a997e8059886b20a.yaml b/releasenotes/notes/langchain-drop-logs-metrics-a997e8059886b20a.yaml new file mode 100644 index 00000000000..e7099dd1a77 --- /dev/null +++ b/releasenotes/notes/langchain-drop-logs-metrics-a997e8059886b20a.yaml @@ -0,0 +1,8 @@ +--- +upgrade: + - | + langchain: Removes prompt-completion log sampling from the LangChain integration. To continue logging prompt completions, + enable LLM Observability. + - | + langchain: Removes integration metrics from the LangChain integration. To continue tracking operational metrics from the + LangChain integration, enable LLM Observability or use trace metrics instead. diff --git a/releasenotes/notes/remove-deprecated-tracing-configs-c6711b57037576f6.yaml b/releasenotes/notes/remove-deprecated-tracing-configs-c6711b57037576f6.yaml new file mode 100644 index 00000000000..b47613d504e --- /dev/null +++ b/releasenotes/notes/remove-deprecated-tracing-configs-c6711b57037576f6.yaml @@ -0,0 +1,10 @@ +--- +upgrade: + - | + configurations: Drops support for deprecated tracing configurations. The following configurations are no longer supported: + - DD_TRACE_SAMPLE_RATE, use DD_TRACE_SAMPLING_RULES instead. + - DD_TRACE_API_VERSION=v0.3, the default ``v0.5`` version is used instead. + - DD_ANALYTICS_ENABLED, Datadog Analytics is no longer supported. + - DD_TRACE_ANALYTICS_ENABLED, Datadog Analytics is no longer supported. + - DD_HTTP_CLIENT_TAG_QUERY_STRING, DD_TRACE_HTTP_CLIENT_TAG_QUERY_STRING should be used instead. + - DD_TRACE_SPAN_AGGREGATOR_RLOCK, disabling the span aggregator rlock is no longer supported. diff --git a/releasenotes/notes/remove-deprecated-tracing-modules-a129231d42e1218d.yaml b/releasenotes/notes/remove-deprecated-tracing-modules-a129231d42e1218d.yaml new file mode 100644 index 00000000000..575bb8f1e55 --- /dev/null +++ b/releasenotes/notes/remove-deprecated-tracing-modules-a129231d42e1218d.yaml @@ -0,0 +1,4 @@ +--- +other: + - | + tracing: Removes the deprecated tracing modules and constants from the ``ddtrace`` package. 
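For readers upgrading across these removals, a minimal sketch of the import migration they imply. The ``ddtrace.trace`` replacement paths below come from the deprecation messages deleted earlier in this diff; the traced operation itself is a hypothetical example:

# Before: shim modules removed in this release (each emitted DDTraceDeprecationWarning on import):
# from ddtrace.pin import Pin
# from ddtrace.span import Span
# After: import from the ddtrace.trace package, as the removed deprecation messages advised.
from ddtrace.trace import Pin  # noqa: F401
from ddtrace.trace import Span  # noqa: F401
from ddtrace.trace import tracer  # the global tracer; multiple Tracer instances are no longer supported

with tracer.trace("example.operation") as span:  # hypothetical span for illustration
    span.set_tag_str("example.tag", "value")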
diff --git a/releasenotes/notes/remove-deprecations-from-pin-a700ae0cb6d51d03.yaml b/releasenotes/notes/remove-deprecations-from-pin-a700ae0cb6d51d03.yaml new file mode 100644 index 00000000000..0401cdc4258 --- /dev/null +++ b/releasenotes/notes/remove-deprecations-from-pin-a700ae0cb6d51d03.yaml @@ -0,0 +1,4 @@ +--- +upgrade: + - | + tracing: Removes support for overriding the global tracer in ``ddtrace.trace.Pin``. \ No newline at end of file diff --git a/releasenotes/notes/remove-openai-metrics-logs-656c6ba8e2e07ea3.yaml b/releasenotes/notes/remove-openai-metrics-logs-656c6ba8e2e07ea3.yaml new file mode 100644 index 00000000000..63153702d16 --- /dev/null +++ b/releasenotes/notes/remove-openai-metrics-logs-656c6ba8e2e07ea3.yaml @@ -0,0 +1,8 @@ +--- +upgrade: + - | + openai: Removes prompt-completion log sampling from the OpenAI integration. To continue logging prompt completions, + enable LLM Observability. + - | + openai: Removes integration metrics from the OpenAI integration. To continue tracking operational metrics from the + OpenAI integration, enable LLM Observability or use trace metrics instead. diff --git a/releasenotes/notes/remove-tracing-attrs-3-0-5743fa668289d5bc.yaml b/releasenotes/notes/remove-tracing-attrs-3-0-5743fa668289d5bc.yaml new file mode 100644 index 00000000000..35ee9378801 --- /dev/null +++ b/releasenotes/notes/remove-tracing-attrs-3-0-5743fa668289d5bc.yaml @@ -0,0 +1,15 @@ +--- +upgrade: + - | + tracer: Removes deprecated parameters from the ``Tracer.configure(...)`` method and removes the ``Tracer.sampler`` attribute. + - | + tracing: Drops support for multiple tracer instances; ``ddtrace.trace.Tracer`` cannot be reinitialized. + - | + span: Removes the deprecated ``Span.sampled`` property. + - | + sampling: Drops support for configuring sampling rules using functions and regex in the ``ddtrace.tracer.sampler.rules[].choose_matcher(...)`` method + and removes the ``timestamp_ns`` parameter from ``ddtrace.internal.rate_limiter.RateLimiter.is_allowed()``. + - | + configurations: Drops support for configuring ``DD_TRACE_METHODS`` with the '[]' notation. Ensure ``DD_TRACE_METHODS`` uses the ':' notation instead. + - | + opentracing: Removes the deprecated ``ddtracer`` parameter from ``ddtrace.opentracer.tracer.Tracer()``. \ No newline at end of file diff --git a/releasenotes/notes/remove-yaaredis-095441532392e12f.yaml b/releasenotes/notes/remove-yaaredis-095441532392e12f.yaml new file mode 100644 index 00000000000..ce5ae6da6c1 --- /dev/null +++ b/releasenotes/notes/remove-yaaredis-095441532392e12f.yaml @@ -0,0 +1,4 @@ +--- +upgrade: + - | + yaaredis: Removes the yaaredis integration from ``ddtrace``. For yaaredis support, pin ddtrace to an older version. \ No newline at end of file diff --git a/releasenotes/notes/remove_unneeded_unregister-ad20120201768a7e.yaml b/releasenotes/notes/remove_unneeded_unregister-ad20120201768a7e.yaml new file mode 100644 index 00000000000..1a5dc451340 --- /dev/null +++ b/releasenotes/notes/remove_unneeded_unregister-ad20120201768a7e.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - | + logging: Resolves an unneeded info log being logged on process exit + due to a forksafe hook being unregistered that was never registered to begin with. 
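Since several of the notes above steer configuration toward ``DD_TRACE_SAMPLING_RULES``, a short hedged sketch of the equivalent settings; the single-key rule payload mirrors the ``_remap_traces_sampler`` change earlier in this diff, and the 0.5 rate is an arbitrary example value:

import os

# Before (removed in this release): os.environ["DD_TRACE_SAMPLE_RATE"] = "0.5"
# After: express the same global rate as a sampling rule, set before ddtrace is imported.
os.environ["DD_TRACE_SAMPLING_RULES"] = '[{"sample_rate":0.5}]'

# The OTel remapping in _otel_remapper.py now produces the same shape:
# OTEL_TRACES_SAMPLER=parentbased_traceidratio with OTEL_TRACES_SAMPLER_ARG=0.5
# maps to DD_TRACE_SAMPLING_RULES='[{"sample_rate":0.5}]'.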
diff --git a/riotfile.py b/riotfile.py index a13480f0fbf..d66b317b1c7 100644 --- a/riotfile.py +++ b/riotfile.py @@ -2091,25 +2091,6 @@ def select_pys(min_version: str = MIN_PYTHON_VERSION, max_version: str = MAX_PYT "pytest-randomly": latest, }, ), - Venv( - name="yaaredis", - command="pytest {cmdargs} tests/contrib/yaaredis", - pkgs={ - "pytest-asyncio": "==0.21.1", - "pytest-randomly": latest, - }, - venvs=[ - Venv( - pys=select_pys(min_version="3.8", max_version="3.9"), - pkgs={"yaaredis": ["~=2.0.0", latest]}, - ), - Venv( - # yaaredis added support for Python 3.10 in 3.0 - pys="3.10", - pkgs={"yaaredis": latest}, - ), - ], - ), Venv( name="sanic", command="pytest {cmdargs} tests/contrib/sanic", @@ -2802,6 +2783,16 @@ def select_pys(min_version: str = MIN_PYTHON_VERSION, max_version: str = MAX_PYT }, pys=select_pys(min_version="3.8"), ), + Venv( + name="valkey", + command="pytest {cmdargs} tests/contrib/valkey", + pkgs={ + "valkey": latest, + "pytest-randomly": latest, + "pytest-asyncio": "==0.23.7", + }, + pys=select_pys(min_version="3.8"), + ), Venv( name="profile", # NB riot commands that use this Venv must include --pass-env to work properly diff --git a/scripts/iast/mod_leak_functions.py b/scripts/iast/mod_leak_functions.py index bf96d93c497..f53e7aa2e94 100644 --- a/scripts/iast/mod_leak_functions.py +++ b/scripts/iast/mod_leak_functions.py @@ -258,6 +258,7 @@ def sink_points(string_tainted): except Exception: pass + _ = eval(f"'a' + '{string_tainted}'") # Weak Randomness vulnerability _ = random.randint(1, 10) diff --git a/supported_versions_output.json b/supported_versions_output.json index a51bb17bb9a..7b158105f97 100644 --- a/supported_versions_output.json +++ b/supported_versions_output.json @@ -295,15 +295,15 @@ "auto-instrumented": false }, { - "integration": "vertexai", - "minimum_tracer_supported": "1.71.1", - "max_tracer_supported": "1.71.1", + "integration": "valkey", + "minimum_tracer_supported": "6.0.0", + "max_tracer_supported": "6.0.2", "auto-instrumented": true }, { - "integration": "yaaredis", - "minimum_tracer_supported": "2.0.4", - "max_tracer_supported": "3.0.0", + "integration": "vertexai", + "minimum_tracer_supported": "1.71.1", + "max_tracer_supported": "1.71.1", "auto-instrumented": true } ] \ No newline at end of file diff --git a/supported_versions_table.csv b/supported_versions_table.csv index 3f7384a0cdd..1f230039d50 100644 --- a/supported_versions_table.csv +++ b/supported_versions_table.csv @@ -47,5 +47,5 @@ starlette,0.13.6,0.41.3,True structlog,20.2.0,24.4.0,False tornado *,4.5.3,6.4,False urllib3,1.24.3,2.2.3,False +valkey,6.0.0,6.0.2,True vertexai,1.71.1,1.71.1,True -yaaredis,2.0.4,3.0.0,True diff --git a/tests/appsec/contrib_appsec/django_app/urls.py b/tests/appsec/contrib_appsec/django_app/urls.py index 3ca8557c550..bb93a34ff8a 100644 --- a/tests/appsec/contrib_appsec/django_app/urls.py +++ b/tests/appsec/contrib_appsec/django_app/urls.py @@ -196,7 +196,7 @@ def login_user(request): def new_service(request, service_name: str): import ddtrace - ddtrace.trace.Pin.override(django, service=service_name, tracer=ddtrace.tracer) + ddtrace.trace.Pin._override(django, service=service_name, tracer=ddtrace.tracer) return HttpResponse(service_name, status=200) diff --git a/tests/appsec/contrib_appsec/fastapi_app/app.py b/tests/appsec/contrib_appsec/fastapi_app/app.py index c636e65b7c3..ddefe6cf33c 100644 --- a/tests/appsec/contrib_appsec/fastapi_app/app.py +++ b/tests/appsec/contrib_appsec/fastapi_app/app.py @@ -104,7 +104,7 @@ async def 
multi_view_no_param(request: Request): # noqa: B008 async def new_service(service_name: str, request: Request): # noqa: B008 import ddtrace - ddtrace.trace.Pin.override(app, service=service_name, tracer=ddtrace.tracer) + ddtrace.trace.Pin._override(app, service=service_name, tracer=ddtrace.tracer) return HTMLResponse(service_name, 200) async def slow_numbers(minimum, maximum): diff --git a/tests/appsec/contrib_appsec/flask_app/app.py b/tests/appsec/contrib_appsec/flask_app/app.py index 32228375f37..ef2fcb0ce2c 100644 --- a/tests/appsec/contrib_appsec/flask_app/app.py +++ b/tests/appsec/contrib_appsec/flask_app/app.py @@ -55,7 +55,7 @@ def multi_view(param_int=0, param_str=""): def new_service(service_name: str): import ddtrace - ddtrace.trace.Pin.override(Flask, service=service_name, tracer=ddtrace.tracer) + ddtrace.trace.Pin._override(Flask, service=service_name, tracer=ddtrace.tracer) return service_name diff --git a/tests/appsec/contrib_appsec/test_flask.py b/tests/appsec/contrib_appsec/test_flask.py index b497de98bf9..690b893c89f 100644 --- a/tests/appsec/contrib_appsec/test_flask.py +++ b/tests/appsec/contrib_appsec/test_flask.py @@ -37,7 +37,7 @@ def setUp(self): self.app = app self.app.test_client_class = DDFlaskTestClient self.client = self.app.test_client() - Pin.override(self.app, tracer=self.tracer) + Pin._override(self.app, tracer=self.tracer) def tearDown(self): super(BaseFlaskTestCase, self).tearDown() diff --git a/tests/appsec/contrib_appsec/utils.py b/tests/appsec/contrib_appsec/utils.py index 5cf5e1da6b1..e510ed34029 100644 --- a/tests/appsec/contrib_appsec/utils.py +++ b/tests/appsec/contrib_appsec/utils.py @@ -1568,7 +1568,7 @@ def test_tracer(): @contextmanager def post_tracer(interface): original_tracer = getattr(ddtrace.trace.Pin.get_from(interface.framework), "tracer", None) - ddtrace.trace.Pin.override(interface.framework, tracer=interface.tracer) + ddtrace.trace.Pin._override(interface.framework, tracer=interface.tracer) yield if original_tracer is not None: - ddtrace.trace.Pin.override(interface.framework, tracer=original_tracer) + ddtrace.trace.Pin._override(interface.framework, tracer=original_tracer) diff --git a/tests/appsec/iast/taint_sinks/_taint_sinks_utils.py b/tests/appsec/iast/taint_sinks/_taint_sinks_utils.py index 288b72d015c..ae69fbda120 100644 --- a/tests/appsec/iast/taint_sinks/_taint_sinks_utils.py +++ b/tests/appsec/iast/taint_sinks/_taint_sinks_utils.py @@ -22,6 +22,7 @@ def get_parametrize(vuln_type, ignore_list=None): "$1 - Tainted range based redaction - multiple ranges", "Redacted source that needs to be truncated", "Query with single quoted string literal and null source", + "No redacted that needs to be truncated - whole text", ): continue diff --git a/tests/appsec/iast/taint_sinks/test_xss_redacted.py b/tests/appsec/iast/taint_sinks/test_xss_redacted.py new file mode 100644 index 00000000000..c192962e53e --- /dev/null +++ b/tests/appsec/iast/taint_sinks/test_xss_redacted.py @@ -0,0 +1,48 @@ +import os + +import pytest + +from ddtrace.appsec._iast._taint_tracking import origin_to_str +from ddtrace.appsec._iast._taint_tracking import str_to_origin +from ddtrace.appsec._iast.constants import VULN_XSS +from ddtrace.appsec._iast.taint_sinks.xss import XSS +from tests.appsec.iast.taint_sinks._taint_sinks_utils import _taint_pyobject_multiranges +from tests.appsec.iast.taint_sinks._taint_sinks_utils import get_parametrize +from tests.appsec.iast.taint_sinks.conftest import _get_iast_data + + +ROOT_DIR = os.path.dirname(os.path.abspath(__file__)) 
+ + +@pytest.mark.parametrize( + "evidence_input, sources_expected, vulnerabilities_expected,element", list(get_parametrize(VULN_XSS)) +) +def test_xss_redaction_suite( + evidence_input, sources_expected, vulnerabilities_expected, iast_context_defaults, element +): + tainted_object = evidence_input_value = evidence_input.get("value", "") + if evidence_input_value: + tainted_object = _taint_pyobject_multiranges( + evidence_input_value, + [ + ( + input_ranges["iinfo"]["parameterName"], + input_ranges["iinfo"]["parameterValue"], + str_to_origin(input_ranges["iinfo"]["type"]), + input_ranges["start"], + input_ranges["end"] - input_ranges["start"], + ) + for input_ranges in evidence_input.get("ranges", {}) + ], + ) + + XSS.report(tainted_object) + + data = _get_iast_data() + vulnerability = list(data["vulnerabilities"])[0] + source = list(data["sources"])[0] + source["origin"] = origin_to_str(source["origin"]) + + assert vulnerability["type"] == VULN_XSS + assert vulnerability["evidence"] == vulnerabilities_expected["evidence"] + assert source == sources_expected diff --git a/tests/appsec/iast/test_processor.py b/tests/appsec/iast/test_processor.py index 3bb5eaa5015..4f9b912ffc2 100644 --- a/tests/appsec/iast/test_processor.py +++ b/tests/appsec/iast/test_processor.py @@ -51,7 +51,7 @@ def test_appsec_iast_processor_ensure_span_is_manual_keep(iast_context_defaults, test_appsec_iast_processor_ensure_span_is_manual_keep. This test throws 'finished span not connected to a trace' log error """ - with override_env(dict(DD_TRACE_SAMPLE_RATE=sampling_rate)): + with override_env({"DD_TRACE_SAMPLING_RULES": '[{"sample_rate":%s}]' % (sampling_rate,)}): oce.reconfigure() tracer = DummyTracer(iast_enabled=True) @@ -59,7 +59,6 @@ def test_appsec_iast_processor_ensure_span_is_manual_keep(iast_context_defaults, tracer._on_span_finish(span) result = span.get_tag(IAST.JSON) - assert len(json.loads(result)["vulnerabilities"]) == 1 assert span.get_metric(_SAMPLING_PRIORITY_KEY) is USER_KEEP diff --git a/tests/appsec/iast_packages/test_packages.py b/tests/appsec/iast_packages/test_packages.py index 83e53ae92c9..d65cf2ea709 100644 --- a/tests/appsec/iast_packages/test_packages.py +++ b/tests/appsec/iast_packages/test_packages.py @@ -626,7 +626,7 @@ def uninstall(self, python_cmd): "", import_module_to_validate="soupsieve.css_match", extras=[("beautifulsoup4", "4.12.3")], - skip_python_version=[(3, 6), (3, 7), (3, 8)], + skip_python_version=[(3, 8)], test_propagation=True, fixme_propagation_fails=True, ), @@ -638,7 +638,7 @@ def uninstall(self, python_cmd): # "Original password: your-password\nHashed password: replaced_hashed\nPassword match: True", # "", # import_module_to_validate="werkzeug.http", - # skip_python_version=[(3, 6), (3, 7), (3, 8)], + # skip_python_version=[(3, 8)], # ), PackageForTesting( "yarl", @@ -648,7 +648,7 @@ def uninstall(self, python_cmd): + " example.com\nPath: /path\nQuery: \n", "", import_module_to_validate="yarl._url", - skip_python_version=[(3, 6), (3, 7), (3, 8)], + skip_python_version=[(3, 8)], test_propagation=True, fixme_propagation_fails=True, ), @@ -659,7 +659,7 @@ def uninstall(self, python_cmd): # "example.zip", # "Contents of example.zip: ['example.zip/example.txt']", # "", - # skip_python_version=[(3, 6), (3, 7), (3, 8)], + # skip_python_version=[(3, 8)], # ), ## Skip due to typing-extensions added to the denylist # PackageForTesting( # "typing-extensions", # latest, # "", # "", # "", # import_name="typing_extensions", # test_e2e=False, - # skip_python_version=[(3, 
6), (3, 7), (3, 8)], + # skip_python_version=[(3, 8)], # ), PackageForTesting( "six", @@ -678,7 +678,7 @@ def uninstall(self, python_cmd): "", "We're in Python 3", "", - skip_python_version=[(3, 6), (3, 7), (3, 8)], + skip_python_version=[(3, 8)], ), ## Skip due to pillow added to the denylist # PackageForTesting( @@ -688,7 +688,7 @@ def uninstall(self, python_cmd): # "Image correctly generated", # "", # import_name="PIL.Image", - # skip_python_version=[(3, 6), (3, 7), (3, 8)], + # skip_python_version=[(3, 8)], # ), PackageForTesting( "aiobotocore", "2.13.0", "", "", "", test_e2e=False, test_import=False, import_name="aiobotocore.session" @@ -853,7 +853,7 @@ def uninstall(self, python_cmd): "Processed value: 15", "", import_name="annotated_types", - skip_python_version=[(3, 6), (3, 7), (3, 8)], + skip_python_version=[(3, 8)], ), ] diff --git a/tests/appsec/integrations/django_tests/conftest.py b/tests/appsec/integrations/django_tests/conftest.py index d150edf68be..688c09f0ce4 100644 --- a/tests/appsec/integrations/django_tests/conftest.py +++ b/tests/appsec/integrations/django_tests/conftest.py @@ -5,12 +5,14 @@ import pytest from ddtrace.appsec._iast import enable_iast_propagation -from ddtrace.contrib.internal.django.patch import patch +from ddtrace.appsec._iast._patch_modules import patch_iast +from ddtrace.contrib.internal.django.patch import patch as django_patch from ddtrace.trace import Pin from tests.appsec.iast.conftest import _end_iast_context_and_oce from tests.appsec.iast.conftest import _start_iast_context_and_oce from tests.utils import DummyTracer from tests.utils import TracerSpanContainer +from tests.utils import override_env from tests.utils import override_global_config @@ -25,20 +27,31 @@ def pytest_configure(): _iast_deduplication_enabled=False, _iast_request_sampling=100.0, ) - ): + ), override_env(dict(_DD_IAST_PATCH_MODULES="tests.appsec.integrations")): settings.DEBUG = False + patch_iast() + django_patch() enable_iast_propagation() - patch() django.setup() +@pytest.fixture +def debug_mode(): + from django.conf import settings + + original_debug = settings.DEBUG + settings.DEBUG = True + yield + settings.DEBUG = original_debug + + @pytest.fixture def tracer(): tracer = DummyTracer() # Patch Django and override tracer to be our test tracer pin = Pin.get_from(django) original_tracer = pin.tracer - Pin.override(django, tracer=tracer) + Pin._override(django, tracer=tracer) # Yield to our test yield tracer @@ -47,7 +60,7 @@ def tracer(): # Reset the tracer pinned to Django and unpatch # DEV: unable to properly unpatch and reload django app with each test # unpatch() - Pin.override(django, tracer=original_tracer) + Pin._override(django, tracer=original_tracer) @pytest.fixture diff --git a/tests/appsec/integrations/django_tests/django_app/settings.py b/tests/appsec/integrations/django_tests/django_app/settings.py index 836c7602c5f..cbd7eea9c25 100644 --- a/tests/appsec/integrations/django_tests/django_app/settings.py +++ b/tests/appsec/integrations/django_tests/django_app/settings.py @@ -40,7 +40,7 @@ { "BACKEND": "django.template.backends.django.DjangoTemplates", "DIRS": [ - os.path.join(BASE_DIR, "templates"), + os.path.join(BASE_DIR, "django_app", "templates"), ], "APP_DIRS": True, "OPTIONS": { diff --git a/tests/appsec/integrations/django_tests/django_app/templates/index.html b/tests/appsec/integrations/django_tests/django_app/templates/index.html new file mode 100644 index 00000000000..7135619ca9d --- /dev/null +++ 
b/tests/appsec/integrations/django_tests/django_app/templates/index.html @@ -0,0 +1,5 @@ +<html> +<body> +<p>Input: {{ user_input }}</p>
+</body> +</html> \ No newline at end of file diff --git a/tests/appsec/integrations/django_tests/django_app/templates/index_autoescape.html b/tests/appsec/integrations/django_tests/django_app/templates/index_autoescape.html new file mode 100644 index 00000000000..ef5f5a64ed4 --- /dev/null +++ b/tests/appsec/integrations/django_tests/django_app/templates/index_autoescape.html @@ -0,0 +1,7 @@ +<html> +<body> +{% autoescape on %} +    {{ user_input }} +{% endautoescape %}
+</body> +</html> diff --git a/tests/appsec/integrations/django_tests/django_app/templates/index_safe.html b/tests/appsec/integrations/django_tests/django_app/templates/index_safe.html new file mode 100644 index 00000000000..8bc39da3351 --- /dev/null +++ b/tests/appsec/integrations/django_tests/django_app/templates/index_safe.html @@ -0,0 +1,5 @@ +<html> +<body> +<p>Input: {{ user_input|safe }}</p> +</body> +</html> \ No newline at end of file
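The three templates above back the new Django XSS tests. Django autoescapes template variables by default; both the |safe filter and a mark_safe() value opt a variable out of that protection, which is what turns index.html (fed a mark_safe() value) and index_safe.html into XSS sinks. A minimal sketch of the difference, assuming only a stock Django install (illustrative, not part of this change):

    import django
    from django.conf import settings

    settings.configure(TEMPLATES=[{"BACKEND": "django.template.backends.django.DjangoTemplates"}])
    django.setup()

    from django.template import Context, Template
    from django.utils.safestring import mark_safe

    payload = "<script>alert('XSS')</script>"

    # Default autoescaping entity-encodes the payload, so there is nothing to report:
    Template("Input: {{ user_input }}").render(Context({"user_input": payload}))
    # -> "Input: &lt;script&gt;alert(&#x27;XSS&#x27;)&lt;/script&gt;"

    # |safe (or passing mark_safe(payload)) disables escaping for that variable,
    # which is the sink the tests below expect IAST to flag:
    Template("Input: {{ user_input|safe }}").render(Context({"user_input": payload}))
    # -> "Input: <script>alert('XSS')</script>"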
diff --git a/tests/appsec/integrations/django_tests/django_app/urls.py index dd1d069ad77..6a77fe99808 100644 --- a/tests/appsec/integrations/django_tests/django_app/urls.py +++ b/tests/appsec/integrations/django_tests/django_app/urls.py @@ -46,6 +46,11 @@ def shutdown(request): views.sqli_http_request_parameter_name_post, name="sqli_http_request_parameter_name_post", ), + handler( + "appsec/sqli_query_no_redacted/$", + views.sqli_query_no_redacted, + name="sqli_query_no_redacted", + ), handler( "appsec/sqli_http_request_header_name/$", views.sqli_http_request_header_name, @@ -73,6 +78,10 @@ def shutdown(request): handler("appsec/insecure-cookie/test_insecure/$", views.view_insecure_cookies_insecure), handler("appsec/insecure-cookie/test_secure/$", views.view_insecure_cookies_secure), handler("appsec/insecure-cookie/test_empty_cookie/$", views.view_insecure_cookies_empty), + handler("appsec/xss/$", views.xss_http_request_parameter_mark_safe), + handler("appsec/xss/secure/$", views.xss_secure), + handler("appsec/xss/safe/$", views.xss_http_request_parameter_template_safe), + handler("appsec/xss/autoescape/$", views.xss_http_request_parameter_autoescape), path( "appsec/sqli_http_path_parameter/<q_http_path_parameter>/", views.sqli_http_path_parameter, diff --git a/tests/appsec/integrations/django_tests/django_app/views.py index 693a9eab365..020f127753b 100644 --- a/tests/appsec/integrations/django_tests/django_app/views.py +++ b/tests/appsec/integrations/django_tests/django_app/views.py @@ -8,6 +8,8 @@ from django.db import connection from django.http import HttpResponse from django.http import JsonResponse +from django.shortcuts import render +from django.utils.safestring import mark_safe from ddtrace.appsec import _asm_request_context from ddtrace.appsec._iast._taint_tracking import OriginType @@ -68,6 +70,34 @@ def checkuser_view(request, user_id): return HttpResponse(status=200) +def xss_http_request_parameter_mark_safe(request): + user_input = request.GET.get("input", "") + + # label xss_http_request_parameter_mark_safe + return render(request, "index.html", {"user_input": mark_safe(user_input)}) + + +def xss_secure(request): + user_input = request.GET.get("input", "") + + # label xss_secure + return render(request, "index.html", {"user_input": user_input}) + + +def xss_http_request_parameter_template_safe(request): + user_input = request.GET.get("input", "") + + # label xss_http_request_parameter_template_safe + return render(request, "index_safe.html", {"user_input": user_input}) + + +def xss_http_request_parameter_autoescape(request): + user_input = request.GET.get("input", "") + + # label xss_http_request_parameter_autoescape + return render(request, "index_autoescape.html", {"user_input": user_input}) + + def sqli_http_request_parameter(request): import bcrypt from django.contrib.auth.hashers import BCryptSHA256PasswordHasher @@ -99,6 +129,14 @@ def sqli_http_request_parameter_name_post(request): return HttpResponse(request.META["HTTP_USER_AGENT"], status=200) +def sqli_query_no_redacted(request): + obj = request.GET["q"] + with connection.cursor() as cursor: + # label sqli_query_no_redacted + cursor.execute(f"SELECT * FROM {obj} ORDER BY name") + return HttpResponse("OK", status=200) + + def sqli_http_request_header_name(request): key = [x for x in request.META.keys() if x == "master"][0]
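The new sqli_query_no_redacted view interpolates a table name straight into the SQL text. Identifiers cannot be sent as bind parameters, and its companion test below asserts that the tainted table name stays unredacted in the reported evidence, since redaction targets sensitive values rather than identifiers. A hedged sketch of the usual mitigation for this pattern, an identifier allow-list (the table set and helper are hypothetical, not code from this change):

    import sqlite3

    ALLOWED_TABLES = {"sqlite_master"}  # hypothetical allow-list for this sketch

    def query_table(table_name):
        if table_name not in ALLOWED_TABLES:
            raise ValueError(f"unexpected table: {table_name!r}")
        con = sqlite3.connect(":memory:")
        # The identifier is vetted above; row values should still use "?" binding.
        return con.execute(f"SELECT * FROM {table_name} ORDER BY name").fetchall()

diff --git 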
a/tests/appsec/integrations/django_tests/test_django_appsec_iast.py b/tests/appsec/integrations/django_tests/test_django_appsec_iast.py index dd400c64df6..2999a286483 100644 --- a/tests/appsec/integrations/django_tests/test_django_appsec_iast.py +++ b/tests/appsec/integrations/django_tests/test_django_appsec_iast.py @@ -3,20 +3,16 @@ import pytest -from ddtrace.appsec._asm_request_context import start_context +from ddtrace.appsec._common_module_patches import patch_common_modules from ddtrace.appsec._constants import IAST -from ddtrace.appsec._iast import oce -from ddtrace.appsec._iast._patch_modules import patch_iast from ddtrace.appsec._iast.constants import VULN_CMDI from ddtrace.appsec._iast.constants import VULN_HEADER_INJECTION from ddtrace.appsec._iast.constants import VULN_INSECURE_COOKIE from ddtrace.appsec._iast.constants import VULN_SQL_INJECTION from ddtrace.appsec._iast.constants import VULN_STACKTRACE_LEAK -from ddtrace.ext import SpanTypes from ddtrace.internal.compat import urlencode from ddtrace.settings.asm import config as asm_config from tests.appsec.iast.iast_utils import get_line_and_hash -from tests.utils import override_env from tests.utils import override_global_config @@ -25,7 +21,9 @@ @pytest.fixture(autouse=True) def iast_context(): - with override_env({IAST.ENV: "True", IAST.ENV_REQUEST_SAMPLING: "100", "DD_IAST_DEDUPLICATION_ENABLED": "false"}): + with override_global_config( + dict(_iast_enabled=True, _iast_deduplication_enabled=False, _iast_request_sampling=100.0) + ): yield @@ -85,32 +83,28 @@ def _aux_appsec_get_root_span_with_exception( @pytest.mark.skipif(not asm_config._iast_supported, reason="Python version not supported by IAST") def test_django_weak_hash(client, test_spans, tracer): - with override_global_config(dict(_iast_enabled=True, _iast_deduplication_enabled=False)): - oce.reconfigure() - patch_iast({"weak_hash": True}) - root_span, _ = _aux_appsec_get_root_span(client, test_spans, tracer, url="/appsec/weak-hash/") - str_json = root_span.get_tag(IAST.JSON) - assert str_json is not None, "no JSON tag in root span" - vulnerability = json.loads(str_json)["vulnerabilities"][0] - assert vulnerability["location"]["path"].endswith(TEST_FILE) - assert vulnerability["evidence"]["value"] == "md5" + root_span, _ = _aux_appsec_get_root_span(client, test_spans, tracer, url="/appsec/weak-hash/") + str_json = root_span.get_tag(IAST.JSON) + assert str_json is not None, "no JSON tag in root span" + vulnerability = json.loads(str_json)["vulnerabilities"][0] + assert vulnerability["location"]["path"].endswith(TEST_FILE) + assert vulnerability["evidence"]["value"] == "md5" @pytest.mark.skipif(not asm_config._iast_supported, reason="Python version not supported by IAST") def test_django_tainted_user_agent_iast_enabled(client, test_spans, tracer): - with override_global_config(dict(_iast_enabled=True, _iast_deduplication_enabled=False)): - root_span, response = _aux_appsec_get_root_span( - client, - test_spans, - tracer, - payload=urlencode({"mytestingbody_key": "mytestingbody_value"}), - content_type="application/x-www-form-urlencoded", - url="/appsec/taint-checking-enabled/?q=aaa", - headers={"HTTP_USER_AGENT": "test/1.2.3"}, - ) + root_span, response = _aux_appsec_get_root_span( + client, + test_spans, + tracer, + payload=urlencode({"mytestingbody_key": "mytestingbody_value"}), + content_type="application/x-www-form-urlencoded", + url="/appsec/taint-checking-enabled/?q=aaa", + headers={"HTTP_USER_AGENT": "test/1.2.3"}, + ) - assert response.status_code == 200 - 
assert response.content == b"test/1.2.3" + assert response.status_code == 200 + assert response.content == b"test/1.2.3" @pytest.mark.parametrize( @@ -156,8 +150,6 @@ def test_django_view_with_exception(client, test_spans, tracer, payload, content @pytest.mark.skipif(not asm_config._iast_supported, reason="Python version not supported by IAST") def test_django_tainted_user_agent_iast_disabled(client, test_spans, tracer): with override_global_config(dict(_iast_enabled=False, _iast_deduplication_enabled=False)): - oce.reconfigure() - root_span, response = _aux_appsec_get_root_span( client, test_spans, @@ -176,192 +168,222 @@ def test_django_tainted_user_agent_iast_disabled(client, test_spans, tracer): @pytest.mark.django_db() @pytest.mark.skipif(not asm_config._iast_supported, reason="Python version not supported by IAST") -def test_django_tainted_user_agent_iast_enabled_sqli_http_request_parameter(client, test_spans, tracer): - with override_global_config( - dict(_iast_enabled=True, _iast_deduplication_enabled=False, _iast_request_sampling=100.0) - ): - root_span, response = _aux_appsec_get_root_span( - client, - test_spans, - tracer, - payload=urlencode({"mytestingbody_key": "mytestingbody_value"}), - content_type="application/x-www-form-urlencoded", - url="/appsec/sqli_http_request_parameter/?q=SELECT 1 FROM sqlite_master WHERE name='", - headers={"HTTP_USER_AGENT": "test/1.2.3"}, - ) +def test_django_sqli_http_request_parameter(client, test_spans, tracer): + root_span, response = _aux_appsec_get_root_span( + client, + test_spans, + tracer, + payload=urlencode({"mytestingbody_key": "mytestingbody_value"}), + content_type="application/x-www-form-urlencoded", + url="/appsec/sqli_http_request_parameter/?q=SELECT 1 FROM sqlite_master WHERE name='", + headers={"HTTP_USER_AGENT": "test/1.2.3"}, + ) - vuln_type = "SQL_INJECTION" + vuln_type = "SQL_INJECTION" - assert response.status_code == 200 - assert response.content == b"test/1.2.3" + assert response.status_code == 200 + assert response.content == b"test/1.2.3" - loaded = json.loads(root_span.get_tag(IAST.JSON)) + loaded = json.loads(root_span.get_tag(IAST.JSON)) - line, hash_value = get_line_and_hash("iast_enabled_sqli_http_request_parameter", vuln_type, filename=TEST_FILE) + line, hash_value = get_line_and_hash("iast_enabled_sqli_http_request_parameter", vuln_type, filename=TEST_FILE) - assert loaded["sources"] == [ - { - "name": "q", - "origin": "http.request.parameter", - "pattern": "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMN", - "redacted": True, - } - ] - - assert loaded["vulnerabilities"][0]["type"] == vuln_type - assert loaded["vulnerabilities"][0]["evidence"] == { - "valueParts": [ - {"source": 0, "value": "SELECT "}, - {"pattern": "h", "redacted": True, "source": 0}, - {"source": 0, "value": " FROM sqlite_master WHERE name='"}, - {"redacted": True}, - {"value": "'"}, - ] + assert loaded["sources"] == [ + { + "name": "q", + "origin": "http.request.parameter", + "pattern": "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMN", + "redacted": True, } - assert loaded["vulnerabilities"][0]["location"]["path"] == TEST_FILE - assert loaded["vulnerabilities"][0]["location"]["line"] == line - assert loaded["vulnerabilities"][0]["hash"] == hash_value + ] + + assert loaded["vulnerabilities"][0]["type"] == vuln_type + assert loaded["vulnerabilities"][0]["evidence"] == { + "valueParts": [ + {"source": 0, "value": "SELECT "}, + {"pattern": "h", "redacted": True, "source": 0}, + {"source": 0, "value": " FROM sqlite_master WHERE name='"}, + {"redacted": True}, 
+ {"value": "'"}, + ] + } + assert loaded["vulnerabilities"][0]["location"]["path"] == TEST_FILE + assert loaded["vulnerabilities"][0]["location"]["line"] == line + assert loaded["vulnerabilities"][0]["hash"] == hash_value @pytest.mark.django_db() @pytest.mark.skipif(not asm_config._iast_supported, reason="Python version not supported by IAST") def test_django_sqli_http_request_parameter_name_get(client, test_spans, tracer): - with override_global_config( - dict(_iast_enabled=True, _iast_deduplication_enabled=False, _iast_request_sampling=100.0) - ): - root_span, response = _aux_appsec_get_root_span( - client, - test_spans, - tracer, - content_type="application/x-www-form-urlencoded", - url="/appsec/sqli_http_request_parameter_name_get/?SELECT=unused", - headers={"HTTP_USER_AGENT": "test/1.2.3"}, - ) + root_span, response = _aux_appsec_get_root_span( + client, + test_spans, + tracer, + content_type="application/x-www-form-urlencoded", + url="/appsec/sqli_http_request_parameter_name_get/?SELECT=unused", + headers={"HTTP_USER_AGENT": "test/1.2.3"}, + ) - vuln_type = "SQL_INJECTION" + vuln_type = "SQL_INJECTION" - assert response.status_code == 200 - assert response.content == b"test/1.2.3" + assert response.status_code == 200 + assert response.content == b"test/1.2.3" - loaded = json.loads(root_span.get_tag(IAST.JSON)) + loaded = json.loads(root_span.get_tag(IAST.JSON)) - line, hash_value = get_line_and_hash( - "iast_enabled_sqli_http_request_parameter_name_get", vuln_type, filename=TEST_FILE - ) + line, hash_value = get_line_and_hash( + "iast_enabled_sqli_http_request_parameter_name_get", vuln_type, filename=TEST_FILE + ) - assert loaded["sources"] == [ + assert loaded["sources"] == [ + { + "name": "SELECT", + "origin": "http.request.parameter.name", + "value": "SELECT", + } + ] + + assert loaded["vulnerabilities"][0]["type"] == vuln_type + assert loaded["vulnerabilities"][0]["evidence"] == { + "valueParts": [ + {"source": 0, "value": "SELECT"}, + { + "value": " ", + }, { - "name": "SELECT", - "origin": "http.request.parameter.name", - "value": "SELECT", - } + "redacted": True, + }, ] - - assert loaded["vulnerabilities"][0]["type"] == vuln_type - assert loaded["vulnerabilities"][0]["evidence"] == { - "valueParts": [ - {"source": 0, "value": "SELECT"}, - { - "value": " ", - }, - { - "redacted": True, - }, - ] - } - assert loaded["vulnerabilities"][0]["location"]["path"] == TEST_FILE - assert loaded["vulnerabilities"][0]["location"]["line"] == line - assert loaded["vulnerabilities"][0]["hash"] == hash_value + } + assert loaded["vulnerabilities"][0]["location"]["path"] == TEST_FILE + assert loaded["vulnerabilities"][0]["location"]["line"] == line + assert loaded["vulnerabilities"][0]["hash"] == hash_value @pytest.mark.django_db() @pytest.mark.skipif(not asm_config._iast_supported, reason="Python version not supported by IAST") def test_django_sqli_http_request_parameter_name_post(client, test_spans, tracer): - with override_global_config( - dict(_iast_enabled=True, _iast_deduplication_enabled=False, _iast_request_sampling=100.0) - ): - root_span, response = _aux_appsec_get_root_span( - client, - test_spans, - tracer, - payload=urlencode({"SELECT": "unused"}), - content_type="application/x-www-form-urlencoded", - url="/appsec/sqli_http_request_parameter_name_post/", - headers={"HTTP_USER_AGENT": "test/1.2.3"}, - ) + root_span, response = _aux_appsec_get_root_span( + client, + test_spans, + tracer, + payload=urlencode({"SELECT": "unused"}), + content_type="application/x-www-form-urlencoded", + 
url="/appsec/sqli_http_request_parameter_name_post/", + headers={"HTTP_USER_AGENT": "test/1.2.3"}, + ) - vuln_type = "SQL_INJECTION" + vuln_type = "SQL_INJECTION" - assert response.status_code == 200 - assert response.content == b"test/1.2.3" + assert response.status_code == 200 + assert response.content == b"test/1.2.3" - loaded = json.loads(root_span.get_tag(IAST.JSON)) + loaded = json.loads(root_span.get_tag(IAST.JSON)) - line, hash_value = get_line_and_hash( - "iast_enabled_sqli_http_request_parameter_name_post", vuln_type, filename=TEST_FILE - ) + line, hash_value = get_line_and_hash( + "iast_enabled_sqli_http_request_parameter_name_post", vuln_type, filename=TEST_FILE + ) - assert loaded["sources"] == [ + assert loaded["sources"] == [ + { + "name": "SELECT", + "origin": "http.request.parameter.name", + "value": "SELECT", + } + ] + + assert loaded["vulnerabilities"][0]["type"] == vuln_type + assert loaded["vulnerabilities"][0]["evidence"] == { + "valueParts": [ + {"source": 0, "value": "SELECT"}, + { + "value": " ", + }, { - "name": "SELECT", - "origin": "http.request.parameter.name", - "value": "SELECT", - } + "redacted": True, + }, ] + } + assert loaded["vulnerabilities"][0]["location"]["path"] == TEST_FILE + assert loaded["vulnerabilities"][0]["location"]["line"] == line + assert loaded["vulnerabilities"][0]["hash"] == hash_value + - assert loaded["vulnerabilities"][0]["type"] == vuln_type - assert loaded["vulnerabilities"][0]["evidence"] == { - "valueParts": [ - {"source": 0, "value": "SELECT"}, - { - "value": " ", - }, - { - "redacted": True, - }, - ] +@pytest.mark.django_db() +@pytest.mark.skipif(not asm_config._iast_supported, reason="Python version not supported by IAST") +def test_django_sqli_query_no_redacted(client, test_spans, tracer): + root_span, response = _aux_appsec_get_root_span( + client, + test_spans, + tracer, + url="/appsec/sqli_query_no_redacted/?q=sqlite_master", + ) + + vuln_type = "SQL_INJECTION" + + assert response.status_code == 200 + assert response.content == b"OK" + + loaded = json.loads(root_span.get_tag(IAST.JSON)) + + line, hash_value = get_line_and_hash("sqli_query_no_redacted", vuln_type, filename=TEST_FILE) + + assert loaded["sources"] == [ + { + "name": "q", + "origin": "http.request.parameter", + "value": "sqlite_master", } - assert loaded["vulnerabilities"][0]["location"]["path"] == TEST_FILE - assert loaded["vulnerabilities"][0]["location"]["line"] == line - assert loaded["vulnerabilities"][0]["hash"] == hash_value + ] + + assert loaded["vulnerabilities"][0]["type"] == vuln_type + assert loaded["vulnerabilities"][0]["evidence"] == { + "valueParts": [ + {"value": "SELECT * FROM "}, + {"source": 0, "value": "sqlite_master"}, + {"value": " ORDER BY name"}, + ] + } + assert loaded["vulnerabilities"][0]["location"]["path"] == TEST_FILE + assert loaded["vulnerabilities"][0]["location"]["line"] == line + assert loaded["vulnerabilities"][0]["hash"] == hash_value @pytest.mark.django_db() @pytest.mark.skipif(not asm_config._iast_supported, reason="Python version not supported by IAST") def test_django_sqli_http_request_header_value(client, test_spans, tracer): - with override_global_config(dict(_iast_enabled=True, _iast_deduplication_enabled=False)): - root_span, response = _aux_appsec_get_root_span( - client, - test_spans, - tracer, - payload=urlencode({"mytestingbody_key": "mytestingbody_value"}), - content_type="application/x-www-form-urlencoded", - url="/appsec/sqli_http_request_header_value/", - headers={"HTTP_USER_AGENT": "master"}, - ) + 
root_span, response = _aux_appsec_get_root_span( + client, + test_spans, + tracer, + payload=urlencode({"mytestingbody_key": "mytestingbody_value"}), + content_type="application/x-www-form-urlencoded", + url="/appsec/sqli_http_request_header_value/", + headers={"HTTP_USER_AGENT": "master"}, + ) - assert response.status_code == 200 - assert response.content == b"master" + assert response.status_code == 200 + assert response.content == b"master" - loaded = json.loads(root_span.get_tag(IAST.JSON)) - - assert loaded["sources"] == [{"origin": "http.request.header", "name": "HTTP_USER_AGENT", "value": "master"}] - assert loaded["vulnerabilities"][0]["type"] == VULN_SQL_INJECTION - assert loaded["vulnerabilities"][0]["evidence"] == { - "valueParts": [ - {"value": "SELECT "}, - {"redacted": True}, - {"value": " FROM sqlite_"}, - {"source": 0, "value": "master"}, - ] - } + loaded = json.loads(root_span.get_tag(IAST.JSON)) - line, hash_value = get_line_and_hash( - "iast_enabled_sqli_http_request_header_value", VULN_SQL_INJECTION, filename=TEST_FILE - ) - assert loaded["vulnerabilities"][0]["location"]["path"] == TEST_FILE - assert loaded["vulnerabilities"][0]["location"]["line"] == line - assert loaded["vulnerabilities"][0]["hash"] == hash_value + assert loaded["sources"] == [{"origin": "http.request.header", "name": "HTTP_USER_AGENT", "value": "master"}] + assert loaded["vulnerabilities"][0]["type"] == VULN_SQL_INJECTION + assert loaded["vulnerabilities"][0]["evidence"] == { + "valueParts": [ + {"value": "SELECT "}, + {"redacted": True}, + {"value": " FROM sqlite_"}, + {"source": 0, "value": "master"}, + ] + } + + line, hash_value = get_line_and_hash( + "iast_enabled_sqli_http_request_header_value", VULN_SQL_INJECTION, filename=TEST_FILE + ) + assert loaded["vulnerabilities"][0]["location"]["path"] == TEST_FILE + assert loaded["vulnerabilities"][0]["location"]["line"] == line + assert loaded["vulnerabilities"][0]["hash"] == hash_value @pytest.mark.django_db() @@ -387,39 +409,38 @@ def test_django_iast_disabled_sqli_http_request_header_value(client, test_spans, @pytest.mark.django_db() @pytest.mark.skipif(not asm_config._iast_supported, reason="Python version not supported by IAST") def test_django_sqli_http_request_header_name(client, test_spans, tracer): - with override_global_config(dict(_iast_enabled=True, _iast_deduplication_enabled=False)): - root_span, response = _aux_appsec_get_root_span( - client, - test_spans, - tracer, - payload=urlencode({"mytestingbody_key": "mytestingbody_value"}), - content_type="application/x-www-form-urlencoded", - url="/appsec/sqli_http_request_header_name/", - headers={"master": "test/1.2.3"}, - ) + root_span, response = _aux_appsec_get_root_span( + client, + test_spans, + tracer, + payload=urlencode({"mytestingbody_key": "mytestingbody_value"}), + content_type="application/x-www-form-urlencoded", + url="/appsec/sqli_http_request_header_name/", + headers={"master": "test/1.2.3"}, + ) - assert response.status_code == 200 - assert response.content == b"test/1.2.3" + assert response.status_code == 200 + assert response.content == b"test/1.2.3" - loaded = json.loads(root_span.get_tag(IAST.JSON)) - - assert loaded["sources"] == [{"origin": "http.request.header.name", "name": "master", "value": "master"}] - assert loaded["vulnerabilities"][0]["type"] == VULN_SQL_INJECTION - assert loaded["vulnerabilities"][0]["evidence"] == { - "valueParts": [ - {"value": "SELECT "}, - {"redacted": True}, - {"value": " FROM sqlite_"}, - {"value": "master", "source": 0}, - ] - } + loaded 
= json.loads(root_span.get_tag(IAST.JSON)) - line, hash_value = get_line_and_hash( - "iast_enabled_sqli_http_request_header_name", VULN_SQL_INJECTION, filename=TEST_FILE - ) - assert loaded["vulnerabilities"][0]["location"]["path"] == TEST_FILE - assert loaded["vulnerabilities"][0]["location"]["line"] == line - assert loaded["vulnerabilities"][0]["hash"] == hash_value + assert loaded["sources"] == [{"origin": "http.request.header.name", "name": "master", "value": "master"}] + assert loaded["vulnerabilities"][0]["type"] == VULN_SQL_INJECTION + assert loaded["vulnerabilities"][0]["evidence"] == { + "valueParts": [ + {"value": "SELECT "}, + {"redacted": True}, + {"value": " FROM sqlite_"}, + {"value": "master", "source": 0}, + ] + } + + line, hash_value = get_line_and_hash( + "iast_enabled_sqli_http_request_header_name", VULN_SQL_INJECTION, filename=TEST_FILE + ) + assert loaded["vulnerabilities"][0]["location"]["path"] == TEST_FILE + assert loaded["vulnerabilities"][0]["location"]["line"] == line + assert loaded["vulnerabilities"][0]["hash"] == hash_value @pytest.mark.django_db() @@ -498,41 +519,38 @@ def test_django_iast_disabled_sqli_http_path_parameter(client, test_spans, trace @pytest.mark.django_db() @pytest.mark.skipif(not asm_config._iast_supported, reason="Python version not supported by IAST") def test_django_sqli_http_cookies_name(client, test_spans, tracer): - with override_global_config(dict(_iast_enabled=True, _iast_deduplication_enabled=False)): - root_span, response = _aux_appsec_get_root_span( - client, - test_spans, - tracer, - url="/appsec/sqli_http_request_cookie_name/", - cookies={"master": "test/1.2.3"}, - ) - assert response.status_code == 200 - assert response.content == b"test/1.2.3" + root_span, response = _aux_appsec_get_root_span( + client, + test_spans, + tracer, + url="/appsec/sqli_http_request_cookie_name/", + cookies={"master": "test/1.2.3"}, + ) + assert response.status_code == 200 + assert response.content == b"test/1.2.3" - loaded = json.loads(root_span.get_tag(IAST.JSON)) + loaded = json.loads(root_span.get_tag(IAST.JSON)) - vulnerability = False - for vuln in loaded["vulnerabilities"]: - if vuln["type"] == VULN_SQL_INJECTION: - vulnerability = vuln + vulnerability = False + for vuln in loaded["vulnerabilities"]: + if vuln["type"] == VULN_SQL_INJECTION: + vulnerability = vuln - assert vulnerability, "No {} reported".format(VULN_SQL_INJECTION) + assert vulnerability, "No {} reported".format(VULN_SQL_INJECTION) - assert loaded["sources"] == [{"origin": "http.request.cookie.name", "name": "master", "value": "master"}] - assert vulnerability["evidence"] == { - "valueParts": [ - {"value": "SELECT "}, - {"redacted": True}, - {"value": " FROM sqlite_"}, - {"value": "master", "source": 0}, - ] - } - line, hash_value = get_line_and_hash( - "iast_enabled_sqli_http_cookies_name", VULN_SQL_INJECTION, filename=TEST_FILE - ) - assert vulnerability["location"]["path"] == TEST_FILE - assert vulnerability["location"]["line"] == line - assert vulnerability["hash"] == hash_value + assert loaded["sources"] == [{"origin": "http.request.cookie.name", "name": "master", "value": "master"}] + assert vulnerability["evidence"] == { + "valueParts": [ + {"value": "SELECT "}, + {"redacted": True}, + {"value": " FROM sqlite_"}, + {"value": "master", "source": 0}, + ] + } + line, hash_value = get_line_and_hash("iast_enabled_sqli_http_cookies_name", VULN_SQL_INJECTION, filename=TEST_FILE) + assert vulnerability["location"]["path"] == TEST_FILE + assert vulnerability["location"]["line"] == 
line + assert vulnerability["hash"] == hash_value @pytest.mark.django_db() @@ -556,43 +574,40 @@ def test_django_iast_disabled_sqli_http_cookies_name(client, test_spans, tracer) @pytest.mark.django_db() @pytest.mark.skipif(not asm_config._iast_supported, reason="Python version not supported by IAST") def test_django_sqli_http_cookies_value(client, test_spans, tracer): - with override_global_config(dict(_iast_enabled=True, _iast_deduplication_enabled=False)): - root_span, response = _aux_appsec_get_root_span( - client, - test_spans, - tracer, - url="/appsec/sqli_http_request_cookie_value/", - cookies={"master": "master"}, - ) - assert response.status_code == 200 - assert response.content == b"master" + root_span, response = _aux_appsec_get_root_span( + client, + test_spans, + tracer, + url="/appsec/sqli_http_request_cookie_value/", + cookies={"master": "master"}, + ) + assert response.status_code == 200 + assert response.content == b"master" - loaded = json.loads(root_span.get_tag(IAST.JSON)) + loaded = json.loads(root_span.get_tag(IAST.JSON)) - vulnerability = False - for vuln in loaded["vulnerabilities"]: - if vuln["type"] == VULN_SQL_INJECTION: - vulnerability = vuln + vulnerability = False + for vuln in loaded["vulnerabilities"]: + if vuln["type"] == VULN_SQL_INJECTION: + vulnerability = vuln - assert vulnerability, "No {} reported".format(VULN_SQL_INJECTION) - assert loaded["sources"] == [{"origin": "http.request.cookie.value", "name": "master", "value": "master"}] - assert vulnerability["type"] == "SQL_INJECTION" + assert vulnerability, "No {} reported".format(VULN_SQL_INJECTION) + assert loaded["sources"] == [{"origin": "http.request.cookie.value", "name": "master", "value": "master"}] + assert vulnerability["type"] == "SQL_INJECTION" - assert vulnerability["evidence"] == { - "valueParts": [ - {"value": "SELECT "}, - {"redacted": True}, - {"value": " FROM sqlite_"}, - {"value": "master", "source": 0}, - ] - } + assert vulnerability["evidence"] == { + "valueParts": [ + {"value": "SELECT "}, + {"redacted": True}, + {"value": " FROM sqlite_"}, + {"value": "master", "source": 0}, + ] + } - line, hash_value = get_line_and_hash( - "iast_enabled_sqli_http_cookies_value", VULN_SQL_INJECTION, filename=TEST_FILE - ) - assert vulnerability["location"]["line"] == line - assert vulnerability["location"]["path"] == TEST_FILE - assert vulnerability["hash"] == hash_value + line, hash_value = get_line_and_hash("iast_enabled_sqli_http_cookies_value", VULN_SQL_INJECTION, filename=TEST_FILE) + assert vulnerability["location"]["line"] == line + assert vulnerability["location"]["path"] == TEST_FILE + assert vulnerability["hash"] == hash_value @pytest.mark.django_db() @@ -623,35 +638,34 @@ def test_django_iast_disabled_sqli_http_cookies_value(client, test_spans, tracer @pytest.mark.django_db() @pytest.mark.skipif(not asm_config._iast_supported, reason="Python version not supported by IAST") def test_django_sqli_http_body(client, test_spans, tracer, payload, content_type): - with override_global_config(dict(_iast_enabled=True, _iast_deduplication_enabled=False)): - root_span, response = _aux_appsec_get_root_span( - client, - test_spans, - tracer, - url="/appsec/sqli_http_request_body/", - payload=payload, - content_type=content_type, - ) - loaded = json.loads(root_span.get_tag(IAST.JSON)) - - line, hash_value = get_line_and_hash("iast_enabled_sqli_http_body", VULN_SQL_INJECTION, filename=TEST_FILE) - - assert loaded["sources"] == [{"origin": "http.request.body", "name": "http.request.body", "value": 
"master"}] - assert loaded["vulnerabilities"][0]["type"] == VULN_SQL_INJECTION - assert loaded["vulnerabilities"][0]["hash"] == hash_value - assert loaded["vulnerabilities"][0]["evidence"] == { - "valueParts": [ - {"value": "SELECT "}, - {"redacted": True}, - {"value": " FROM sqlite_"}, - {"value": "master", "source": 0}, - ] - } - assert loaded["vulnerabilities"][0]["location"]["line"] == line - assert loaded["vulnerabilities"][0]["location"]["path"] == TEST_FILE + root_span, response = _aux_appsec_get_root_span( + client, + test_spans, + tracer, + url="/appsec/sqli_http_request_body/", + payload=payload, + content_type=content_type, + ) + loaded = json.loads(root_span.get_tag(IAST.JSON)) - assert response.status_code == 200 - assert response.content == b"master" + line, hash_value = get_line_and_hash("iast_enabled_sqli_http_body", VULN_SQL_INJECTION, filename=TEST_FILE) + + assert loaded["sources"] == [{"origin": "http.request.body", "name": "http.request.body", "value": "master"}] + assert loaded["vulnerabilities"][0]["type"] == VULN_SQL_INJECTION + assert loaded["vulnerabilities"][0]["hash"] == hash_value + assert loaded["vulnerabilities"][0]["evidence"] == { + "valueParts": [ + {"value": "SELECT "}, + {"redacted": True}, + {"value": " FROM sqlite_"}, + {"value": "master", "source": 0}, + ] + } + assert loaded["vulnerabilities"][0]["location"]["line"] == line + assert loaded["vulnerabilities"][0]["location"]["path"] == TEST_FILE + + assert response.status_code == 200 + assert response.content == b"master" @pytest.mark.parametrize( @@ -718,244 +732,314 @@ def test_django_iast_disabled_sqli_http_body(client, test_spans, tracer): @pytest.mark.skipif(not asm_config._iast_supported, reason="Python version not supported by IAST") def test_django_querydict(client, test_spans, tracer): - with override_global_config(dict(_iast_enabled=True)): - root_span, response = _aux_appsec_get_root_span( - client, - test_spans, - tracer, - url="/appsec/validate_querydict/?x=1&y=2&x=3", - ) + root_span, response = _aux_appsec_get_root_span( + client, + test_spans, + tracer, + url="/appsec/validate_querydict/?x=1&y=2&x=3", + ) - assert root_span.get_tag(IAST.JSON) is None - assert response.status_code == 200 - assert ( - response.content == b"x=['1', '3'], all=[('x', ['1', '3']), ('y', ['2'])]," - b" keys=['x', 'y'], urlencode=x=1&x=3&y=2" - ) + assert root_span.get_tag(IAST.JSON) is None + assert response.status_code == 200 + assert ( + response.content == b"x=['1', '3'], all=[('x', ['1', '3']), ('y', ['2'])]," + b" keys=['x', 'y'], urlencode=x=1&x=3&y=2" + ) @pytest.mark.skipif(not asm_config._iast_supported, reason="Python version not supported by IAST") def test_django_command_injection(client, test_spans, tracer): - with override_global_config(dict(_iast_enabled=True, _iast_deduplication_enabled=False)): - oce.reconfigure() - patch_iast({"command_injection": True}) - from ddtrace.appsec._common_module_patches import patch_common_modules - - patch_common_modules() - root_span, _ = _aux_appsec_get_root_span( - client, - test_spans, - tracer, - url="/appsec/command-injection/", - payload="master", - content_type="application/json", - ) + patch_common_modules() + root_span, _ = _aux_appsec_get_root_span( + client, + test_spans, + tracer, + url="/appsec/command-injection/", + payload="master", + content_type="application/json", + ) - loaded = json.loads(root_span.get_tag(IAST.JSON)) + loaded = json.loads(root_span.get_tag(IAST.JSON)) - line, hash_value = get_line_and_hash("iast_command_injection", VULN_CMDI, 
filename=TEST_FILE) + line, hash_value = get_line_and_hash("iast_command_injection", VULN_CMDI, filename=TEST_FILE) - assert loaded["sources"] == [ - {"name": "http.request.body", "origin": "http.request.body", "pattern": "abcdef", "redacted": True} - ] - assert loaded["vulnerabilities"][0]["type"] == VULN_CMDI - assert loaded["vulnerabilities"][0]["hash"] == hash_value - assert loaded["vulnerabilities"][0]["evidence"] == { - "valueParts": [{"value": "dir "}, {"redacted": True}, {"pattern": "abcdef", "redacted": True, "source": 0}] - } - assert loaded["vulnerabilities"][0]["location"]["line"] == line - assert loaded["vulnerabilities"][0]["location"]["path"] == TEST_FILE + assert loaded["sources"] == [ + {"name": "http.request.body", "origin": "http.request.body", "pattern": "abcdef", "redacted": True} + ] + assert loaded["vulnerabilities"][0]["type"] == VULN_CMDI + assert loaded["vulnerabilities"][0]["hash"] == hash_value + assert loaded["vulnerabilities"][0]["evidence"] == { + "valueParts": [{"value": "dir "}, {"redacted": True}, {"pattern": "abcdef", "redacted": True, "source": 0}] + } + assert loaded["vulnerabilities"][0]["location"]["line"] == line + assert loaded["vulnerabilities"][0]["location"]["path"] == TEST_FILE @pytest.mark.skipif(not asm_config._iast_supported, reason="Python version not supported by IAST") def test_django_header_injection(client, test_spans, tracer): - with override_global_config(dict(_iast_enabled=True, _iast_deduplication_enabled=False)): - oce.reconfigure() - patch_iast({"header_injection": True}) - root_span, _ = _aux_appsec_get_root_span( - client, - test_spans, - tracer, - url="/appsec/header-injection/", - payload="master", - content_type="application/json", - ) + root_span, _ = _aux_appsec_get_root_span( + client, + test_spans, + tracer, + url="/appsec/header-injection/", + payload="master", + content_type="application/json", + ) - loaded = json.loads(root_span.get_tag(IAST.JSON)) + loaded = json.loads(root_span.get_tag(IAST.JSON)) - line, hash_value = get_line_and_hash("iast_header_injection", VULN_HEADER_INJECTION, filename=TEST_FILE) + line, hash_value = get_line_and_hash("iast_header_injection", VULN_HEADER_INJECTION, filename=TEST_FILE) - assert loaded["sources"] == [{"origin": "http.request.body", "name": "http.request.body", "value": "master"}] - assert loaded["vulnerabilities"][0]["type"] == VULN_HEADER_INJECTION - assert loaded["vulnerabilities"][0]["hash"] == hash_value - assert loaded["vulnerabilities"][0]["evidence"] == { - "valueParts": [{"value": "Header-Injection: "}, {"source": 0, "value": "master"}] - } - assert loaded["vulnerabilities"][0]["location"]["line"] == line - assert loaded["vulnerabilities"][0]["location"]["path"] == TEST_FILE + assert loaded["sources"] == [{"origin": "http.request.body", "name": "http.request.body", "value": "master"}] + assert loaded["vulnerabilities"][0]["type"] == VULN_HEADER_INJECTION + assert loaded["vulnerabilities"][0]["hash"] == hash_value + assert loaded["vulnerabilities"][0]["evidence"] == { + "valueParts": [{"value": "Header-Injection: "}, {"source": 0, "value": "master"}] + } + assert loaded["vulnerabilities"][0]["location"]["line"] == line + assert loaded["vulnerabilities"][0]["location"]["path"] == TEST_FILE @pytest.mark.skipif(not asm_config._iast_supported, reason="Python version not supported by IAST") def test_django_insecure_cookie(client, test_spans, tracer): - with override_global_config(dict(_iast_enabled=True, _iast_deduplication_enabled=False)): - oce.reconfigure() - root_span, _ = 
_aux_appsec_get_root_span( - client, - test_spans, - tracer, - url="/appsec/insecure-cookie/test_insecure/", - ) + root_span, _ = _aux_appsec_get_root_span( + client, + test_spans, + tracer, + url="/appsec/insecure-cookie/test_insecure/", + ) - assert root_span.get_metric(IAST.ENABLED) == 1.0 + assert root_span.get_metric(IAST.ENABLED) == 1.0 - loaded = json.loads(root_span.get_tag(IAST.JSON)) - assert loaded["sources"] == [] - assert len(loaded["vulnerabilities"]) == 1 - vulnerability = loaded["vulnerabilities"][0] - assert vulnerability["type"] == VULN_INSECURE_COOKIE - assert vulnerability["evidence"] == {"valueParts": [{"value": "insecure"}]} - assert "path" not in vulnerability["location"].keys() - assert "line" not in vulnerability["location"].keys() - assert vulnerability["location"]["spanId"] - assert vulnerability["hash"] + loaded = json.loads(root_span.get_tag(IAST.JSON)) + assert loaded["sources"] == [] + assert len(loaded["vulnerabilities"]) == 1 + vulnerability = loaded["vulnerabilities"][0] + assert vulnerability["type"] == VULN_INSECURE_COOKIE + assert vulnerability["evidence"] == {"valueParts": [{"value": "insecure"}]} + assert "path" not in vulnerability["location"].keys() + assert "line" not in vulnerability["location"].keys() + assert vulnerability["location"]["spanId"] + assert vulnerability["hash"] @pytest.mark.skipif(not asm_config._iast_supported, reason="Python version not supported by IAST") def test_django_insecure_cookie_secure(client, test_spans, tracer): - with override_global_config(dict(_iast_enabled=True, _iast_deduplication_enabled=False)): - oce.reconfigure() - root_span, _ = _aux_appsec_get_root_span( - client, - test_spans, - tracer, - url="/appsec/insecure-cookie/test_secure/", - ) + root_span, _ = _aux_appsec_get_root_span( + client, + test_spans, + tracer, + url="/appsec/insecure-cookie/test_secure/", + ) - assert root_span.get_metric(IAST.ENABLED) == 1.0 + assert root_span.get_metric(IAST.ENABLED) == 1.0 - assert root_span.get_tag(IAST.JSON) is None + assert root_span.get_tag(IAST.JSON) is None @pytest.mark.skipif(not asm_config._iast_supported, reason="Python version not supported by IAST") def test_django_insecure_cookie_empty_cookie(client, test_spans, tracer): - with override_global_config(dict(_iast_enabled=True, _iast_deduplication_enabled=False)): - oce.reconfigure() - root_span, _ = _aux_appsec_get_root_span( - client, - test_spans, - tracer, - url="/appsec/insecure-cookie/test_empty_cookie/", - ) + root_span, _ = _aux_appsec_get_root_span( + client, + test_spans, + tracer, + url="/appsec/insecure-cookie/test_empty_cookie/", + ) - assert root_span.get_metric(IAST.ENABLED) == 1.0 + assert root_span.get_metric(IAST.ENABLED) == 1.0 - assert root_span.get_tag(IAST.JSON) is None + assert root_span.get_tag(IAST.JSON) is None @pytest.mark.skipif(not asm_config._iast_supported, reason="Python version not supported by IAST") def test_django_insecure_cookie_2_insecure_1_secure(client, test_spans, tracer): - with override_global_config(dict(_iast_enabled=True, _iast_deduplication_enabled=False)): - oce.reconfigure() - root_span, _ = _aux_appsec_get_root_span( - client, - test_spans, - tracer, - url="/appsec/insecure-cookie/test_insecure_2_1/", - ) + root_span, _ = _aux_appsec_get_root_span( + client, + test_spans, + tracer, + url="/appsec/insecure-cookie/test_insecure_2_1/", + ) - assert root_span.get_metric(IAST.ENABLED) == 1.0 + assert root_span.get_metric(IAST.ENABLED) == 1.0 - loaded = json.loads(root_span.get_tag(IAST.JSON)) - assert 
loaded["sources"] == [] - assert len(loaded["vulnerabilities"]) == 2 + loaded = json.loads(root_span.get_tag(IAST.JSON)) + assert loaded["sources"] == [] + assert len(loaded["vulnerabilities"]) == 2 @pytest.mark.skipif(not asm_config._iast_supported, reason="Python version not supported by IAST") def test_django_insecure_cookie_special_characters(client, test_spans, tracer): - with override_global_config(dict(_iast_enabled=True, _iast_deduplication_enabled=False)): - oce.reconfigure() - root_span, _ = _aux_appsec_get_root_span( - client, - test_spans, - tracer, - url="/appsec/insecure-cookie/test_insecure_special/", - ) + root_span, _ = _aux_appsec_get_root_span( + client, + test_spans, + tracer, + url="/appsec/insecure-cookie/test_insecure_special/", + ) - assert root_span.get_metric(IAST.ENABLED) == 1.0 + assert root_span.get_metric(IAST.ENABLED) == 1.0 - loaded = json.loads(root_span.get_tag(IAST.JSON)) - assert loaded["sources"] == [] - assert len(loaded["vulnerabilities"]) == 1 - vulnerability = loaded["vulnerabilities"][0] - assert vulnerability["type"] == VULN_INSECURE_COOKIE - assert vulnerability["evidence"] == {"valueParts": [{"value": "insecure"}]} - assert "path" not in vulnerability["location"].keys() - assert "line" not in vulnerability["location"].keys() - assert vulnerability["location"]["spanId"] - assert vulnerability["hash"] + loaded = json.loads(root_span.get_tag(IAST.JSON)) + assert loaded["sources"] == [] + assert len(loaded["vulnerabilities"]) == 1 + vulnerability = loaded["vulnerabilities"][0] + assert vulnerability["type"] == VULN_INSECURE_COOKIE + assert vulnerability["evidence"] == {"valueParts": [{"value": "insecure"}]} + assert "path" not in vulnerability["location"].keys() + assert "line" not in vulnerability["location"].keys() + assert vulnerability["location"]["spanId"] + assert vulnerability["hash"] @pytest.mark.skipif(not asm_config._iast_supported, reason="Python version not supported by IAST") def test_django_stacktrace_leak(client, test_spans, tracer): - with override_global_config(dict(_iast_enabled=True, _deduplication_enabled=False)): - oce.reconfigure() - root_span, _ = _aux_appsec_get_root_span( - client, - test_spans, - tracer, - url="/appsec/stacktrace_leak/", - ) + root_span, _ = _aux_appsec_get_root_span( + client, + test_spans, + tracer, + url="/appsec/stacktrace_leak/", + ) + + assert root_span.get_metric(IAST.ENABLED) == 1.0 + + loaded = json.loads(root_span.get_tag(IAST.JSON)) + assert loaded["sources"] == [] + assert len(loaded["vulnerabilities"]) == 1 + vulnerability = loaded["vulnerabilities"][0] + assert vulnerability["type"] == VULN_STACKTRACE_LEAK + assert vulnerability["evidence"] == { + "valueParts": [ + {"value": 'Module: ".home.foobaruser.sources.minimal-django-example.app.py"\nException: IndexError'} + ] + } + assert vulnerability["hash"] - assert root_span.get_metric(IAST.ENABLED) == 1.0 - - loaded = json.loads(root_span.get_tag(IAST.JSON)) - assert loaded["sources"] == [] - assert len(loaded["vulnerabilities"]) == 1 - vulnerability = loaded["vulnerabilities"][0] - assert vulnerability["type"] == VULN_STACKTRACE_LEAK - assert vulnerability["evidence"] == { - "valueParts": [ - {"value": 'Module: ".home.foobaruser.sources.minimal-django-example.app.py"\nException: IndexError'} - ] + +def test_django_stacktrace_from_technical_500_response(client, test_spans, tracer, debug_mode): + root_span, response = _aux_appsec_get_root_span( + client, + test_spans, + tracer, + url="/appsec/stacktrace_leak_500/", + content_type="text/html", + ) 
+ + assert response.status_code == 500, "Expected a 500 status code" + assert root_span.get_metric(IAST.ENABLED) == 1.0 + + loaded = json.loads(root_span.get_tag(IAST.JSON)) + # technical_500_response reports an XSS as well + vulnerability = [vln for vln in loaded["vulnerabilities"] if vln["type"] == VULN_STACKTRACE_LEAK][0] + assert vulnerability["evidence"] == { + "valueParts": [ + {"value": "Module: tests.appsec.integrations.django_tests.django_app.views\nException: Exception"} + ] + } + assert vulnerability["hash"] + + +def test_django_xss(client, test_spans, tracer): + root_span, response = _aux_appsec_get_root_span( + client, + test_spans, + tracer, + url="/appsec/xss/?input=<script>alert('XSS')</script>", + ) + + vuln_type = "XSS" + + assert response.status_code == 200 + assert response.content == b"<html>\n<body>\n<p>Input: <script>alert('XSS')</script></p>\n</body>\n</html>" + + loaded = json.loads(root_span.get_tag(IAST.JSON)) + + line, hash_value = get_line_and_hash("xss_http_request_parameter_mark_safe", vuln_type, filename=TEST_FILE) + + assert loaded["sources"] == [ + { + "name": "input", + "origin": "http.request.parameter", + "value": "<script>alert('XSS')</script>", + } + ] + + assert loaded["vulnerabilities"][0]["type"] == vuln_type + assert loaded["vulnerabilities"][0]["evidence"] == { + "valueParts": [ + {"source": 0, "value": "<script>alert('XSS')</script>"}, + ] + } + assert loaded["vulnerabilities"][0]["location"]["path"] == TEST_FILE + assert loaded["vulnerabilities"][0]["location"]["line"] == line + assert loaded["vulnerabilities"][0]["hash"] == hash_value + + +def test_django_xss_safe_template_tag(client, test_spans, tracer): + root_span, response = _aux_appsec_get_root_span( + client, + test_spans, + tracer, + url="/appsec/xss/safe/?input=<script>alert('XSS')</script>", + ) + + vuln_type = "XSS" + + assert response.status_code == 200 + assert response.content == b"<html>\n<body>\n<p>Input: <script>alert('XSS')</script></p>\n</body>\n</html>" + + loaded = json.loads(root_span.get_tag(IAST.JSON)) + + line, hash_value = get_line_and_hash("xss_http_request_parameter_template_safe", vuln_type, filename=TEST_FILE) + + assert loaded["sources"] == [ + { + "name": "input", + "origin": "http.request.parameter", + "value": "<script>alert('XSS')</script>", + } + ] + + assert loaded["vulnerabilities"][0]["type"] == vuln_type + assert loaded["vulnerabilities"][0]["evidence"] == { + "valueParts": [ + {"source": 0, "value": "<script>alert('XSS')</script>"}, + ] + } + assert loaded["vulnerabilities"][0]["location"]["path"] == TEST_FILE + assert loaded["vulnerabilities"][0]["location"]["line"] == line + assert loaded["vulnerabilities"][0]["hash"] == hash_value + + +def test_django_xss_autoescape(client, test_spans, tracer): + root_span, response = _aux_appsec_get_root_span( + client, + test_spans, + tracer, + url="/appsec/xss/autoescape/?input=<script>alert('XSS')</script>", + ) + + assert response.status_code == 200 + assert ( + response.content + == b"<html>\n<body>\n\n    &lt;script&gt;alert(&#x27;XSS&#x27;)&lt;/script&gt;\n\n</body>\n</html>\n" + ), f"Error. content is {response.content}" + + loaded = root_span.get_tag(IAST.JSON) + assert loaded is None + + +def test_django_xss_secure(client, test_spans, tracer): + root_span, response = _aux_appsec_get_root_span( + client, + test_spans, + tracer, + url="/appsec/xss/secure/?input=<script>alert('XSS')</script>", + ) + + assert response.status_code == 200 + assert ( + response.content + == b"<html>\n<body>\n<p>Input: &lt;script&gt;alert(&#x27;XSS&#x27;)&lt;/script&gt;</p>\n</body>\n</html>" + ) + + loaded = root_span.get_tag(IAST.JSON) + assert loaded is None
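For reference, the entity forms asserted above (&lt;, &gt;, &#x27;) are exactly what Django's escape() emits, which is why the autoescape and secure variants return the encoded payload and report no vulnerability:

    from django.utils.html import escape

    assert (
        escape("<script>alert('XSS')</script>")
        == "&lt;script&gt;alert(&#x27;XSS&#x27;)&lt;/script&gt;"
    )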
diff --git a/tests/contrib/yaaredis/__init__.py b/tests/appsec/integrations/fastapi_tests/__init__.py similarity index 100% rename from tests/contrib/yaaredis/__init__.py rename to tests/appsec/integrations/fastapi_tests/__init__.py diff --git a/tests/appsec/integrations/fastapi_tests/app.py b/tests/appsec/integrations/fastapi_tests/app.py new file mode 100644 index 00000000000..40df0036fd2 --- /dev/null +++ b/tests/appsec/integrations/fastapi_tests/app.py @@ -0,0 +1,6 @@ +from fastapi import FastAPI + + +def get_app(): + app = FastAPI() + return app diff --git a/tests/appsec/integrations/fastapi_tests/conftest.py b/tests/appsec/integrations/fastapi_tests/conftest.py new file mode 100644 index 00000000000..5b235f5e6dd --- /dev/null +++ b/tests/appsec/integrations/fastapi_tests/conftest.py @@ -0,0 +1,41 @@ +from fastapi.testclient import TestClient +import pytest + +import ddtrace +from ddtrace.contrib.internal.fastapi.patch import patch as fastapi_patch +from ddtrace.contrib.internal.fastapi.patch import unpatch as fastapi_unpatch +from tests.utils import DummyTracer +from tests.utils import TracerSpanContainer + +from . import app + + +@pytest.fixture +def tracer(): + original_tracer = ddtrace.tracer + tracer = DummyTracer() + + ddtrace.tracer = tracer + fastapi_patch() + yield tracer + ddtrace.tracer = original_tracer + fastapi_unpatch() + + +@pytest.fixture +def test_spans(tracer): + container = TracerSpanContainer(tracer) + yield container + container.reset() + + +@pytest.fixture +def fastapi_application(tracer): + application = app.get_app() + yield application + + +@pytest.fixture +def client(tracer, fastapi_application): + with TestClient(fastapi_application) as test_client: + yield test_client diff --git a/tests/contrib/fastapi/test_fastapi_appsec.py b/tests/appsec/integrations/fastapi_tests/test_fastapi_appsec.py similarity index 100% rename from tests/contrib/fastapi/test_fastapi_appsec.py rename to tests/appsec/integrations/fastapi_tests/test_fastapi_appsec.py diff --git a/tests/contrib/fastapi/test_fastapi_appsec_iast.py b/tests/appsec/integrations/fastapi_tests/test_fastapi_appsec_iast.py similarity index 96% rename from tests/contrib/fastapi/test_fastapi_appsec_iast.py rename to tests/appsec/integrations/fastapi_tests/test_fastapi_appsec_iast.py index 23174d81abf..ee0b4e041d1 100644 --- a/tests/contrib/fastapi/test_fastapi_appsec_iast.py +++ b/tests/appsec/integrations/fastapi_tests/test_fastapi_appsec_iast.py @@ -27,6 +27,7 @@ from ddtrace.appsec._iast.constants import VULN_NO_SAMESITE_COOKIE from ddtrace.appsec._iast.constants import VULN_SQL_INJECTION from ddtrace.appsec._iast.constants import VULN_STACKTRACE_LEAK +from ddtrace.appsec._iast.constants import VULN_XSS from ddtrace.contrib.internal.fastapi.patch import patch as patch_fastapi from ddtrace.contrib.internal.sqlite3.patch import patch as patch_sqlite_sqli from tests.appsec.iast.iast_utils import get_line_and_hash @@ -35,7 +36,7 @@ from tests.utils import override_global_config -TEST_FILE_PATH = "tests/contrib/fastapi/test_fastapi_appsec_iast.py" +TEST_FILE_PATH = "tests/appsec/integrations/fastapi_tests/test_fastapi_appsec_iast.py" fastapi_version = tuple([int(v) for v in _fastapi_version.split(".")]) @@ -987,3 +988,34 @@ async def stacktrace_leak_inline_response(request: Request): assert len(loaded["vulnerabilities"]) == 1 vulnerability = loaded["vulnerabilities"][0] assert vulnerability["type"] == VULN_STACKTRACE_LEAK + +
+def test_fastapi_xss(fastapi_application, client, tracer, test_spans): + @fastapi_application.get("/index.html") + async def test_route(request: Request): + from fastapi.responses import HTMLResponse + from jinja2 import Template + + query_params = request.query_params.get("iast_queryparam") + template = Template("<p>{{ user_input|safe }}</p>") + html = template.render(user_input=query_params) + return HTMLResponse(html) + + with override_global_config(dict(_iast_enabled=True, _iast_request_sampling=100.0)): + patch_iast({"xss": True}) + _aux_appsec_prepare_tracer(tracer) + resp = client.get( + "/index.html?iast_queryparam=test1234", + headers={"Content-Type": "application/json"}, + ) + assert resp.status_code == 200 + + span = test_spans.pop_traces()[0][0] + assert span.get_metric(IAST.ENABLED) == 1.0 + + iast_tag = span.get_tag(IAST.JSON) + assert iast_tag is not None + loaded = json.loads(iast_tag) + assert len(loaded["vulnerabilities"]) == 1 + vulnerability = loaded["vulnerabilities"][0] + assert vulnerability["type"] == VULN_XSS
diff --git a/tests/appsec/integrations/flask_tests/mini.py b/tests/appsec/integrations/flask_tests/mini.py new file mode 100644 index 00000000000..5254d2ad5bd --- /dev/null +++ b/tests/appsec/integrations/flask_tests/mini.py @@ -0,0 +1,36 @@ +import ddtrace.auto # noqa: F401 + + +"""do not move this import""" + +import os # noqa: E402 +import sys # noqa: E402 + +from flask import Flask # noqa: E402 +import requests # noqa: E402 F401 + +from ddtrace.settings.asm import config as asm_config # noqa: E402 +from ddtrace.version import get_version # noqa: E402 + + +app = Flask(__name__) + + +@app.route("/") +def hello_world(): + res = [] + for m in sys.modules: + if m.startswith("ddtrace.appsec"): + res.append(m) + return { + "appsec": list(sorted(res)), + "asm_config": { + k: getattr(asm_config, k) for k in dir(asm_config) if isinstance(getattr(asm_config, k), (int, bool, float)) + }, + "aws": "AWS_LAMBDA_FUNCTION_NAME" in os.environ, + "version": get_version(), + } + + +if __name__ == "__main__": + app.run(debug=True, port=8475) diff --git a/tests/appsec/integrations/flask_tests/test_appsec_loading_modules.py b/tests/appsec/integrations/flask_tests/test_appsec_loading_modules.py new file mode 100644 index 00000000000..e989ee5d612 --- /dev/null +++ b/tests/appsec/integrations/flask_tests/test_appsec_loading_modules.py @@ -0,0 +1,84 @@ +import json +import os +import pathlib +import subprocess +import time +from urllib.error import HTTPError +from urllib.error import URLError +from urllib.request import urlopen + +import pytest + + +MODULES_ALWAYS_LOADED = ["ddtrace.appsec", "ddtrace.appsec._capabilities", "ddtrace.appsec._constants"] +MODULE_ASM_ONLY = ["ddtrace.appsec._processor", "ddtrace.appsec._ddwaf"] +MODULE_IAST_ONLY = [ + "ddtrace.appsec._iast", + "ddtrace.appsec._iast._taint_tracking._native", + "ddtrace.appsec._iast._stacktrace", +] + + +@pytest.mark.parametrize("appsec_enabled", ["true", "false"]) +@pytest.mark.parametrize("iast_enabled", ["true", None]) +@pytest.mark.parametrize("aws_lambda", ["any", None]) +def test_loading(appsec_enabled, iast_enabled, aws_lambda): + flask_app = pathlib.Path(__file__).parent / "mini.py" + env = os.environ.copy() + if appsec_enabled: + env["DD_APPSEC_ENABLED"] = appsec_enabled + else: + env.pop("DD_APPSEC_ENABLED", None) + if iast_enabled: + env["DD_IAST_ENABLED"] = iast_enabled + else: + env.pop("DD_IAST_ENABLED", None) + if aws_lambda: + env["AWS_LAMBDA_FUNCTION_NAME"] = aws_lambda + else: + env.pop("AWS_LAMBDA_FUNCTION_NAME", None) + + process = subprocess.Popen( + ["python", str(flask_app)], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + env=env, + ) + for i in range(16): + time.sleep(1) + try: + with urlopen("http://localhost:8475") as response: + assert response.status == 200 + payload = response.read().decode() + data = json.loads(payload) + assert "appsec" in data + # appsec is always enabled + for m in MODULES_ALWAYS_LOADED: + assert m in data["appsec"], f"{m} not in {data['appsec']}" + for m in MODULE_ASM_ONLY: + if appsec_enabled == "true" and not aws_lambda: + assert m in data["appsec"], f"{m} not in {data['appsec']}" + else: + assert m not in data["appsec"], f"{m} in {data['appsec']}" + for m in MODULE_IAST_ONLY: + if iast_enabled and not aws_lambda: + assert m in data["appsec"], f"{m} not in {data['appsec']}" + else: + assert m not in data["appsec"], f"{m} in {data['appsec']}" + process.terminate() + process.wait() + break + except HTTPError as e: + process.terminate() + process.wait() + raise AssertionError(e.read().decode()) + except URLError: + continue + except AssertionError: + process.terminate() + process.wait() + raise + else: + process.terminate() + process.wait() + raise AssertionError("Server did not start") diff --git a/tests/appsec/integrations/flask_tests/test_iast_flask.py b/tests/appsec/integrations/flask_tests/test_iast_flask.py index be45e6bb82f..6aa0558a737 100644 --- a/tests/appsec/integrations/flask_tests/test_iast_flask.py +++ b/tests/appsec/integrations/flask_tests/test_iast_flask.py @@ -16,7 +16,9 @@ from ddtrace.appsec._iast.constants import VULN_NO_SAMESITE_COOKIE from ddtrace.appsec._iast.constants import VULN_SQL_INJECTION from ddtrace.appsec._iast.constants import VULN_STACKTRACE_LEAK +from ddtrace.appsec._iast.constants import VULN_XSS from ddtrace.appsec._iast.taint_sinks.header_injection import patch as patch_header_injection +from ddtrace.appsec._iast.taint_sinks.xss import patch as patch_xss_injection from ddtrace.contrib.internal.sqlite3.patch import patch as patch_sqlite_sqli from ddtrace.settings.asm import config as asm_config from tests.appsec.iast.iast_utils import get_line_and_hash @@ -45,11 +47,11 @@ def setUp(self): _iast_request_sampling=100.0, ) ): - super(FlaskAppSecIASTEnabledTestCase, self).setUp() patch_sqlite_sqli() patch_header_injection() + patch_xss_injection() patch_json() - + super(FlaskAppSecIASTEnabledTestCase, self).setUp() self.tracer._configure(api_version="v0.4", appsec_enabled=True, iast_enabled=True) oce.reconfigure() @@ -59,7 +61,6 @@ def test_flask_full_sqli_iast_http_request_path_parameter(self): def sqli_1(param_str): import sqlite3 - from ddtrace.appsec._iast._taint_tracking._taint_objects import is_pyobject_tainted from ddtrace.appsec._iast._taint_tracking.aspects import add_aspect assert is_pyobject_tainted(param_str) @@ -161,6 +162,62 @@ def sqli_2(param_str): assert vulnerability["location"]["path"] == TEST_FILE_PATH assert vulnerability["hash"] == hash_value + @pytest.mark.skipif(not asm_config._iast_supported, reason="Python version not supported by IAST") + def test_flask_iast_enabled_http_request_header_get(self): + @self.app.route("/sqli/<string:param_str>/", methods=["GET", "POST"]) + def sqli_2(param_str): + import sqlite3 + + from flask import request + + from ddtrace.appsec._iast._taint_tracking.aspects import add_aspect + + con = sqlite3.connect(":memory:") + cur = con.cursor() + # label test_flask_iast_enabled_http_request_header_get + cur.execute(add_aspect("SELECT 1 FROM ", request.headers.get("User-Agent"))) + + return "OK", 200 + + with override_global_config( + dict( + _iast_enabled=True, + _iast_deduplication_enabled=False, + ) + ): + resp = self.client.post( + "/sqli/sqlite_master/", data={"name": "test"}, headers={"User-Agent": "sqlite_master"} + ) + assert resp.status_code == 200 + + root_span = self.pop_spans()[0] + assert 
root_span.get_metric(IAST.ENABLED) == 1.0 + + loaded = json.loads(root_span.get_tag(IAST.JSON)) + assert loaded["sources"] == [ + {"origin": "http.request.header", "name": "User-Agent", "value": "sqlite_master"} + ] + + line, hash_value = get_line_and_hash( + "test_flask_iast_enabled_http_request_header_get", + VULN_SQL_INJECTION, + filename=TEST_FILE_PATH, + ) + vulnerability = loaded["vulnerabilities"][0] + + assert vulnerability["type"] == VULN_SQL_INJECTION + assert vulnerability["evidence"] == { + "valueParts": [ + {"value": "SELECT "}, + {"redacted": True}, + {"value": " FROM "}, + {"value": "sqlite_master", "source": 0}, + ] + } + assert vulnerability["location"]["line"] == line + assert vulnerability["location"]["path"] == TEST_FILE_PATH + assert vulnerability["hash"] == hash_value + @pytest.mark.skipif(not asm_config._iast_supported, reason="Python version not supported by IAST") def test_flask_full_sqli_iast_enabled_http_request_header_name_keys(self): @self.app.route("/sqli//", methods=["GET", "POST"]) @@ -274,7 +331,6 @@ def sqli_5(param_str, param_int): from ddtrace.appsec._iast._taint_tracking import OriginType from ddtrace.appsec._iast._taint_tracking._taint_objects import get_tainted_ranges - from ddtrace.appsec._iast._taint_tracking._taint_objects import is_pyobject_tainted header_ranges = get_tainted_ranges(request.headers["User-Agent"]) assert header_ranges @@ -324,8 +380,6 @@ def test_flask_simple_iast_path_header_and_querystring_tainted_request_sampling_ def sqli_6(param_str): from flask import request - from ddtrace.appsec._iast._taint_tracking._taint_objects import is_pyobject_tainted - # Note: these are not tainted because of request sampling at 0% assert not is_pyobject_tainted(request.headers["User-Agent"]) assert not is_pyobject_tainted(request.query_string) @@ -535,7 +589,6 @@ def test_flask_full_sqli_iast_http_request_parameter_name_post(self): def sqli_13(): import sqlite3 - from ddtrace.appsec._iast._taint_tracking._taint_objects import is_pyobject_tainted from ddtrace.appsec._iast._taint_tracking.aspects import add_aspect for i in request.form.keys(): @@ -593,7 +646,6 @@ def test_flask_full_sqli_iast_http_request_parameter_name_get(self): def sqli_14(): import sqlite3 - from ddtrace.appsec._iast._taint_tracking._taint_objects import is_pyobject_tainted from ddtrace.appsec._iast._taint_tracking.aspects import add_aspect for i in request.args.keys(): @@ -654,7 +706,6 @@ def sqli_10(): from flask import request - from ddtrace.appsec._iast._taint_tracking._taint_objects import is_pyobject_tainted from ddtrace.appsec._iast._taint_tracking.aspects import add_aspect con = sqlite3.connect(":memory:") @@ -719,7 +770,6 @@ def sqli_11(): from flask import request - from ddtrace.appsec._iast._taint_tracking._taint_objects import is_pyobject_tainted from ddtrace.appsec._iast._taint_tracking.aspects import add_aspect con = sqlite3.connect(":memory:") @@ -784,7 +834,6 @@ def sqli_11(): from flask import request - from ddtrace.appsec._iast._taint_tracking._taint_objects import is_pyobject_tainted from ddtrace.appsec._iast._taint_tracking.aspects import add_aspect con = sqlite3.connect(":memory:") @@ -849,7 +898,6 @@ def sqli_11(): from flask import request - from ddtrace.appsec._iast._taint_tracking._taint_objects import is_pyobject_tainted from ddtrace.appsec._iast._taint_tracking.aspects import add_aspect con = sqlite3.connect(":memory:") @@ -916,7 +964,6 @@ def sqli_11(): from flask import request - from ddtrace.appsec._iast._taint_tracking._taint_objects import 
is_pyobject_tainted from ddtrace.appsec._iast._taint_tracking.aspects import add_aspect def iterate_json(data, parent_key=""): @@ -1057,7 +1104,6 @@ def sqli_10(): from flask import request - from ddtrace.appsec._iast._taint_tracking._taint_objects import is_pyobject_tainted from ddtrace.appsec._iast._taint_tracking.aspects import add_aspect con = sqlite3.connect(":memory:") @@ -1160,8 +1206,6 @@ def header_injection(): from flask import Response from flask import request - from ddtrace.appsec._iast._taint_tracking._taint_objects import is_pyobject_tainted - tainted_string = request.form.get("name") assert is_pyobject_tainted(tainted_string) resp = Response("OK") @@ -1194,14 +1238,12 @@ def header_injection(): # TODO: vulnerability path is flaky, it points to "tests/contrib/flask/__init__.py" @pytest.mark.skipif(not asm_config._iast_supported, reason="Python version not supported by IAST") - def test_flask_header_injection_exlusions_location(self): + def test_flask_header_injection_exclusions_location(self): @self.app.route("/header_injection/", methods=["GET", "POST"]) def header_injection(): from flask import Response from flask import request - from ddtrace.appsec._iast._taint_tracking._taint_objects import is_pyobject_tainted - tainted_string = request.form.get("name") assert is_pyobject_tainted(tainted_string) resp = Response("OK") @@ -1223,14 +1265,12 @@ def header_injection(): assert root_span.get_tag(IAST.JSON) is None @pytest.mark.skipif(not asm_config._iast_supported, reason="Python version not supported by IAST") - def test_flask_header_injection_exlusions_access_control(self): + def test_flask_header_injection_exclusions_access_control(self): @self.app.route("/header_injection/", methods=["GET", "POST"]) def header_injection(): from flask import Response from flask import request - from ddtrace.appsec._iast._taint_tracking._taint_objects import is_pyobject_tainted - tainted_string = request.form.get("name") assert is_pyobject_tainted(tainted_string) resp = Response("OK") @@ -1258,8 +1298,6 @@ def insecure_cookie(): from flask import Response from flask import request - from ddtrace.appsec._iast._taint_tracking._taint_objects import is_pyobject_tainted - tainted_string = request.form.get("name") assert is_pyobject_tainted(tainted_string) resp = Response("OK") @@ -1296,8 +1334,6 @@ def insecure_cookie_empty(): from flask import Response from flask import request - from ddtrace.appsec._iast._taint_tracking._taint_objects import is_pyobject_tainted - tainted_string = request.form.get("name") assert is_pyobject_tainted(tainted_string) resp = Response("OK") @@ -1326,8 +1362,6 @@ def no_http_only_cookie(): from flask import Response from flask import request - from ddtrace.appsec._iast._taint_tracking._taint_objects import is_pyobject_tainted - tainted_string = request.form.get("name") assert is_pyobject_tainted(tainted_string) resp = Response("OK") @@ -1364,8 +1398,6 @@ def no_http_only_cookie_empty(): from flask import Response from flask import request - from ddtrace.appsec._iast._taint_tracking._taint_objects import is_pyobject_tainted - tainted_string = request.form.get("name") assert is_pyobject_tainted(tainted_string) resp = Response("OK") @@ -1395,8 +1427,6 @@ def no_samesite_cookie(): from flask import Response from flask import request - from ddtrace.appsec._iast._taint_tracking._taint_objects import is_pyobject_tainted - tainted_string = request.form.get("name") assert is_pyobject_tainted(tainted_string) resp = Response("OK") @@ -1433,8 +1463,6 @@ def 
no_samesite_cookie_empty(): from flask import Response from flask import request - from ddtrace.appsec._iast._taint_tracking._taint_objects import is_pyobject_tainted - tainted_string = request.form.get("name") assert is_pyobject_tainted(tainted_string) resp = Response("OK") @@ -1461,8 +1489,6 @@ def cookie_secure(): from flask import Response from flask import request - from ddtrace.appsec._iast._taint_tracking._taint_objects import is_pyobject_tainted - tainted_string = request.form.get("name") assert is_pyobject_tainted(tainted_string) resp = Response("OK") @@ -1587,6 +1613,159 @@ def stacktrace_leak(): ) assert "Exception: ValueError" in vulnerability["evidence"]["valueParts"][0]["value"] + def test_flask_xss(self): + @self.app.route("/xss/", methods=["GET"]) + def xss_view(): + from flask import render_template_string + from flask import request + + user_input = request.args.get("input", "") + + # label test_flask_xss + return render_template_string("<p>XSS: {{ user_input|safe }}</p>", user_input=user_input)
+ + with override_global_config( + dict( + _iast_enabled=True, + _iast_deduplication_enabled=False, + _iast_request_sampling=100.0, + ) + ): + resp = self.client.get("/xss/?input=<script>alert('XSS')</script>") + assert resp.status_code == 200 + assert resp.data == b"<p>XSS: <script>alert('XSS')</script></p>" + + root_span = self.pop_spans()[0] + assert root_span.get_metric(IAST.ENABLED) == 1.0 + + loaded = json.loads(root_span.get_tag(IAST.JSON)) + assert loaded["sources"] == [ + {"origin": "http.request.parameter", "name": "input", "value": "<script>alert('XSS')</script>"} + ] + + line, hash_value = get_line_and_hash("test_flask_xss", VULN_SQL_INJECTION, filename=TEST_FILE_PATH) + vulnerability = loaded["vulnerabilities"][0] + assert vulnerability["type"] == VULN_XSS + assert vulnerability["evidence"] == { + "valueParts": [ + {"value": "<script>alert('XSS')</script>", "source": 0}, + ] + } + assert vulnerability["location"]["line"] == line + assert vulnerability["location"]["path"] == TEST_FILE_PATH
+ + def test_flask_xss_concat(self): + @self.app.route("/xss/concat/", methods=["GET"]) + def xss_view(): + from flask import render_template_string + from flask import request + + from ddtrace.appsec._iast._taint_tracking.aspects import add_aspect + + user_input = request.args.get("input", "") + + # label test_flask_xss_concat + return render_template_string(add_aspect(add_aspect("<p>XSS: ", user_input), "</p>")) + + with override_global_config( + dict( + _iast_enabled=True, + _iast_deduplication_enabled=False, + _iast_request_sampling=100.0, + ) + ): + resp = self.client.get("/xss/concat/?input=<script>alert('XSS')</script>") + assert resp.status_code == 200 + assert resp.data == b"<p>XSS: <script>alert('XSS')</script></p>" + + root_span = self.pop_spans()[0] + assert root_span.get_metric(IAST.ENABLED) == 1.0 + + loaded = json.loads(root_span.get_tag(IAST.JSON)) + assert loaded["sources"] == [ + {"origin": "http.request.parameter", "name": "input", "value": "<script>alert('XSS')</script>"} + ] + + line, hash_value = get_line_and_hash("test_flask_xss_concat", VULN_SQL_INJECTION, filename=TEST_FILE_PATH) + vulnerability = loaded["vulnerabilities"][0] + assert vulnerability["type"] == VULN_XSS + assert vulnerability["evidence"] == { + "valueParts": [ + {"value": "<p>XSS: "}, + {"source": 0, "value": "<script>alert('XSS')</script>"}, + {"value": "</p>"}, + ] + } + assert vulnerability["location"]["line"] == line + assert vulnerability["location"]["path"] == TEST_FILE_PATH
+ + def test_flask_xss_template_secure(self): + @self.app.route("/xss/template/secure/", methods=["GET"]) + def xss_view_template(): + from flask import render_template + from flask import request + + user_input = request.args.get("input", "") + + # label test_flask_xss_template + return render_template("test.html", world=user_input) + + with override_global_config( + dict( + _iast_enabled=True, + _iast_deduplication_enabled=False, + _iast_request_sampling=100.0, + ) + ): + resp = self.client.get("/xss/template/secure/?input=<script>alert('XSS')</script>") + assert resp.status_code == 200 + assert resp.data == b"hello &lt;script&gt;alert(&#39;XSS&#39;)&lt;/script&gt;" + + root_span = self.pop_spans()[0] + assert root_span.get_metric(IAST.ENABLED) == 1.0 + + assert root_span.get_tag(IAST.JSON) is None
+ + def test_flask_xss_template(self): + @self.app.route("/xss/template/", methods=["GET"]) + def xss_view_template(): + from flask import render_template + from flask import request + + user_input = request.args.get("input", "") + + # label test_flask_xss_template + return render_template("test_insecure.html", world=user_input) + + with override_global_config( + dict( + _iast_enabled=True, + _iast_deduplication_enabled=False, + _iast_request_sampling=100.0, + ) + ): + resp = self.client.get("/xss/template/?input=<script>alert('XSS')</script>") + assert resp.status_code == 200 + assert resp.data == b"hello <script>alert('XSS')</script>" + + root_span = self.pop_spans()[0] + assert root_span.get_metric(IAST.ENABLED) == 1.0 + + loaded = json.loads(root_span.get_tag(IAST.JSON)) + assert loaded["sources"] == [ + {"origin": "http.request.parameter", "name": "input", "value": "<script>alert('XSS')</script>"} + ] + + line, hash_value = get_line_and_hash("test_flask_xss", VULN_SQL_INJECTION, filename=TEST_FILE_PATH) + vulnerability = loaded["vulnerabilities"][0] + assert vulnerability["type"] == VULN_XSS + assert vulnerability["evidence"] == { + "valueParts": [ + {"value": "<script>alert('XSS')</script>", "source": 0}, + ] + } + assert vulnerability["location"]["path"] == "tests/contrib/flask/test_templates/test_insecure.html" + class FlaskAppSecIASTDisabledTestCase(BaseFlaskTestCase): @pytest.fixture(autouse=True) @@ -1621,18 +1800,19 @@ def test_sqli(): return "OK", 200 - if tuple(map(int, werkzeug_version.split("."))) >= (2, 3): - self.client.set_cookie(domain="localhost", key="sqlite_master", value="sqlite_master3") - else: - self.client.set_cookie(server_name="localhost", key="sqlite_master", value="sqlite_master3") + with override_global_config(dict(_iast_enabled=False)): + if tuple(map(int, werkzeug_version.split("."))) >= (2, 3): + self.client.set_cookie(domain="localhost", key="sqlite_master", value="sqlite_master3") + else: + self.client.set_cookie(server_name="localhost", key="sqlite_master", value="sqlite_master3") - resp = self.client.post("/sqli/cookies/") - assert resp.status_code == 200 + resp = self.client.post("/sqli/cookies/") + assert resp.status_code == 200 - root_span = self.pop_spans()[0] - assert root_span.get_metric(IAST.ENABLED) is None + root_span = self.pop_spans()[0] + assert root_span.get_metric(IAST.ENABLED) is None - assert root_span.get_tag(IAST.JSON) is None + assert root_span.get_tag(IAST.JSON) is None @pytest.mark.skipif(not asm_config._iast_supported, reason="Python version not supported by IAST") def test_flask_full_sqli_iast_disabled_http_request_header_getitem(self): diff --git a/tests/appsec/integrations/pygoat_tests/test_pygoat.py index
8bb8baae1bd..8e6ff5da0ff 100644 --- a/tests/appsec/integrations/pygoat_tests/test_pygoat.py +++ b/tests/appsec/integrations/pygoat_tests/test_pygoat.py @@ -5,7 +5,6 @@ import requests from tests.appsec.iast.conftest import iast_context_defaults -from tests.utils import flaky span_defaults = iast_context_defaults # So ruff does not remove it @@ -108,7 +107,6 @@ def test_nohttponly_cookie(client): assert vulnerability_in_traces("NO_HTTPONLY_COOKIE", client.agent_session) -@flaky(1735812000) def test_weak_random(client): reply = client.pygoat_session.get(PYGOAT_URL + "/otp?email=test%40test.com", headers=TESTAGENT_HEADERS) assert reply.status_code == 200 @@ -124,7 +122,6 @@ def test_weak_hash(client): assert vulnerability_in_traces("WEAK_HASH", client.agent_session) -@flaky(1735812000) def test_cmdi(client): payload = {"domain": "google.com && ls", "csrfmiddlewaretoken": client.csrftoken} reply = client.pygoat_session.post(PYGOAT_URL + "/cmd_lab", data=payload, headers=TESTAGENT_HEADERS) @@ -132,7 +129,6 @@ def test_cmdi(client): assert vulnerability_in_traces("COMMAND_INJECTION", client.agent_session) -@pytest.mark.skip("TODO: fix interaction with new RASP rules") def test_sqli(client): payload = {"name": "admin", "pass": "anything' OR '1' ='1", "csrfmiddlewaretoken": client.csrftoken} reply = client.pygoat_session.post(PYGOAT_URL + "/sql_lab", data=payload, headers=TESTAGENT_HEADERS) @@ -142,34 +138,20 @@ @pytest.mark.skip("TODO: SSRF is not implemented for open()") def test_ssrf1(client, iast_context_defaults): - from ddtrace.appsec._iast._taint_tracking import OriginType - from ddtrace.appsec._iast._taint_tracking._taint_objects import taint_pyobject - - s = "templates/Lab/ssrf/blogs/blog2.txt" - tainted_path = taint_pyobject( - pyobject=s, - source_name="test_ssrf", - source_value=s, - source_origin=OriginType.PARAMETER, - ) - payload = {"blog": tainted_path, "csrfmiddlewaretoken": client.csrftoken} + payload = {"blog": "templates/Lab/ssrf/blogs/blog2.txt", "csrfmiddlewaretoken": client.csrftoken} reply = client.pygoat_session.post(PYGOAT_URL + "/ssrf_lab", data=payload, headers=TESTAGENT_HEADERS) assert reply.status_code == 200 assert vulnerability_in_traces("SSRF", client.agent_session) def test_ssrf2(client, iast_context_defaults): - from ddtrace.appsec._iast._taint_tracking import OriginType - from ddtrace.appsec._iast._taint_tracking._taint_objects import taint_pyobject - - s = "http://example.com" - tainted_path = taint_pyobject( - pyobject=s, - source_name="test_ssrf", - source_value=s, - source_origin=OriginType.PARAMETER, - ) - payload = {"url": tainted_path, "csrfmiddlewaretoken": client.csrftoken} + payload = {"url": "http://example.com", "csrfmiddlewaretoken": client.csrftoken} reply = client.pygoat_session.post(PYGOAT_URL + "/ssrf_lab2", data=payload, headers=TESTAGENT_HEADERS) assert reply.status_code == 200 assert vulnerability_in_traces("SSRF", client.agent_session) + + +def test_xss(client): + reply = client.pygoat_session.get(PYGOAT_URL + '/xssL?q=<script>alert("XSS")</script>', headers=TESTAGENT_HEADERS) + assert reply.status_code == 200 + assert vulnerability_in_traces("XSS", client.agent_session) diff --git a/tests/ci_visibility/test_ci_visibility.py b/tests/ci_visibility/test_ci_visibility.py index 1db4f068c7a..778568f544e 100644 --- a/tests/ci_visibility/test_ci_visibility.py +++ b/tests/ci_visibility/test_ci_visibility.py @@ -28,6 +28,7 @@ from ddtrace.internal.ci_visibility.git_client import METADATA_UPLOAD_STATUS from ddtrace.internal.ci_visibility.git_client import
CIVisibilityGitClient from ddtrace.internal.ci_visibility.git_client import CIVisibilityGitClientSerializerV1 +from ddtrace.internal.ci_visibility.recorder import CIVisibilityTracer from ddtrace.internal.ci_visibility.recorder import _extract_repository_name_from_url import ddtrace.internal.test_visibility._internal_item_ids from ddtrace.internal.utils.http import Response @@ -685,7 +686,7 @@ def test_civisibilitywriter_evp_proxy_url(self): ), mock.patch( "ddtrace.internal.agent.get_trace_url", return_value="http://evpproxy.bar:1234" ), mock.patch("ddtrace.settings._config.Config", _get_default_civisibility_ddconfig()), mock.patch( - "ddtrace.tracer", ddtrace.trace.Tracer() + "ddtrace.tracer", CIVisibilityTracer() ), mock.patch( "ddtrace.internal.ci_visibility.recorder.CIVisibility._agent_evp_proxy_is_available", return_value=True ), _dummy_noop_git_client(), mock.patch( @@ -705,7 +706,7 @@ def test_civisibilitywriter_only_traces(self): ) ), mock.patch( "ddtrace.internal.agent.get_trace_url", return_value="http://onlytraces:1234" - ), mock.patch("ddtrace.tracer", ddtrace.trace.Tracer()), mock.patch( + ), mock.patch("ddtrace.tracer", CIVisibilityTracer()), mock.patch( "ddtrace.internal.ci_visibility.recorder.CIVisibility._agent_evp_proxy_is_available", return_value=False ), mock.patch( "ddtrace.internal.ci_visibility.writer.config", ddtrace.settings.Config() @@ -1119,7 +1120,7 @@ def test_civisibility_enable_respects_passed_in_tracer(): ), _dummy_noop_git_client(), mock.patch( "ddtrace.internal.ci_visibility.recorder.ddconfig", _get_default_civisibility_ddconfig() ), mock.patch("ddtrace.internal.ci_visibility.writer.config", ddtrace.settings.Config()): - tracer = ddtrace.trace.Tracer() + tracer = CIVisibilityTracer() tracer._configure(partial_flush_enabled=False, partial_flush_min_spans=100) CIVisibility.enable(tracer=tracer) assert CIVisibility._instance.tracer._partial_flush_enabled is False diff --git a/tests/ci_visibility/util.py b/tests/ci_visibility/util.py index dc0b886ca64..f1911e20e93 100644 --- a/tests/ci_visibility/util.py +++ b/tests/ci_visibility/util.py @@ -12,6 +12,7 @@ from ddtrace.internal.ci_visibility.git_client import METADATA_UPLOAD_STATUS from ddtrace.internal.ci_visibility.git_client import CIVisibilityGitClient from ddtrace.internal.ci_visibility.recorder import CIVisibility +from ddtrace.internal.ci_visibility.recorder import CIVisibilityTracer from ddtrace.internal.test_visibility._internal_item_ids import InternalTestId from tests.utils import DummyCIVisibilityWriter from tests.utils import override_env @@ -209,5 +210,5 @@ def _ci_override_env( new_vars: t.Optional[t.Dict[str, str]] = None, mock_ci_env=False, replace_os_env=True, full_clear=False ): env_vars = _get_default_ci_env_vars(new_vars, mock_ci_env, full_clear) - with override_env(env_vars, replace_os_env=replace_os_env), mock.patch("ddtrace.tracer", ddtrace.trace.Tracer()): + with override_env(env_vars, replace_os_env=replace_os_env), mock.patch("ddtrace.tracer", CIVisibilityTracer()): yield diff --git a/tests/contrib/aiobotocore/utils.py b/tests/contrib/aiobotocore/utils.py index b51b6550327..e2eb56c6026 100644 --- a/tests/contrib/aiobotocore/utils.py +++ b/tests/contrib/aiobotocore/utils.py @@ -42,11 +42,11 @@ async def aiobotocore_client(service, tracer): client, aiobotocore.session.ClientCreatorContext ): async with client as client: - Pin.override(client, tracer=tracer) + Pin._override(client, tracer=tracer) await yield_(client) else: - Pin.override(client, tracer=tracer) + Pin._override(client, 
tracer=tracer) try: await yield_(client) finally: diff --git a/tests/contrib/aiohttp/test_aiohttp_client.py b/tests/contrib/aiohttp/test_aiohttp_client.py index 2b2b51c2650..c020b1266c6 100644 --- a/tests/contrib/aiohttp/test_aiohttp_client.py +++ b/tests/contrib/aiohttp/test_aiohttp_client.py @@ -189,7 +189,7 @@ def test_configure_service_name_pin(ddtrace_run_python_code_in_subprocess): async def test(): async with aiohttp.ClientSession() as session: - Pin.override(session, service="pin-custom-svc") + Pin._override(session, service="pin-custom-svc") async with session.get(URL_200) as resp: pass diff --git a/tests/contrib/aiohttp/test_request.py b/tests/contrib/aiohttp/test_request.py index 36e9d8a399a..056eda09c4b 100644 --- a/tests/contrib/aiohttp/test_request.py +++ b/tests/contrib/aiohttp/test_request.py @@ -31,6 +31,33 @@ async def test_full_request(patched_app_tracer, aiohttp_client, loop): assert "GET /" == request_span.resource +async def test_full_request_w_mem_leak_prevention_flag(patched_app_tracer, aiohttp_client, loop): + config.aiohttp.disable_stream_timing_for_mem_leak = True + try: + app, tracer = patched_app_tracer + client = await aiohttp_client(app) + # it should create a root span when there is a handler hit + # with the proper tags + request = await client.request("GET", "/") + assert 200 == request.status + await request.text() + # the trace is created + traces = tracer.pop_traces() + assert 1 == len(traces) + assert 1 == len(traces[0]) + request_span = traces[0][0] + assert_is_measured(request_span) + + # request + assert "aiohttp-web" == request_span.service + assert "aiohttp.request" == request_span.name + assert "GET /" == request_span.resource + except Exception: + raise + finally: + config.aiohttp.disable_stream_timing_for_mem_leak = False + + async def test_stream_request(patched_app_tracer, aiohttp_client, loop): app, tracer = patched_app_tracer async with await aiohttp_client(app) as client: diff --git a/tests/contrib/aiohttp_jinja2/conftest.py b/tests/contrib/aiohttp_jinja2/conftest.py index a58b72f7f49..1624753a635 100644 --- a/tests/contrib/aiohttp_jinja2/conftest.py +++ b/tests/contrib/aiohttp_jinja2/conftest.py @@ -13,7 +13,7 @@ def patched_app_tracer_jinja(patched_app_tracer): # noqa: F811 app, tracer = patched_app_tracer patch() - Pin.override(aiohttp_jinja2, tracer=tracer) + Pin._override(aiohttp_jinja2, tracer=tracer) yield app, tracer unpatch() @@ -22,6 +22,6 @@ def patched_app_tracer_jinja(patched_app_tracer): # noqa: F811 def untraced_app_tracer_jinja(untraced_app_tracer): # noqa: F811 patch() app, tracer = untraced_app_tracer - Pin.override(aiohttp_jinja2, tracer=tracer) + Pin._override(aiohttp_jinja2, tracer=tracer) yield app, tracer unpatch() diff --git a/tests/contrib/aiohttp_jinja2/test_aiohttp_jinja2.py b/tests/contrib/aiohttp_jinja2/test_aiohttp_jinja2.py index 089c799ebe0..98a942b1637 100644 --- a/tests/contrib/aiohttp_jinja2/test_aiohttp_jinja2.py +++ b/tests/contrib/aiohttp_jinja2/test_aiohttp_jinja2.py @@ -35,7 +35,7 @@ async def test_template_rendering(untraced_app_tracer_jinja, aiohttp_client): async def test_template_rendering_snapshot(untraced_app_tracer_jinja, aiohttp_client, snapshot_context): app, _ = untraced_app_tracer_jinja - Pin.override(aiohttp_jinja2, tracer=tracer) + Pin._override(aiohttp_jinja2, tracer=tracer) with snapshot_context(): client = await aiohttp_client(app) # it should trace a template rendering @@ -51,7 +51,7 @@ async def test_template_rendering_snapshot_patched_server( use_global_tracer, ): app, _ = 
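Reviewer note (not part of the patch): test_full_request_w_mem_leak_prevention_flag above flips config.aiohttp.disable_stream_timing_for_mem_leak and restores it in a finally block. The same save-toggle-restore idiom as a reusable context manager, should more flag-dependent tests follow (the helper name is ours):

    from contextlib import contextmanager

    from ddtrace import config

    @contextmanager
    def stream_timing_disabled(enabled=True):
        # Save and restore the integration flag so a failing test cannot leak state.
        previous = config.aiohttp.disable_stream_timing_for_mem_leak
        config.aiohttp.disable_stream_timing_for_mem_leak = enabled
        try:
            yield
        finally:
            config.aiohttp.disable_stream_timing_for_mem_leak = previous
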
patched_app_tracer_jinja - Pin.override(aiohttp_jinja2, tracer=tracer) + Pin._override(aiohttp_jinja2, tracer=tracer) # Ignore meta.http.url tag as the port is not fixed on the server with snapshot_context(ignores=["meta.http.url", "meta.http.useragent"]): client = await aiohttp_client(app) diff --git a/tests/contrib/aiomysql/test_aiomysql.py b/tests/contrib/aiomysql/test_aiomysql.py index 8199b5c16a1..06d53d4ce05 100644 --- a/tests/contrib/aiomysql/test_aiomysql.py +++ b/tests/contrib/aiomysql/test_aiomysql.py @@ -9,7 +9,6 @@ from ddtrace.contrib.internal.aiomysql.patch import unpatch from ddtrace.internal.schema import DEFAULT_SPAN_SERVICE_NAME from ddtrace.trace import Pin -from ddtrace.trace import Tracer from tests.contrib import shared_tests_async as shared_tests from tests.contrib.asyncio.utils import AsyncioTestCase from tests.contrib.asyncio.utils import mark_asyncio @@ -31,19 +30,16 @@ def patch_aiomysql(): @pytest.fixture async def patched_conn(tracer): conn = await aiomysql.connect(**AIOMYSQL_CONFIG) - Pin.get_from(conn).clone(tracer=tracer).onto(conn) yield conn conn.close() @pytest.fixture() -async def snapshot_conn(): - tracer = Tracer() +async def snapshot_conn(tracer): conn = await aiomysql.connect(**AIOMYSQL_CONFIG) - Pin.get_from(conn).clone(tracer=tracer).onto(conn) yield conn conn.close() - tracer.shutdown() + tracer.flush() @pytest.mark.asyncio @@ -66,7 +62,7 @@ async def test_queries(snapshot_conn): @pytest.mark.asyncio @pytest.mark.snapshot async def test_pin_override(patched_conn, tracer): - Pin.override(patched_conn, service="db") + Pin._override(patched_conn, service="db") cursor = await patched_conn.cursor() await cursor.execute("SELECT 1") rows = await cursor.fetchall() @@ -82,7 +78,7 @@ async def test_patch_unpatch(tracer, test_spans): service = "fo" conn = await aiomysql.connect(**AIOMYSQL_CONFIG) - Pin.get_from(conn).clone(service=service, tracer=tracer).onto(conn) + Pin.get_from(conn)._clone(service=service, tracer=tracer).onto(conn) await (await conn.cursor()).execute("select 'dba4x4'") conn.close() @@ -104,7 +100,7 @@ async def test_patch_unpatch(tracer, test_spans): patch() conn = await aiomysql.connect(**AIOMYSQL_CONFIG) - Pin.get_from(conn).clone(service=service, tracer=tracer).onto(conn) + Pin.get_from(conn)._clone(service=service, tracer=tracer).onto(conn) await (await conn.cursor()).execute("select 'dba4x4'") conn.close() @@ -241,7 +237,7 @@ async def _get_conn_tracer(self, tags=None): assert pin # Customize the service # we have to apply it on the existing one since new one won't inherit `app` - pin.clone(tracer=self.tracer, tags={**tags, **pin.tags}).onto(self.conn) + pin._clone(tracer=self.tracer, tags={**tags, **pin.tags}).onto(self.conn) return self.conn, self.tracer diff --git a/tests/contrib/aiopg/test.py b/tests/contrib/aiopg/test.py index eb738e009d8..939aa376570 100644 --- a/tests/contrib/aiopg/test.py +++ b/tests/contrib/aiopg/test.py @@ -39,7 +39,7 @@ def tearDown(self): @pytest.mark.asyncio async def _get_conn_and_tracer(self): conn = self._conn = await aiopg.connect(**POSTGRES_CONFIG) - Pin.get_from(conn).clone(tracer=self.tracer).onto(conn) + Pin.get_from(conn)._clone(tracer=self.tracer).onto(conn) return conn, self.tracer @@ -159,7 +159,7 @@ async def test_connect_factory(self): services = ["db", "another"] for service in services: conn, _ = await self._get_conn_and_tracer() - Pin.get_from(conn).clone(service=service, tracer=self.tracer).onto(conn) + Pin.get_from(conn)._clone(service=service, tracer=self.tracer).onto(conn) await 
self.assert_conn_is_traced(self.tracer, conn, service) conn.close() @@ -172,7 +172,7 @@ async def test_patch_unpatch(self): service = "fo" conn = await aiopg.connect(**POSTGRES_CONFIG) - Pin.get_from(conn).clone(service=service, tracer=self.tracer).onto(conn) + Pin.get_from(conn)._clone(service=service, tracer=self.tracer).onto(conn) await (await conn.cursor()).execute("select 'blah'") conn.close() @@ -194,7 +194,7 @@ async def test_patch_unpatch(self): patch() conn = await aiopg.connect(**POSTGRES_CONFIG) - Pin.get_from(conn).clone(service=service, tracer=self.tracer).onto(conn) + Pin.get_from(conn)._clone(service=service, tracer=self.tracer).onto(conn) await (await conn.cursor()).execute("select 'blah'") conn.close() @@ -218,7 +218,7 @@ async def test_user_specified_service_v0(self): assert config.service == "mysvc" conn = await aiopg.connect(**POSTGRES_CONFIG) - Pin.get_from(conn).clone(tracer=self.tracer).onto(conn) + Pin.get_from(conn)._clone(tracer=self.tracer).onto(conn) await (await conn.cursor()).execute("select 'blah'") conn.close() @@ -240,7 +240,7 @@ async def test_user_specified_service_v1(self): assert config.service == "mysvc" conn = await aiopg.connect(**POSTGRES_CONFIG) - Pin.get_from(conn).clone(tracer=self.tracer).onto(conn) + Pin.get_from(conn)._clone(tracer=self.tracer).onto(conn) await (await conn.cursor()).execute("select 'blah'") conn.close() @@ -258,7 +258,7 @@ async def test_unspecified_service_v1(self): """ # Ensure that the service name was configured conn = await aiopg.connect(**POSTGRES_CONFIG) - Pin.get_from(conn).clone(tracer=self.tracer).onto(conn) + Pin.get_from(conn)._clone(tracer=self.tracer).onto(conn) await (await conn.cursor()).execute("select 'blah'") conn.close() @@ -271,7 +271,7 @@ async def test_unspecified_service_v1(self): @run_in_subprocess(env_overrides=dict(DD_TRACE_SPAN_ATTRIBUTE_SCHEMA="v0")) async def test_trace_span_name_v0_schema(self): conn = await aiopg.connect(**POSTGRES_CONFIG) - Pin.get_from(conn).clone(tracer=self.tracer).onto(conn) + Pin.get_from(conn)._clone(tracer=self.tracer).onto(conn) await (await conn.cursor()).execute("select 'blah'") conn.close() @@ -284,7 +284,7 @@ async def test_trace_span_name_v0_schema(self): @run_in_subprocess(env_overrides=dict(DD_TRACE_SPAN_ATTRIBUTE_SCHEMA="v1")) async def test_trace_span_name_v1_schema(self): conn = await aiopg.connect(**POSTGRES_CONFIG) - Pin.get_from(conn).clone(tracer=self.tracer).onto(conn) + Pin.get_from(conn)._clone(tracer=self.tracer).onto(conn) await (await conn.cursor()).execute("select 'blah'") conn.close() @@ -299,7 +299,7 @@ async def test_trace_span_name_v1_schema(self): ) async def test_user_specified_service_integration_v0(self): conn = await aiopg.connect(**POSTGRES_CONFIG) - Pin.get_from(conn).clone(tracer=self.tracer).onto(conn) + Pin.get_from(conn)._clone(tracer=self.tracer).onto(conn) cursor = await conn.cursor() await cursor.execute("SELECT 1") @@ -314,7 +314,7 @@ async def test_user_specified_service_integration_v0(self): ) async def test_user_specified_service_integration_v1(self): conn = await aiopg.connect(**POSTGRES_CONFIG) - Pin.get_from(conn).clone(tracer=self.tracer).onto(conn) + Pin.get_from(conn)._clone(tracer=self.tracer).onto(conn) cursor = await conn.cursor() await cursor.execute("SELECT 1") @@ -327,7 +327,7 @@ async def test_user_specified_service_integration_v1(self): @AsyncioTestCase.run_in_subprocess(env_overrides=dict(DD_SERVICE="mysvc", DD_TRACE_SPAN_ATTRIBUTE_SCHEMA="v0")) async def test_user_specified_service_env_var_v0(self): conn = await 
aiopg.connect(**POSTGRES_CONFIG) - Pin.get_from(conn).clone(tracer=self.tracer).onto(conn) + Pin.get_from(conn)._clone(tracer=self.tracer).onto(conn) cursor = await conn.cursor() await cursor.execute("SELECT 1") @@ -340,7 +340,7 @@ async def test_user_specified_service_env_var_v0(self): @AsyncioTestCase.run_in_subprocess(env_overrides=dict(DD_SERVICE="mysvc", DD_TRACE_SPAN_ATTRIBUTE_SCHEMA="v1")) async def test_user_specified_service_env_var_v1(self): conn = await aiopg.connect(**POSTGRES_CONFIG) - Pin.get_from(conn).clone(tracer=self.tracer).onto(conn) + Pin.get_from(conn)._clone(tracer=self.tracer).onto(conn) cursor = await conn.cursor() await cursor.execute("SELECT 1") @@ -355,7 +355,7 @@ async def test_user_specified_service_env_var_v1(self): ) async def test_pin_override_service_v1(self): conn = await aiopg.connect(**POSTGRES_CONFIG) - Pin.get_from(conn).clone(tracer=self.tracer, service="override").onto(conn) + Pin.get_from(conn)._clone(tracer=self.tracer, service="override").onto(conn) cursor = await conn.cursor() await cursor.execute("SELECT 1") @@ -368,7 +368,7 @@ async def test_pin_override_service_v1(self): @AsyncioTestCase.run_in_subprocess(env_overrides=dict(DD_TRACE_SPAN_ATTRIBUTE_SCHEMA="v0")) async def test_span_name_v0_schema(self): conn = await aiopg.connect(**POSTGRES_CONFIG) - Pin.get_from(conn).clone(tracer=self.tracer).onto(conn) + Pin.get_from(conn)._clone(tracer=self.tracer).onto(conn) cursor = await conn.cursor() await cursor.execute("SELECT 1") @@ -381,7 +381,7 @@ async def test_span_name_v0_schema(self): @AsyncioTestCase.run_in_subprocess(env_overrides=dict(DD_TRACE_SPAN_ATTRIBUTE_SCHEMA="v1")) async def test_span_name_v1_schema(self): conn = await aiopg.connect(**POSTGRES_CONFIG) - Pin.get_from(conn).clone(tracer=self.tracer).onto(conn) + Pin.get_from(conn)._clone(tracer=self.tracer).onto(conn) cursor = await conn.cursor() await cursor.execute("SELECT 1") @@ -395,7 +395,7 @@ class AiopgAnalyticsTestCase(AiopgTestCase): async def trace_spans(self): conn, _ = await self._get_conn_and_tracer() - Pin.get_from(conn).clone(service="db", tracer=self.tracer).onto(conn) + Pin.get_from(conn)._clone(service="db", tracer=self.tracer).onto(conn) cursor = await conn.cursor() await cursor.execute("select 'foobar'") diff --git a/tests/contrib/algoliasearch/test.py b/tests/contrib/algoliasearch/test.py index 87f5f7b6910..53603f47287 100644 --- a/tests/contrib/algoliasearch/test.py +++ b/tests/contrib/algoliasearch/test.py @@ -52,7 +52,7 @@ def search(self, query, args=None, request_options=None): def patch_algoliasearch(self): patch() - Pin.override(self.index, tracer=self.tracer) + Pin._override(self.index, tracer=self.tracer) def tearDown(self): super(AlgoliasearchTest, self).tearDown() @@ -157,7 +157,7 @@ def test_patch_unpatch(self): def test_patch_all_auto_enable(self): patch_all() - Pin.override(self.index, tracer=self.tracer) + Pin._override(self.index, tracer=self.tracer) self.perform_search("test search") spans = self.get_spans() @@ -179,7 +179,7 @@ def test_user_specified_service_default(self): The algoliasearch integration shouldn't use it as the service name """ patch_all() - Pin.override(self.index, tracer=self.tracer) + Pin._override(self.index, tracer=self.tracer) self.perform_search("test search") spans = self.get_spans() self.reset() @@ -195,7 +195,7 @@ def test_user_specified_service_v0(self): The algoliasearch integration shouldn't use it as the service name """ patch_all() - Pin.override(self.index, tracer=self.tracer) + Pin._override(self.index, 
tracer=self.tracer) self.perform_search("test search") spans = self.get_spans() self.reset() @@ -211,7 +211,7 @@ def test_user_specified_service_v1(self): so make sure that is used and not the v0 schema 'algoliasearch' """ patch_all() - Pin.override(self.index, tracer=self.tracer) + Pin._override(self.index, tracer=self.tracer) self.perform_search("test search") spans = self.get_spans() self.reset() @@ -223,7 +223,7 @@ def test_user_specified_service_v1(self): @TracerTestCase.run_in_subprocess(env_overrides=dict(DD_TRACE_SPAN_ATTRIBUTE_SCHEMA="v0")) def test_span_name_v0_schema(self): patch_all() - Pin.override(self.index, tracer=self.tracer) + Pin._override(self.index, tracer=self.tracer) self.perform_search("test search") spans = self.get_spans() self.reset() @@ -235,7 +235,7 @@ def test_span_name_v0_schema(self): @TracerTestCase.run_in_subprocess(env_overrides=dict(DD_TRACE_SPAN_ATTRIBUTE_SCHEMA="v1")) def test_span_name_v1_schema(self): patch_all() - Pin.override(self.index, tracer=self.tracer) + Pin._override(self.index, tracer=self.tracer) self.perform_search("test search") spans = self.get_spans() self.reset() diff --git a/tests/contrib/anthropic/conftest.py b/tests/contrib/anthropic/conftest.py index 3e5dac0a442..1cb600b0b82 100644 --- a/tests/contrib/anthropic/conftest.py +++ b/tests/contrib/anthropic/conftest.py @@ -36,7 +36,7 @@ def mock_tracer(ddtrace_global_config, anthropic): try: pin = Pin.get_from(anthropic) mock_tracer = DummyTracer(writer=DummyWriter(trace_flush_enabled=False)) - pin.override(anthropic, tracer=mock_tracer) + pin._override(anthropic, tracer=mock_tracer) pin.tracer._configure() if ddtrace_global_config.get("_llmobs_enabled", False): # Have to disable and re-enable LLMObs to use to mock tracer. diff --git a/tests/contrib/aredis/test_aredis.py b/tests/contrib/aredis/test_aredis.py index e62cfa974be..298abdbf85b 100644 --- a/tests/contrib/aredis/test_aredis.py +++ b/tests/contrib/aredis/test_aredis.py @@ -122,7 +122,7 @@ async def test_meta_override(tracer, test_spans): r = aredis.StrictRedis(port=REDIS_CONFIG["port"]) pin = Pin.get_from(r) assert pin is not None - pin.clone(tags={"cheese": "camembert"}, tracer=tracer).onto(r) + pin._clone(tags={"cheese": "camembert"}, tracer=tracer).onto(r) await r.get("cheese") test_spans.assert_trace_count(1) @@ -162,7 +162,7 @@ async def test(tracer, test_spans): r = aredis.StrictRedis(port=REDIS_CONFIG["port"]) pin = Pin.get_from(r) assert pin is not None - pin.clone(tags={{"cheese": "camembert"}}, tracer=tracer).onto(r) + pin._clone(tags={{"cheese": "camembert"}}, tracer=tracer).onto(r) await r.get("cheese") test_spans.assert_trace_count(1) diff --git a/tests/contrib/asyncio/test_tracer.py b/tests/contrib/asyncio/test_tracer.py index cb474786b72..2304544e62b 100644 --- a/tests/contrib/asyncio/test_tracer.py +++ b/tests/contrib/asyncio/test_tracer.py @@ -4,7 +4,6 @@ import pytest from ddtrace.constants import ERROR_MSG -from ddtrace.contrib.internal.asyncio.compat import asyncio_current_task from ddtrace.contrib.internal.asyncio.patch import patch from ddtrace.contrib.internal.asyncio.patch import unpatch @@ -65,20 +64,6 @@ def test_event_loop_exception(tracer): assert ctx is None -def test_context_task_none(tracer): - loop = asyncio.new_event_loop() - asyncio.set_event_loop(loop) - # it should handle the case where a Task is not available - # Note: the @pytest.mark.asyncio is missing to simulate an execution - # without a Task - task = asyncio_current_task() - # the task is not available - assert task is None - - ctx = 
tracer.current_trace_context() - assert ctx is None - - @pytest.mark.asyncio async def test_exception(tracer): async def f1(): diff --git a/tests/contrib/asyncpg/test_asyncpg.py b/tests/contrib/asyncpg/test_asyncpg.py index 5e5b649faaf..5de995c80aa 100644 --- a/tests/contrib/asyncpg/test_asyncpg.py +++ b/tests/contrib/asyncpg/test_asyncpg.py @@ -158,7 +158,7 @@ async def test_cursor_manual(patched_conn): @pytest.mark.snapshot @pytest.mark.xfail async def test_service_override_pin(patched_conn): - Pin.override(patched_conn, service="custom-svc") + Pin._override(patched_conn, service="custom-svc") await patched_conn.execute("SELECT 1") @@ -351,7 +351,7 @@ async def _get_conn_tracer(self): assert pin # Customize the service # we have to apply it on the existing one since new one won't inherit `app` - pin.clone(tracer=self.tracer).onto(self.conn) + pin._clone(tracer=self.tracer).onto(self.conn) return self.conn, self.tracer @@ -468,7 +468,7 @@ async def test_asyncpg_dbm_propagation_comment_pin_service_name_override(self): db_name = POSTGRES_CONFIG["dbname"] conn, tracer = await self._get_conn_tracer() - Pin.override(conn, service="pin-service-name-override", tracer=tracer) + Pin._override(conn, service="pin-service-name-override", tracer=tracer) def mock_func(args, kwargs, sql_pos, sql_kw, sql_with_dbm_tags): return args, kwargs diff --git a/tests/contrib/avro/test_avro.py b/tests/contrib/avro/test_avro.py index 3db10460a23..b2db731a71e 100644 --- a/tests/contrib/avro/test_avro.py +++ b/tests/contrib/avro/test_avro.py @@ -49,7 +49,7 @@ def test_basic_schema_serialize(avro, tracer, test_spans): pin = Pin.get_from(writer) assert pin is not None - pin.clone(tags={"cheese": "camembert"}, tracer=tracer).onto(writer) + pin._clone(tags={"cheese": "camembert"}, tracer=tracer).onto(writer) with tracer.trace("basic_avro_schema.serialization") as span: span.context.sampling_priority = AUTO_KEEP @@ -82,7 +82,7 @@ def test_advanced_schema_serialize(avro, tracer, test_spans): pin = Pin.get_from(writer) assert pin is not None - pin.clone(tags={"cheese": "camembert"}, tracer=tracer).onto(writer) + pin._clone(tags={"cheese": "camembert"}, tracer=tracer).onto(writer) with tracer.trace("advanced_avro_schema.serialization") as span: span.context.sampling_priority = AUTO_KEEP @@ -128,7 +128,7 @@ def test_basic_schema_deserialize(avro, tracer, test_spans): pin = Pin.get_from(reader) assert pin is not None - pin.clone(tags={"cheese": "camembert"}, tracer=tracer).onto(reader) + pin._clone(tags={"cheese": "camembert"}, tracer=tracer).onto(reader) with tracer.trace("basic_avro_schema.deserialization") as span: span.context.sampling_priority = AUTO_KEEP @@ -160,7 +160,7 @@ def test_advanced_schema_deserialize(avro, tracer, test_spans): pin = Pin.get_from(reader) assert pin is not None - pin.clone(tags={"cheese": "camembert"}, tracer=tracer).onto(reader) + pin._clone(tags={"cheese": "camembert"}, tracer=tracer).onto(reader) with tracer.trace("advanced_avro_schema.deserialization") as span: span.context.sampling_priority = AUTO_KEEP diff --git a/tests/contrib/boto/test.py b/tests/contrib/boto/test.py index 2570ca9c65c..6da67eac9d3 100644 --- a/tests/contrib/boto/test.py +++ b/tests/contrib/boto/test.py @@ -39,7 +39,9 @@ def setUp(self): @mock_ec2 def test_ec2_client(self): ec2 = boto.ec2.connect_to_region("us-west-2") - Pin(service=self.TEST_SERVICE, tracer=self.tracer).onto(ec2) + pin = Pin(service=self.TEST_SERVICE) + pin._tracer = self.tracer + pin.onto(ec2) ec2.get_all_instances() spans = self.pop_spans() @@ -77,7 
+79,7 @@ def test_ec2_client(self): @TracerTestCase.run_in_subprocess(env_overrides=dict(DD_SERVICE="mysvc")) def test_schematized_env_service_default_ec2_client(self): ec2 = boto.ec2.connect_to_region("us-west-2") - Pin.get_from(ec2).clone(tracer=self.tracer).onto(ec2) + Pin.get_from(ec2)._clone(tracer=self.tracer).onto(ec2) # Create an instance ec2.run_instances(21) @@ -91,7 +93,7 @@ def test_schematized_env_service_default_ec2_client(self): @TracerTestCase.run_in_subprocess(env_overrides=dict(DD_SERVICE="mysvc", DD_TRACE_SPAN_ATTRIBUTE_SCHEMA="v0")) def test_schematized_env_service_v0_ec2_client(self): ec2 = boto.ec2.connect_to_region("us-west-2") - Pin.get_from(ec2).clone(tracer=self.tracer).onto(ec2) + Pin.get_from(ec2)._clone(tracer=self.tracer).onto(ec2) # Create an instance ec2.run_instances(21) @@ -105,7 +107,7 @@ def test_schematized_env_service_v0_ec2_client(self): @TracerTestCase.run_in_subprocess(env_overrides=dict(DD_SERVICE="mysvc", DD_TRACE_SPAN_ATTRIBUTE_SCHEMA="v1")) def test_schematized_env_service_v1_ec2_client(self): ec2 = boto.ec2.connect_to_region("us-west-2") - Pin.get_from(ec2).clone(tracer=self.tracer).onto(ec2) + Pin.get_from(ec2)._clone(tracer=self.tracer).onto(ec2) # Create an instance ec2.run_instances(21) @@ -119,7 +121,7 @@ def test_schematized_env_service_v1_ec2_client(self): @TracerTestCase.run_in_subprocess(env_overrides=dict()) def test_schematized_unspecified_service_default_ec2_client(self): ec2 = boto.ec2.connect_to_region("us-west-2") - Pin.get_from(ec2).clone(tracer=self.tracer).onto(ec2) + Pin.get_from(ec2)._clone(tracer=self.tracer).onto(ec2) # Create an instance ec2.run_instances(21) @@ -133,7 +135,7 @@ def test_schematized_unspecified_service_default_ec2_client(self): @TracerTestCase.run_in_subprocess(env_overrides=dict(DD_TRACE_SPAN_ATTRIBUTE_SCHEMA="v0")) def test_schematized_unspecified_service_v0_ec2_client(self): ec2 = boto.ec2.connect_to_region("us-west-2") - Pin.get_from(ec2).clone(tracer=self.tracer).onto(ec2) + Pin.get_from(ec2)._clone(tracer=self.tracer).onto(ec2) # Create an instance ec2.run_instances(21) @@ -147,7 +149,7 @@ def test_schematized_unspecified_service_v0_ec2_client(self): @TracerTestCase.run_in_subprocess(env_overrides=dict(DD_TRACE_SPAN_ATTRIBUTE_SCHEMA="v1")) def test_schematized_unspecified_service_v1_ec2_client(self): ec2 = boto.ec2.connect_to_region("us-west-2") - Pin.get_from(ec2).clone(tracer=self.tracer).onto(ec2) + Pin.get_from(ec2)._clone(tracer=self.tracer).onto(ec2) # Create an instance ec2.run_instances(21) @@ -161,7 +163,7 @@ def test_schematized_unspecified_service_v1_ec2_client(self): @TracerTestCase.run_in_subprocess(env_overrides=dict(DD_TRACE_SPAN_ATTRIBUTE_SCHEMA="v0")) def test_schematized_operation_name_v0_ec2_client(self): ec2 = boto.ec2.connect_to_region("us-west-2") - Pin.get_from(ec2).clone(tracer=self.tracer).onto(ec2) + Pin.get_from(ec2)._clone(tracer=self.tracer).onto(ec2) # Create an instance ec2.run_instances(21) @@ -175,7 +177,7 @@ def test_schematized_operation_name_v0_ec2_client(self): @TracerTestCase.run_in_subprocess(env_overrides=dict(DD_TRACE_SPAN_ATTRIBUTE_SCHEMA="v1")) def test_schematized_operation_name_v1_ec2_client(self): ec2 = boto.ec2.connect_to_region("us-west-2") - Pin.get_from(ec2).clone(tracer=self.tracer).onto(ec2) + Pin.get_from(ec2)._clone(tracer=self.tracer).onto(ec2) # Create an instance ec2.run_instances(21) @@ -188,7 +190,9 @@ def test_schematized_operation_name_v1_ec2_client(self): def _test_s3_client(self): # DEV: To test tag params check create bucket's span s3 = 
boto.s3.connect_to_region("us-east-1") - Pin(service=self.TEST_SERVICE, tracer=self.tracer).onto(s3) + pin = Pin(service=self.TEST_SERVICE) + pin._tracer = self.tracer + pin.onto(s3) s3.get_all_buckets() spans = self.pop_spans() @@ -247,7 +251,7 @@ def _test_s3_client(self): def test_schematized_env_service_name_default_s3_client(self): # DEV: To test tag params check create bucket's span s3 = boto.s3.connect_to_region("us-east-1") - Pin.get_from(s3).clone(tracer=self.tracer).onto(s3) + Pin.get_from(s3)._clone(tracer=self.tracer).onto(s3) # Create the test bucket s3.create_bucket("cheese") @@ -266,7 +270,7 @@ def test_schematized_env_service_name_default_s3_client(self): def test_schematized_env_service_name_v0_s3_client(self): # DEV: To test tag params check create bucket's span s3 = boto.s3.connect_to_region("us-east-1") - Pin.get_from(s3).clone(tracer=self.tracer).onto(s3) + Pin.get_from(s3)._clone(tracer=self.tracer).onto(s3) # Create the test bucket s3.create_bucket("cheese") @@ -285,7 +289,7 @@ def test_schematized_env_service_name_v0_s3_client(self): def test_schematized_env_service_name_v1_s3_client(self): # DEV: To test tag params check create bucket's span s3 = boto.s3.connect_to_region("us-east-1") - Pin.get_from(s3).clone(tracer=self.tracer).onto(s3) + Pin.get_from(s3)._clone(tracer=self.tracer).onto(s3) # Create the test bucket s3.create_bucket("cheese") @@ -304,7 +308,7 @@ def test_schematized_env_service_name_v1_s3_client(self): def test_schematized_operation_name_v0_s3_client(self): # DEV: To test tag params check create bucket's span s3 = boto.s3.connect_to_region("us-east-1") - Pin.get_from(s3).clone(tracer=self.tracer).onto(s3) + Pin.get_from(s3)._clone(tracer=self.tracer).onto(s3) # Create the test bucket s3.create_bucket("cheese") @@ -323,7 +327,7 @@ def test_schematized_operation_name_v0_s3_client(self): def test_schematized_operation_name_v1_s3_client(self): # DEV: To test tag params check create bucket's span s3 = boto.s3.connect_to_region("us-east-1") - Pin.get_from(s3).clone(tracer=self.tracer).onto(s3) + Pin.get_from(s3)._clone(tracer=self.tracer).onto(s3) # Create the test bucket s3.create_bucket("cheese") @@ -342,7 +346,7 @@ def test_schematized_operation_name_v1_s3_client(self): def test_schematized_unspecified_service_name_default_s3_client(self): # DEV: To test tag params check create bucket's span s3 = boto.s3.connect_to_region("us-east-1") - Pin.get_from(s3).clone(tracer=self.tracer).onto(s3) + Pin.get_from(s3)._clone(tracer=self.tracer).onto(s3) # Create the test bucket s3.create_bucket("cheese") @@ -361,7 +365,7 @@ def test_schematized_unspecified_service_name_default_s3_client(self): def test_schematized_unspecified_service_name_v0_s3_client(self): # DEV: To test tag params check create bucket's span s3 = boto.s3.connect_to_region("us-east-1") - Pin.get_from(s3).clone(tracer=self.tracer).onto(s3) + Pin.get_from(s3)._clone(tracer=self.tracer).onto(s3) # Create the test bucket s3.create_bucket("cheese") @@ -380,7 +384,7 @@ def test_schematized_unspecified_service_name_v0_s3_client(self): def test_schematized_unspecified_service_name_v1_s3_client(self): # DEV: To test tag params check create bucket's span s3 = boto.s3.connect_to_region("us-east-1") - Pin.get_from(s3).clone(tracer=self.tracer).onto(s3) + Pin.get_from(s3)._clone(tracer=self.tracer).onto(s3) # Create the test bucket s3.create_bucket("cheese") @@ -411,7 +415,9 @@ def test_s3_client_no_params(self): @mock_s3 def test_s3_put(self): s3 = boto.s3.connect_to_region("us-east-1") - 
Pin(service=self.TEST_SERVICE, tracer=self.tracer).onto(s3) + pin = Pin(service=self.TEST_SERVICE) + pin._tracer = self.tracer + pin.onto(s3) s3.create_bucket("mybucket") bucket = s3.get_bucket("mybucket") k = boto.s3.key.Key(bucket) @@ -445,7 +451,9 @@ def test_s3_put(self): @mock_lambda def test_unpatch(self): lamb = boto.awslambda.connect_to_region("us-east-2") - Pin(service=self.TEST_SERVICE, tracer=self.tracer).onto(lamb) + pin = Pin(service=self.TEST_SERVICE) + pin._tracer = self.tracer + pin.onto(lamb) unpatch() # multiple calls @@ -456,11 +464,12 @@ def test_unpatch(self): @mock_s3 def test_double_patch(self): s3 = boto.s3.connect_to_region("us-east-1") - Pin(service=self.TEST_SERVICE, tracer=self.tracer).onto(s3) + pin = Pin(service=self.TEST_SERVICE) + pin._tracer = self.tracer + pin.onto(s3) patch() patch() - # Get the created bucket s3.create_bucket("cheese") spans = self.pop_spans() @@ -470,7 +479,9 @@ def test_double_patch(self): @mock_lambda def test_lambda_client(self): lamb = boto.awslambda.connect_to_region("us-east-2") - Pin(service=self.TEST_SERVICE, tracer=self.tracer).onto(lamb) + pin = Pin(service=self.TEST_SERVICE) + pin._tracer = self.tracer + pin.onto(lamb) # multiple calls lamb.list_functions() @@ -495,7 +506,7 @@ def test_lambda_client(self): @TracerTestCase.run_in_subprocess(env_overrides=dict(DD_SERVICE="mysvc")) def test_schematized_env_service_name_default_lambda_client(self): lamb = boto.awslambda.connect_to_region("us-east-2") - Pin.get_from(lamb).clone(tracer=self.tracer).onto(lamb) + Pin.get_from(lamb)._clone(tracer=self.tracer).onto(lamb) lamb.list_functions() @@ -507,7 +518,7 @@ def test_schematized_env_service_name_default_lambda_client(self): @TracerTestCase.run_in_subprocess(env_overrides=dict(DD_SERVICE="mysvc", DD_TRACE_SPAN_ATTRIBUTE_SCHEMA="v0")) def test_schematized_env_service_name_v0_lambda_client(self): lamb = boto.awslambda.connect_to_region("us-east-2") - Pin.get_from(lamb).clone(tracer=self.tracer).onto(lamb) + Pin.get_from(lamb)._clone(tracer=self.tracer).onto(lamb) lamb.list_functions() @@ -519,7 +530,7 @@ def test_schematized_env_service_name_v0_lambda_client(self): @TracerTestCase.run_in_subprocess(env_overrides=dict(DD_SERVICE="mysvc", DD_TRACE_SPAN_ATTRIBUTE_SCHEMA="v1")) def test_schematized_env_service_name_v1_lambda_client(self): lamb = boto.awslambda.connect_to_region("us-east-2") - Pin.get_from(lamb).clone(tracer=self.tracer).onto(lamb) + Pin.get_from(lamb)._clone(tracer=self.tracer).onto(lamb) lamb.list_functions() @@ -531,7 +542,7 @@ def test_schematized_env_service_name_v1_lambda_client(self): @TracerTestCase.run_in_subprocess(env_overrides=dict(DD_SERVICE="mysvc")) def test_schematized_unspecified_service_name_default_lambda_client(self): lamb = boto.awslambda.connect_to_region("us-east-2") - Pin.get_from(lamb).clone(tracer=self.tracer).onto(lamb) + Pin.get_from(lamb)._clone(tracer=self.tracer).onto(lamb) lamb.list_functions() @@ -543,7 +554,7 @@ def test_schematized_unspecified_service_name_default_lambda_client(self): @TracerTestCase.run_in_subprocess(env_overrides=dict(DD_TRACE_SPAN_ATTRIBUTE_SCHEMA="v0")) def test_schematized_unspecified_service_name_v0_lambda_client(self): lamb = boto.awslambda.connect_to_region("us-east-2") - Pin.get_from(lamb).clone(tracer=self.tracer).onto(lamb) + Pin.get_from(lamb)._clone(tracer=self.tracer).onto(lamb) lamb.list_functions() @@ -555,7 +566,7 @@ def test_schematized_unspecified_service_name_v0_lambda_client(self): 
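Reviewer note (not part of the patch): the mechanical change running through these contrib test files is the migration from the deprecated public Pin helpers to their underscore-prefixed internals, and from passing tracer= to the Pin constructor to setting the attribute directly. In outline, for any instrumented object obj and a test tracer, as the hunks above show:

    from ddtrace.trace import Pin

    # before: Pin.override(obj, tracer=tracer)
    Pin._override(obj, tracer=tracer)

    # before: Pin.get_from(obj).clone(service="svc", tracer=tracer).onto(obj)
    Pin.get_from(obj)._clone(service="svc", tracer=tracer).onto(obj)

    # before: Pin(service="svc", tracer=tracer).onto(obj)
    pin = Pin(service="svc")
    pin._tracer = tracer  # the public constructor no longer accepts tracer=
    pin.onto(obj)
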
@TracerTestCase.run_in_subprocess(env_overrides=dict(DD_TRACE_SPAN_ATTRIBUTE_SCHEMA="v1")) def test_schematized_unspecified_service_name_v1_lambda_client(self): lamb = boto.awslambda.connect_to_region("us-east-2") - Pin.get_from(lamb).clone(tracer=self.tracer).onto(lamb) + Pin.get_from(lamb)._clone(tracer=self.tracer).onto(lamb) lamb.list_functions() @@ -567,7 +578,7 @@ def test_schematized_unspecified_service_name_v1_lambda_client(self): @TracerTestCase.run_in_subprocess(env_overrides=dict(DD_TRACE_SPAN_ATTRIBUTE_SCHEMA="v0")) def test_schematized_operation_name_v0_lambda_client(self): lamb = boto.awslambda.connect_to_region("us-east-2") - Pin.get_from(lamb).clone(tracer=self.tracer).onto(lamb) + Pin.get_from(lamb)._clone(tracer=self.tracer).onto(lamb) lamb.list_functions() @@ -579,7 +590,7 @@ def test_schematized_operation_name_v0_lambda_client(self): @TracerTestCase.run_in_subprocess(env_overrides=dict(DD_TRACE_SPAN_ATTRIBUTE_SCHEMA="v1")) def test_schematized_operation_name_v1_lambda_client(self): lamb = boto.awslambda.connect_to_region("us-east-2") - Pin.get_from(lamb).clone(tracer=self.tracer).onto(lamb) + Pin.get_from(lamb)._clone(tracer=self.tracer).onto(lamb) lamb.list_functions() @@ -590,7 +601,9 @@ def test_schematized_operation_name_v1_lambda_client(self): @mock_sts def test_sts_client(self): sts = boto.sts.connect_to_region("us-west-2") - Pin(service=self.TEST_SERVICE, tracer=self.tracer).onto(sts) + pin = Pin(service=self.TEST_SERVICE) + pin._tracer = self.tracer + pin.onto(sts) sts.get_federation_token(12, duration=10) @@ -610,7 +623,7 @@ def test_sts_client(self): @TracerTestCase.run_in_subprocess(env_overrides=dict(DD_SERVICE="mysvc")) def test_schematized_env_default_sts_client(self): sts = boto.sts.connect_to_region("us-west-2") - Pin.get_from(sts).clone(tracer=self.tracer).onto(sts) + Pin.get_from(sts)._clone(tracer=self.tracer).onto(sts) sts.get_federation_token(12, duration=10) @@ -624,7 +637,7 @@ def test_schematized_env_default_sts_client(self): @TracerTestCase.run_in_subprocess(env_overrides=dict(DD_SERVICE="mysvc", DD_TRACE_SPAN_ATTRIBUTE_SCHEMA="v0")) def test_schematized_env_v0_sts_client(self): sts = boto.sts.connect_to_region("us-west-2") - Pin.get_from(sts).clone(tracer=self.tracer).onto(sts) + Pin.get_from(sts)._clone(tracer=self.tracer).onto(sts) sts.get_federation_token(12, duration=10) @@ -638,7 +651,7 @@ def test_schematized_env_v0_sts_client(self): @TracerTestCase.run_in_subprocess(env_overrides=dict(DD_SERVICE="mysvc", DD_TRACE_SPAN_ATTRIBUTE_SCHEMA="v1")) def test_schematized_env_v1_sts_client(self): sts = boto.sts.connect_to_region("us-west-2") - Pin.get_from(sts).clone(tracer=self.tracer).onto(sts) + Pin.get_from(sts)._clone(tracer=self.tracer).onto(sts) sts.get_federation_token(12, duration=10) @@ -652,7 +665,7 @@ def test_schematized_env_v1_sts_client(self): @TracerTestCase.run_in_subprocess(env_overrides=dict()) def test_schematized_unspecified_service_default_sts_client(self): sts = boto.sts.connect_to_region("us-west-2") - Pin.get_from(sts).clone(tracer=self.tracer).onto(sts) + Pin.get_from(sts)._clone(tracer=self.tracer).onto(sts) sts.get_federation_token(12, duration=10) @@ -666,7 +679,7 @@ def test_schematized_unspecified_service_default_sts_client(self): @TracerTestCase.run_in_subprocess(env_overrides=dict(DD_TRACE_SPAN_ATTRIBUTE_SCHEMA="v0")) def test_schematized_unspecified_service_v0_sts_client(self): sts = boto.sts.connect_to_region("us-west-2") - Pin.get_from(sts).clone(tracer=self.tracer).onto(sts) + 
Pin.get_from(sts)._clone(tracer=self.tracer).onto(sts) sts.get_federation_token(12, duration=10) @@ -680,7 +693,7 @@ def test_schematized_unspecified_service_v0_sts_client(self): @TracerTestCase.run_in_subprocess(env_overrides=dict(DD_TRACE_SPAN_ATTRIBUTE_SCHEMA="v1")) def test_schematized_unspecified_service_sts_client(self): sts = boto.sts.connect_to_region("us-west-2") - Pin.get_from(sts).clone(tracer=self.tracer).onto(sts) + Pin.get_from(sts)._clone(tracer=self.tracer).onto(sts) sts.get_federation_token(12, duration=10) @@ -694,7 +707,7 @@ def test_schematized_unspecified_service_sts_client(self): @TracerTestCase.run_in_subprocess(env_overrides=dict(DD_TRACE_SPAN_ATTRIBUTE_SCHEMA="v0")) def test_schematized_operation_name_v0_sts_client(self): sts = boto.sts.connect_to_region("us-west-2") - Pin.get_from(sts).clone(tracer=self.tracer).onto(sts) + Pin.get_from(sts)._clone(tracer=self.tracer).onto(sts) sts.get_federation_token(12, duration=10) @@ -708,7 +721,7 @@ def test_schematized_operation_name_v0_sts_client(self): @TracerTestCase.run_in_subprocess(env_overrides=dict(DD_TRACE_SPAN_ATTRIBUTE_SCHEMA="v1")) def test_schematized_operation_name_sts_client(self): sts = boto.sts.connect_to_region("us-west-2") - Pin.get_from(sts).clone(tracer=self.tracer).onto(sts) + Pin.get_from(sts)._clone(tracer=self.tracer).onto(sts) sts.get_federation_token(12, duration=10) @@ -727,7 +740,9 @@ def test_schematized_operation_name_sts_client(self): ) def test_elasticache_client(self): elasticache = boto.elasticache.connect_to_region("us-west-2") - Pin(service=self.TEST_SERVICE, tracer=self.tracer).onto(elasticache) + pin = Pin(service=self.TEST_SERVICE) + pin._tracer = self.tracer + pin.onto(elasticache) elasticache.describe_cache_clusters() @@ -746,7 +761,9 @@ def test_ec2_client_ot(self): """OpenTracing compatibility check of the test_ec2_client test.""" ec2 = boto.ec2.connect_to_region("us-west-2") ot_tracer = init_tracer("my_svc", self.tracer) - Pin(service=self.TEST_SERVICE, tracer=self.tracer).onto(ec2) + pin = Pin(service=self.TEST_SERVICE) + pin._tracer = self.tracer + pin.onto(ec2) with ot_tracer.start_active_span("ot_span"): ec2.get_all_instances() diff --git a/tests/contrib/botocore/test.py b/tests/contrib/botocore/test.py index 67eaaf55fc3..cb1a06bec1c 100644 --- a/tests/contrib/botocore/test.py +++ b/tests/contrib/botocore/test.py @@ -104,7 +104,9 @@ def setUp(self): super(BotocoreTest, self).setUp() - Pin(service=self.TEST_SERVICE, tracer=self.tracer).onto(botocore.parsers.ResponseParser) + pin = Pin(service=self.TEST_SERVICE) + pin._tracer = self.tracer + pin.onto(botocore.parsers.ResponseParser) # Setting the validated flag to False ensures the redaction paths configurations are re-validated # FIXME: Ensure AWSPayloadTagging._REQUEST_REDACTION_PATHS_DEFAULTS is always in sync with # config.botocore.payload_tagging_request @@ -123,7 +125,9 @@ def tearDown(self): def test_patch_submodules(self): patch_submodules(["s3"]) ec2 = self.session.create_client("ec2", region_name="us-west-2") - Pin(service=self.TEST_SERVICE, tracer=self.tracer).onto(ec2) + pin = Pin(service=self.TEST_SERVICE) + pin._tracer = self.tracer + pin.onto(ec2) ec2.describe_instances() @@ -131,7 +135,9 @@ def test_patch_submodules(self): assert spans == [] s3 = self.session.create_client("s3", region_name="us-west-2") - Pin(service=self.TEST_SERVICE, tracer=self.tracer).onto(s3) + pin = Pin(service=self.TEST_SERVICE) + pin._tracer = self.tracer + pin.onto(s3) s3.list_buckets() s3.list_buckets() @@ -142,7 +148,9 @@ def 
test_patch_submodules(self): @mock_ec2 def test_traced_client(self): ec2 = self.session.create_client("ec2", region_name="us-west-2") - Pin(service=self.TEST_SERVICE, tracer=self.tracer).onto(ec2) + pin = Pin(service=self.TEST_SERVICE) + pin._tracer = self.tracer + pin.onto(ec2) ec2.describe_instances() @@ -169,7 +177,7 @@ def test_traced_client(self): @TracerTestCase.run_in_subprocess(env_overrides=dict(DD_SERVICE="mysvc")) def test_schematized_ec2_call_default(self): ec2 = self.session.create_client("ec2", region_name="us-west-2") - Pin.get_from(ec2).clone(tracer=self.tracer).onto(ec2) + Pin.get_from(ec2)._clone(tracer=self.tracer).onto(ec2) ec2.describe_instances() @@ -182,7 +190,7 @@ def test_schematized_ec2_call_default(self): @TracerTestCase.run_in_subprocess(env_overrides=dict(DD_SERVICE="mysvc", DD_TRACE_SPAN_ATTRIBUTE_SCHEMA="v0")) def test_schematized_ec2_call_v0(self): ec2 = self.session.create_client("ec2", region_name="us-west-2") - Pin.get_from(ec2).clone(tracer=self.tracer).onto(ec2) + Pin.get_from(ec2)._clone(tracer=self.tracer).onto(ec2) ec2.describe_instances() @@ -195,7 +203,7 @@ def test_schematized_ec2_call_v0(self): @TracerTestCase.run_in_subprocess(env_overrides=dict(DD_SERVICE="mysvc", DD_TRACE_SPAN_ATTRIBUTE_SCHEMA="v1")) def test_schematized_ec2_call_v1(self): ec2 = self.session.create_client("ec2", region_name="us-west-2") - Pin.get_from(ec2).clone(tracer=self.tracer).onto(ec2) + Pin.get_from(ec2)._clone(tracer=self.tracer).onto(ec2) ec2.describe_instances() @@ -208,7 +216,7 @@ def test_schematized_ec2_call_v1(self): @TracerTestCase.run_in_subprocess(env_overrides=dict()) def test_schematized_unspecified_service_ec2_call_default(self): ec2 = self.session.create_client("ec2", region_name="us-west-2") - Pin.get_from(ec2).clone(tracer=self.tracer).onto(ec2) + Pin.get_from(ec2)._clone(tracer=self.tracer).onto(ec2) ec2.describe_instances() @@ -221,7 +229,7 @@ def test_schematized_unspecified_service_ec2_call_default(self): @TracerTestCase.run_in_subprocess(env_overrides=dict(DD_TRACE_SPAN_ATTRIBUTE_SCHEMA="v0")) def test_schematized_unspecified_service_ec2_call_v0(self): ec2 = self.session.create_client("ec2", region_name="us-west-2") - Pin.get_from(ec2).clone(tracer=self.tracer).onto(ec2) + Pin.get_from(ec2)._clone(tracer=self.tracer).onto(ec2) ec2.describe_instances() @@ -234,7 +242,7 @@ def test_schematized_unspecified_service_ec2_call_v0(self): @TracerTestCase.run_in_subprocess(env_overrides=dict(DD_TRACE_SPAN_ATTRIBUTE_SCHEMA="v1")) def test_schematized_unspecified_service_ec2_call_v1(self): ec2 = self.session.create_client("ec2", region_name="us-west-2") - Pin.get_from(ec2).clone(tracer=self.tracer).onto(ec2) + Pin.get_from(ec2)._clone(tracer=self.tracer).onto(ec2) ec2.describe_instances() @@ -252,7 +260,9 @@ def test_schematized_unspecified_service_ec2_call_v1(self): @mock_dynamodb def test_dynamodb_put_get(self): ddb = self.session.create_client("dynamodb", region_name="us-west-2") - Pin(service=self.TEST_SERVICE, tracer=self.tracer).onto(ddb) + pin = Pin(service=self.TEST_SERVICE) + pin._tracer = self.tracer + pin.onto(ddb) with self.override_config("botocore", dict(instrument_internals=True)): ddb.create_table( @@ -296,7 +306,9 @@ def test_dynamodb_put_get(self): @mock_dynamodb def test_dynamodb_put_get_with_table_primary_key_mapping(self): ddb = self.session.create_client("dynamodb", region_name="us-west-2") - Pin(service=self.TEST_SERVICE, tracer=self.tracer).onto(ddb) + pin = Pin(service=self.TEST_SERVICE) + pin._tracer = self.tracer + pin.onto(ddb) with 
self.override_config( "botocore", @@ -358,7 +370,9 @@ def test_dynamodb_put_get_with_table_primary_key_mapping(self): @mock_dynamodb def test_dynamodb_put_get_with_broken_table_primary_key_mapping(self): ddb = self.session.create_client("dynamodb", region_name="us-west-2") - Pin(service=self.TEST_SERVICE, tracer=self.tracer).onto(ddb) + pin = Pin(service=self.TEST_SERVICE) + pin._tracer = self.tracer + pin.onto(ddb) with self.override_config( "botocore", @@ -407,7 +421,9 @@ def test_dynamodb_put_get_with_broken_table_primary_key_mapping(self): @mock_s3 def test_s3_client(self): s3 = self.session.create_client("s3", region_name="us-west-2") - Pin(service=self.TEST_SERVICE, tracer=self.tracer).onto(s3) + pin = Pin(service=self.TEST_SERVICE) + pin._tracer = self.tracer + pin.onto(s3) s3.list_buckets() s3.list_buckets() @@ -444,7 +460,9 @@ def test_s3_head_404_default(self): API calls with a 404 response """ s3 = self.session.create_client("s3", region_name="us-west-2") - Pin(service=self.TEST_SERVICE, tracer=self.tracer).onto(s3) + pin = Pin(service=self.TEST_SERVICE) + pin._tracer = self.tracer + pin.onto(s3) # We need a bucket for this test s3.create_bucket(Bucket="test", CreateBucketConfiguration=dict(LocationConstraint="us-west-2")) @@ -472,7 +490,9 @@ def test_s3_head_404_as_errors(self): we attach exception information to S3 HeadObject 404 responses """ s3 = self.session.create_client("s3", region_name="us-west-2") - Pin(service=self.TEST_SERVICE, tracer=self.tracer).onto(s3) + pin = Pin(service=self.TEST_SERVICE) + pin._tracer = self.tracer + pin.onto(s3) # We need a bucket for this test s3.create_bucket(Bucket="test", CreateBucketConfiguration=dict(LocationConstraint="us-west-2")) @@ -500,7 +520,9 @@ def test_s3_head_404_as_errors(self): def _test_s3_put(self): s3 = self.session.create_client("s3", region_name="us-west-2") - Pin(service=self.TEST_SERVICE, tracer=self.tracer).onto(s3) + pin = Pin(service=self.TEST_SERVICE) + pin._tracer = self.tracer + pin.onto(s3) params = { "Bucket": "mybucket", "CreateBucketConfiguration": { @@ -581,7 +603,7 @@ def test_s3_put_no_params(self): @TracerTestCase.run_in_subprocess(env_overrides=dict(DD_BOTOCORE_SERVICE="botocore")) def test_service_name_override(self): s3 = self.session.create_client("s3", region_name="us-west-2") - Pin.get_from(s3).clone(tracer=self.tracer).onto(s3) + Pin.get_from(s3)._clone(tracer=self.tracer).onto(s3) params = { "Bucket": "mybucket", @@ -612,7 +634,7 @@ def test_service_name_override(self): @TracerTestCase.run_in_subprocess(env_overrides=dict(DD_SERVICE="mysvc")) def test_schematized_s3_client_default(self): s3 = self.session.create_client("s3", region_name="us-west-2") - Pin.get_from(s3).clone(tracer=self.tracer).onto(s3) + Pin.get_from(s3)._clone(tracer=self.tracer).onto(s3) s3.list_buckets() @@ -626,7 +648,7 @@ def test_schematized_s3_client_default(self): @TracerTestCase.run_in_subprocess(env_overrides=dict(DD_SERVICE="mysvc", DD_TRACE_SPAN_ATTRIBUTE_SCHEMA="v0")) def test_schematized_s3_client_v0(self): s3 = self.session.create_client("s3", region_name="us-west-2") - Pin.get_from(s3).clone(tracer=self.tracer).onto(s3) + Pin.get_from(s3)._clone(tracer=self.tracer).onto(s3) s3.list_buckets() @@ -640,7 +662,7 @@ def test_schematized_s3_client_v0(self): @TracerTestCase.run_in_subprocess(env_overrides=dict(DD_SERVICE="mysvc", DD_TRACE_SPAN_ATTRIBUTE_SCHEMA="v1")) def test_schematized_s3_client_v1(self): s3 = self.session.create_client("s3", region_name="us-west-2") - 
Pin.get_from(s3).clone(tracer=self.tracer).onto(s3) + Pin.get_from(s3)._clone(tracer=self.tracer).onto(s3) s3.list_buckets() @@ -654,7 +676,7 @@ def test_schematized_s3_client_v1(self): @TracerTestCase.run_in_subprocess(env_overrides=dict()) def test_schematized_unspecified_service_s3_client_default(self): s3 = self.session.create_client("s3", region_name="us-west-2") - Pin.get_from(s3).clone(tracer=self.tracer).onto(s3) + Pin.get_from(s3)._clone(tracer=self.tracer).onto(s3) s3.list_buckets() @@ -668,7 +690,7 @@ def test_schematized_unspecified_service_s3_client_default(self): @TracerTestCase.run_in_subprocess(env_overrides=dict(DD_TRACE_SPAN_ATTRIBUTE_SCHEMA="v0")) def test_schematized_unspecified_service_s3_client_v0(self): s3 = self.session.create_client("s3", region_name="us-west-2") - Pin.get_from(s3).clone(tracer=self.tracer).onto(s3) + Pin.get_from(s3)._clone(tracer=self.tracer).onto(s3) s3.list_buckets() @@ -682,7 +704,7 @@ def test_schematized_unspecified_service_s3_client_v0(self): @TracerTestCase.run_in_subprocess(env_overrides=dict(DD_TRACE_SPAN_ATTRIBUTE_SCHEMA="v1")) def test_schematized_unspecified_service_s3_client_v1(self): s3 = self.session.create_client("s3", region_name="us-west-2") - Pin.get_from(s3).clone(tracer=self.tracer).onto(s3) + Pin.get_from(s3)._clone(tracer=self.tracer).onto(s3) s3.list_buckets() @@ -695,7 +717,9 @@ def test_schematized_unspecified_service_s3_client_v1(self): def _test_sqs_client(self): self.sqs_client.delete_queue(QueueUrl=self.queue_name) # Delete so we can test create_queue spans - Pin(service=self.TEST_SERVICE, tracer=self.tracer).onto(self.sqs_client) + pin = Pin(service=self.TEST_SERVICE) + pin._tracer = self.tracer + pin.onto(self.sqs_client) self.sqs_test_queue = self.sqs_client.create_queue(QueueName=self.queue_name) spans = self.get_spans() @@ -731,7 +755,9 @@ def test_sqs_client_no_params(self): @mock_sqs def test_sqs_send_message_non_url_queue(self): - Pin(service=self.TEST_SERVICE, tracer=self.tracer).onto(self.sqs_client) + pin = Pin(service=self.TEST_SERVICE) + pin._tracer = self.tracer + pin.onto(self.sqs_client) self.sqs_client.send_message(QueueUrl="Test", MessageBody="world") spans = self.get_spans() @@ -744,7 +770,9 @@ def test_sqs_send_message_non_url_queue(self): @mock_sqs def test_sqs_send_message_distributed_tracing_off(self): with self.override_config("botocore", dict(distributed_tracing=False)): - Pin(service=self.TEST_SERVICE, tracer=self.tracer).onto(self.sqs_client) + pin = Pin(service=self.TEST_SERVICE) + pin._tracer = self.tracer + pin.onto(self.sqs_client) self.sqs_client.send_message(QueueUrl=self.sqs_test_queue["QueueUrl"], MessageBody="world") spans = self.get_spans() @@ -774,7 +802,9 @@ def test_sqs_send_message_distributed_tracing_off(self): @mock_sqs def test_sqs_send_message_distributed_tracing_on(self): with self.override_config("botocore", dict(distributed_tracing=True, propagation_enabled=True)): - Pin(service=self.TEST_SERVICE, tracer=self.tracer).onto(self.sqs_client) + pin = Pin(service=self.TEST_SERVICE) + pin._tracer = self.tracer + pin.onto(self.sqs_client) self.sqs_client.send_message(QueueUrl=self.sqs_test_queue["QueueUrl"], MessageBody="world") spans = self.get_spans() @@ -851,8 +881,8 @@ def _test_distributed_tracing_sns_to_sqs(self, raw_message_delivery): AttributeValue="true", ) - Pin.get_from(sns).clone(tracer=self.tracer).onto(sns) - Pin.get_from(self.sqs_client).clone(tracer=self.tracer).onto(self.sqs_client) + Pin.get_from(sns)._clone(tracer=self.tracer).onto(sns) + 
Pin.get_from(self.sqs_client)._clone(tracer=self.tracer).onto(self.sqs_client) sns.publish(TopicArn=topic_arn, Message="test") @@ -891,7 +921,9 @@ def _test_distributed_tracing_sns_to_sqs(self, raw_message_delivery): @mock_sqs def test_sqs_send_message_trace_injection_with_max_message_attributes(self): - Pin(service=self.TEST_SERVICE, tracer=self.tracer).onto(self.sqs_client) + pin = Pin(service=self.TEST_SERVICE) + pin._tracer = self.tracer + pin.onto(self.sqs_client) message_attributes = { "one": {"DataType": "String", "StringValue": "one"}, "two": {"DataType": "String", "StringValue": "two"}, @@ -934,7 +966,9 @@ def test_sqs_send_message_trace_injection_with_max_message_attributes(self): @mock_sqs def test_sqs_send_message_batch_trace_injection_with_no_message_attributes(self): - Pin(service=self.TEST_SERVICE, tracer=self.tracer).onto(self.sqs_client) + pin = Pin(service=self.TEST_SERVICE) + pin._tracer = self.tracer + pin.onto(self.sqs_client) entries = [ { "Id": "1", @@ -969,7 +1003,9 @@ def test_sqs_send_message_batch_trace_injection_with_no_message_attributes(self) @mock_sqs def test_sqs_send_message_batch_trace_injection_with_message_attributes(self): - Pin(service=self.TEST_SERVICE, tracer=self.tracer).onto(self.sqs_client) + pin = Pin(service=self.TEST_SERVICE) + pin._tracer = self.tracer + pin.onto(self.sqs_client) entries = [ { "Id": "1", @@ -1016,7 +1052,9 @@ def test_sqs_send_message_batch_trace_injection_with_message_attributes(self): @mock_sqs def test_sqs_send_message_batch_trace_injection_with_max_message_attributes(self): - Pin(service=self.TEST_SERVICE, tracer=self.tracer).onto(self.sqs_client) + pin = Pin(service=self.TEST_SERVICE) + pin._tracer = self.tracer + pin.onto(self.sqs_client) entries = [ { "Id": "1", @@ -1063,7 +1101,7 @@ def test_sqs_send_message_batch_trace_injection_with_max_message_attributes(self @mock_sqs @TracerTestCase.run_in_subprocess(env_overrides=dict(DD_SERVICE="mysvc")) def test_schematized_sqs_client_default(self): - Pin.get_from(self.sqs_client).clone(tracer=self.tracer).onto(self.sqs_client) + Pin.get_from(self.sqs_client)._clone(tracer=self.tracer).onto(self.sqs_client) self.sqs_client.send_message(QueueUrl=self.sqs_test_queue["QueueUrl"], MessageBody="world") self.sqs_client.send_message_batch( QueueUrl=self.sqs_test_queue["QueueUrl"], Entries=[{"Id": "1", "MessageBody": "hello"}] @@ -1085,7 +1123,7 @@ def test_schematized_sqs_client_default(self): @mock_sqs @TracerTestCase.run_in_subprocess(env_overrides=dict(DD_SERVICE="mysvc", DD_TRACE_SPAN_ATTRIBUTE_SCHEMA="v0")) def test_schematized_sqs_client_v0(self): - Pin.get_from(self.sqs_client).clone(tracer=self.tracer).onto(self.sqs_client) + Pin.get_from(self.sqs_client)._clone(tracer=self.tracer).onto(self.sqs_client) self.sqs_client.send_message(QueueUrl=self.sqs_test_queue["QueueUrl"], MessageBody="world") self.sqs_client.send_message_batch( @@ -1108,7 +1146,7 @@ def test_schematized_sqs_client_v0(self): @mock_sqs @TracerTestCase.run_in_subprocess(env_overrides=dict(DD_SERVICE="mysvc", DD_TRACE_SPAN_ATTRIBUTE_SCHEMA="v1")) def test_schematized_sqs_client_v1(self): - Pin.get_from(self.sqs_client).clone(tracer=self.tracer).onto(self.sqs_client) + Pin.get_from(self.sqs_client)._clone(tracer=self.tracer).onto(self.sqs_client) self.sqs_client.send_message(QueueUrl=self.sqs_test_queue["QueueUrl"], MessageBody="world") self.sqs_client.send_message_batch( @@ -1131,7 +1169,7 @@ def test_schematized_sqs_client_v1(self): @mock_sqs @TracerTestCase.run_in_subprocess(env_overrides=dict()) def 
test_schematized_unspecified_service_sqs_client_default(self): - Pin.get_from(self.sqs_client).clone(tracer=self.tracer).onto(self.sqs_client) + Pin.get_from(self.sqs_client)._clone(tracer=self.tracer).onto(self.sqs_client) self.sqs_client.send_message(QueueUrl=self.sqs_test_queue["QueueUrl"], MessageBody="world") self.sqs_client.send_message_batch( @@ -1154,7 +1192,7 @@ def test_schematized_unspecified_service_sqs_client_default(self): @mock_sqs @TracerTestCase.run_in_subprocess(env_overrides=dict(DD_TRACE_SPAN_ATTRIBUTE_SCHEMA="v0")) def test_schematized_unspecified_service_sqs_client_v0(self): - Pin.get_from(self.sqs_client).clone(tracer=self.tracer).onto(self.sqs_client) + Pin.get_from(self.sqs_client)._clone(tracer=self.tracer).onto(self.sqs_client) self.sqs_client.send_message(QueueUrl=self.sqs_test_queue["QueueUrl"], MessageBody="world") self.sqs_client.send_message_batch( @@ -1177,7 +1215,7 @@ def test_schematized_unspecified_service_sqs_client_v0(self): @mock_sqs @TracerTestCase.run_in_subprocess(env_overrides=dict(DD_TRACE_SPAN_ATTRIBUTE_SCHEMA="v1")) def test_schematized_unspecified_service_sqs_client_v1(self): - Pin.get_from(self.sqs_client).clone(tracer=self.tracer).onto(self.sqs_client) + Pin.get_from(self.sqs_client)._clone(tracer=self.tracer).onto(self.sqs_client) self.sqs_client.send_message(QueueUrl=self.sqs_test_queue["QueueUrl"], MessageBody="world") self.sqs_client.send_message_batch( @@ -1205,7 +1243,9 @@ def test_stepfunctions_send_start_execution_trace_injection(self): definition='{"StartAt": "HelloWorld","States": {"HelloWorld": {"Type": "Pass","End": true}}}', roleArn="arn:aws:iam::012345678901:role/DummyRole", ) - Pin(service=self.TEST_SERVICE, tracer=self.tracer).onto(sf) + pin = Pin(service=self.TEST_SERVICE) + pin._tracer = self.tracer + pin.onto(sf) start_execution_dict = { "stateMachineArn": "arn:aws:states:us-west-2:000000000000:stateMachine:lincoln", "input": '{"baz": 1}', @@ -1226,7 +1266,9 @@ def test_stepfunctions_send_start_execution_trace_injection_with_array_input(sel definition='{"StartAt": "HelloWorld","States": {"HelloWorld": {"Type": "Pass","End": true}}}', roleArn="arn:aws:iam::012345678901:role/DummyRole", ) - Pin(service=self.TEST_SERVICE, tracer=self.tracer).onto(sf) + pin = Pin(service=self.TEST_SERVICE) + pin._tracer = self.tracer + pin.onto(sf) sf.start_execution( stateMachineArn="arn:aws:states:us-west-2:000000000000:stateMachine:miller", input='["one", "two", "three"]' ) @@ -1245,7 +1287,9 @@ def test_stepfunctions_send_start_execution_trace_injection_with_true_input(self definition='{"StartAt": "HelloWorld","States": {"HelloWorld": {"Type": "Pass","End": true}}}', roleArn="arn:aws:iam::012345678901:role/DummyRole", ) - Pin(service=self.TEST_SERVICE, tracer=self.tracer).onto(sf) + pin = Pin(service=self.TEST_SERVICE) + pin._tracer = self.tracer + pin.onto(sf) sf.start_execution(stateMachineArn="arn:aws:states:us-west-2:000000000000:stateMachine:hobart", input="true") # I've tried to find a way to make Moto show me the input to the execution, but can't get that to work. 
spans = self.get_spans() @@ -1264,7 +1308,9 @@ def _test_kinesis_client(self): {"Data": json.dumps({"Hello": "World"}), "PartitionKey": partition_key}, {"Data": json.dumps({"foo": "bar"}), "PartitionKey": partition_key}, ] - Pin(service=self.TEST_SERVICE, tracer=self.tracer).onto(client) + pin = Pin(service=self.TEST_SERVICE) + pin._tracer = self.tracer + pin.onto(client) client.put_records(StreamName=stream_name, Records=data) spans = self.get_spans() @@ -1335,7 +1381,9 @@ def test_kinesis_distributed_tracing_on(self): @mock_kinesis def test_unpatch(self): kinesis = self.session.create_client("kinesis", region_name="us-east-1") - Pin(service=self.TEST_SERVICE, tracer=self.tracer).onto(kinesis) + pin = Pin(service=self.TEST_SERVICE) + pin._tracer = self.tracer + pin.onto(kinesis) unpatch() @@ -1345,7 +1393,9 @@ def test_unpatch(self): @mock_sqs def test_double_patch(self): - Pin(service=self.TEST_SERVICE, tracer=self.tracer).onto(self.sqs_client) + pin = Pin(service=self.TEST_SERVICE) + pin._tracer = self.tracer + pin.onto(self.sqs_client) patch() patch() @@ -1402,8 +1452,8 @@ def _test_data_streams_sns_to_sqs(self, use_raw_delivery): AttributeValue="true", ) - Pin.get_from(sns).clone(tracer=self.tracer).onto(sns) - Pin.get_from(self.sqs_client).clone(tracer=self.tracer).onto(self.sqs_client) + Pin.get_from(sns)._clone(tracer=self.tracer).onto(sns) + Pin.get_from(self.sqs_client)._clone(tracer=self.tracer).onto(self.sqs_client) sns.publish(TopicArn=topic_arn, Message="test") @@ -1478,7 +1528,9 @@ def test_data_streams_sqs(self): ): mt.return_value = 1642544540 - Pin(service=self.TEST_SERVICE, tracer=self.tracer).onto(self.sqs_client) + pin = Pin(service=self.TEST_SERVICE) + pin._tracer = self.tracer + pin.onto(self.sqs_client) message_attributes = { "one": {"DataType": "String", "StringValue": "one"}, "two": {"DataType": "String", "StringValue": "two"}, @@ -1537,7 +1589,9 @@ def test_data_streams_sqs_batch(self): ): mt.return_value = 1642544540 - Pin(service=self.TEST_SERVICE, tracer=self.tracer).onto(self.sqs_client) + pin = Pin(service=self.TEST_SERVICE) + pin._tracer = self.tracer + pin.onto(self.sqs_client) message_attributes = { "one": {"DataType": "String", "StringValue": "one"}, "two": {"DataType": "String", "StringValue": "two"}, @@ -1615,7 +1669,9 @@ def test_data_streams_sqs_no_header(self): ): mt.return_value = 1642544540 - Pin(service=self.TEST_SERVICE, tracer=self.tracer).onto(self.sqs_client) + pin = Pin(service=self.TEST_SERVICE) + pin._tracer = self.tracer + pin.onto(self.sqs_client) message_attributes = { "one": {"DataType": "String", "StringValue": "one"}, "two": {"DataType": "String", "StringValue": "two"}, @@ -1655,7 +1711,9 @@ def test_data_streams_sqs_no_header(self): def test_lambda_client(self): # DEV: No lambda params tagged so we only check no ClientContext lamb = self.session.create_client("lambda", region_name="us-west-2") - Pin(service=self.TEST_SERVICE, tracer=self.tracer).onto(lamb) + pin = Pin(service=self.TEST_SERVICE) + pin._tracer = self.tracer + pin.onto(lamb) lamb.list_functions() @@ -1691,7 +1749,9 @@ def test_lambda_invoke_distributed_tracing_off(self): MemorySize=128, ) - Pin(service=self.TEST_SERVICE, tracer=self.tracer).onto(lamb) + pin = Pin(service=self.TEST_SERVICE) + pin._tracer = self.tracer + pin.onto(lamb) lamb.invoke( FunctionName="ironmaiden", @@ -1731,7 +1791,9 @@ def test_lambda_invoke_bad_context_client(self): MemorySize=128, ) - Pin(service=self.TEST_SERVICE, tracer=self.tracer).onto(lamb) + pin = Pin(service=self.TEST_SERVICE) + 
pin._tracer = self.tracer + pin.onto(lamb) lamb.invoke( FunctionName="black-sabbath", @@ -1757,7 +1819,7 @@ def test_schematized_lambda_client_default(self): # DEV: No lambda params tagged so we only check no ClientContext lamb = self.session.create_client("lambda", region_name="us-west-2", endpoint_url="http://localhost:4566") - Pin.get_from(lamb).clone(tracer=self.tracer).onto(lamb) + Pin.get_from(lamb)._clone(tracer=self.tracer).onto(lamb) lamb.create_function( FunctionName="guns-and-roses", Runtime="python3.8", @@ -1791,7 +1853,7 @@ def test_schematized_lambda_client_default(self): def test_schematized_lambda_client_v0(self): # DEV: No lambda params tagged so we only check no ClientContext lamb = self.session.create_client("lambda", region_name="us-west-2", endpoint_url="http://localhost:4566") - Pin.get_from(lamb).clone(tracer=self.tracer).onto(lamb) + Pin.get_from(lamb)._clone(tracer=self.tracer).onto(lamb) lamb.create_function( FunctionName="guns-and-roses", @@ -1822,7 +1884,7 @@ def test_schematized_lambda_client_v0(self): def test_schematized_lambda_client_v1(self): # DEV: No lambda params tagged so we only check no ClientContext lamb = self.session.create_client("lambda", region_name="us-west-2", endpoint_url="http://localhost:4566") - Pin.get_from(lamb).clone(tracer=self.tracer).onto(lamb) + Pin.get_from(lamb)._clone(tracer=self.tracer).onto(lamb) lamb.create_function( FunctionName="guns-and-roses", @@ -1853,7 +1915,7 @@ def test_schematized_lambda_client_v1(self): def test_schematized_unspecified_service_lambda_client_default(self): # DEV: No lambda params tagged so we only check no ClientContext lamb = self.session.create_client("lambda", region_name="us-west-2", endpoint_url="http://localhost:4566") - Pin.get_from(lamb).clone(tracer=self.tracer).onto(lamb) + Pin.get_from(lamb)._clone(tracer=self.tracer).onto(lamb) lamb.create_function( FunctionName="guns-and-roses", @@ -1884,7 +1946,7 @@ def test_schematized_unspecified_service_lambda_client_default(self): def test_schematized_unspecified_service_lambda_client_v0(self): # DEV: No lambda params tagged so we only check no ClientContext lamb = self.session.create_client("lambda", region_name="us-west-2", endpoint_url="http://localhost:4566") - Pin.get_from(lamb).clone(tracer=self.tracer).onto(lamb) + Pin.get_from(lamb)._clone(tracer=self.tracer).onto(lamb) lamb.create_function( FunctionName="guns-and-roses", @@ -1915,7 +1977,7 @@ def test_schematized_unspecified_service_lambda_client_v0(self): def test_schematized_unspecified_service_lambda_client_v1(self): # DEV: No lambda params tagged so we only check no ClientContext lamb = self.session.create_client("lambda", region_name="us-west-2", endpoint_url="http://localhost:4566") - Pin.get_from(lamb).clone(tracer=self.tracer).onto(lamb) + Pin.get_from(lamb)._clone(tracer=self.tracer).onto(lamb) lamb.create_function( FunctionName="guns-and-roses", @@ -1968,7 +2030,9 @@ def test_eventbridge_single_entry_trace_injection(self): Targets=[{"Id": "a-test-bus-rule-target", "Arn": "arn:aws:sqs:us-east-1:000000000000:Test"}], ) - Pin(service=self.TEST_SERVICE, tracer=self.tracer).onto(bridge) + pin = Pin(service=self.TEST_SERVICE) + pin._tracer = self.tracer + pin.onto(bridge) bridge.put_events(Entries=entries) messages = self.sqs_client.receive_message(QueueUrl=queue_url, WaitTimeSeconds=2) @@ -2030,7 +2094,9 @@ def test_eventbridge_multiple_entries_trace_injection(self): Targets=[{"Id": "a-test-bus-rule-target", "Arn": "arn:aws:sqs:us-east-1:000000000000:Test"}], ) - 
Pin(service=self.TEST_SERVICE, tracer=self.tracer).onto(bridge) + pin = Pin(service=self.TEST_SERVICE) + pin._tracer = self.tracer + pin.onto(bridge) bridge.put_events(Entries=entries) messages = self.sqs_client.receive_message(QueueUrl=queue_url, WaitTimeSeconds=2) @@ -2070,7 +2136,9 @@ def test_eventbridge_multiple_entries_trace_injection(self): @mock_kms def test_kms_client(self): kms = self.session.create_client("kms", region_name="us-east-1") - Pin(service=self.TEST_SERVICE, tracer=self.tracer).onto(kms) + pin = Pin(service=self.TEST_SERVICE) + pin._tracer = self.tracer + pin.onto(kms) kms.list_keys(Limit=21) @@ -2095,7 +2163,7 @@ def test_kms_client(self): @TracerTestCase.run_in_subprocess(env_overrides=dict(DD_SERVICE="mysvc")) def test_schematized_kms_client_default(self): kms = self.session.create_client("kms", region_name="us-east-1") - Pin.get_from(kms).clone(tracer=self.tracer).onto(kms) + Pin.get_from(kms)._clone(tracer=self.tracer).onto(kms) kms.list_keys(Limit=21) @@ -2110,7 +2178,7 @@ def test_schematized_kms_client_default(self): @TracerTestCase.run_in_subprocess(env_overrides=dict(DD_SERVICE="mysvc", DD_TRACE_SPAN_ATTRIBUTE_SCHEMA="v0")) def test_schematized_kms_client_v0(self): kms = self.session.create_client("kms", region_name="us-east-1") - Pin.get_from(kms).clone(tracer=self.tracer).onto(kms) + Pin.get_from(kms)._clone(tracer=self.tracer).onto(kms) kms.list_keys(Limit=21) @@ -2125,7 +2193,7 @@ def test_schematized_kms_client_v0(self): @TracerTestCase.run_in_subprocess(env_overrides=dict(DD_SERVICE="mysvc", DD_TRACE_SPAN_ATTRIBUTE_SCHEMA="v1")) def test_schematized_kms_client_v1(self): kms = self.session.create_client("kms", region_name="us-east-1") - Pin.get_from(kms).clone(tracer=self.tracer).onto(kms) + Pin.get_from(kms)._clone(tracer=self.tracer).onto(kms) kms.list_keys(Limit=21) @@ -2140,7 +2208,7 @@ def test_schematized_kms_client_v1(self): @TracerTestCase.run_in_subprocess(env_overrides=dict()) def test_schematized_unspecified_service_kms_client_default(self): kms = self.session.create_client("kms", region_name="us-east-1") - Pin.get_from(kms).clone(tracer=self.tracer).onto(kms) + Pin.get_from(kms)._clone(tracer=self.tracer).onto(kms) kms.list_keys(Limit=21) @@ -2155,7 +2223,7 @@ def test_schematized_unspecified_service_kms_client_default(self): @TracerTestCase.run_in_subprocess(env_overrides=dict(DD_TRACE_SPAN_ATTRIBUTE_SCHEMA="v0")) def test_schematized_unspecified_service_kms_client_v0(self): kms = self.session.create_client("kms", region_name="us-east-1") - Pin.get_from(kms).clone(tracer=self.tracer).onto(kms) + Pin.get_from(kms)._clone(tracer=self.tracer).onto(kms) kms.list_keys(Limit=21) @@ -2170,7 +2238,7 @@ def test_schematized_unspecified_service_kms_client_v0(self): @TracerTestCase.run_in_subprocess(env_overrides=dict(DD_TRACE_SPAN_ATTRIBUTE_SCHEMA="v1")) def test_schematized_unspecified_service_kms_client_v1(self): kms = self.session.create_client("kms", region_name="us-east-1") - Pin.get_from(kms).clone(tracer=self.tracer).onto(kms) + Pin.get_from(kms)._clone(tracer=self.tracer).onto(kms) kms.list_keys(Limit=21) @@ -2188,7 +2256,9 @@ def test_traced_client_ot(self): with ot_tracer.start_active_span("ec2_op"): ec2 = self.session.create_client("ec2", region_name="us-west-2") - Pin(service=self.TEST_SERVICE, tracer=self.tracer).onto(ec2) + pin = Pin(service=self.TEST_SERVICE) + pin._tracer = self.tracer + pin.onto(ec2) ec2.describe_instances() spans = self.get_spans() @@ -2235,7 +2305,9 @@ def test_stubber_no_response_metadata(self): @mock_firehose def 
test_firehose_no_records_arg(self): firehose = self.session.create_client("firehose", region_name="us-west-2") - Pin(service=self.TEST_SERVICE, tracer=self.tracer).onto(firehose) + pin = Pin(service=self.TEST_SERVICE) + pin._tracer = self.tracer + pin.onto(firehose) stream_name = "test-stream" account_id = "test-account" @@ -2309,9 +2381,11 @@ def _test_sns(self, use_default_tracer=False): sns.subscribe(TopicArn=topic_arn, Protocol="sqs", Endpoint=sqs_arn) if use_default_tracer: - Pin.get_from(sns).clone(tracer=self.tracer).onto(sns) + Pin.get_from(sns)._clone(tracer=self.tracer).onto(sns) else: - Pin(service=self.TEST_SERVICE, tracer=self.tracer).onto(sns) + pin = Pin(service=self.TEST_SERVICE) + pin._tracer = self.tracer + pin.onto(sns) sns.publish(TopicArn=topic_arn, Message="test") spans = self.get_spans() @@ -2410,7 +2484,9 @@ def test_sns_send_message_trace_injection_with_no_message_attributes(self): sqs_arn = "arn:aws:sqs:{}:{}:{}".format("us-east-1", url_parts[-2], url_parts[-1]) sns.subscribe(TopicArn=topic_arn, Protocol="sqs", Endpoint=sqs_arn) - Pin(service=self.TEST_SERVICE, tracer=self.tracer).onto(sns) + pin = Pin(service=self.TEST_SERVICE) + pin._tracer = self.tracer + pin.onto(sns) sns.publish(TopicArn=topic_arn, Message="test") spans = self.get_spans() @@ -2469,7 +2545,9 @@ def test_sns_send_message_trace_injection_with_message_attributes(self): sqs_arn = "arn:aws:sqs:{}:{}:{}".format(region, url_parts[-2], url_parts[-1]) sns.subscribe(TopicArn=topic_arn, Protocol="sqs", Endpoint=sqs_arn) - Pin(service=self.TEST_SERVICE, tracer=self.tracer).onto(sns) + pin = Pin(service=self.TEST_SERVICE) + pin._tracer = self.tracer + pin.onto(sns) message_attributes = { "one": {"DataType": "String", "StringValue": "one"}, @@ -2544,7 +2622,9 @@ def test_sns_send_message_trace_injection_with_max_message_attributes(self): sqs_arn = "arn:aws:sqs:{}:{}:{}".format(region, url_parts[-2], url_parts[-1]) sns.subscribe(TopicArn=topic_arn, Protocol="sqs", Endpoint=sqs_arn) - Pin(service=self.TEST_SERVICE, tracer=self.tracer).onto(sns) + pin = Pin(service=self.TEST_SERVICE) + pin._tracer = self.tracer + pin.onto(sns) message_attributes = { "one": {"DataType": "String", "StringValue": "one"}, @@ -2610,8 +2690,10 @@ def test_sns_send_message_batch_trace_injection_with_no_message_attributes(self) sqs_arn = "arn:aws:sqs:{}:{}:{}".format(region, url_parts[-2], url_parts[-1]) sns.subscribe(TopicArn=topic_arn, Protocol="sqs", Endpoint=sqs_arn) - Pin(service=self.TEST_SERVICE, tracer=self.tracer).onto(sns) - Pin.get_from(sns).clone(tracer=self.tracer).onto(self.sqs_client) + pin = Pin(service=self.TEST_SERVICE) + pin._tracer = self.tracer + pin.onto(sns) + Pin.get_from(sns)._clone(tracer=self.tracer).onto(self.sqs_client) entries = [ { "Id": "1", @@ -2704,7 +2786,9 @@ def test_sns_send_message_batch_trace_injection_with_message_attributes(self): sqs_arn = "arn:aws:sqs:{}:{}:{}".format(region, url_parts[-2], url_parts[-1]) sns.subscribe(TopicArn=topic_arn, Protocol="sqs", Endpoint=sqs_arn) - Pin(service=self.TEST_SERVICE, tracer=self.tracer).onto(sns) + pin = Pin(service=self.TEST_SERVICE) + pin._tracer = self.tracer + pin.onto(sns) message_attributes = { "one": {"DataType": "String", "StringValue": "one"}, @@ -2775,7 +2859,9 @@ def test_sns_send_message_batch_trace_injection_with_max_message_attributes(self sqs_arn = "arn:aws:sqs:{}:{}:{}".format(region, url_parts[-2], url_parts[-1]) sns.subscribe(TopicArn=topic_arn, Protocol="sqs", Endpoint=sqs_arn) - Pin(service=self.TEST_SERVICE, 
tracer=self.tracer).onto(sns) + pin = Pin(service=self.TEST_SERVICE) + pin._tracer = self.tracer + pin.onto(sns) message_attributes = { "one": {"DataType": "String", "StringValue": "one"}, @@ -2902,7 +2988,9 @@ def test_kinesis_get_records_empty_poll_disabled(self): stream_name = "kinesis_get_records_empty_poll_disabled" shard_id, _ = self._kinesis_create_stream(client, stream_name) - Pin(service=self.TEST_SERVICE, tracer=self.tracer).onto(client) + pin = Pin(service=self.TEST_SERVICE) + pin._tracer = self.tracer + pin.onto(client) shard_iterator = self._kinesis_get_shard_iterator(client, stream_name, shard_id) @@ -2927,7 +3015,9 @@ def test_kinesis_get_records_empty_poll_enabled(self): stream_name = "kinesis_get_records_empty_poll_enabled" shard_id, _ = self._kinesis_create_stream(client, stream_name) - Pin(service=self.TEST_SERVICE, tracer=self.tracer).onto(client) + pin = Pin(service=self.TEST_SERVICE) + pin._tracer = self.tracer + pin.onto(client) shard_iterator = self._kinesis_get_shard_iterator(client, stream_name, shard_id) @@ -2950,7 +3040,9 @@ def test_sqs_get_records_empty_poll_disabled(self): # pop any spans created from previous operations spans = self.pop_spans() - Pin(service=self.TEST_SERVICE, tracer=self.tracer).onto(self.sqs_client) + pin = Pin(service=self.TEST_SERVICE) + pin._tracer = self.tracer + pin.onto(self.sqs_client) response = None response = self.sqs_client.receive_message( @@ -2971,7 +3063,9 @@ def test_sqs_get_records_empty_poll_enabled(self): # pop any spans created from previous operations spans = self.pop_spans() - Pin(service=self.TEST_SERVICE, tracer=self.tracer).onto(self.sqs_client) + pin = Pin(service=self.TEST_SERVICE) + pin._tracer = self.tracer + pin.onto(self.sqs_client) response = None response = self.sqs_client.receive_message( @@ -2994,7 +3088,9 @@ def _test_kinesis_put_record_trace_injection(self, test_name, data, client=None, partition_key = "1234" - Pin(service=self.TEST_SERVICE, tracer=self.tracer).onto(client) + pin = Pin(service=self.TEST_SERVICE) + pin._tracer = self.tracer + pin.onto(client) if enable_stream_arn: client.put_record(StreamName=stream_name, Data=data, PartitionKey=partition_key, StreamARN=stream_arn) else: @@ -3029,7 +3125,9 @@ def _test_kinesis_put_records_trace_injection( stream_name = "kinesis_put_records_" + test_name shard_id, stream_arn = self._kinesis_create_stream(client, stream_name) - Pin(service=self.TEST_SERVICE, tracer=self.tracer).onto(client) + pin = Pin(service=self.TEST_SERVICE) + pin._tracer = self.tracer + pin.onto(client) if enable_stream_arn: client.put_records(StreamName=stream_name, Records=data, StreamARN=stream_arn) else: @@ -3328,7 +3426,9 @@ def test_kinesis_put_records_inject_data_streams_to_every_record_propagation_dis data = json.dumps({"json": "string"}) records = self._kinesis_generate_records(data, 5) - Pin(service=self.TEST_SERVICE, tracer=self.tracer).onto(client) + pin = Pin(service=self.TEST_SERVICE) + pin._tracer = self.tracer + pin.onto(client) client.put_records(StreamName=stream_name, Records=records, StreamARN=stream_arn) shard_iterator = self._kinesis_get_shard_iterator(client, stream_name, shard_id) @@ -3357,7 +3457,9 @@ def test_kinesis_put_records_inject_data_streams_to_every_record_propagation_ena data = json.dumps({"json": "string"}) records = self._kinesis_generate_records(data, 5) - Pin(service=self.TEST_SERVICE, tracer=self.tracer).onto(client) + pin = Pin(service=self.TEST_SERVICE) + pin._tracer = self.tracer + pin.onto(client) client.put_records(StreamName=stream_name, 
Records=records, StreamARN=stream_arn) shard_iterator = self._kinesis_get_shard_iterator(client, stream_name, shard_id) @@ -3386,7 +3488,9 @@ def test_kinesis_put_records_inject_data_streams_to_every_record_disable_all_inj data = json.dumps({"json": "string"}) records = self._kinesis_generate_records(data, 5) - Pin(service=self.TEST_SERVICE, tracer=self.tracer).onto(client) + pin = Pin(service=self.TEST_SERVICE) + pin._tracer = self.tracer + pin.onto(client) client.put_records(StreamName=stream_name, Records=records, StreamARN=stream_arn) shard_iterator = self._kinesis_get_shard_iterator(client, stream_name, shard_id) @@ -3454,7 +3558,7 @@ def test_kinesis_parenting(self): {"Data": json.dumps({"foo": "bar"}), "PartitionKey": partition_key}, ] - Pin.get_from(client).clone(tracer=self.tracer).onto(client) + Pin.get_from(client)._clone(tracer=self.tracer).onto(client) with self.tracer.trace("kinesis.manual_span"): client.create_stream(StreamName=stream_name, ShardCount=1) @@ -3474,7 +3578,7 @@ def test_kinesis_parenting(self): @mock_sqs def test_sqs_parenting(self): - Pin.get_from(self.sqs_client).clone(tracer=self.tracer).onto(self.sqs_client) + Pin.get_from(self.sqs_client)._clone(tracer=self.tracer).onto(self.sqs_client) with self.tracer.trace("sqs.manual_span"): self.sqs_client.send_message(QueueUrl=self.sqs_test_queue["QueueUrl"], MessageBody="world") @@ -3510,7 +3614,7 @@ def test_schematized_kinesis_client_default(self): {"Data": json.dumps({"foo": "bar"}), "PartitionKey": partition_key}, ] - Pin.get_from(client).clone(tracer=self.tracer).onto(client) + Pin.get_from(client)._clone(tracer=self.tracer).onto(client) client.create_stream(StreamName=stream_name, ShardCount=1) client.put_records(StreamName=stream_name, Records=data) @@ -3531,7 +3635,7 @@ def test_schematized_kinesis_client_v0(self): {"Data": json.dumps({"Hello": "World"}), "PartitionKey": partition_key}, {"Data": json.dumps({"foo": "bar"}), "PartitionKey": partition_key}, ] - Pin.get_from(client).clone(tracer=self.tracer).onto(client) + Pin.get_from(client)._clone(tracer=self.tracer).onto(client) client.create_stream(StreamName=stream_name, ShardCount=1) client.put_records(StreamName=stream_name, Records=data) @@ -3552,7 +3656,7 @@ def test_schematized_kinesis_client_v1(self): {"Data": json.dumps({"Hello": "World"}), "PartitionKey": partition_key}, {"Data": json.dumps({"foo": "bar"}), "PartitionKey": partition_key}, ] - Pin.get_from(client).clone(tracer=self.tracer).onto(client) + Pin.get_from(client)._clone(tracer=self.tracer).onto(client) client.create_stream(StreamName=stream_name, ShardCount=1) client.put_records(StreamName=stream_name, Records=data) @@ -3573,7 +3677,7 @@ def test_schematized_unspecified_service_kinesis_client_default(self): {"Data": json.dumps({"Hello": "World"}), "PartitionKey": partition_key}, {"Data": json.dumps({"foo": "bar"}), "PartitionKey": partition_key}, ] - Pin.get_from(client).clone(tracer=self.tracer).onto(client) + Pin.get_from(client)._clone(tracer=self.tracer).onto(client) client.create_stream(StreamName=stream_name, ShardCount=1) client.put_records(StreamName=stream_name, Records=data) @@ -3594,7 +3698,7 @@ def test_schematized_unspecified_service_kinesis_client_v0(self): {"Data": json.dumps({"Hello": "World"}), "PartitionKey": partition_key}, {"Data": json.dumps({"foo": "bar"}), "PartitionKey": partition_key}, ] - Pin.get_from(client).clone(tracer=self.tracer).onto(client) + Pin.get_from(client)._clone(tracer=self.tracer).onto(client) client.create_stream(StreamName=stream_name, 
ShardCount=1) client.put_records(StreamName=stream_name, Records=data) @@ -3615,7 +3719,7 @@ def test_schematized_unspecified_service_kinesis_client_v1(self): {"Data": json.dumps({"Hello": "World"}), "PartitionKey": partition_key}, {"Data": json.dumps({"foo": "bar"}), "PartitionKey": partition_key}, ] - Pin.get_from(client).clone(tracer=self.tracer).onto(client) + Pin.get_from(client)._clone(tracer=self.tracer).onto(client) client.create_stream(StreamName=stream_name, ShardCount=1) client.put_records(StreamName=stream_name, Records=data) @@ -3630,7 +3734,9 @@ def test_secretsmanager(self): with mock_secretsmanager(): client = self.session.create_client("secretsmanager", region_name="us-east-1") - Pin(service=self.TEST_SERVICE, tracer=self.tracer).onto(client) + pin = Pin(service=self.TEST_SERVICE) + pin._tracer = self.tracer + pin.onto(client) resp = client.create_secret(Name="/my/secrets", SecretString="supersecret-string") assert resp["ResponseMetadata"]["HTTPStatusCode"] == 200 @@ -3655,7 +3761,9 @@ def test_secretsmanager_binary(self): with mock_secretsmanager(): client = self.session.create_client("secretsmanager", region_name="us-east-1") - Pin(service=self.TEST_SERVICE, tracer=self.tracer).onto(client) + pin = Pin(service=self.TEST_SERVICE) + pin._tracer = self.tracer + pin.onto(client) resp = client.create_secret(Name="/my/secrets", SecretBinary=b"supersecret-binary") assert resp["ResponseMetadata"]["HTTPStatusCode"] == 200 @@ -3681,7 +3789,7 @@ def test_schematized_secretsmanager_default(self): with mock_secretsmanager(): client = self.session.create_client("secretsmanager", region_name="us-east-1") - Pin.get_from(client).clone(tracer=self.tracer).onto(client) + Pin.get_from(client)._clone(tracer=self.tracer).onto(client) resp = client.create_secret(Name="/my/secrets", SecretString="supersecret-string") assert resp["ResponseMetadata"]["HTTPStatusCode"] == 200 @@ -3699,7 +3807,7 @@ def test_schematized_secretsmanager_v0(self): with mock_secretsmanager(): client = self.session.create_client("secretsmanager", region_name="us-east-1") - Pin.get_from(client).clone(tracer=self.tracer).onto(client) + Pin.get_from(client)._clone(tracer=self.tracer).onto(client) resp = client.create_secret(Name="/my/secrets", SecretString="supersecret-string") assert resp["ResponseMetadata"]["HTTPStatusCode"] == 200 @@ -3717,7 +3825,7 @@ def test_schematized_secretsmanager_v1(self): with mock_secretsmanager(): client = self.session.create_client("secretsmanager", region_name="us-east-1") - Pin.get_from(client).clone(tracer=self.tracer).onto(client) + Pin.get_from(client)._clone(tracer=self.tracer).onto(client) resp = client.create_secret(Name="/my/secrets", SecretString="supersecret-string") assert resp["ResponseMetadata"]["HTTPStatusCode"] == 200 @@ -3735,7 +3843,7 @@ def test_schematized_unspecified_service_secretsmanager_default(self): with mock_secretsmanager(): client = self.session.create_client("secretsmanager", region_name="us-east-1") - Pin.get_from(client).clone(tracer=self.tracer).onto(client) + Pin.get_from(client)._clone(tracer=self.tracer).onto(client) resp = client.create_secret(Name="/my/secrets", SecretString="supersecret-string") assert resp["ResponseMetadata"]["HTTPStatusCode"] == 200 @@ -3753,7 +3861,7 @@ def test_schematized_unspecified_service_secretsmanager_v0(self): with mock_secretsmanager(): client = self.session.create_client("secretsmanager", region_name="us-east-1") - Pin.get_from(client).clone(tracer=self.tracer).onto(client) + 
Pin.get_from(client)._clone(tracer=self.tracer).onto(client) resp = client.create_secret(Name="/my/secrets", SecretString="supersecret-string") assert resp["ResponseMetadata"]["HTTPStatusCode"] == 200 @@ -3771,7 +3879,7 @@ def test_schematized_unspecified_service_secretsmanager_v1(self): with mock_secretsmanager(): client = self.session.create_client("secretsmanager", region_name="us-east-1") - Pin.get_from(client).clone(tracer=self.tracer).onto(client) + Pin.get_from(client)._clone(tracer=self.tracer).onto(client) resp = client.create_secret(Name="/my/secrets", SecretString="supersecret-string") assert resp["ResponseMetadata"]["HTTPStatusCode"] == 200 @@ -3787,7 +3895,9 @@ def test_schematized_unspecified_service_secretsmanager_v1(self): @mock_sqs def test_aws_payload_tagging_sqs(self): with self.override_config("botocore", dict(payload_tagging_request="all", payload_tagging_response="all")): - Pin(service=self.TEST_SERVICE, tracer=self.tracer).onto(self.sqs_client) + pin = Pin(service=self.TEST_SERVICE) + pin._tracer = self.tracer + pin.onto(self.sqs_client) message_attributes = { "one": {"DataType": "String", "StringValue": "one"}, "two": {"DataType": "String", "StringValue": "two"}, @@ -3844,7 +3954,9 @@ def test_aws_payload_tagging_sns(self): sqs_arn = "arn:aws:sqs:{}:{}:{}".format(region, url_parts[-2], url_parts[-1]) sns.subscribe(TopicArn=topic_arn, Protocol="sqs", Endpoint=sqs_arn) - Pin(service=self.TEST_SERVICE, tracer=self.tracer).onto(sns) + pin = Pin(service=self.TEST_SERVICE) + pin._tracer = self.tracer + pin.onto(sns) message_attributes = { "one": {"DataType": "String", "StringValue": "one"}, @@ -3897,7 +4009,9 @@ def test_aws_payload_tagging_sns_valid_config(self): sqs_arn = "arn:aws:sqs:{}:{}:{}".format(region, url_parts[-2], url_parts[-1]) sns.subscribe(TopicArn=topic_arn, Protocol="sqs", Endpoint=sqs_arn) - Pin(service=self.TEST_SERVICE, tracer=self.tracer).onto(sns) + pin = Pin(service=self.TEST_SERVICE) + pin._tracer = self.tracer + pin.onto(sns) message_attributes = { "one": {"DataType": "String", "StringValue": "one"}, @@ -3933,7 +4047,9 @@ def test_aws_payload_tagging_sns_valid_config(self): def test_aws_payload_tagging_s3(self): with self.override_config("botocore", dict(payload_tagging_request="all", payload_tagging_response="all")): s3 = self.session.create_client("s3", region_name="us-west-2") - Pin(service=self.TEST_SERVICE, tracer=self.tracer).onto(s3) + pin = Pin(service=self.TEST_SERVICE) + pin._tracer = self.tracer + pin.onto(s3) s3.list_buckets() s3.list_buckets() @@ -3965,7 +4081,9 @@ def test_aws_payload_tagging_s3_invalid_config(self): dict(payload_tagging_request="non_json_path", payload_tagging_response="$..Attributes.PlatformCredential"), ): s3 = self.session.create_client("s3", region_name="us-west-2") - Pin(service=self.TEST_SERVICE, tracer=self.tracer).onto(s3) + pin = Pin(service=self.TEST_SERVICE) + pin._tracer = self.tracer + pin.onto(s3) s3.list_buckets() s3.list_buckets() @@ -3983,7 +4101,9 @@ def test_aws_payload_tagging_s3_valid_config(self): "botocore", dict(payload_tagging_request="$..bucket", payload_tagging_response="$..HTTPHeaders") ): s3 = self.session.create_client("s3", region_name="us-west-2") - Pin(service=self.TEST_SERVICE, tracer=self.tracer).onto(s3) + pin = Pin(service=self.TEST_SERVICE) + pin._tracer = self.tracer + pin.onto(s3) s3.list_buckets() s3.list_buckets() @@ -4028,7 +4148,9 @@ def test_aws_payload_tagging_eventbridge(self): Targets=[{"Id": "a-test-bus-rule-target", "Arn": 
"arn:aws:sqs:us-east-1:000000000000:Test"}], ) - Pin(service=self.TEST_SERVICE, tracer=self.tracer).onto(bridge) + pin = Pin(service=self.TEST_SERVICE) + pin._tracer = self.tracer + pin.onto(bridge) bridge.put_events(Entries=entries) self.sqs_client.receive_message(QueueUrl=queue_url, WaitTimeSeconds=2) @@ -4048,7 +4170,7 @@ def test_aws_payload_tagging_kinesis(self): {"Data": json.dumps({"foo": "bar"}), "PartitionKey": partition_key}, ] - Pin.get_from(client).clone(tracer=self.tracer).onto(client) + Pin.get_from(client)._clone(tracer=self.tracer).onto(client) with self.tracer.trace("kinesis.manual_span"): client.create_stream(StreamName=stream_name, ShardCount=1) diff --git a/tests/contrib/botocore/test_bedrock.py b/tests/contrib/botocore/test_bedrock.py index 1cf5618bd0e..578c34ce981 100644 --- a/tests/contrib/botocore/test_bedrock.py +++ b/tests/contrib/botocore/test_bedrock.py @@ -42,7 +42,7 @@ def aws_credentials(): def mock_tracer(ddtrace_global_config, bedrock_client): pin = Pin.get_from(bedrock_client) mock_tracer = DummyTracer(writer=DummyWriter(trace_flush_enabled=False)) - pin.override(bedrock_client, tracer=mock_tracer) + pin._override(bedrock_client, tracer=mock_tracer) yield mock_tracer @@ -102,7 +102,7 @@ def setUp(self): self.bedrock_client = self.session.client("bedrock-runtime") self.mock_tracer = DummyTracer(writer=DummyWriter(trace_flush_enabled=False)) pin = Pin.get_from(self.bedrock_client) - pin.override(self.bedrock_client, tracer=self.mock_tracer) + pin._override(self.bedrock_client, tracer=self.mock_tracer) super(TestBedrockConfig, self).setUp() diff --git a/tests/contrib/botocore/test_bedrock_llmobs.py b/tests/contrib/botocore/test_bedrock_llmobs.py index 790b86f0704..711f1287f99 100644 --- a/tests/contrib/botocore/test_bedrock_llmobs.py +++ b/tests/contrib/botocore/test_bedrock_llmobs.py @@ -107,7 +107,7 @@ def expected_llmobs_span_event(span, n_output, message=False): def _test_llmobs_invoke(cls, provider, bedrock_client, mock_llmobs_span_writer, cassette_name=None, n_output=1): mock_tracer = DummyTracer(writer=DummyWriter(trace_flush_enabled=False)) pin = Pin.get_from(bedrock_client) - pin.override(bedrock_client, tracer=mock_tracer) + pin._override(bedrock_client, tracer=mock_tracer) # Need to disable and re-enable LLMObs service to use the mock tracer LLMObs.disable() LLMObs.enable(_tracer=mock_tracer, integrations_enabled=False) # only want botocore patched @@ -148,7 +148,7 @@ def _test_llmobs_invoke_stream( ): mock_tracer = DummyTracer(writer=DummyWriter(trace_flush_enabled=False)) pin = Pin.get_from(bedrock_client) - pin.override(bedrock_client, tracer=mock_tracer) + pin._override(bedrock_client, tracer=mock_tracer) # Need to disable and re-enable LLMObs service to use the mock tracer LLMObs.disable() LLMObs.enable(_tracer=mock_tracer, integrations_enabled=False) # only want botocore patched @@ -249,7 +249,7 @@ def test_llmobs_error(self, ddtrace_global_config, bedrock_client, mock_llmobs_s mock_tracer = DummyTracer(writer=DummyWriter(trace_flush_enabled=False)) pin = Pin.get_from(bedrock_client) - pin.override(bedrock_client, tracer=mock_tracer) + pin._override(bedrock_client, tracer=mock_tracer) # Need to disable and re-enable LLMObs service to use the mock tracer LLMObs.disable() LLMObs.enable(_tracer=mock_tracer, integrations_enabled=False) # only want botocore patched diff --git a/tests/contrib/cassandra/test.py b/tests/contrib/cassandra/test.py index 21b98d6396f..e63572076ee 100644 --- a/tests/contrib/cassandra/test.py +++ 
b/tests/contrib/cassandra/test.py @@ -360,7 +360,7 @@ def setUp(self): def _traced_session(self): tracer = DummyTracer() - Pin.get_from(self.cluster).clone(tracer=tracer).onto(self.cluster) + Pin.get_from(self.cluster)._clone(tracer=tracer).onto(self.cluster) return self.cluster.connect(self.TEST_KEYSPACE), tracer @@ -379,7 +379,9 @@ def setUp(self): def _traced_session(self): tracer = DummyTracer() # pin the global Cluster to test if they will conflict - Pin(service=self.TEST_SERVICE, tracer=tracer).onto(Cluster) + pin = Pin(service=self.TEST_SERVICE) + pin._tracer = tracer + pin.onto(Cluster) self.cluster = Cluster(port=CASSANDRA_CONFIG["port"]) return self.cluster.connect(self.TEST_KEYSPACE), tracer @@ -403,7 +405,9 @@ def _traced_session(self): Pin(service="not-%s" % self.TEST_SERVICE).onto(Cluster) self.cluster = Cluster(port=CASSANDRA_CONFIG["port"]) - Pin(service=self.TEST_SERVICE, tracer=tracer).onto(self.cluster) + pin = Pin(service=self.TEST_SERVICE) + pin._tracer = tracer + pin.onto(self.cluster) return self.cluster.connect(self.TEST_KEYSPACE), tracer def test_patch_unpatch(self): @@ -412,7 +416,7 @@ def test_patch_unpatch(self): patch() tracer = DummyTracer() - Pin.get_from(Cluster).clone(tracer=tracer).onto(Cluster) + Pin.get_from(Cluster)._clone(tracer=tracer).onto(Cluster) session = Cluster(port=CASSANDRA_CONFIG["port"]).connect(self.TEST_KEYSPACE) session.execute(self.TEST_QUERY) @@ -432,7 +436,7 @@ def test_patch_unpatch(self): # Test patch again patch() - Pin.get_from(Cluster).clone(tracer=tracer).onto(Cluster) + Pin.get_from(Cluster)._clone(tracer=tracer).onto(Cluster) session = Cluster(port=CASSANDRA_CONFIG["port"]).connect(self.TEST_KEYSPACE) session.execute(self.TEST_QUERY) @@ -454,7 +458,7 @@ def setUp(self): patch() self.tracer = DummyTracer() self.cluster = Cluster(port=CASSANDRA_CONFIG["port"]) - Pin.get_from(self.cluster).clone(tracer=self.tracer).onto(self.cluster) + Pin.get_from(self.cluster)._clone(tracer=self.tracer).onto(self.cluster) self.session = self.cluster.connect(self.TEST_KEYSPACE) def tearDown(self): diff --git a/tests/contrib/celery/base.py b/tests/contrib/celery/base.py index c2b7de22a54..040566ee57d 100644 --- a/tests/contrib/celery/base.py +++ b/tests/contrib/celery/base.py @@ -88,10 +88,11 @@ def celery_test_setup(self): def setUp(self): super(CeleryBaseTestCase, self).setUp() - self.pin = Pin(service="celery-unittest", tracer=self.tracer) + self.pin = Pin(service="celery-unittest") + self.pin._tracer = self.tracer # override pins to use our Dummy Tracer - Pin.override(self.app, tracer=self.tracer) - Pin.override(celery.beat.Scheduler, tracer=self.tracer) + Pin._override(self.app, tracer=self.tracer) + Pin._override(celery.beat.Scheduler, tracer=self.tracer) def tearDown(self): self.app = None diff --git a/tests/contrib/celery/test_tagging.py b/tests/contrib/celery/test_tagging.py index 6b88acf9434..d68ab3adbcb 100644 --- a/tests/contrib/celery/test_tagging.py +++ b/tests/contrib/celery/test_tagging.py @@ -61,14 +61,14 @@ def dummy_tracer(): @pytest.fixture(autouse=False) def traced_redis_celery_app(instrument_celery, dummy_tracer): Pin.get_from(redis_celery_app) - Pin.override(redis_celery_app, tracer=dummy_tracer) + Pin._override(redis_celery_app, tracer=dummy_tracer) yield redis_celery_app @pytest.fixture(autouse=False) def traced_amqp_celery_app(instrument_celery, dummy_tracer): Pin.get_from(amqp_celery_app) - Pin.override(amqp_celery_app, tracer=dummy_tracer) + Pin._override(amqp_celery_app, tracer=dummy_tracer) yield amqp_celery_app diff 
--git a/tests/contrib/config.py b/tests/contrib/config.py index 6ed086109a2..0b5f3d2bfbb 100644 --- a/tests/contrib/config.py +++ b/tests/contrib/config.py @@ -97,3 +97,13 @@ "host": os.getenv("TEST_KAFKA_HOST", "127.0.0.1"), "port": int(os.getenv("TEST_KAFKA_PORT", 29092)), } + +VALKEY_CONFIG = { + "host": os.getenv("TEST_VALKEY_HOST", "localhost"), + "port": int(os.getenv("TEST_VALKEY_PORT", 6379)), +} + +VALKEY_CLUSTER_CONFIG = { + "host": "127.0.0.1", + "ports": os.getenv("TEST_VALKEYCLUSTER_PORTS", "7000,7001,7002,7003,7004,7005"), +} diff --git a/tests/contrib/consul/test.py b/tests/contrib/consul/test.py index 285287f9e95..71e2f0c6da0 100644 --- a/tests/contrib/consul/test.py +++ b/tests/contrib/consul/test.py @@ -22,8 +22,8 @@ def setUp(self): host=CONSUL_CONFIG["host"], port=CONSUL_CONFIG["port"], ) - Pin.override(consul.Consul, service=self.TEST_SERVICE, tracer=self.tracer) - Pin.override(consul.Consul.KV, service=self.TEST_SERVICE, tracer=self.tracer) + Pin._override(consul.Consul, service=self.TEST_SERVICE, tracer=self.tracer) + Pin._override(consul.Consul.KV, service=self.TEST_SERVICE, tracer=self.tracer) self.c = c def tearDown(self): @@ -168,8 +168,8 @@ def setUp(self): host=CONSUL_CONFIG["host"], port=CONSUL_CONFIG["port"], ) - Pin.override(consul.Consul, tracer=self.tracer) - Pin.override(consul.Consul.KV, tracer=self.tracer) + Pin._override(consul.Consul, tracer=self.tracer) + Pin._override(consul.Consul.KV, tracer=self.tracer) self.c = c def tearDown(self): diff --git a/tests/contrib/dbapi/test_dbapi.py b/tests/contrib/dbapi/test_dbapi.py index c60e49c56af..00e1dc34c56 100644 --- a/tests/contrib/dbapi/test_dbapi.py +++ b/tests/contrib/dbapi/test_dbapi.py @@ -24,7 +24,8 @@ def test_execute_wrapped_is_called_and_returned(self): cursor.rowcount = 0 cursor.execute.return_value = "__result__" - pin = Pin("pin_name", tracer=self.tracer) + pin = Pin("pin_name") + pin._tracer = self.tracer traced_cursor = TracedCursor(cursor, pin, {}) # DEV: We always pass through the result assert "__result__" == traced_cursor.execute("__query__", "arg_1", kwarg1="kwarg1") @@ -37,7 +38,9 @@ def test_dbm_propagation_not_supported(self): # By default _dbm_propagator attribute should not be set or have a value of None. # DBM context propagation should be opt in. 
assert getattr(cfg, "_dbm_propagator", None) is None - traced_cursor = TracedCursor(cursor, Pin("dbapi_service", tracer=self.tracer), cfg) + pin = Pin("dbapi_service") + pin._tracer = self.tracer + traced_cursor = TracedCursor(cursor, pin, cfg) # Ensure dbm comment is not appended to sql statement traced_cursor.execute("SELECT * FROM db;") cursor.execute.assert_called_once_with("SELECT * FROM db;") @@ -53,7 +56,9 @@ def test_dbm_propagation_not_supported(self): def test_cursor_execute_with_dbm_injection(self): cursor = self.cursor cfg = IntegrationConfig(Config(), "dbapi", service="orders-db", _dbm_propagator=_DBM_Propagator(0, "query")) - traced_cursor = TracedCursor(cursor, Pin(service="orders-db", tracer=self.tracer), cfg) + pin = Pin(service="orders-db") + pin._tracer = self.tracer + traced_cursor = TracedCursor(cursor, pin, cfg) # The following operations should generate DBM comments traced_cursor.execute("SELECT * FROM db;") @@ -73,7 +78,8 @@ def test_executemany_wrapped_is_called_and_returned(self): cursor.rowcount = 0 cursor.executemany.return_value = "__result__" - pin = Pin("pin_name", tracer=self.tracer) + pin = Pin("pin_name") + pin._tracer = self.tracer traced_cursor = TracedCursor(cursor, pin, {}) # DEV: We always pass through the result assert "__result__" == traced_cursor.executemany("__query__", "arg_1", kwarg1="kwarg1") @@ -83,7 +89,8 @@ def test_fetchone_wrapped_is_called_and_returned(self): cursor = self.cursor cursor.rowcount = 0 cursor.fetchone.return_value = "__result__" - pin = Pin("pin_name", tracer=self.tracer) + pin = Pin("pin_name") + pin._tracer = self.tracer traced_cursor = TracedCursor(cursor, pin, {}) assert "__result__" == traced_cursor.fetchone("arg_1", kwarg1="kwarg1") cursor.fetchone.assert_called_once_with("arg_1", kwarg1="kwarg1") @@ -92,7 +99,8 @@ def test_fetchall_wrapped_is_called_and_returned(self): cursor = self.cursor cursor.rowcount = 0 cursor.fetchall.return_value = "__result__" - pin = Pin("pin_name", tracer=self.tracer) + pin = Pin("pin_name") + pin._tracer = self.tracer traced_cursor = TracedCursor(cursor, pin, {}) assert "__result__" == traced_cursor.fetchall("arg_1", kwarg1="kwarg1") cursor.fetchall.assert_called_once_with("arg_1", kwarg1="kwarg1") @@ -101,7 +109,8 @@ def test_fetchmany_wrapped_is_called_and_returned(self): cursor = self.cursor cursor.rowcount = 0 cursor.fetchmany.return_value = "__result__" - pin = Pin("pin_name", tracer=self.tracer) + pin = Pin("pin_name") + pin._tracer = self.tracer traced_cursor = TracedCursor(cursor, pin, {}) assert "__result__" == traced_cursor.fetchmany("arg_1", kwarg1="kwarg1") cursor.fetchmany.assert_called_once_with("arg_1", kwarg1="kwarg1") @@ -110,7 +119,8 @@ def test_correct_span_names(self): cursor = self.cursor tracer = self.tracer cursor.rowcount = 0 - pin = Pin("pin_name", tracer=tracer) + pin = Pin("pin_name") + pin._tracer = tracer traced_cursor = TracedCursor(cursor, pin, {}) traced_cursor.execute("arg_1", kwarg1="kwarg1") @@ -145,7 +155,8 @@ def test_when_pin_disabled_then_no_tracing(self): cursor.executemany.return_value = "__result__" tracer.enabled = False - pin = Pin("pin_name", tracer=tracer) + pin = Pin("pin_name") + pin._tracer = tracer traced_cursor = TracedCursor(cursor, pin, {}) assert "__result__" == traced_cursor.execute("arg_1", kwarg1="kwarg1") @@ -174,7 +185,8 @@ def test_span_info(self): cursor = self.cursor tracer = self.tracer cursor.rowcount = 123 - pin = Pin("my_service", tracer=tracer, tags={"pin1": "value_pin1"}) + pin = Pin("my_service", tags={"pin1": 
"value_pin1"}) + pin._tracer = tracer traced_cursor = TracedCursor(cursor, pin, {}) def method(): @@ -199,7 +211,8 @@ def test_cfg_service(self): cursor = self.cursor tracer = self.tracer cursor.rowcount = 123 - pin = Pin(None, tracer=tracer, tags={"pin1": "value_pin1"}) + pin = Pin(None, tags={"pin1": "value_pin1"}) + pin._tracer = tracer cfg = IntegrationConfig(Config(), "db-test", service="cfg-service") traced_cursor = TracedCursor(cursor, pin, cfg) @@ -214,7 +227,8 @@ def test_default_service(self): cursor = self.cursor tracer = self.tracer cursor.rowcount = 123 - pin = Pin(None, tracer=tracer, tags={"pin1": "value_pin1"}) + pin = Pin(None, tags={"pin1": "value_pin1"}) + pin._tracer = tracer traced_cursor = TracedCursor(cursor, pin, {}) @@ -229,7 +243,8 @@ def test_default_service_cfg(self): cursor = self.cursor tracer = self.tracer cursor.rowcount = 123 - pin = Pin(None, tracer=tracer, tags={"pin1": "value_pin1"}) + pin = Pin(tags={"pin1": "value_pin1"}) + pin._tracer = tracer cfg = IntegrationConfig(Config(), "db-test", _default_service="default-svc") traced_cursor = TracedCursor(cursor, pin, cfg) @@ -244,7 +259,8 @@ def test_service_cfg_and_pin(self): cursor = self.cursor tracer = self.tracer cursor.rowcount = 123 - pin = Pin("pin-svc", tracer=tracer, tags={"pin1": "value_pin1"}) + pin = Pin("pin-svc", tags={"pin1": "value_pin1"}) + pin._tracer = tracer cfg = IntegrationConfig(Config(), "db-test", _default_service="default-svc") traced_cursor = TracedCursor(cursor, pin, cfg) @@ -262,7 +278,8 @@ def test_django_traced_cursor_backward_compatibility(self): # implementation with the generic dbapi traced cursor, we had to make sure to add the tag 'sql.rows' that was # set by the legacy replaced implementation. cursor.rowcount = 123 - pin = Pin("my_service", tracer=tracer, tags={"pin1": "value_pin1"}) + pin = Pin("my_service", tags={"pin1": "value_pin1"}) + pin._tracer = tracer cfg = IntegrationConfig(Config(), "db-test") traced_cursor = TracedCursor(cursor, pin, cfg) @@ -286,7 +303,8 @@ def test_execute_wrapped_is_called_and_returned(self): cursor.rowcount = 0 cursor.execute.return_value = "__result__" - pin = Pin("pin_name", tracer=self.tracer) + pin = Pin("pin_name") + pin._tracer = self.tracer traced_cursor = FetchTracedCursor(cursor, pin, {}) assert "__result__" == traced_cursor.execute("__query__", "arg_1", kwarg1="kwarg1") cursor.execute.assert_called_once_with("__query__", "arg_1", kwarg1="kwarg1") @@ -296,7 +314,8 @@ def test_executemany_wrapped_is_called_and_returned(self): cursor.rowcount = 0 cursor.executemany.return_value = "__result__" - pin = Pin("pin_name", tracer=self.tracer) + pin = Pin("pin_name") + pin._tracer = self.tracer traced_cursor = FetchTracedCursor(cursor, pin, {}) assert "__result__" == traced_cursor.executemany("__query__", "arg_1", kwarg1="kwarg1") cursor.executemany.assert_called_once_with("__query__", "arg_1", kwarg1="kwarg1") @@ -305,7 +324,8 @@ def test_fetchone_wrapped_is_called_and_returned(self): cursor = self.cursor cursor.rowcount = 0 cursor.fetchone.return_value = "__result__" - pin = Pin("pin_name", tracer=self.tracer) + pin = Pin("pin_name") + pin._tracer = self.tracer traced_cursor = FetchTracedCursor(cursor, pin, {}) assert "__result__" == traced_cursor.fetchone("arg_1", kwarg1="kwarg1") cursor.fetchone.assert_called_once_with("arg_1", kwarg1="kwarg1") @@ -314,7 +334,8 @@ def test_fetchall_wrapped_is_called_and_returned(self): cursor = self.cursor cursor.rowcount = 0 cursor.fetchall.return_value = "__result__" - pin = Pin("pin_name", 
tracer=self.tracer) + pin = Pin("pin_name") + pin._tracer = self.tracer traced_cursor = FetchTracedCursor(cursor, pin, {}) assert "__result__" == traced_cursor.fetchall("arg_1", kwarg1="kwarg1") cursor.fetchall.assert_called_once_with("arg_1", kwarg1="kwarg1") @@ -323,7 +344,8 @@ def test_fetchmany_wrapped_is_called_and_returned(self): cursor = self.cursor cursor.rowcount = 0 cursor.fetchmany.return_value = "__result__" - pin = Pin("pin_name", tracer=self.tracer) + pin = Pin("pin_name") + pin._tracer = self.tracer traced_cursor = FetchTracedCursor(cursor, pin, {}) assert "__result__" == traced_cursor.fetchmany("arg_1", kwarg1="kwarg1") cursor.fetchmany.assert_called_once_with("arg_1", kwarg1="kwarg1") @@ -332,7 +354,8 @@ def test_correct_span_names(self): cursor = self.cursor tracer = self.tracer cursor.rowcount = 0 - pin = Pin("pin_name", tracer=tracer) + pin = Pin("pin_name") + pin._tracer = tracer traced_cursor = FetchTracedCursor(cursor, pin, {}) traced_cursor.execute("arg_1", kwarg1="kwarg1") @@ -367,7 +390,8 @@ def test_when_pin_disabled_then_no_tracing(self): cursor.executemany.return_value = "__result__" tracer.enabled = False - pin = Pin("pin_name", tracer=tracer) + pin = Pin("pin_name") + pin._tracer = tracer traced_cursor = FetchTracedCursor(cursor, pin, {}) assert "__result__" == traced_cursor.execute("arg_1", kwarg1="kwarg1") @@ -396,7 +420,8 @@ def test_span_info(self): cursor = self.cursor tracer = self.tracer cursor.rowcount = 123 - pin = Pin("my_service", tracer=tracer, tags={"pin1": "value_pin1"}) + pin = Pin("my_service", tags={"pin1": "value_pin1"}) + pin._tracer = tracer traced_cursor = FetchTracedCursor(cursor, pin, {}) def method(): @@ -422,7 +447,8 @@ def test_django_traced_cursor_backward_compatibility(self): # implementation with the generic dbapi traced cursor, we had to make sure to add the tag 'sql.rows' that was # set by the legacy replaced implementation. 
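        # (rowcount is stubbed to a recognizable value so the assertions can
        # check that the generic traced cursor still reports it under the
        # 'sql.rows' tag described above.)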
cursor.rowcount = 123 - pin = Pin("my_service", tracer=tracer, tags={"pin1": "value_pin1"}) + pin = Pin("my_service", tags={"pin1": "value_pin1"}) + pin._tracer = tracer traced_cursor = FetchTracedCursor(cursor, pin, {}) def method(): @@ -440,7 +466,8 @@ class Unknown(object): cursor = self.cursor tracer = self.tracer cursor.rowcount = Unknown() - pin = Pin("my_service", tracer=tracer, tags={"pin1": "value_pin1"}) + pin = Pin("my_service", tags={"pin1": "value_pin1"}) + pin._tracer = tracer traced_cursor = FetchTracedCursor(cursor, pin, {}) def method(): @@ -453,7 +480,8 @@ def method(): def test_callproc_can_handle_arbitrary_args(self): cursor = self.cursor tracer = self.tracer - pin = Pin("pin_name", tracer=tracer) + pin = Pin("pin_name") + pin._tracer = tracer cursor.callproc.return_value = "gme --> moon" traced_cursor = TracedCursor(cursor, pin, {}) @@ -484,7 +512,9 @@ def test_cursor_execute_fetch_with_dbm_injection(self): cursor = self.cursor dbm_propagator = _DBM_Propagator(0, "query") cfg = IntegrationConfig(Config(), "dbapi", service="dbapi_service", _dbm_propagator=dbm_propagator) - traced_cursor = FetchTracedCursor(cursor, Pin("dbapi_service", tracer=self.tracer), cfg) + pin = Pin("dbapi_service") + pin._tracer = self.tracer + traced_cursor = FetchTracedCursor(cursor, pin, cfg) # The following operations should not generate DBM comments traced_cursor.fetchone() @@ -517,7 +547,8 @@ def setUp(self): self.connection = mock.Mock() def test_cursor_class(self): - pin = Pin("pin_name", tracer=self.tracer) + pin = Pin("pin_name") + pin._tracer = self.tracer # Default traced_connection = TracedConnection(self.connection, pin=pin) @@ -537,7 +568,8 @@ def test_commit_is_traced(self): connection = self.connection tracer = self.tracer connection.commit.return_value = None - pin = Pin("pin_name", tracer=tracer) + pin = Pin("pin_name") + pin._tracer = tracer traced_connection = TracedConnection(connection, pin) traced_connection.commit() assert tracer.pop()[0].name == "mock.connection.commit" @@ -547,7 +579,8 @@ def test_rollback_is_traced(self): connection = self.connection tracer = self.tracer connection.rollback.return_value = None - pin = Pin("pin_name", tracer=tracer) + pin = Pin("pin_name") + pin._tracer = tracer traced_connection = TracedConnection(connection, pin) traced_connection.rollback() assert tracer.pop()[0].name == "mock.connection.rollback" @@ -588,7 +621,8 @@ def cursor(self): def commit(self): pass - pin = Pin("pin", tracer=self.tracer) + pin = Pin("pin") + pin._tracer = self.tracer conn = TracedConnection(ConnectionConnection(), pin) with conn as conn2: conn2.commit() diff --git a/tests/contrib/dbapi/test_dbapi_appsec.py b/tests/contrib/dbapi/test_dbapi_appsec.py index 062a4ca667e..f1afbdb24af 100644 --- a/tests/contrib/dbapi/test_dbapi_appsec.py +++ b/tests/contrib/dbapi/test_dbapi_appsec.py @@ -50,7 +50,9 @@ def test_tainted_query(self): cursor = self.cursor cfg = IntegrationConfig(Config(), "sqlite", service="dbapi_service") - traced_cursor = TracedCursor(cursor, Pin("dbapi_service", tracer=self.tracer), cfg) + pin = Pin("dbapi_service") + pin._tracer = self.tracer + traced_cursor = TracedCursor(cursor, pin, cfg) traced_cursor.execute(query) cursor.execute.assert_called_once_with(query) @@ -73,7 +75,9 @@ def test_tainted_query_args(self): cursor = self.cursor cfg = IntegrationConfig(Config(), "sqlite", service="dbapi_service") - traced_cursor = TracedCursor(cursor, Pin("dbapi_service", tracer=self.tracer), cfg) + pin = Pin("dbapi_service") + pin._tracer = self.tracer + 
traced_cursor = TracedCursor(cursor, pin, cfg) traced_cursor.execute(query, (query_arg,)) cursor.execute.assert_called_once_with(query, (query_arg,)) @@ -88,7 +92,9 @@ def test_untainted_query(self): cursor = self.cursor cfg = IntegrationConfig(Config(), "sqlite", service="dbapi_service") - traced_cursor = TracedCursor(cursor, Pin("dbapi_service", tracer=self.tracer), cfg) + pin = Pin("dbapi_service") + pin._tracer = self.tracer + traced_cursor = TracedCursor(cursor, pin, cfg) traced_cursor.execute(query) cursor.execute.assert_called_once_with(query) @@ -104,7 +110,9 @@ def test_untainted_query_and_args(self): cursor = self.cursor cfg = IntegrationConfig(Config(), "sqlite", service="dbapi_service") - traced_cursor = TracedCursor(cursor, Pin("dbapi_service", tracer=self.tracer), cfg) + pin = Pin("dbapi_service") + pin._tracer = self.tracer + traced_cursor = TracedCursor(cursor, pin, cfg) traced_cursor.execute(query, (query_arg,)) cursor.execute.assert_called_once_with(query, (query_arg,)) @@ -124,7 +132,9 @@ def test_tainted_query_iast_disabled(self): cursor = self.cursor cfg = IntegrationConfig(Config(), "sqlite", service="dbapi_service") - traced_cursor = TracedCursor(cursor, Pin("dbapi_service", tracer=self.tracer), cfg) + pin = Pin("dbapi_service") + pin._tracer = self.tracer + traced_cursor = TracedCursor(cursor, pin, cfg) traced_cursor.execute(query) cursor.execute.assert_called_once_with(query) diff --git a/tests/contrib/dbapi_async/test_dbapi_async.py b/tests/contrib/dbapi_async/test_dbapi_async.py index ceb2b0cf6a1..dd16b7a2e33 100644 --- a/tests/contrib/dbapi_async/test_dbapi_async.py +++ b/tests/contrib/dbapi_async/test_dbapi_async.py @@ -26,7 +26,8 @@ async def test_execute_wrapped_is_called_and_returned(self): cursor.rowcount = 0 cursor.execute.return_value = "__result__" - pin = Pin("pin_name", tracer=self.tracer) + pin = Pin("pin_name") + pin._tracer = self.tracer traced_cursor = TracedAsyncCursor(cursor, pin, {}) # DEV: We always pass through the result assert "__result__" == await traced_cursor.execute("__query__", "arg_1", kwarg1="kwarg1") @@ -40,7 +41,9 @@ async def test_dbm_propagation_not_supported(self): # By default _dbm_propagator attribute should not be set or have a value of None. # DBM context propagation should be opt in. 
assert getattr(cfg, "_dbm_propagator", None) is None - traced_cursor = TracedAsyncCursor(cursor, Pin("dbapi_service", tracer=self.tracer), cfg) + pin = Pin(service="dbapi_service") + pin._tracer = self.tracer + traced_cursor = TracedAsyncCursor(cursor, pin, cfg) # Ensure dbm comment is not appended to sql statement await traced_cursor.execute("SELECT * FROM db;") cursor.execute.assert_called_once_with("SELECT * FROM db;") @@ -57,7 +60,9 @@ async def test_dbm_propagation_not_supported(self): async def test_cursor_execute_with_dbm_injection(self): cursor = self.cursor cfg = IntegrationConfig(Config(), "dbapi", service="orders-db", _dbm_propagator=_DBM_Propagator(0, "query")) - traced_cursor = TracedAsyncCursor(cursor, Pin(service="orders-db", tracer=self.tracer), cfg) + pin = Pin(service="orders-db") + pin._tracer = self.tracer + traced_cursor = TracedAsyncCursor(cursor, pin, cfg) # The following operations should generate DBM comments await traced_cursor.execute("SELECT * FROM db;") @@ -78,7 +83,8 @@ async def test_executemany_wrapped_is_called_and_returned(self): cursor.rowcount = 0 cursor.executemany.return_value = "__result__" - pin = Pin("pin_name", tracer=self.tracer) + pin = Pin("pin_name") + pin._tracer = self.tracer traced_cursor = TracedAsyncCursor(cursor, pin, {}) # DEV: We always pass through the result assert "__result__" == await traced_cursor.executemany("__query__", "arg_1", kwarg1="kwarg1") @@ -89,7 +95,8 @@ async def test_fetchone_wrapped_is_called_and_returned(self): cursor = self.cursor cursor.rowcount = 0 cursor.fetchone.return_value = "__result__" - pin = Pin("pin_name", tracer=self.tracer) + pin = Pin("pin_name") + pin._tracer = self.tracer traced_cursor = TracedAsyncCursor(cursor, pin, {}) assert "__result__" == await traced_cursor.fetchone("arg_1", kwarg1="kwarg1") cursor.fetchone.assert_called_once_with("arg_1", kwarg1="kwarg1") @@ -101,7 +108,9 @@ async def test_cursor_async_connection(self): def method(): pass - async with TracedAsyncCursor(self.cursor, Pin("dbapi_service", tracer=self.tracer), {}) as cursor: + pin = Pin("dbapi_service") + pin._tracer = self.tracer + async with TracedAsyncCursor(self.cursor, pin, {}) as cursor: await cursor.execute("""select 'one' as x""") await cursor.execute("""select 'blah'""") @@ -120,7 +129,8 @@ async def test_fetchall_wrapped_is_called_and_returned(self): cursor = self.cursor cursor.rowcount = 0 cursor.fetchall.return_value = "__result__" - pin = Pin("pin_name", tracer=self.tracer) + pin = Pin("pin_name") + pin._tracer = self.tracer traced_cursor = TracedAsyncCursor(cursor, pin, {}) assert "__result__" == await traced_cursor.fetchall("arg_1", kwarg1="kwarg1") cursor.fetchall.assert_called_once_with("arg_1", kwarg1="kwarg1") @@ -130,7 +140,8 @@ async def test_fetchmany_wrapped_is_called_and_returned(self): cursor = self.cursor cursor.rowcount = 0 cursor.fetchmany.return_value = "__result__" - pin = Pin("pin_name", tracer=self.tracer) + pin = Pin("pin_name") + pin._tracer = self.tracer traced_cursor = TracedAsyncCursor(cursor, pin, {}) assert "__result__" == await traced_cursor.fetchmany("arg_1", kwarg1="kwarg1") cursor.fetchmany.assert_called_once_with("arg_1", kwarg1="kwarg1") @@ -140,7 +151,8 @@ async def test_correct_span_names(self): cursor = self.cursor tracer = self.tracer cursor.rowcount = 0 - pin = Pin("pin_name", tracer=tracer) + pin = Pin("pin_name") + pin._tracer = tracer traced_cursor = TracedAsyncCursor(cursor, pin, {}) await traced_cursor.execute("arg_1", kwarg1="kwarg1") @@ -176,7 +188,8 @@ async def 
test_when_pin_disabled_then_no_tracing(self): cursor.executemany.return_value = "__result__" tracer.enabled = False - pin = Pin("pin_name", tracer=tracer) + pin = Pin("pin_name") + pin._tracer = tracer traced_cursor = TracedAsyncCursor(cursor, pin, {}) assert "__result__" == await traced_cursor.execute("arg_1", kwarg1="kwarg1") @@ -206,7 +219,8 @@ async def test_span_info(self): cursor = self.cursor tracer = self.tracer cursor.rowcount = 123 - pin = Pin("my_service", tracer=tracer, tags={"pin1": "value_pin1"}) + pin = Pin("my_service", tags={"pin1": "value_pin1"}) + pin._tracer = tracer traced_cursor = TracedAsyncCursor(cursor, pin, {}) async def method(): @@ -232,7 +246,8 @@ async def test_cfg_service(self): cursor = self.cursor tracer = self.tracer cursor.rowcount = 123 - pin = Pin(None, tracer=tracer, tags={"pin1": "value_pin1"}) + pin = Pin(None, tags={"pin1": "value_pin1"}) + pin._tracer = tracer cfg = IntegrationConfig(Config(), "db-test", service="cfg-service") traced_cursor = TracedAsyncCursor(cursor, pin, cfg) @@ -248,7 +263,8 @@ async def test_default_service(self): cursor = self.cursor tracer = self.tracer cursor.rowcount = 123 - pin = Pin(None, tracer=tracer, tags={"pin1": "value_pin1"}) + pin = Pin(None, tags={"pin1": "value_pin1"}) + pin._tracer = tracer traced_cursor = TracedAsyncCursor(cursor, pin, {}) @@ -264,7 +280,8 @@ async def test_default_service_cfg(self): cursor = self.cursor tracer = self.tracer cursor.rowcount = 123 - pin = Pin(None, tracer=tracer, tags={"pin1": "value_pin1"}) + pin = Pin(None, tags={"pin1": "value_pin1"}) + pin._tracer = tracer cfg = IntegrationConfig(Config(), "db-test", _default_service="default-svc") traced_cursor = TracedAsyncCursor(cursor, pin, cfg) @@ -280,7 +297,8 @@ async def test_service_cfg_and_pin(self): cursor = self.cursor tracer = self.tracer cursor.rowcount = 123 - pin = Pin("pin-svc", tracer=tracer, tags={"pin1": "value_pin1"}) + pin = Pin("pin-svc", tags={"pin1": "value_pin1"}) + pin._tracer = tracer cfg = IntegrationConfig(Config(), "db-test", _default_service="default-svc") traced_cursor = TracedAsyncCursor(cursor, pin, cfg) @@ -299,7 +317,8 @@ async def test_django_traced_cursor_backward_compatibility(self): # implementation with the generic dbapi traced cursor, we had to make sure to add the tag 'sql.rows' that was # set by the legacy replaced implementation. 
cursor.rowcount = 123 - pin = Pin("my_service", tracer=tracer, tags={"pin1": "value_pin1"}) + pin = Pin("my_service", tags={"pin1": "value_pin1"}) + pin._tracer = tracer cfg = IntegrationConfig(Config(), "db-test") traced_cursor = TracedAsyncCursor(cursor, pin, cfg) @@ -324,7 +343,8 @@ async def test_execute_wrapped_is_called_and_returned(self): cursor.rowcount = 0 cursor.execute.return_value = "__result__" - pin = Pin("pin_name", tracer=self.tracer) + pin = Pin("pin_name") + pin._tracer = self.tracer traced_cursor = FetchTracedAsyncCursor(cursor, pin, {}) assert "__result__" == await traced_cursor.execute("__query__", "arg_1", kwarg1="kwarg1") cursor.execute.assert_called_once_with("__query__", "arg_1", kwarg1="kwarg1") @@ -335,7 +355,8 @@ async def test_executemany_wrapped_is_called_and_returned(self): cursor.rowcount = 0 cursor.executemany.return_value = "__result__" - pin = Pin("pin_name", tracer=self.tracer) + pin = Pin("pin_name") + pin._tracer = self.tracer traced_cursor = FetchTracedAsyncCursor(cursor, pin, {}) assert "__result__" == await traced_cursor.executemany("__query__", "arg_1", kwarg1="kwarg1") cursor.executemany.assert_called_once_with("__query__", "arg_1", kwarg1="kwarg1") @@ -345,7 +366,8 @@ async def test_fetchone_wrapped_is_called_and_returned(self): cursor = self.cursor cursor.rowcount = 0 cursor.fetchone.return_value = "__result__" - pin = Pin("pin_name", tracer=self.tracer) + pin = Pin("pin_name") + pin._tracer = self.tracer traced_cursor = FetchTracedAsyncCursor(cursor, pin, {}) assert "__result__" == await traced_cursor.fetchone("arg_1", kwarg1="kwarg1") cursor.fetchone.assert_called_once_with("arg_1", kwarg1="kwarg1") @@ -355,7 +377,8 @@ async def test_fetchall_wrapped_is_called_and_returned(self): cursor = self.cursor cursor.rowcount = 0 cursor.fetchall.return_value = "__result__" - pin = Pin("pin_name", tracer=self.tracer) + pin = Pin("pin_name") + pin._tracer = self.tracer traced_cursor = FetchTracedAsyncCursor(cursor, pin, {}) assert "__result__" == await traced_cursor.fetchall("arg_1", kwarg1="kwarg1") cursor.fetchall.assert_called_once_with("arg_1", kwarg1="kwarg1") @@ -365,7 +388,8 @@ async def test_fetchmany_wrapped_is_called_and_returned(self): cursor = self.cursor cursor.rowcount = 0 cursor.fetchmany.return_value = "__result__" - pin = Pin("pin_name", tracer=self.tracer) + pin = Pin("pin_name") + pin._tracer = self.tracer traced_cursor = FetchTracedAsyncCursor(cursor, pin, {}) assert "__result__" == await traced_cursor.fetchmany("arg_1", kwarg1="kwarg1") cursor.fetchmany.assert_called_once_with("arg_1", kwarg1="kwarg1") @@ -375,7 +399,8 @@ async def test_correct_span_names(self): cursor = self.cursor tracer = self.tracer cursor.rowcount = 0 - pin = Pin("pin_name", tracer=tracer) + pin = Pin("pin_name") + pin._tracer = tracer traced_cursor = FetchTracedAsyncCursor(cursor, pin, {}) await traced_cursor.execute("arg_1", kwarg1="kwarg1") @@ -411,7 +436,8 @@ async def test_when_pin_disabled_then_no_tracing(self): cursor.executemany.return_value = "__result__" tracer.enabled = False - pin = Pin("pin_name", tracer=tracer) + pin = Pin("pin_name") + pin._tracer = tracer traced_cursor = FetchTracedAsyncCursor(cursor, pin, {}) assert "__result__" == await traced_cursor.execute("arg_1", kwarg1="kwarg1") @@ -441,7 +467,8 @@ async def test_span_info(self): cursor = self.cursor tracer = self.tracer cursor.rowcount = 123 - pin = Pin("my_service", tracer=tracer, tags={"pin1": "value_pin1"}) + pin = Pin("my_service", tags={"pin1": "value_pin1"}) + pin._tracer = tracer 
traced_cursor = FetchTracedAsyncCursor(cursor, pin, {}) async def method(): @@ -468,7 +495,8 @@ async def test_django_traced_cursor_backward_compatibility(self): # implementation with the generic dbapi traced cursor, we had to make sure to add the tag 'sql.rows' that was # set by the legacy replaced implementation. cursor.rowcount = 123 - pin = Pin("my_service", tracer=tracer, tags={"pin1": "value_pin1"}) + pin = Pin("my_service", tags={"pin1": "value_pin1"}) + pin._tracer = tracer traced_cursor = FetchTracedAsyncCursor(cursor, pin, {}) async def method(): @@ -487,7 +515,8 @@ class Unknown(object): cursor = self.cursor tracer = self.tracer cursor.rowcount = Unknown() - pin = Pin("my_service", tracer=tracer, tags={"pin1": "value_pin1"}) + pin = Pin("my_service", tags={"pin1": "value_pin1"}) + pin._tracer = tracer traced_cursor = FetchTracedAsyncCursor(cursor, pin, {}) async def method(): @@ -501,7 +530,8 @@ async def method(): async def test_callproc_can_handle_arbitrary_args(self): cursor = self.cursor tracer = self.tracer - pin = Pin("pin_name", tracer=tracer) + pin = Pin("pin_name") + pin._tracer = tracer cursor.callproc.return_value = "gme --> moon" traced_cursor = TracedAsyncCursor(cursor, pin, {}) @@ -533,7 +563,9 @@ async def test_cursor_execute_fetch_with_dbm_injection(self): cursor = self.cursor dbm_propagator = _DBM_Propagator(0, "query") cfg = IntegrationConfig(Config(), "dbapi", service="dbapi_service", _dbm_propagator=dbm_propagator) - traced_cursor = FetchTracedAsyncCursor(cursor, Pin("dbapi_service", tracer=self.tracer), cfg) + pin = Pin("dbapi_service") + pin._tracer = self.tracer + traced_cursor = FetchTracedAsyncCursor(cursor, pin, cfg) # The following operations should not generate DBM comments await traced_cursor.fetchone() @@ -567,7 +599,8 @@ def setUp(self): @mark_asyncio async def test_cursor_class(self): - pin = Pin("pin_name", tracer=self.tracer) + pin = Pin("pin_name") + pin._tracer = self.tracer # Default traced_connection = TracedAsyncConnection(self.connection, pin=pin) @@ -588,7 +621,8 @@ async def test_commit_is_traced(self): connection = self.connection tracer = self.tracer connection.commit.return_value = None - pin = Pin("pin_name", tracer=tracer) + pin = Pin("pin_name") + pin._tracer = tracer traced_connection = TracedAsyncConnection(connection, pin) await traced_connection.commit() assert tracer.pop()[0].name == "mock.connection.commit" @@ -599,7 +633,8 @@ async def test_rollback_is_traced(self): connection = self.connection tracer = self.tracer connection.rollback.return_value = None - pin = Pin("pin_name", tracer=tracer) + pin = Pin("pin_name") + pin._tracer = tracer traced_connection = TracedAsyncConnection(connection, pin) await traced_connection.rollback() assert tracer.pop()[0].name == "mock.connection.rollback" @@ -641,7 +676,8 @@ def cursor(self): async def commit(self): pass - pin = Pin("pin", tracer=self.tracer) + pin = Pin("pin") + pin._tracer = self.tracer conn = TracedAsyncConnection(ConnectionConnection(), pin) async with conn as conn2: await conn2.commit() diff --git a/tests/contrib/django/conftest.py b/tests/contrib/django/conftest.py index 3dd992681b4..7a398abba46 100644 --- a/tests/contrib/django/conftest.py +++ b/tests/contrib/django/conftest.py @@ -32,7 +32,7 @@ def tracer(): # Patch Django and override tracer to be our test tracer pin = Pin.get_from(django) original_tracer = pin.tracer - Pin.override(django, tracer=tracer) + Pin._override(django, tracer=tracer) # Yield to our test yield tracer @@ -41,7 +41,7 @@ def tracer(): # Reset 
the tracer pinned to Django and unpatch # DEV: unable to properly unpatch and reload django app with each test # unpatch() - Pin.override(django, tracer=original_tracer) + Pin._override(django, tracer=original_tracer) @pytest.fixture diff --git a/tests/contrib/django/test_django.py b/tests/contrib/django/test_django.py index 79baceb1652..03fdffeaf45 100644 --- a/tests/contrib/django/test_django.py +++ b/tests/contrib/django/test_django.py @@ -1588,7 +1588,7 @@ def test_connection(client, test_spans): span = spans[0] assert span.name == "sqlite.query" - assert span.service == "{}" + assert span.service == "{}", span.service assert span.span_type == "sql" assert span.get_tag("django.db.vendor") == "sqlite" assert span.get_tag("django.db.alias") == "default" diff --git a/tests/contrib/django/test_django_dbm.py b/tests/contrib/django/test_django_dbm.py index d44f90f3208..cd2eb436c42 100644 --- a/tests/contrib/django/test_django_dbm.py +++ b/tests/contrib/django/test_django_dbm.py @@ -21,7 +21,7 @@ def get_cursor(tracer, service=None, propagation_mode="service", tags={}): pin = Pin.get_from(cursor) assert pin is not None - pin.clone(tracer=tracer, tags={**pin.tags, **tags}).onto(cursor) + pin._clone(tracer=tracer, tags={**pin.tags, **tags}).onto(cursor) return cursor diff --git a/tests/contrib/dogpile_cache/test_tracing.py b/tests/contrib/dogpile_cache/test_tracing.py index fec78818eda..ac42a8512ac 100644 --- a/tests/contrib/dogpile_cache/test_tracing.py +++ b/tests/contrib/dogpile_cache/test_tracing.py @@ -31,7 +31,7 @@ def region(tracer): # The backend is trivial so we can use memory to simplify test setup. test_region = dogpile.cache.make_region(name="TestRegion", key_mangler=lambda x: x) test_region.configure("dogpile.cache.memory") - Pin.override(dogpile.cache, tracer=tracer) + Pin._override(dogpile.cache, tracer=tracer) return test_region diff --git a/tests/contrib/dramatiq/test_integration.py b/tests/contrib/dramatiq/test_integration.py index 526aadc3861..990bef92bdc 100644 --- a/tests/contrib/dramatiq/test_integration.py +++ b/tests/contrib/dramatiq/test_integration.py @@ -35,7 +35,9 @@ def test_idempotent_unpatch(self): unpatch() tracer = DummyTracer() - Pin(tracer=tracer).onto(dramatiq) + pin = Pin() + pin._tracer = tracer + pin.onto(dramatiq) @dramatiq.actor def fn_task(): @@ -51,7 +53,9 @@ def test_fn_task_synchronous(self): # the body of the function is not instrumented so calling it # directly doesn't create a trace tracer = DummyTracer() - Pin(tracer=tracer).onto(dramatiq) + pin = Pin() + pin._tracer = tracer + pin.onto(dramatiq) @dramatiq.actor def fn_task(): diff --git a/tests/contrib/elasticsearch/test_elasticsearch.py b/tests/contrib/elasticsearch/test_elasticsearch.py index 6e381bc1e31..f1c461f4e51 100644 --- a/tests/contrib/elasticsearch/test_elasticsearch.py +++ b/tests/contrib/elasticsearch/test_elasticsearch.py @@ -92,7 +92,9 @@ def setUp(self): # `custom_tag` is a custom tag that can be set via `Pin`. 
"custom_tag": "bar", } - Pin(tracer=self.tracer, tags=tags).onto(es.transport) + pin = Pin(tags=tags) + pin._tracer = self.tracer + pin.onto(es.transport) self.create_index(es) patch() @@ -225,7 +227,9 @@ def test_patch_unpatch(self): patch() es = self._get_es() - Pin(tracer=self.tracer).onto(es.transport) + pin = Pin() + pin._tracer = self.tracer + pin.onto(es.transport) # Test index creation self.create_index(es) @@ -253,7 +257,9 @@ def test_patch_unpatch(self): patch() es = self._get_es() - Pin(tracer=self.tracer).onto(es.transport) + pin = Pin() + pin._tracer = self.tracer + pin.onto(es.transport) # Test index creation self.create_index(es) @@ -286,7 +292,9 @@ def test_user_specified_service_v1(self): assert config.service == "mysvc" self.create_index(self.es) - Pin(service="es", tracer=self.tracer).onto(self.es.transport) + pin = Pin(service="es") + pin._tracer = self.tracer + pin.onto(self.es.transport) spans = self.get_spans() self.reset() assert len(spans) == 1 @@ -303,7 +311,9 @@ def test_unspecified_service_v0(self): @TracerTestCase.run_in_subprocess(env_overrides=dict(DD_TRACE_SPAN_ATTRIBUTE_SCHEMA="v1")) def test_unspecified_service_v1(self): self.create_index(self.es) - Pin(service="es", tracer=self.tracer).onto(self.es.transport) + pin = Pin(service="es") + pin._tracer = self.tracer + pin.onto(self.es.transport) spans = self.get_spans() self.reset() assert len(spans) == 1 diff --git a/tests/contrib/flask/__init__.py b/tests/contrib/flask/__init__.py index a512a79f196..2a6fc49285c 100644 --- a/tests/contrib/flask/__init__.py +++ b/tests/contrib/flask/__init__.py @@ -36,7 +36,7 @@ def setUp(self): self.app = flask.Flask(__name__, template_folder="test_templates/") self.app.test_client_class = DDFlaskTestClient self.client = self.app.test_client() - Pin.override(self.app, tracer=self.tracer) + Pin._override(self.app, tracer=self.tracer) def tearDown(self): super(BaseFlaskTestCase, self).tearDown() diff --git a/tests/contrib/flask/test_blueprint.py b/tests/contrib/flask/test_blueprint.py index 96401dfa1a9..dffa959eceb 100644 --- a/tests/contrib/flask/test_blueprint.py +++ b/tests/contrib/flask/test_blueprint.py @@ -36,7 +36,9 @@ def test_blueprint_register(self): We do not use the ``flask.Flask`` app ``Pin`` """ bp = flask.Blueprint("pinned", __name__) - Pin(service="flask-bp", tracer=self.tracer).onto(bp) + pin = Pin(service="flask-bp") + pin._tracer = self.tracer + pin.onto(bp) # DEV: This is more common than calling ``flask.Blueprint.register`` directly self.app.register_blueprint(bp) @@ -58,7 +60,9 @@ def test_blueprint_add_url_rule(self): """ # When the Blueprint has a Pin attached bp = flask.Blueprint("pinned", __name__) - Pin(service="flask-bp", tracer=self.tracer).onto(bp) + pin = Pin(service="flask-bp") + pin._tracer = self.tracer + pin.onto(bp) @bp.route("/") def test_view(): @@ -113,7 +117,7 @@ def test_blueprint_request_pin_override(self): We create the expected spans """ bp = flask.Blueprint("bp", __name__) - Pin.override(bp, service="flask-bp", tracer=self.tracer) + Pin._override(bp, service="flask-bp", tracer=self.tracer) @bp.route("/") def test(): diff --git a/tests/contrib/flask/test_templates/test_insecure.html b/tests/contrib/flask/test_templates/test_insecure.html new file mode 100644 index 00000000000..a1921295a57 --- /dev/null +++ b/tests/contrib/flask/test_templates/test_insecure.html @@ -0,0 +1 @@ +hello {{world|safe}} diff --git a/tests/contrib/flask_autopatch/test_flask_autopatch.py b/tests/contrib/flask_autopatch/test_flask_autopatch.py index 
27c4b47e2d0..9d0bfb7820b 100644 --- a/tests/contrib/flask_autopatch/test_flask_autopatch.py +++ b/tests/contrib/flask_autopatch/test_flask_autopatch.py @@ -17,7 +17,7 @@ class FlaskAutopatchTestCase(TracerTestCase): def setUp(self): super(FlaskAutopatchTestCase, self).setUp() self.app = flask.Flask(__name__) - Pin.override(self.app, service="test-flask", tracer=self.tracer) + Pin._override(self.app, service="test-flask", tracer=self.tracer) self.client = self.app.test_client() def test_patched(self): diff --git a/tests/contrib/flask_cache/test_utils.py b/tests/contrib/flask_cache/test_utils.py index fc5d640b5cf..6e779d21109 100644 --- a/tests/contrib/flask_cache/test_utils.py +++ b/tests/contrib/flask_cache/test_utils.py @@ -6,7 +6,7 @@ from ddtrace.contrib.internal.flask_cache.utils import _extract_client from ddtrace.contrib.internal.flask_cache.utils import _extract_conn_tags from ddtrace.contrib.internal.flask_cache.utils import _resource_from_cache_prefix -from ddtrace.trace import Tracer +from ddtrace.trace import tracer from ..config import MEMCACHED_CONFIG from ..config import REDIS_CONFIG @@ -17,7 +17,6 @@ class FlaskCacheUtilsTest(unittest.TestCase): def test_extract_redis_connection_metadata(self): # create the TracedCache instance for a Flask app - tracer = Tracer() Cache = get_traced_cache(tracer, service=self.SERVICE) app = Flask(__name__) config = { @@ -37,7 +36,6 @@ def test_extract_redis_connection_metadata(self): def test_extract_memcached_connection_metadata(self): # create the TracedCache instance for a Flask app - tracer = Tracer() Cache = get_traced_cache(tracer, service=self.SERVICE) app = Flask(__name__) config = { @@ -56,7 +54,6 @@ def test_extract_memcached_connection_metadata(self): def test_extract_memcached_multiple_connection_metadata(self): # create the TracedCache instance for a Flask app - tracer = Tracer() Cache = get_traced_cache(tracer, service=self.SERVICE) app = Flask(__name__) config = { @@ -78,7 +75,6 @@ def test_extract_memcached_multiple_connection_metadata(self): def test_resource_from_cache_with_prefix(self): # create the TracedCache instance for a Flask app - tracer = Tracer() Cache = get_traced_cache(tracer, service=self.SERVICE) app = Flask(__name__) config = { @@ -94,7 +90,6 @@ def test_resource_from_cache_with_prefix(self): def test_resource_from_cache_with_empty_prefix(self): # create the TracedCache instance for a Flask app - tracer = Tracer() Cache = get_traced_cache(tracer, service=self.SERVICE) app = Flask(__name__) config = { @@ -110,7 +105,6 @@ def test_resource_from_cache_with_empty_prefix(self): def test_resource_from_cache_without_prefix(self): # create the TracedCache instance for a Flask app - tracer = Tracer() Cache = get_traced_cache(tracer, service=self.SERVICE) app = Flask(__name__) traced_cache = Cache(app, config={"CACHE_TYPE": "redis"}) diff --git a/tests/contrib/google_generativeai/conftest.py b/tests/contrib/google_generativeai/conftest.py index 64b2eb83d1b..69d86a7f05d 100644 --- a/tests/contrib/google_generativeai/conftest.py +++ b/tests/contrib/google_generativeai/conftest.py @@ -35,7 +35,7 @@ def mock_tracer(ddtrace_global_config, genai): try: pin = Pin.get_from(genai) mock_tracer = DummyTracer(writer=DummyWriter(trace_flush_enabled=False)) - pin.override(genai, tracer=mock_tracer) + pin._override(genai, tracer=mock_tracer) pin.tracer._configure() if ddtrace_global_config.get("_llmobs_enabled", False): # Have to disable and re-enable LLMObs to use to mock tracer. 
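# ---------------------------------------------------------------------------
# The recurring edit across these test files is the same Pin API migration:
# the tracer no longer travels through the public Pin constructor or the
# public Pin.override()/Pin.clone() helpers, but through their underscored
# internal counterparts. A minimal sketch of the pattern, assuming a
# DummyTracer-style test double; `target` and `attach_test_tracer` are
# illustrative placeholders, not names from this patch:
#
#     from ddtrace.trace import Pin
#
#     def attach_test_tracer(target, tracer):
#         # Build the Pin without a tracer kwarg, then set the internal
#         # tracer attribute directly before attaching it.
#         pin = Pin(service="test-service", tags={"env": "test"})
#         pin._tracer = tracer
#         pin.onto(target)
#
#     # Objects that already carry a Pin are updated in place or copied:
#     #     Pin._override(target, tracer=tracer)
#     #     Pin.get_from(target)._clone(tracer=tracer).onto(target)
# ---------------------------------------------------------------------------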
diff --git a/tests/contrib/grpc/common.py b/tests/contrib/grpc/common.py index e67e4f32a92..bcc444a3fe2 100644 --- a/tests/contrib/grpc/common.py +++ b/tests/contrib/grpc/common.py @@ -20,8 +20,8 @@ class GrpcBaseTestCase(TracerTestCase): def setUp(self): super(GrpcBaseTestCase, self).setUp() patch() - Pin.override(constants.GRPC_PIN_MODULE_SERVER, tracer=self.tracer) - Pin.override(constants.GRPC_PIN_MODULE_CLIENT, tracer=self.tracer) + Pin._override(constants.GRPC_PIN_MODULE_SERVER, tracer=self.tracer) + Pin._override(constants.GRPC_PIN_MODULE_CLIENT, tracer=self.tracer) self._start_server() def tearDown(self): diff --git a/tests/contrib/grpc/test_grpc.py b/tests/contrib/grpc/test_grpc.py index d0559f2dff5..6164682c449 100644 --- a/tests/contrib/grpc/test_grpc.py +++ b/tests/contrib/grpc/test_grpc.py @@ -227,9 +227,9 @@ def test_pin_not_activated(self): def test_pin_tags_are_put_in_span(self): # DEV: stop and restart server to catch overridden pin self._stop_server() - Pin.override(constants.GRPC_PIN_MODULE_SERVER, service="server1") - Pin.override(constants.GRPC_PIN_MODULE_SERVER, tags={"tag1": "server"}) - Pin.override(constants.GRPC_PIN_MODULE_CLIENT, tags={"tag2": "client"}) + Pin._override(constants.GRPC_PIN_MODULE_SERVER, service="server1") + Pin._override(constants.GRPC_PIN_MODULE_SERVER, tags={"tag1": "server"}) + Pin._override(constants.GRPC_PIN_MODULE_CLIENT, tags={"tag2": "client"}) self._start_server() with grpc.insecure_channel("127.0.0.1:%d" % (_GRPC_PORT)) as channel: stub = HelloStub(channel) @@ -241,10 +241,10 @@ def test_pin_tags_are_put_in_span(self): assert spans[0].get_tag("tag2") == "client" def test_pin_can_be_defined_per_channel(self): - Pin.override(constants.GRPC_PIN_MODULE_CLIENT, service="grpc1") + Pin._override(constants.GRPC_PIN_MODULE_CLIENT, service="grpc1") channel1 = grpc.insecure_channel("127.0.0.1:%d" % (_GRPC_PORT)) - Pin.override(constants.GRPC_PIN_MODULE_CLIENT, service="grpc2") + Pin._override(constants.GRPC_PIN_MODULE_CLIENT, service="grpc2") channel2 = grpc.insecure_channel("127.0.0.1:%d" % (_GRPC_PORT)) stub1 = HelloStub(channel1) diff --git a/tests/contrib/grpc_aio/test_grpc_aio.py b/tests/contrib/grpc_aio/test_grpc_aio.py index 0606bcc3db2..e256a37a160 100644 --- a/tests/contrib/grpc_aio/test_grpc_aio.py +++ b/tests/contrib/grpc_aio/test_grpc_aio.py @@ -179,8 +179,8 @@ def patch_grpc_aio(): @pytest.fixture def tracer(): tracer = DummyTracer() - Pin.override(GRPC_AIO_PIN_MODULE_CLIENT, tracer=tracer) - Pin.override(GRPC_AIO_PIN_MODULE_SERVER, tracer=tracer) + Pin._override(GRPC_AIO_PIN_MODULE_CLIENT, tracer=tracer) + Pin._override(GRPC_AIO_PIN_MODULE_SERVER, tracer=tracer) yield tracer tracer.pop() @@ -354,13 +354,13 @@ async def test_pin_not_activated(server_info, tracer): [_CoroHelloServicer(), _SyncHelloServicer()], ) async def test_pin_tags_put_in_span(servicer, tracer): - Pin.override(GRPC_AIO_PIN_MODULE_SERVER, service="server1") - Pin.override(GRPC_AIO_PIN_MODULE_SERVER, tags={"tag1": "server"}) + Pin._override(GRPC_AIO_PIN_MODULE_SERVER, service="server1") + Pin._override(GRPC_AIO_PIN_MODULE_SERVER, tags={"tag1": "server"}) target = f"localhost:{_GRPC_PORT}" _server = _create_server(servicer, target) await _server.start() - Pin.override(GRPC_AIO_PIN_MODULE_CLIENT, tags={"tag2": "client"}) + Pin._override(GRPC_AIO_PIN_MODULE_CLIENT, tags={"tag2": "client"}) async with aio.insecure_channel(target) as channel: stub = HelloStub(channel) await stub.SayHello(HelloRequest(name="test")) @@ -383,10 +383,10 @@ async def 
test_pin_tags_put_in_span(servicer, tracer): @pytest.mark.parametrize("server_info", [_CoroHelloServicer(), _SyncHelloServicer()], indirect=True) async def test_pin_can_be_defined_per_channel(server_info, tracer): - Pin.override(GRPC_AIO_PIN_MODULE_CLIENT, service="grpc1") + Pin._override(GRPC_AIO_PIN_MODULE_CLIENT, service="grpc1") channel1 = aio.insecure_channel(server_info.target) - Pin.override(GRPC_AIO_PIN_MODULE_CLIENT, service="grpc2") + Pin._override(GRPC_AIO_PIN_MODULE_CLIENT, service="grpc2") channel2 = aio.insecure_channel(server_info.target) stub1 = HelloStub(channel1) diff --git a/tests/contrib/httplib/test_httplib.py b/tests/contrib/httplib/test_httplib.py index 24a5fe3f051..a57aff294a1 100644 --- a/tests/contrib/httplib/test_httplib.py +++ b/tests/contrib/httplib/test_httplib.py @@ -41,7 +41,7 @@ def setUp(self): super(HTTPLibBaseMixin, self).setUp() patch() - Pin.override(httplib, tracer=self.tracer) + Pin._override(httplib, tracer=self.tracer) def tearDown(self): unpatch() @@ -59,12 +59,12 @@ def to_str(self, value): def get_http_connection(self, *args, **kwargs): conn = httplib.HTTPConnection(*args, **kwargs) - Pin.override(conn, tracer=self.tracer) + Pin._override(conn, tracer=self.tracer) return conn def get_https_connection(self, *args, **kwargs): conn = httplib.HTTPSConnection(*args, **kwargs) - Pin.override(conn, tracer=self.tracer) + Pin._override(conn, tracer=self.tracer) return conn def test_patch(self): diff --git a/tests/contrib/httplib/test_httplib_distributed.py b/tests/contrib/httplib/test_httplib_distributed.py index 706921388bc..922f219f5ee 100644 --- a/tests/contrib/httplib/test_httplib_distributed.py +++ b/tests/contrib/httplib/test_httplib_distributed.py @@ -38,7 +38,7 @@ def headers_not_here(self, tracer): def get_http_connection(self, *args, **kwargs): conn = httplib.HTTPConnection(*args, **kwargs) - Pin.override(conn, tracer=self.tracer) + Pin._override(conn, tracer=self.tracer) return conn def request(self, conn=None): diff --git a/tests/contrib/httpx/test_httpx.py b/tests/contrib/httpx/test_httpx.py index 33ecadb825f..457c077f645 100644 --- a/tests/contrib/httpx/test_httpx.py +++ b/tests/contrib/httpx/test_httpx.py @@ -59,7 +59,7 @@ def test_httpx_service_name(tracer, test_spans): We set the span service name as a text type and not binary """ client = httpx.Client() - Pin.override(client, tracer=tracer) + Pin._override(client, tracer=tracer) with override_config("httpx", {"split_by_domain": True}): resp = client.get(get_url("/status/200")) @@ -124,15 +124,15 @@ def assert_spans(test_spans, service): # override the tracer on the default sync client # DEV: `httpx.get` will call `with Client() as client: client.get()` - Pin.override(httpx.Client, tracer=tracer) + Pin._override(httpx.Client, tracer=tracer) # sync client client = httpx.Client() - Pin.override(client, service="sync-client", tracer=tracer) + Pin._override(client, service="sync-client", tracer=tracer) # async client async_client = httpx.AsyncClient() - Pin.override(async_client, service="async-client", tracer=tracer) + Pin._override(async_client, service="async-client", tracer=tracer) resp = httpx.get(url, headers=DEFAULT_HEADERS) assert resp.status_code == 200 diff --git a/tests/contrib/httpx/test_httpx_pre_0_11.py b/tests/contrib/httpx/test_httpx_pre_0_11.py index 315c53cb29c..0f37df47ea3 100644 --- a/tests/contrib/httpx/test_httpx_pre_0_11.py +++ b/tests/contrib/httpx/test_httpx_pre_0_11.py @@ -57,7 +57,7 @@ async def test_httpx_service_name(tracer, test_spans): We set the span service 
name as a text type and not binary """ client = httpx.Client() - Pin.override(client, tracer=tracer) + Pin._override(client, tracer=tracer) with override_config("httpx", {"split_by_domain": True}): resp = await client.get(get_url("/status/200")) @@ -112,11 +112,11 @@ def assert_spans(test_spans, service): # override the tracer on the default sync client # DEV: `httpx.get` will call `with Client() as client: client.get()` - Pin.override(httpx.Client, tracer=tracer) + Pin._override(httpx.Client, tracer=tracer) # sync client client = httpx.Client() - Pin.override(client, service="sync-client", tracer=tracer) + Pin._override(client, service="sync-client", tracer=tracer) resp = await httpx.get(url, headers=DEFAULT_HEADERS) assert resp.status_code == 200 diff --git a/tests/contrib/jinja2/test_jinja2.py b/tests/contrib/jinja2/test_jinja2.py index ce91c7e5ed0..eac8aeffd85 100644 --- a/tests/contrib/jinja2/test_jinja2.py +++ b/tests/contrib/jinja2/test_jinja2.py @@ -26,7 +26,7 @@ def setUp(self): jinja2.environment._spontaneous_environments.clear() except AttributeError: jinja2.utils.clear_caches() - Pin.override(jinja2.environment.Environment, tracer=self.tracer) + Pin._override(jinja2.environment.Environment, tracer=self.tracer) def tearDown(self): super(Jinja2Test, self).tearDown() diff --git a/tests/contrib/kafka/test_kafka.py b/tests/contrib/kafka/test_kafka.py index c67bdd08b01..f99489595ac 100644 --- a/tests/contrib/kafka/test_kafka.py +++ b/tests/contrib/kafka/test_kafka.py @@ -22,7 +22,7 @@ from ddtrace.internal.utils.retry import fibonacci_backoff_with_jitter from ddtrace.trace import Pin from ddtrace.trace import TraceFilter -from ddtrace.trace import Tracer +from ddtrace.trace import tracer as ddtracer from tests.contrib.config import KAFKA_CONFIG from tests.datastreams.test_public_api import MockedTracer from tests.utils import DummyTracer @@ -106,16 +106,16 @@ def should_filter_empty_polls(): @pytest.fixture def tracer(should_filter_empty_polls): patch() - t = Tracer() if should_filter_empty_polls: - t._configure(trace_processors=[KafkaConsumerPollFilter()]) + ddtracer.configure(trace_processors=[KafkaConsumerPollFilter()]) # disable backoff because it makes these tests less reliable - t._writer._send_payload_with_backoff = t._writer._send_payload + previous_backoff = ddtracer._writer._send_payload_with_backoff + ddtracer._writer._send_payload_with_backoff = ddtracer._writer._send_payload try: - yield t + yield ddtracer finally: - t.flush() - t.shutdown() + ddtracer.flush() + ddtracer._writer._send_payload_with_backoff = previous_backoff unpatch() @@ -124,12 +124,14 @@ def dsm_processor(tracer): processor = tracer.data_streams_processor with mock.patch("ddtrace.internal.datastreams.data_streams_processor", return_value=processor): yield processor + # flush buckets for the next test run + processor.periodic() @pytest.fixture def producer(tracer): _producer = confluent_kafka.Producer({"bootstrap.servers": BOOTSTRAP_SERVERS}) - Pin.override(_producer, tracer=tracer) + Pin._override(_producer, tracer=tracer) return _producer @@ -146,7 +148,7 @@ def consumer(tracer, kafka_topic): tp = TopicPartition(kafka_topic, 0) tp.offset = 0 # we want to read the first message _consumer.commit(offsets=[tp]) - Pin.override(_consumer, tracer=tracer) + Pin._override(_consumer, tracer=tracer) _consumer.subscribe([kafka_topic]) yield _consumer _consumer.close() @@ -165,7 +167,7 @@ def non_auto_commit_consumer(tracer, kafka_topic): tp = TopicPartition(kafka_topic, 0) tp.offset = 0 # we want to read the first 
message _consumer.commit(offsets=[tp]) - Pin.override(_consumer, tracer=tracer) + Pin._override(_consumer, tracer=tracer) _consumer.subscribe([kafka_topic]) yield _consumer _consumer.close() @@ -176,7 +178,7 @@ def serializing_producer(tracer): _producer = confluent_kafka.SerializingProducer( {"bootstrap.servers": BOOTSTRAP_SERVERS, "value.serializer": lambda x, y: x} ) - Pin.override(_producer, tracer=tracer) + Pin._override(_producer, tracer=tracer) return _producer @@ -190,7 +192,7 @@ def deserializing_consumer(tracer, kafka_topic): "value.deserializer": lambda x, y: x, } ) - Pin.override(_consumer, tracer=tracer) + Pin._override(_consumer, tracer=tracer) _consumer.subscribe([kafka_topic]) yield _consumer _consumer.close() @@ -248,7 +250,7 @@ def test_producer_bootstrap_servers(config, expect_servers, tracer): def test_produce_single_server(dummy_tracer, producer, kafka_topic): - Pin.override(producer, tracer=dummy_tracer) + Pin._override(producer, tracer=dummy_tracer) producer.produce(kafka_topic, PAYLOAD, key=KEY) producer.flush() @@ -259,18 +261,18 @@ def test_produce_single_server(dummy_tracer, producer, kafka_topic): def test_produce_none_key(dummy_tracer, producer, kafka_topic): - Pin.override(producer, tracer=dummy_tracer) + Pin._override(producer, tracer=dummy_tracer) producer.produce(kafka_topic, PAYLOAD, key=None) producer.flush() traces = dummy_tracer.pop_traces() assert 1 == len(traces), "key=None does not cause produce() call to raise an exception" - Pin.override(producer, tracer=None) + Pin._override(producer, tracer=None) def test_produce_multiple_servers(dummy_tracer, kafka_topic): producer = confluent_kafka.Producer({"bootstrap.servers": ",".join([BOOTSTRAP_SERVERS] * 3)}) - Pin.override(producer, tracer=dummy_tracer) + Pin._override(producer, tracer=dummy_tracer) producer.produce(kafka_topic, PAYLOAD, key=KEY) producer.flush() @@ -278,7 +280,7 @@ def test_produce_multiple_servers(dummy_tracer, kafka_topic): assert 1 == len(traces) produce_span = traces[0][0] assert produce_span.get_tag("messaging.kafka.bootstrap.servers") == ",".join([BOOTSTRAP_SERVERS] * 3) - Pin.override(producer, tracer=None) + Pin._override(producer, tracer=None) @pytest.mark.parametrize("tombstone", [False, True]) @@ -325,6 +327,7 @@ def test_commit_with_consume_with_multiple_messages(producer, consumer, kafka_to @pytest.mark.snapshot(ignores=SNAPSHOT_IGNORES) @pytest.mark.parametrize("should_filter_empty_polls", [False]) +@pytest.mark.skip(reason="FIXME: This test requires the initialization of a new tracer. This is not supported") def test_commit_with_consume_with_error(producer, consumer, kafka_topic): producer.produce(kafka_topic, PAYLOAD, key=KEY) producer.flush() @@ -518,8 +521,8 @@ def _generate_in_subprocess(random_topic): "auto.offset.reset": "earliest", } ) - ddtrace.trace.Pin.override(producer, tracer=ddtrace.tracer) - ddtrace.trace.Pin.override(consumer, tracer=ddtrace.tracer) + ddtrace.trace.Pin._override(producer, tracer=ddtrace.tracer) + ddtrace.trace.Pin._override(consumer, tracer=ddtrace.tracer) # We run all of these commands with retry attempts because the kafka-confluent API # sys.exits on connection failures, which causes the test to fail. 
We want to retry @@ -750,8 +753,8 @@ def test_data_streams_default_context_propagation(consumer, producer, kafka_topi # It is not currently expected for kafka produce and consume spans to connect in a trace def test_tracing_context_is_not_propagated_by_default(dummy_tracer, consumer, producer, kafka_topic): - Pin.override(producer, tracer=dummy_tracer) - Pin.override(consumer, tracer=dummy_tracer) + Pin._override(producer, tracer=dummy_tracer) + Pin._override(consumer, tracer=dummy_tracer) test_string = "context test no propagation" test_key = "context test key no propagation" @@ -788,8 +791,8 @@ def test_tracing_context_is_not_propagated_by_default(dummy_tracer, consumer, pr # None of these spans are part of the same trace assert produce_span.trace_id != consume_span.trace_id - Pin.override(consumer, tracer=None) - Pin.override(producer, tracer=None) + Pin._override(consumer, tracer=None) + Pin._override(producer, tracer=None) # Propagation should work when enabled @@ -813,8 +816,8 @@ def test(consumer, producer, kafka_topic): patch() dummy_tracer = DummyTracer() dummy_tracer.flush() - Pin.override(producer, tracer=dummy_tracer) - Pin.override(consumer, tracer=dummy_tracer) + Pin._override(producer, tracer=dummy_tracer) + Pin._override(consumer, tracer=dummy_tracer) # use a random int in this string to prevent reading a message produced by a previous test run test_string = "context propagation enabled test " + str(random.randint(0, 1000)) @@ -851,8 +854,8 @@ def test(consumer, producer, kafka_topic): # Two of these spans are part of the same trace assert produce_span.trace_id == consume_span.trace_id - Pin.override(consumer, tracer=None) - Pin.override(producer, tracer=None) + Pin._override(consumer, tracer=None) + Pin._override(producer, tracer=None) if __name__ == "__main__": sys.exit(pytest.main(["-x", __file__])) @@ -897,7 +900,7 @@ def test_consumer_uses_active_context_when_no_valid_distributed_context_exists( producer.produce(kafka_topic, PAYLOAD, key=test_key) producer.flush() - Pin.override(consumer, tracer=dummy_tracer) + Pin._override(consumer, tracer=dummy_tracer) with dummy_tracer.trace("kafka consumer parent span") as parent_span: with override_config("kafka", dict(distributed_tracing_enabled=True)): @@ -912,12 +915,12 @@ def test_consumer_uses_active_context_when_no_valid_distributed_context_exists( assert consume_span.name == "kafka.consume" assert consume_span.parent_id == parent_span.span_id - Pin.override(consumer, tracer=None) + Pin._override(consumer, tracer=None) def test_span_has_dsm_payload_hash(dummy_tracer, consumer, producer, kafka_topic): - Pin.override(producer, tracer=dummy_tracer) - Pin.override(consumer, tracer=dummy_tracer) + Pin._override(producer, tracer=dummy_tracer) + Pin._override(consumer, tracer=dummy_tracer) test_string = "payload hash test" PAYLOAD = bytes(test_string, encoding="utf-8") @@ -943,8 +946,8 @@ def test_span_has_dsm_payload_hash(dummy_tracer, consumer, producer, kafka_topic assert consume_span.name == "kafka.consume" assert consume_span.get_tag("pathway.hash") is not None - Pin.override(consumer, tracer=None) - Pin.override(producer, tracer=None) + Pin._override(consumer, tracer=None) + Pin._override(producer, tracer=None) def test_tracing_with_serialization_works(dummy_tracer, kafka_topic): @@ -978,8 +981,8 @@ def json_deserializer(as_bytes, ctx): _consumer.commit(offsets=[tp]) _consumer.subscribe([kafka_topic]) - Pin.override(_producer, tracer=dummy_tracer) - Pin.override(_consumer, tracer=dummy_tracer) + Pin._override(_producer, 
tracer=dummy_tracer) + Pin._override(_consumer, tracer=dummy_tracer) test_string = "serializing_test" PAYLOAD = {"val": test_string} @@ -1004,12 +1007,12 @@ def json_deserializer(as_bytes, ctx): # a string assert consume_span.get_tag("kafka.message_key") is None - Pin.override(_consumer, tracer=None) - Pin.override(_producer, tracer=None) + Pin._override(_consumer, tracer=None) + Pin._override(_producer, tracer=None) def test_traces_empty_poll_by_default(dummy_tracer, consumer, kafka_topic): - Pin.override(consumer, tracer=dummy_tracer) + Pin._override(consumer, tracer=dummy_tracer) message = "hello" while message is not None: @@ -1029,7 +1032,7 @@ def test_traces_empty_poll_by_default(dummy_tracer, consumer, kafka_topic): assert empty_poll_span_created is True - Pin.override(consumer, tracer=None) + Pin._override(consumer, tracer=None) # Poll should not be traced when disabled @@ -1054,8 +1057,8 @@ def test(consumer, producer, kafka_topic): patch() dummy_tracer = DummyTracer() dummy_tracer.flush() - Pin.override(producer, tracer=dummy_tracer) - Pin.override(consumer, tracer=dummy_tracer) + Pin._override(producer, tracer=dummy_tracer) + Pin._override(consumer, tracer=dummy_tracer) assert config.kafka.trace_empty_poll_enabled is False @@ -1102,8 +1105,8 @@ def test(consumer, producer, kafka_topic): assert non_empty_poll_span_created is True - Pin.override(consumer, tracer=None) - Pin.override(producer, tracer=None) + Pin._override(consumer, tracer=None) + Pin._override(producer, tracer=None) if __name__ == "__main__": sys.exit(pytest.main(["-x", __file__])) diff --git a/tests/contrib/kombu/test.py b/tests/contrib/kombu/test.py index b56ecdf0d0f..949cffd5bd5 100644 --- a/tests/contrib/kombu/test.py +++ b/tests/contrib/kombu/test.py @@ -28,7 +28,7 @@ def setUp(self): conn = kombu.Connection("amqp://guest:guest@127.0.0.1:{p}//".format(p=self.TEST_PORT)) conn.connect() producer = conn.Producer() - Pin.override(producer, service=self.TEST_SERVICE, tracer=self.tracer) + Pin._override(producer, service=self.TEST_SERVICE, tracer=self.tracer) self.conn = conn self.producer = producer @@ -63,7 +63,7 @@ def process_message(body, message): ) with kombu.Consumer(self.conn, [task_queue], accept=["json"], callbacks=[process_message]) as consumer: - Pin.override(consumer, service="kombu-patch", tracer=self.tracer) + Pin._override(consumer, service="kombu-patch", tracer=self.tracer) self.conn.drain_events(timeout=2) self.assertEqual(results[0], to_publish) @@ -130,7 +130,7 @@ def setUp(self): conn = kombu.Connection("amqp://guest:guest@127.0.0.1:{p}//".format(p=RABBITMQ_CONFIG["port"])) conn.connect() producer = conn.Producer() - Pin.override(producer, tracer=self.tracer) + Pin._override(producer, tracer=self.tracer) self.conn = conn self.producer = producer @@ -151,7 +151,7 @@ def setUp(self): conn = kombu.Connection("amqp://guest:guest@127.0.0.1:{p}//".format(p=self.TEST_PORT)) conn.connect() producer = conn.Producer() - Pin.override(producer, tracer=self.tracer) + Pin._override(producer, tracer=self.tracer) self.conn = conn self.producer = producer @@ -180,7 +180,7 @@ def process_message(body, message): ) with kombu.Consumer(self.conn, [task_queue], accept=["json"], callbacks=[process_message]) as consumer: - Pin.override(consumer, tracer=self.tracer) + Pin._override(consumer, tracer=self.tracer) self.conn.drain_events(timeout=2) return self.get_spans() @@ -256,7 +256,7 @@ def process_message(body, message): ) with kombu.Consumer(self.conn, [task_queue], accept=["json"], callbacks=[process_message]) as 
consumer: - Pin.override(consumer, tracer=self.tracer) + Pin._override(consumer, tracer=self.tracer) self.conn.drain_events(timeout=2) spans = self.get_spans() @@ -275,7 +275,7 @@ def setUp(self): self.conn = kombu.Connection("amqp://guest:guest@127.0.0.1:{p}//".format(p=RABBITMQ_CONFIG["port"])) self.conn.connect() self.producer = self.conn.Producer() - Pin.override(self.producer, tracer=self.tracer) + Pin._override(self.producer, tracer=self.tracer) self.patcher = mock.patch( "ddtrace.internal.datastreams.data_streams_processor", return_value=self.tracer.data_streams_processor @@ -313,7 +313,7 @@ def process_message(body, message): self.producer.publish(to_publish, routing_key=task_queue.routing_key, declare=[task_queue]) with kombu.Consumer(self.conn, [task_queue], accept=["json"], callbacks=[process_message]) as consumer: - Pin.override(consumer, service="kombu-patch", tracer=self.tracer) + Pin._override(consumer, service="kombu-patch", tracer=self.tracer) self.conn.drain_events(timeout=2) queue_name = consumer.channel.queue_declare("tasks", passive=True).queue diff --git a/tests/contrib/langchain/conftest.py b/tests/contrib/langchain/conftest.py index d5c6e4d0b1a..a7b59c11f95 100644 --- a/tests/contrib/langchain/conftest.py +++ b/tests/contrib/langchain/conftest.py @@ -30,7 +30,7 @@ def snapshot_tracer(langchain, mock_logs, mock_metrics): def mock_tracer(langchain): pin = Pin.get_from(langchain) mock_tracer = DummyTracer(writer=DummyWriter(trace_flush_enabled=False)) - pin.override(langchain, tracer=mock_tracer) + pin._override(langchain, tracer=mock_tracer) pin.tracer._configure() yield mock_tracer diff --git a/tests/contrib/langchain/test_langchain.py b/tests/contrib/langchain/test_langchain.py index d44d2ade384..a36dd5321d4 100644 --- a/tests/contrib/langchain/test_langchain.py +++ b/tests/contrib/langchain/test_langchain.py @@ -21,7 +21,6 @@ "meta.langchain.request.openai.parameters.logprobs", "meta.langchain.request.openai.parameters.seed", # langchain-openai llm call now includes seed as param "meta.langchain.request.openai.parameters.logprobs", # langchain-openai llm call now includes seed as param - "metrics.langchain.tokens.total_cost", # total_cost depends on if tiktoken is installed # these are sometimes named differently "meta.langchain.request.openai.parameters.max_tokens", "meta.langchain.request.openai.parameters.max_completion_tokens", diff --git a/tests/contrib/langgraph/conftest.py b/tests/contrib/langgraph/conftest.py index 13e1a5f9242..7c6671167bb 100644 --- a/tests/contrib/langgraph/conftest.py +++ b/tests/contrib/langgraph/conftest.py @@ -32,7 +32,7 @@ def langgraph(monkeypatch, mock_tracer): import langgraph pin = Pin.get_from(langgraph) - pin.override(langgraph, tracer=mock_tracer) + pin._override(langgraph, tracer=mock_tracer) yield langgraph unpatch() diff --git a/tests/contrib/mako/test_mako.py b/tests/contrib/mako/test_mako.py index 7e690b04a43..7b839177e0e 100644 --- a/tests/contrib/mako/test_mako.py +++ b/tests/contrib/mako/test_mako.py @@ -23,7 +23,7 @@ class MakoTest(TracerTestCase): def setUp(self): super(MakoTest, self).setUp() patch() - Pin.override(Template, tracer=self.tracer) + Pin._override(Template, tracer=self.tracer) def tearDown(self): super(MakoTest, self).tearDown() diff --git a/tests/contrib/mariadb/test_mariadb.py b/tests/contrib/mariadb/test_mariadb.py index 7ea8cd27feb..2f51f2e9b0a 100644 --- a/tests/contrib/mariadb/test_mariadb.py +++ b/tests/contrib/mariadb/test_mariadb.py @@ -39,7 +39,7 @@ def tracer(): def get_connection(tracer): 
connection = mariadb.connect(**MARIADB_CONFIG) - Pin.override(connection, tracer=tracer) + Pin._override(connection, tracer=tracer) return connection diff --git a/tests/contrib/molten/test_molten.py b/tests/contrib/molten/test_molten.py index cc73ceef861..74d6f1bf628 100644 --- a/tests/contrib/molten/test_molten.py +++ b/tests/contrib/molten/test_molten.py @@ -49,7 +49,7 @@ class TestMolten(TracerTestCase): def setUp(self): super(TestMolten, self).setUp() patch() - Pin.override(molten, tracer=self.tracer) + Pin._override(molten, tracer=self.tracer) self.app = molten_app() self.client = TestClient(self.app) @@ -89,7 +89,7 @@ def test_route_success(self): self.assertEqual(len(spans), 16) # test override of service name - Pin.override(molten, service=self.TEST_SERVICE) + Pin._override(molten, service=self.TEST_SERVICE) response = self.make_request() spans = self.pop_spans() self.assertEqual(spans[0].service, "molten-patch") @@ -273,7 +273,7 @@ def test_unpatch_patch(self): patch() # Need to override Pin here as we do in setUp - Pin.override(molten, tracer=self.tracer) + Pin._override(molten, tracer=self.tracer) self.assertTrue(Pin.get_from(molten) is not None) self.make_request() spans = self.pop_spans() diff --git a/tests/contrib/molten/test_molten_di.py b/tests/contrib/molten/test_molten_di.py index d360698f4cb..848517aca34 100644 --- a/tests/contrib/molten/test_molten_di.py +++ b/tests/contrib/molten/test_molten_di.py @@ -85,7 +85,7 @@ class TestMoltenDI(TracerTestCase): def setUp(self): super(TestMoltenDI, self).setUp() patch() - Pin.override(molten, tracer=self.tracer, service=self.TEST_SERVICE) + Pin._override(molten, tracer=self.tracer, service=self.TEST_SERVICE) def tearDown(self): unpatch() diff --git a/tests/contrib/mongoengine/test.py b/tests/contrib/mongoengine/test.py index b3961e3808c..51b0ff70c47 100644 --- a/tests/contrib/mongoengine/test.py +++ b/tests/contrib/mongoengine/test.py @@ -186,7 +186,7 @@ def tearDown(self): def get_tracer_and_connect(self): tracer = DummyTracer() client = mongoengine.connect(port=MONGO_CONFIG["port"]) - Pin.get_from(client).clone(tracer=tracer).onto(client) + Pin.get_from(client)._clone(tracer=tracer).onto(client) return tracer @@ -315,7 +315,9 @@ class TestMongoEnginePatchConnect(TestMongoEnginePatchConnectDefault): def get_tracer_and_connect(self): tracer = TestMongoEnginePatchConnectDefault.get_tracer_and_connect(self) - Pin(service=self.TEST_SERVICE, tracer=tracer).onto(mongoengine.connect) + pin = Pin(service=self.TEST_SERVICE) + pin._tracer = tracer + pin.onto(mongoengine.connect) mongoengine.connect(port=MONGO_CONFIG["port"]) return tracer @@ -337,7 +339,7 @@ def tearDown(self): def get_tracer_and_connect(self): tracer = DummyTracer() client = mongoengine.connect(port=MONGO_CONFIG["port"]) - Pin.get_from(client).clone(tracer=tracer).onto(client) + Pin.get_from(client)._clone(tracer=tracer).onto(client) return tracer @@ -352,7 +354,9 @@ def get_tracer_and_connect(self): # Set a connect-level service, to check that we properly override it Pin(service="not-%s" % self.TEST_SERVICE).onto(mongoengine.connect) client = mongoengine.connect(port=MONGO_CONFIG["port"]) - Pin(service=self.TEST_SERVICE, tracer=tracer).onto(client) + pin = Pin(service=self.TEST_SERVICE) + pin._tracer = tracer + pin.onto(client) return tracer @@ -364,7 +368,7 @@ def test_patch_unpatch(self): patch() client = mongoengine.connect(port=MONGO_CONFIG["port"]) - Pin.get_from(client).clone(tracer=tracer).onto(client) + Pin.get_from(client)._clone(tracer=tracer).onto(client) 
Artist.drop_collection() spans = tracer.pop() @@ -389,7 +393,7 @@ def test_patch_unpatch(self): # Test patch again patch() client = mongoengine.connect(port=MONGO_CONFIG["port"]) - Pin.get_from(client).clone(tracer=tracer).onto(client) + Pin.get_from(client)._clone(tracer=tracer).onto(client) Artist.drop_collection() spans = tracer.pop() diff --git a/tests/contrib/mysql/test_mysql.py b/tests/contrib/mysql/test_mysql.py index 08626890fac..ec0da0aff35 100644 --- a/tests/contrib/mysql/test_mysql.py +++ b/tests/contrib/mysql/test_mysql.py @@ -418,7 +418,7 @@ def _get_conn_tracer(self): # assert pin.service == 'mysql' # Customize the service # we have to apply it on the existing one since new one won't inherit `app` - pin.clone(tracer=self.tracer).onto(self.conn) + pin._clone(tracer=self.tracer).onto(self.conn) return self.conn, self.tracer @@ -434,7 +434,7 @@ def test_patch_unpatch(self): conn = mysql.connector.connect(**MYSQL_CONFIG) pin = Pin.get_from(conn) assert pin - pin.clone(service="pin-svc", tracer=self.tracer).onto(conn) + pin._clone(service="pin-svc", tracer=self.tracer).onto(conn) assert conn.is_connected() cursor = conn.cursor() diff --git a/tests/contrib/mysqldb/test_mysqldb.py b/tests/contrib/mysqldb/test_mysqldb.py index 5d2c98a752c..4117eb69a10 100644 --- a/tests/contrib/mysqldb/test_mysqldb.py +++ b/tests/contrib/mysqldb/test_mysqldb.py @@ -534,7 +534,7 @@ def _add_dummy_tracer_to_pinned(self, obj): assert pin # Customize the service # we have to apply it on the existing one since new one won't inherit `app` - pin.clone(tracer=self.tracer).onto(obj) + pin._clone(tracer=self.tracer).onto(obj) def _get_conn_tracer(self): if not self.conn: @@ -559,7 +559,7 @@ def _get_conn_tracer_with_positional_args(self): assert pin # Customize the service # we have to apply it on the existing one since new one won't inherit `app` - pin.clone(tracer=self.tracer).onto(self.conn) + pin._clone(tracer=self.tracer).onto(self.conn) return self.conn, self.tracer @@ -575,7 +575,7 @@ def test_patch_unpatch(self): conn = self._connect_with_kwargs() pin = Pin.get_from(conn) assert pin - pin.clone(tracer=self.tracer).onto(conn) + pin._clone(tracer=self.tracer).onto(conn) conn.ping() cursor = conn.cursor() @@ -617,7 +617,7 @@ def test_patch_unpatch(self): def test_user_pin_override(self): conn, tracer = self._get_conn_tracer() pin = Pin.get_from(conn) - pin.clone(service="pin-svc", tracer=self.tracer).onto(conn) + pin._clone(service="pin-svc", tracer=self.tracer).onto(conn) cursor = conn.cursor() cursor.execute("SELECT 1") rows = cursor.fetchall() diff --git a/tests/contrib/openai/conftest.py b/tests/contrib/openai/conftest.py index 615a4e773b1..9cd8c998c79 100644 --- a/tests/contrib/openai/conftest.py +++ b/tests/contrib/openai/conftest.py @@ -92,34 +92,6 @@ def process_trace(self, trace): return trace -@pytest.fixture(scope="session") -def mock_metrics(): - patcher = mock.patch("ddtrace.llmobs._integrations.base.get_dogstatsd_client") - try: - DogStatsdMock = patcher.start() - m = mock.MagicMock() - DogStatsdMock.return_value = m - yield m - finally: - patcher.stop() - - -@pytest.fixture(scope="session") -def mock_logs(): - """ - Note that this fixture must be ordered BEFORE mock_tracer as it needs to patch the log writer - before it is instantiated. 
- """ - patcher = mock.patch("ddtrace.llmobs._integrations.base.V2LogWriter") - try: - V2LogWriterMock = patcher.start() - m = mock.MagicMock() - V2LogWriterMock.return_value = m - yield m - finally: - patcher.stop() - - @pytest.fixture() def mock_llmobs_writer(): patcher = mock.patch("ddtrace.llmobs._llmobs.LLMObsSpanWriter") @@ -163,21 +135,18 @@ def patch_openai(ddtrace_global_config, ddtrace_config_openai, openai_api_key, o @pytest.fixture -def snapshot_tracer(openai, patch_openai, mock_logs, mock_metrics): +def snapshot_tracer(openai, patch_openai): pin = Pin.get_from(openai) pin.tracer._configure(trace_processors=[FilterOrg()]) yield pin.tracer - mock_logs.reset_mock() - mock_metrics.reset_mock() - @pytest.fixture -def mock_tracer(ddtrace_global_config, openai, patch_openai, mock_logs, mock_metrics): +def mock_tracer(ddtrace_global_config, openai, patch_openai): pin = Pin.get_from(openai) mock_tracer = DummyTracer(writer=DummyWriter(trace_flush_enabled=False)) - pin.override(openai, tracer=mock_tracer) + pin._override(openai, tracer=mock_tracer) pin.tracer._configure(trace_processors=[FilterOrg()]) if ddtrace_global_config.get("_llmobs_enabled", False): @@ -187,6 +156,4 @@ def mock_tracer(ddtrace_global_config, openai, patch_openai, mock_logs, mock_met yield mock_tracer - mock_logs.reset_mock() - mock_metrics.reset_mock() LLMObs.disable() diff --git a/tests/contrib/openai/test_openai_llmobs.py b/tests/contrib/openai/test_openai_llmobs.py index 70adff39ef5..91e454c1673 100644 --- a/tests/contrib/openai/test_openai_llmobs.py +++ b/tests/contrib/openai/test_openai_llmobs.py @@ -602,9 +602,7 @@ def test_embedding_string_base64(self, openai, ddtrace_global_config, mock_llmob [dict(_llmobs_enabled=True, _llmobs_ml_app="", _llmobs_agentless_enabled=True)], ) @pytest.mark.skipif(parse_version(openai_module.version.VERSION) < (1, 0), reason="These tests are for openai >= 1.0") -def test_agentless_enabled_does_not_submit_metrics( - openai, ddtrace_global_config, mock_llmobs_writer, mock_tracer, mock_metrics -): +def test_agentless_enabled_does_not_submit_metrics(openai, ddtrace_global_config, mock_llmobs_writer, mock_tracer): """Ensure openai metrics are not emitted when agentless mode is enabled.""" with get_openai_vcr(subdirectory_name="v1").use_cassette("completion.yaml"): model = "ada" @@ -619,7 +617,3 @@ def test_agentless_enabled_does_not_submit_metrics( user="ddtrace-test", ) assert mock_llmobs_writer.enqueue.call_count == 1 - mock_metrics.assert_not_called() - assert mock_metrics.increment.call_count == 0 - assert mock_metrics.distribution.call_count == 0 - assert mock_metrics.gauge.call_count == 0 diff --git a/tests/contrib/openai/test_openai_v1.py b/tests/contrib/openai/test_openai_v1.py index a081583b4d1..468f4b03606 100644 --- a/tests/contrib/openai/test_openai_v1.py +++ b/tests/contrib/openai/test_openai_v1.py @@ -25,18 +25,8 @@ def openai_vcr(): yield get_openai_vcr(subdirectory_name="v1") -@pytest.mark.parametrize("ddtrace_config_openai", [dict(metrics_enabled=True), dict(metrics_enabled=False)]) -def test_config(ddtrace_config_openai, mock_tracer, openai): - # Ensure that the module state is reloaded for each test run - assert not hasattr(openai, "_test") - openai._test = 1 - - # Ensure overriding the config works - assert ddtrace.config.openai.metrics_enabled is ddtrace_config_openai["metrics_enabled"] - - @pytest.mark.parametrize("api_key_in_env", [True, False]) -def test_model_list(api_key_in_env, request_api_key, openai, openai_vcr, mock_metrics, snapshot_tracer): +def 
test_model_list(api_key_in_env, request_api_key, openai, openai_vcr, snapshot_tracer): with snapshot_context( token="tests.contrib.openai.test_openai.test_model_list", ignores=["meta.http.useragent", "meta.openai.api_type", "meta.openai.api_base", "meta.openai.request.user"], @@ -47,7 +37,7 @@ def test_model_list(api_key_in_env, request_api_key, openai, openai_vcr, mock_me @pytest.mark.parametrize("api_key_in_env", [True, False]) -async def test_model_alist(api_key_in_env, request_api_key, openai, openai_vcr, mock_metrics, snapshot_tracer): +async def test_model_alist(api_key_in_env, request_api_key, openai, openai_vcr, snapshot_tracer): with snapshot_context( token="tests.contrib.openai.test_openai.test_model_list", ignores=["meta.http.useragent", "meta.openai.api_type", "meta.openai.api_base", "meta.openai.request.user"], @@ -58,7 +48,7 @@ async def test_model_alist(api_key_in_env, request_api_key, openai, openai_vcr, @pytest.mark.parametrize("api_key_in_env", [True, False]) -def test_model_retrieve(api_key_in_env, request_api_key, openai, openai_vcr, mock_metrics, snapshot_tracer): +def test_model_retrieve(api_key_in_env, request_api_key, openai, openai_vcr, snapshot_tracer): with snapshot_context( token="tests.contrib.openai.test_openai.test_model_retrieve", ignores=["meta.http.useragent", "meta.openai.api_type", "meta.openai.api_base", "meta.openai.request.user"], @@ -69,7 +59,7 @@ def test_model_retrieve(api_key_in_env, request_api_key, openai, openai_vcr, moc @pytest.mark.parametrize("api_key_in_env", [True, False]) -async def test_model_aretrieve(api_key_in_env, request_api_key, openai, openai_vcr, mock_metrics, snapshot_tracer): +async def test_model_aretrieve(api_key_in_env, request_api_key, openai, openai_vcr, snapshot_tracer): with snapshot_context( token="tests.contrib.openai.test_openai.test_model_retrieve", ignores=["meta.http.useragent", "meta.openai.api_type", "meta.openai.api_base", "meta.openai.request.user"], @@ -80,9 +70,7 @@ async def test_model_aretrieve(api_key_in_env, request_api_key, openai, openai_v @pytest.mark.parametrize("api_key_in_env", [True, False]) -def test_completion( - api_key_in_env, request_api_key, openai, openai_vcr, mock_metrics, mock_logs, mock_llmobs_writer, snapshot_tracer -): +def test_completion(api_key_in_env, request_api_key, openai, openai_vcr, mock_llmobs_writer, snapshot_tracer): with snapshot_context( token="tests.contrib.openai.test_openai.test_completion", ignores=["meta.http.useragent", "meta.openai.api_type", "meta.openai.api_base"], @@ -111,42 +99,12 @@ def test_completion( assert choice.logprobs == expected_choices[idx]["logprobs"] assert choice.text == expected_choices[idx]["text"] - expected_tags = [ - "version:", - "env:", - "service:tests.contrib.openai", - "openai.request.model:ada", - "model:ada", - "openai.request.endpoint:/v1/completions", - "openai.request.method:POST", - "openai.organization.id:", - "openai.organization.name:datadog-4", - "openai.user.api_key:sk-...key>", - "error:0", - ] - mock_metrics.assert_has_calls( - [ - mock.call.distribution("tokens.prompt", 2, tags=expected_tags + ["openai.estimated:false"]), - mock.call.distribution("tokens.completion", 12, tags=expected_tags + ["openai.estimated:false"]), - mock.call.distribution("tokens.total", 14, tags=expected_tags + ["openai.estimated:false"]), - mock.call.distribution("request.duration", mock.ANY, tags=expected_tags), - mock.call.gauge("ratelimit.remaining.requests", mock.ANY, tags=expected_tags), - mock.call.gauge("ratelimit.requests", mock.ANY, 
tags=expected_tags), - mock.call.gauge("ratelimit.remaining.tokens", mock.ANY, tags=expected_tags), - mock.call.gauge("ratelimit.tokens", mock.ANY, tags=expected_tags), - ], - any_order=True, - ) - mock_logs.start.assert_not_called() - mock_logs.enqueue.assert_not_called() mock_llmobs_writer.start.assert_not_called() mock_llmobs_writer.enqueue.assert_not_called() @pytest.mark.parametrize("api_key_in_env", [True, False]) -async def test_acompletion( - api_key_in_env, request_api_key, openai, openai_vcr, mock_metrics, mock_logs, mock_llmobs_writer, snapshot_tracer -): +async def test_acompletion(api_key_in_env, request_api_key, openai, openai_vcr, mock_llmobs_writer, snapshot_tracer): with snapshot_context( token="tests.contrib.openai.test_openai.test_acompletion", ignores=["meta.http.useragent", "meta.openai.api_type", "meta.openai.api_base"], @@ -181,88 +139,11 @@ async def test_acompletion( for key, value in expected_choices.items(): assert getattr(resp.choices[0], key, None) == value - expected_tags = [ - "version:", - "env:", - "service:tests.contrib.openai", - "openai.request.model:curie", - "model:curie", - "openai.request.endpoint:/v1/completions", - "openai.request.method:POST", - "openai.organization.id:", - "openai.organization.name:datadog-4", - "openai.user.api_key:sk-...key>", - "error:0", - ] - mock_metrics.assert_has_calls( - [ - mock.call.distribution("tokens.prompt", 10, tags=expected_tags + ["openai.estimated:false"]), - mock.call.distribution("tokens.completion", 150, tags=expected_tags + ["openai.estimated:false"]), - mock.call.distribution("tokens.total", 160, tags=expected_tags + ["openai.estimated:false"]), - mock.call.distribution("request.duration", mock.ANY, tags=expected_tags), - mock.call.gauge("ratelimit.remaining.requests", mock.ANY, tags=expected_tags), - mock.call.gauge("ratelimit.requests", mock.ANY, tags=expected_tags), - mock.call.gauge("ratelimit.remaining.tokens", mock.ANY, tags=expected_tags), - mock.call.gauge("ratelimit.tokens", mock.ANY, tags=expected_tags), - ], - any_order=True, - ) - mock_logs.start.assert_not_called() - mock_logs.enqueue.assert_not_called() mock_llmobs_writer.start.assert_not_called() mock_llmobs_writer.enqueue.assert_not_called() -@pytest.mark.xfail(reason="An API key is required when logs are enabled") -@pytest.mark.parametrize( - "ddtrace_global_config,ddtrace_config_openai", - [(dict(_dd_api_key=""), dict(logs_enabled=True))], -) -def test_logs_no_api_key(openai, ddtrace_global_config, ddtrace_config_openai, mock_tracer): - """When no DD_API_KEY is set, the patching fails""" - pass - - -@pytest.mark.parametrize("ddtrace_config_openai", [dict(logs_enabled=True, log_prompt_completion_sample_rate=1.0)]) -def test_logs_completions(openai_vcr, openai, ddtrace_config_openai, mock_logs, mock_tracer): - """Ensure logs are emitted for completion endpoints when configured. - - Also ensure the logs have the correct tagging including the trace-logs correlation tagging. 
- """ - with openai_vcr.use_cassette("completion.yaml"): - client = openai.OpenAI() - client.completions.create( - model="ada", prompt="Hello world", temperature=0.8, n=2, stop=".", max_tokens=10, user="ddtrace-test" - ) - - span = mock_tracer.pop_traces()[0][0] - trace_id, span_id = span.trace_id, span.span_id - - assert mock_logs.enqueue.call_count == 1 - mock_logs.assert_has_calls( - [ - mock.call.start(), - mock.call.enqueue( - { - "timestamp": mock.ANY, - "message": mock.ANY, - "hostname": mock.ANY, - "ddsource": "openai", - "service": "tests.contrib.openai", - "status": "info", - "ddtags": "env:,version:,openai.request.endpoint:/v1/completions,openai.request.method:POST,openai.request.model:ada,openai.organization.name:datadog-4,openai.user.api_key:sk-...key>", # noqa: E501 - "dd.trace_id": "{:x}".format(trace_id), - "dd.span_id": str(span_id), - "prompt": "Hello world", - "choices": mock.ANY, - } - ), - ] - ) - - -@pytest.mark.parametrize("ddtrace_config_openai", [dict(logs_enabled=True, log_prompt_completion_sample_rate=1.0)]) -def test_global_tags(openai_vcr, ddtrace_config_openai, openai, mock_metrics, mock_logs, mock_tracer): +def test_global_tags(openai_vcr, openai, mock_tracer): """ When the global config UST tags are set The service name should be used for all data @@ -288,32 +169,6 @@ def test_global_tags(openai_vcr, ddtrace_config_openai, openai, mock_metrics, mo assert span.get_tag("openai.organization.name") == "datadog-4" assert span.get_tag("openai.user.api_key") == "sk-...key>" - for _, _args, kwargs in mock_metrics.mock_calls: - expected_metrics = [ - "service:test-svc", - "env:staging", - "version:1234", - "openai.request.model:ada", - "model:ada", - "openai.request.endpoint:/v1/completions", - "openai.request.method:POST", - "openai.organization.name:datadog-4", - "openai.user.api_key:sk-...key>", - ] - actual_tags = kwargs.get("tags") - for m in expected_metrics: - assert m in actual_tags - - for call, args, _kwargs in mock_logs.mock_calls: - if call != "enqueue": - continue - log = args[0] - assert log["service"] == "test-svc" - assert ( - log["ddtags"] - == "env:staging,version:1234,openai.request.endpoint:/v1/completions,openai.request.method:POST,openai.request.model:ada,openai.organization.name:datadog-4,openai.user.api_key:sk-...key>" # noqa: E501 - ) - def test_completion_raw_response(openai, openai_vcr, snapshot_tracer): with snapshot_context( @@ -440,20 +295,6 @@ def test_chat_completion_raw_response(openai, openai_vcr, snapshot_tracer): ) -@pytest.mark.parametrize("ddtrace_config_openai", [dict(metrics_enabled=b) for b in [True, False]]) -def test_enable_metrics(openai, openai_vcr, ddtrace_config_openai, mock_metrics, mock_tracer): - """Ensure the metrics_enabled configuration works.""" - with openai_vcr.use_cassette("completion.yaml"): - client = openai.OpenAI() - client.completions.create( - model="ada", prompt="Hello world", temperature=0.8, n=2, stop=".", max_tokens=10, user="ddtrace-test" - ) - if ddtrace_config_openai["metrics_enabled"]: - assert mock_metrics.mock_calls - else: - assert not mock_metrics.mock_calls - - @pytest.mark.parametrize("api_key_in_env", [True, False]) async def test_achat_completion(api_key_in_env, request_api_key, openai, openai_vcr, snapshot_tracer): with snapshot_context( @@ -510,47 +351,6 @@ async def test_image_acreate(api_key_in_env, request_api_key, openai, openai_vcr ) -@pytest.mark.parametrize("ddtrace_config_openai", [dict(logs_enabled=True, log_prompt_completion_sample_rate=1.0)]) -def 
test_logs_image_create(openai_vcr, openai, ddtrace_config_openai, mock_logs, mock_tracer): - """Ensure logs are emitted for image endpoints when configured. - - Also ensure the logs have the correct tagging including the trace-logs correlation tagging. - """ - with openai_vcr.use_cassette("image_create.yaml"): - client = openai.OpenAI() - client.images.generate( - prompt="sleepy capybara with monkey on top", - n=1, - size="256x256", - response_format="url", - user="ddtrace-test", - ) - span = mock_tracer.pop_traces()[0][0] - trace_id, span_id = span.trace_id, span.span_id - - assert mock_logs.enqueue.call_count == 1 - mock_logs.assert_has_calls( - [ - mock.call.start(), - mock.call.enqueue( - { - "timestamp": mock.ANY, - "message": mock.ANY, - "hostname": mock.ANY, - "ddsource": "openai", - "service": "tests.contrib.openai", - "status": "info", - "ddtags": "env:,version:,openai.request.endpoint:/v1/images/generations,openai.request.method:POST,openai.request.model:dall-e,openai.organization.name:datadog-4,openai.user.api_key:sk-...key>", # noqa: E501 - "dd.trace_id": "{:x}".format(trace_id), - "dd.span_id": str(span_id), - "prompt": "sleepy capybara with monkey on top", - "choices": mock.ANY, - } - ), - ] - ) - - # TODO: Note that vcr tests for image edit/variation don't work as they error out when recording the vcr request, # during the payload decoding. We'll need to migrate those tests over once we can address this. @pytest.mark.snapshot( @@ -871,7 +671,7 @@ def test_span_finish_on_stream_error(openai, openai_vcr, snapshot_tracer): @pytest.mark.snapshot @pytest.mark.skipif(TIKTOKEN_AVAILABLE, reason="This test estimates token counts") -def test_completion_stream_est_tokens(openai, openai_vcr, mock_metrics, snapshot_tracer): +def test_completion_stream_est_tokens(openai, openai_vcr, snapshot_tracer): with openai_vcr.use_cassette("completion_streamed.yaml"): with mock.patch("ddtrace.contrib.internal.openai.utils.encoding_for_model", create=True) as mock_encoding: mock_encoding.return_value.encode.side_effect = lambda x: [1, 2] @@ -882,7 +682,7 @@ def test_completion_stream_est_tokens(openai, openai_vcr, mock_metrics, snapshot @pytest.mark.skipif(not TIKTOKEN_AVAILABLE, reason="This test computes token counts using tiktoken") @pytest.mark.snapshot(token="tests.contrib.openai.test_openai.test_completion_stream") -def test_completion_stream(openai, openai_vcr, mock_metrics, snapshot_tracer): +def test_completion_stream(openai, openai_vcr, snapshot_tracer): with openai_vcr.use_cassette("completion_streamed.yaml"): with mock.patch("ddtrace.contrib.internal.openai.utils.encoding_for_model", create=True) as mock_encoding: mock_encoding.return_value.encode.side_effect = lambda x: [1, 2] @@ -893,7 +693,7 @@ def test_completion_stream(openai, openai_vcr, mock_metrics, snapshot_tracer): @pytest.mark.skipif(not TIKTOKEN_AVAILABLE, reason="This test computes token counts using tiktoken") @pytest.mark.snapshot(token="tests.contrib.openai.test_openai.test_completion_stream") -async def test_completion_async_stream(openai, openai_vcr, mock_metrics, snapshot_tracer): +async def test_completion_async_stream(openai, openai_vcr, snapshot_tracer): with openai_vcr.use_cassette("completion_streamed.yaml"): with mock.patch("ddtrace.contrib.internal.openai.utils.encoding_for_model", create=True) as mock_encoding: mock_encoding.return_value.encode.side_effect = lambda x: [1, 2] @@ -907,7 +707,7 @@ async def test_completion_async_stream(openai, openai_vcr, mock_metrics, snapsho reason="Streamed response context 
managers are only available v1.6.0+", ) @pytest.mark.snapshot(token="tests.contrib.openai.test_openai.test_completion_stream") -def test_completion_stream_context_manager(openai, openai_vcr, mock_metrics, snapshot_tracer): +def test_completion_stream_context_manager(openai, openai_vcr, snapshot_tracer): with openai_vcr.use_cassette("completion_streamed.yaml"): with mock.patch("ddtrace.contrib.internal.openai.utils.encoding_for_model", create=True) as mock_encoding: mock_encoding.return_value.encode.side_effect = lambda x: [1, 2] @@ -920,7 +720,7 @@ def test_completion_stream_context_manager(openai, openai_vcr, mock_metrics, sna parse_version(openai_module.version.VERSION) < (1, 26), reason="Stream options only available openai >= 1.26" ) @pytest.mark.snapshot(token="tests.contrib.openai.test_openai.test_chat_completion_stream") -def test_chat_completion_stream(openai, openai_vcr, mock_metrics, snapshot_tracer): +def test_chat_completion_stream(openai, openai_vcr, snapshot_tracer): """Assert that streamed token chunk extraction logic works automatically.""" with openai_vcr.use_cassette("chat_completion_streamed_tokens.yaml"): with mock.patch("ddtrace.contrib.internal.openai.utils.encoding_for_model", create=True) as mock_encoding: @@ -939,7 +739,7 @@ def test_chat_completion_stream(openai, openai_vcr, mock_metrics, snapshot_trace @pytest.mark.skipif( parse_version(openai_module.version.VERSION) < (1, 26), reason="Stream options only available openai >= 1.26" ) -def test_chat_completion_stream_explicit_no_tokens(openai, openai_vcr, mock_metrics, snapshot_tracer): +def test_chat_completion_stream_explicit_no_tokens(openai, openai_vcr, mock_tracer): """Assert that streamed token chunk extraction logic is avoided if explicitly set to False by the user.""" with openai_vcr.use_cassette("chat_completion_streamed.yaml"): with mock.patch("ddtrace.contrib.internal.openai.utils.encoding_for_model", create=True) as mock_encoding: @@ -956,41 +756,22 @@ def test_chat_completion_stream_explicit_no_tokens(openai, openai_vcr, mock_metr user="ddtrace-test", n=None, ) - span = snapshot_tracer.current_span() chunks = [c for c in resp] assert len(chunks) == 15 completion = "".join([c.choices[0].delta.content for c in chunks if c.choices[0].delta.content is not None]) assert completion == expected_completion - expected_tags = [ - "version:", - "env:", - "service:tests.contrib.openai", - "openai.request.model:gpt-3.5-turbo", - "model:gpt-3.5-turbo", - "openai.request.endpoint:/v1/chat/completions", - "openai.request.method:POST", - "openai.organization.id:", - "openai.organization.name:datadog-4", - "openai.user.api_key:sk-...key>", - "error:0", - ] - assert mock.call.distribution("request.duration", span.duration_ns, tags=expected_tags) in mock_metrics.mock_calls - assert mock.call.gauge("ratelimit.requests", 3000, tags=expected_tags) in mock_metrics.mock_calls - assert mock.call.gauge("ratelimit.remaining.requests", 2999, tags=expected_tags) in mock_metrics.mock_calls - expected_tags += ["openai.estimated:true"] - if TIKTOKEN_AVAILABLE: - expected_tags = expected_tags[:-1] - assert mock.call.distribution("tokens.prompt", 8, tags=expected_tags) in mock_metrics.mock_calls - assert mock.call.distribution("tokens.completion", mock.ANY, tags=expected_tags) in mock_metrics.mock_calls - assert mock.call.distribution("tokens.total", mock.ANY, tags=expected_tags) in mock_metrics.mock_calls + span = mock_tracer.pop_traces()[0][0] + assert span.get_metric("openai.response.usage.prompt_tokens") == 8 + assert 
span.get_metric("openai.response.usage.completion_tokens") is not None + assert span.get_metric("openai.response.usage.total_tokens") is not None @pytest.mark.skipif( parse_version(openai_module.version.VERSION) < (1, 26, 0), reason="Streamed tokens available in 1.26.0+" ) @pytest.mark.snapshot(token="tests.contrib.openai.test_openai.test_chat_completion_stream") -async def test_chat_completion_async_stream(openai, openai_vcr, mock_metrics, snapshot_tracer): +async def test_chat_completion_async_stream(openai, openai_vcr, snapshot_tracer): with openai_vcr.use_cassette("chat_completion_streamed_tokens.yaml"): with mock.patch("ddtrace.contrib.internal.openai.utils.encoding_for_model", create=True) as mock_encoding: mock_encoding.return_value.encode.side_effect = lambda x: [1, 2, 3, 4, 5, 6, 7, 8] @@ -1012,7 +793,7 @@ async def test_chat_completion_async_stream(openai, openai_vcr, mock_metrics, sn reason="Streamed response context managers are only available v1.6.0+, tokens available 1.26.0+", ) @pytest.mark.snapshot(token="tests.contrib.openai.test_openai.test_chat_completion_stream") -async def test_chat_completion_async_stream_context_manager(openai, openai_vcr, mock_metrics, snapshot_tracer): +async def test_chat_completion_async_stream_context_manager(openai, openai_vcr, snapshot_tracer): with openai_vcr.use_cassette("chat_completion_streamed_tokens.yaml"): with mock.patch("ddtrace.contrib.internal.openai.utils.encoding_for_model", create=True) as mock_encoding: mock_encoding.return_value.encode.side_effect = lambda x: [1, 2, 3, 4, 5, 6, 7, 8] @@ -1045,14 +826,7 @@ def test_integration_sync(openai_api_key, ddtrace_run_python_code_in_subprocess) pypath = [os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))] if "PYTHONPATH" in env: pypath.append(env["PYTHONPATH"]) - env.update( - { - "OPENAI_API_KEY": openai_api_key, - "PYTHONPATH": ":".join(pypath), - # Disable metrics because the test agent doesn't support metrics - "DD_OPENAI_METRICS_ENABLED": "false", - } - ) + env.update({"OPENAI_API_KEY": openai_api_key, "PYTHONPATH": ":".join(pypath)}) out, err, status, pid = ddtrace_run_python_code_in_subprocess( """ import openai @@ -1092,14 +866,7 @@ def test_integration_async(openai_api_key, ddtrace_run_python_code_in_subprocess pypath = [os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))] if "PYTHONPATH" in env: pypath.append(env["PYTHONPATH"]) - env.update( - { - "OPENAI_API_KEY": openai_api_key, - "PYTHONPATH": ":".join(pypath), - # Disable metrics because the test agent doesn't support metrics - "DD_OPENAI_METRICS_ENABLED": "false", - } - ) + env.update({"OPENAI_API_KEY": openai_api_key, "PYTHONPATH": ":".join(pypath)}) out, err, status, pid = ddtrace_run_python_code_in_subprocess( """ import asyncio @@ -1247,36 +1014,13 @@ def test_completion_truncation(openai, openai_vcr, mock_tracer, ddtrace_config_o @pytest.mark.parametrize("ddtrace_config_openai", [dict(span_prompt_completion_sample_rate=0)]) -def test_embedding_unsampled_prompt_completion(openai, openai_vcr, ddtrace_config_openai, mock_logs, mock_tracer): +def test_embedding_unsampled_prompt_completion(openai, openai_vcr, ddtrace_config_openai, mock_tracer): with openai_vcr.use_cassette("embedding.yaml"): client = openai.OpenAI() client.embeddings.create(input="hello world", model="text-embedding-ada-002") - logs = mock_logs.enqueue.call_count traces = mock_tracer.pop_traces() assert len(traces) == 1 assert traces[0][0].get_tag("openai.request.input") is None - assert logs == 0 - - 
-@pytest.mark.parametrize( - "ddtrace_config_openai", - [dict(logs_enabled=True, log_prompt_completion_sample_rate=r) for r in [0, 0.25, 0.75, 1]], -) -def test_logs_sample_rate(openai, openai_vcr, ddtrace_config_openai, mock_logs, mock_tracer): - total_calls = 200 - for _ in range(total_calls): - with openai_vcr.use_cassette("completion.yaml"): - client = openai.OpenAI() - client.completions.create(model="ada", prompt="Hello world", temperature=0.8, n=2, stop=".", max_tokens=10) - - logs = mock_logs.enqueue.call_count - if ddtrace.config.openai["log_prompt_completion_sample_rate"] == 0: - assert logs == 0 - elif ddtrace.config.openai["log_prompt_completion_sample_rate"] == 1: - assert logs == total_calls - else: - rate = ddtrace.config.openai["log_prompt_completion_sample_rate"] * total_calls - assert (rate - 30) < logs < (rate + 30) def test_est_tokens(): @@ -1489,14 +1233,7 @@ def test_integration_service_name(openai_api_key, ddtrace_run_python_code_in_sub pypath = [os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))] if "PYTHONPATH" in env: pypath.append(env["PYTHONPATH"]) - env.update( - { - "OPENAI_API_KEY": openai_api_key, - "PYTHONPATH": ":".join(pypath), - # Disable metrics because the test agent doesn't support metrics - "DD_OPENAI_METRICS_ENABLED": "false", - } - ) + env.update({"OPENAI_API_KEY": openai_api_key, "PYTHONPATH": ":".join(pypath)}) if schema_version: env["DD_TRACE_SPAN_ATTRIBUTE_SCHEMA"] = schema_version if service_name: diff --git a/tests/contrib/psycopg/test_psycopg.py b/tests/contrib/psycopg/test_psycopg.py index 8e13ecc4128..60f640e65d3 100644 --- a/tests/contrib/psycopg/test_psycopg.py +++ b/tests/contrib/psycopg/test_psycopg.py @@ -42,7 +42,7 @@ def _get_conn(self, service=None): conn = psycopg.connect(**POSTGRES_CONFIG) pin = Pin.get_from(conn) if pin: - pin.clone(service=service, tracer=self.tracer).onto(conn) + pin._clone(service=service, tracer=self.tracer).onto(conn) return conn @@ -140,7 +140,7 @@ def test_psycopg3_connection_with_string(self): configs_arr.append("options='-c statement_timeout=1000 -c lock_timeout=250'") conn = psycopg.connect(" ".join(configs_arr)) - Pin.get_from(conn).clone(service="postgres", tracer=self.tracer).onto(conn) + Pin.get_from(conn)._clone(service="postgres", tracer=self.tracer).onto(conn) self.assert_conn_is_traced(conn, "postgres") def test_opentracing_propagation(self): @@ -522,7 +522,7 @@ def test_connection_instance_method_patch(self): pin = Pin.get_from(connection) if pin: - pin.clone(service="postgres", tracer=self.tracer).onto(connection) + pin._clone(service="postgres", tracer=self.tracer).onto(connection) query = SQL("""select 'one' as x""") cur = connection.execute(query) diff --git a/tests/contrib/psycopg/test_psycopg_async.py b/tests/contrib/psycopg/test_psycopg_async.py index 7e4fbd59624..30b8ed6c2a2 100644 --- a/tests/contrib/psycopg/test_psycopg_async.py +++ b/tests/contrib/psycopg/test_psycopg_async.py @@ -36,7 +36,7 @@ async def _get_conn(self, service=None): conn = await psycopg.AsyncConnection.connect(**POSTGRES_CONFIG) pin = Pin.get_from(conn) if pin: - pin.clone(service=service, tracer=self.tracer).onto(conn) + pin._clone(service=service, tracer=self.tracer).onto(conn) return conn diff --git a/tests/contrib/psycopg2/test_psycopg.py b/tests/contrib/psycopg2/test_psycopg.py index 902d24d3c0e..fd4d8a02fbe 100644 --- a/tests/contrib/psycopg2/test_psycopg.py +++ b/tests/contrib/psycopg2/test_psycopg.py @@ -49,7 +49,7 @@ def _get_conn(self, service=None): conn = 
psycopg2.connect(**POSTGRES_CONFIG) pin = Pin.get_from(conn) if pin: - pin.clone(service=service, tracer=self.tracer).onto(conn) + pin._clone(service=service, tracer=self.tracer).onto(conn) return conn @@ -146,7 +146,7 @@ def test_psycopg2_connection_with_string(self): configs_arr.append("options='-c statement_timeout=1000 -c lock_timeout=250'") conn = psycopg2.connect(" ".join(configs_arr)) - Pin.get_from(conn).clone(service="postgres", tracer=self.tracer).onto(conn) + Pin.get_from(conn)._clone(service="postgres", tracer=self.tracer).onto(conn) self.assert_conn_is_traced(conn, "postgres") def test_opentracing_propagation(self): diff --git a/tests/contrib/pylibmc/test.py b/tests/contrib/pylibmc/test.py index 9de012439dc..0dd4e0102bc 100644 --- a/tests/contrib/pylibmc/test.py +++ b/tests/contrib/pylibmc/test.py @@ -316,7 +316,7 @@ def get_client(self): client = pylibmc.Client([url]) client.flush_all() - Pin.get_from(client).clone(tracer=self.tracer).onto(client) + Pin.get_from(client)._clone(tracer=self.tracer).onto(client) return client, self.tracer @@ -329,7 +329,7 @@ class TestPylibmcPatch(TestPylibmcPatchDefault): def get_client(self): client, tracer = TestPylibmcPatchDefault.get_client(self) - Pin.get_from(client).clone(service=self.TEST_SERVICE).onto(client) + Pin.get_from(client)._clone(service=self.TEST_SERVICE).onto(client) return client, tracer @@ -341,7 +341,7 @@ def test_patch_unpatch(self): patch() client = pylibmc.Client([url]) - Pin.get_from(client).clone(service=self.TEST_SERVICE, tracer=self.tracer).onto(client) + Pin.get_from(client)._clone(service=self.TEST_SERVICE, tracer=self.tracer).onto(client) client.set("a", 1) @@ -362,7 +362,9 @@ def test_patch_unpatch(self): patch() client = pylibmc.Client([url]) - Pin(service=self.TEST_SERVICE, tracer=self.tracer).onto(client) + pin = Pin(service=self.TEST_SERVICE) + pin._tracer = self.tracer + pin.onto(client) client.set("a", 1) spans = self.pop_spans() diff --git a/tests/contrib/pymemcache/test_client.py b/tests/contrib/pymemcache/test_client.py index 19a7a93d523..09c8d33e007 100644 --- a/tests/contrib/pymemcache/test_client.py +++ b/tests/contrib/pymemcache/test_client.py @@ -263,7 +263,7 @@ def test_stats(self): def test_service_name_override(self): client = self.make_client([b"STORED\r\n", b"VALUE key 0 5\r\nvalue\r\nEND\r\n"]) - Pin.override(client, service="testsvcname") + Pin._override(client, service="testsvcname") client.set(b"key", b"value", noreply=False) result = client.get(b"key") assert _str(result) == "value" @@ -280,7 +280,7 @@ def make_client(self, mock_socket_values, **kwargs): from pymemcache.client.hash import HashClient tracer = DummyTracer() - Pin.override(pymemcache, tracer=tracer) + Pin._override(pymemcache, tracer=tracer) self.client = HashClient([(TEST_HOST, TEST_PORT)], **kwargs) class _MockClient(Client): @@ -320,7 +320,7 @@ def test_service_name_override_hashclient(self): client = self.make_client([b"STORED\r\n", b"VALUE key 0 5\r\nvalue\r\nEND\r\n"]) assert len(client.clients) == 1 for _c in client.clients.values(): - Pin.override(_c, service="testsvcname") + Pin._override(_c, service="testsvcname") client.set(b"key", b"value", noreply=False) result = client.get(b"key") assert _str(result) == "value" @@ -332,7 +332,7 @@ def test_service_name_override_hashclient(self): def test_service_name_override_hashclient_pooling(self): client = self.make_client([b""], use_pooling=True) - Pin.override(client, service="testsvcname") + Pin._override(client, service="testsvcname") client.set(b"key", b"value") 
assert len(client.clients) == 1 spans = self.get_spans() @@ -351,7 +351,7 @@ def tearDown(self): def make_client(self, mock_socket_values, **kwargs): tracer = DummyTracer() - Pin.override(pymemcache, tracer=tracer) + Pin._override(pymemcache, tracer=tracer) self.client = pymemcache.client.base.Client((TEST_HOST, TEST_PORT), **kwargs) self.client.sock = MockSocket(list(mock_socket_values)) return self.client @@ -365,7 +365,7 @@ def test_same_tracer(self): def test_override_parent_pin(self): """Test that the service set on `pymemcache` is used for Clients.""" - Pin.override(pymemcache, service="mysvc") + Pin._override(pymemcache, service="mysvc") client = self.make_client([b"STORED\r\n", b"VALUE key 0 5\r\nvalue\r\nEND\r\n"]) client.set(b"key", b"value", noreply=False) @@ -378,7 +378,7 @@ def test_override_parent_pin(self): def test_override_client_pin(self): """Test that the service set on `pymemcache` is used for Clients.""" client = self.make_client([b"STORED\r\n", b"VALUE key 0 5\r\nvalue\r\nEND\r\n"]) - Pin.override(client, service="mysvc2") + Pin._override(client, service="mysvc2") client.set(b"key", b"value", noreply=False) diff --git a/tests/contrib/pymemcache/test_client_defaults.py b/tests/contrib/pymemcache/test_client_defaults.py index 0b5e44aa3b0..302be1b0abd 100644 --- a/tests/contrib/pymemcache/test_client_defaults.py +++ b/tests/contrib/pymemcache/test_client_defaults.py @@ -18,7 +18,7 @@ def client(tracer): try: patch() - Pin.override(pymemcache, tracer=tracer) + Pin._override(pymemcache, tracer=tracer) with override_config("pymemcache", dict(command_enabled=False)): client = pymemcache.client.base.Client((TEST_HOST, TEST_PORT)) yield client diff --git a/tests/contrib/pymemcache/test_client_mixin.py b/tests/contrib/pymemcache/test_client_mixin.py index 2d471765e1f..b47c6d8b154 100644 --- a/tests/contrib/pymemcache/test_client_mixin.py +++ b/tests/contrib/pymemcache/test_client_mixin.py @@ -68,7 +68,7 @@ def tearDown(self): def make_client(self, mock_socket_values, **kwargs): tracer = DummyTracer() - Pin.override(pymemcache, tracer=tracer) + Pin._override(pymemcache, tracer=tracer) self.client = pymemcache.client.base.Client((TEST_HOST, TEST_PORT), **kwargs) self.client.sock = MockSocket(list(mock_socket_values)) return self.client diff --git a/tests/contrib/pymongo/test.py b/tests/contrib/pymongo/test.py index b6669d40ac0..31bd45b4674 100644 --- a/tests/contrib/pymongo/test.py +++ b/tests/contrib/pymongo/test.py @@ -420,7 +420,7 @@ def test_patch_pymongo_client_after_import(self): tracer = DummyTracer() client = MongoClient(port=MONGO_CONFIG["port"]) # Ensure the dummy tracer is used to create span in the pymongo integration - Pin.get_from(client).clone(tracer=tracer).onto(client) + Pin.get_from(client)._clone(tracer=tracer).onto(client) # Ensure that the client is traced client.server_info() spans = tracer.pop() @@ -440,7 +440,7 @@ def tearDown(self): def get_tracer_and_client(self): tracer = DummyTracer() client = pymongo.MongoClient(port=MONGO_CONFIG["port"]) - Pin.get_from(client).clone(tracer=tracer).onto(client) + Pin.get_from(client)._clone(tracer=tracer).onto(client) return tracer, client def test_host_kwarg(self): @@ -471,7 +471,9 @@ def tearDown(self): def get_tracer_and_client(self): client = pymongo.MongoClient(port=MONGO_CONFIG["port"]) - Pin(service="pymongo", tracer=self.tracer).onto(client) + pin = Pin(service="pymongo") + pin._tracer = self.tracer + pin.onto(client) return self.tracer, client def test_patch_unpatch(self): @@ -480,7 +482,7 @@ def 
test_patch_unpatch(self): patch() client = pymongo.MongoClient(port=MONGO_CONFIG["port"]) - Pin.get_from(client).clone(tracer=self.tracer).onto(client) + Pin.get_from(client)._clone(tracer=self.tracer).onto(client) client["testdb"].drop_collection("whatever") spans = self.pop_spans() @@ -500,7 +502,7 @@ def test_patch_unpatch(self): patch() client = pymongo.MongoClient(port=MONGO_CONFIG["port"]) - Pin.get_from(client).clone(tracer=self.tracer).onto(client) + Pin.get_from(client)._clone(tracer=self.tracer).onto(client) client["testdb"].drop_collection("whatever") spans = self.pop_spans() @@ -520,7 +522,7 @@ def test_user_specified_service_default(self): tracer = DummyTracer() client = pymongo.MongoClient(port=MONGO_CONFIG["port"]) - Pin.get_from(client).clone(tracer=tracer).onto(client) + Pin.get_from(client)._clone(tracer=tracer).onto(client) client["testdb"].drop_collection("whatever") spans = tracer.pop() assert len(spans) == 2 @@ -539,7 +541,7 @@ def test_user_specified_service_v0(self): tracer = DummyTracer() client = pymongo.MongoClient(port=MONGO_CONFIG["port"]) - Pin.get_from(client).clone(tracer=tracer).onto(client) + Pin.get_from(client)._clone(tracer=tracer).onto(client) client["testdb"].drop_collection("whatever") spans = tracer.pop() assert len(spans) == 2 @@ -559,7 +561,7 @@ def test_user_specified_service_default_override(self): assert cfg.service == "new-mongo", f"service name is {cfg.service}" tracer = DummyTracer() client = pymongo.MongoClient(port=MONGO_CONFIG["port"]) - Pin.get_from(client).clone(tracer=tracer).onto(client) + Pin.get_from(client)._clone(tracer=tracer).onto(client) client["testdb"].drop_collection("whatever") spans = tracer.pop() @@ -579,7 +581,7 @@ def test_user_specified_service_v1(self): tracer = DummyTracer() client = pymongo.MongoClient(port=MONGO_CONFIG["port"]) - Pin.get_from(client).clone(tracer=tracer).onto(client) + Pin.get_from(client)._clone(tracer=tracer).onto(client) client["testdb"].drop_collection("whatever") spans = tracer.pop() assert len(spans) == 2 @@ -593,7 +595,7 @@ def test_unspecified_service_v0(self): """ tracer = DummyTracer() client = pymongo.MongoClient(port=MONGO_CONFIG["port"]) - Pin.get_from(client).clone(tracer=tracer).onto(client) + Pin.get_from(client)._clone(tracer=tracer).onto(client) client["testdb"].drop_collection("whatever") spans = tracer.pop() assert len(spans) == 2 @@ -605,8 +607,10 @@ def test_unspecified_service_v0(self): def test_user_specified_pymongo_service_v0(self): tracer = DummyTracer() client = pymongo.MongoClient(port=MONGO_CONFIG["port"]) - Pin(service="mypymongo", tracer=self.tracer).onto(client) - Pin.get_from(client).clone(tracer=tracer).onto(client) + pin = Pin(service="mypymongo") + pin._tracer = self.tracer + pin.onto(client) + Pin.get_from(client)._clone(tracer=tracer).onto(client) client["testdb"].drop_collection("whatever") spans = tracer.pop() assert len(spans) == 2 @@ -618,8 +622,10 @@ def test_user_specified_pymongo_service_v0(self): def test_user_specified_pymongo_service_v1(self): tracer = DummyTracer() client = pymongo.MongoClient(port=MONGO_CONFIG["port"]) - Pin(service="mypymongo", tracer=self.tracer).onto(client) - Pin.get_from(client).clone(tracer=tracer).onto(client) + pin = Pin(service="mypymongo") + pin._tracer = self.tracer + pin.onto(client) + Pin.get_from(client)._clone(tracer=tracer).onto(client) client["testdb"].drop_collection("whatever") spans = tracer.pop() assert len(spans) == 2 @@ -631,8 +637,10 @@ def test_user_specified_pymongo_service_v1(self): def 
test_service_precedence_v0(self): tracer = DummyTracer() client = pymongo.MongoClient(port=MONGO_CONFIG["port"]) - Pin(service="mypymongo", tracer=self.tracer).onto(client) - Pin.get_from(client).clone(tracer=tracer).onto(client) + pin = Pin(service="mypymongo") + pin._tracer = self.tracer + pin.onto(client) + Pin.get_from(client)._clone(tracer=tracer).onto(client) client["testdb"].drop_collection("whatever") spans = tracer.pop() assert len(spans) == 2 @@ -644,8 +652,10 @@ def test_service_precedence_v0(self): def test_service_precedence_v1(self): tracer = DummyTracer() client = pymongo.MongoClient(port=MONGO_CONFIG["port"]) - Pin(service="mypymongo", tracer=self.tracer).onto(client) - Pin.get_from(client).clone(tracer=tracer).onto(client) + pin = Pin(service="mypymongo") + pin._tracer = self.tracer + pin.onto(client) + Pin.get_from(client)._clone(tracer=tracer).onto(client) client["testdb"].drop_collection("whatever") spans = tracer.pop() assert len(spans) == 2 @@ -658,7 +668,7 @@ def test_operation_name_v0_schema(self): """ tracer = DummyTracer() client = pymongo.MongoClient(port=MONGO_CONFIG["port"]) - Pin.get_from(client).clone(tracer=tracer).onto(client) + Pin.get_from(client)._clone(tracer=tracer).onto(client) client["testdb"].drop_collection("whatever") spans = tracer.pop() assert len(spans) == 2 @@ -671,7 +681,7 @@ def test_operation_name_v1_schema(self): """ tracer = DummyTracer() client = pymongo.MongoClient(port=MONGO_CONFIG["port"]) - Pin.get_from(client).clone(tracer=tracer).onto(client) + Pin.get_from(client)._clone(tracer=tracer).onto(client) client["testdb"].drop_collection("whatever") spans = tracer.pop() assert len(spans) == 2 @@ -681,7 +691,7 @@ def test_operation_name_v1_schema(self): def test_peer_service_tagging(self): tracer = DummyTracer() client = pymongo.MongoClient(port=MONGO_CONFIG["port"]) - Pin.get_from(client).clone(tracer=tracer).onto(client) + Pin.get_from(client)._clone(tracer=tracer).onto(client) db_name = "testdb" client[db_name].drop_collection("whatever") spans = tracer.pop() @@ -757,13 +767,13 @@ def setUp(self): super(TestPymongoSocketTracing, self).setUp() patch() # Override server pin's tracer with our dummy tracer - Pin.override(Server, tracer=self.tracer) + Pin._override(Server, tracer=self.tracer) # maxPoolSize controls the number of sockets that the client can instantiate # and choose from to perform classic operations. 
For the sake of our tests, # let's limit this number to 1 self.client = pymongo.MongoClient(port=MONGO_CONFIG["port"], maxPoolSize=1) # Override MongoClient's pin's tracer with our dummy tracer - Pin.override(self.client, tracer=self.tracer, service="testdb") + Pin._override(self.client, tracer=self.tracer, service="testdb") def tearDown(self): unpatch() diff --git a/tests/contrib/pymysql/test_pymysql.py b/tests/contrib/pymysql/test_pymysql.py index e94e03c8395..9638289e6fc 100644 --- a/tests/contrib/pymysql/test_pymysql.py +++ b/tests/contrib/pymysql/test_pymysql.py @@ -347,7 +347,7 @@ def _get_conn_tracer(self): assert pin # Customize the service # we have to apply it on the existing one since new one won't inherit `app` - pin.clone(tracer=self.tracer).onto(self.conn) + pin._clone(tracer=self.tracer).onto(self.conn) return self.conn, self.tracer @@ -363,7 +363,7 @@ def test_patch_unpatch(self): conn = pymysql.connect(**MYSQL_CONFIG) pin = Pin.get_from(conn) assert pin - pin.clone(tracer=self.tracer).onto(conn) + pin._clone(tracer=self.tracer).onto(conn) assert not conn._closed cursor = conn.cursor() @@ -396,7 +396,7 @@ def test_patch_unpatch(self): def test_user_pin_override(self): conn, tracer = self._get_conn_tracer() pin = Pin.get_from(conn) - pin.clone(service="pin-svc", tracer=self.tracer).onto(conn) + pin._clone(service="pin-svc", tracer=self.tracer).onto(conn) cursor = conn.cursor() cursor.execute("SELECT 1") rows = cursor.fetchall() diff --git a/tests/contrib/pynamodb/test_pynamodb.py b/tests/contrib/pynamodb/test_pynamodb.py index 33b4e4c2c14..d0f459011fc 100644 --- a/tests/contrib/pynamodb/test_pynamodb.py +++ b/tests/contrib/pynamodb/test_pynamodb.py @@ -22,7 +22,7 @@ def setUp(self): self.conn.session.set_credentials("aws-access-key", "aws-secret-access-key", "session-token") super(PynamodbTest, self).setUp() - Pin.override(self.conn, tracer=self.tracer) + Pin._override(self.conn, tracer=self.tracer) def tearDown(self): super(PynamodbTest, self).tearDown() @@ -268,7 +268,7 @@ def test_env_user_specified_pynamodb_service(self): # Manual override dynamodb_backend.create_table("Test", hash_key_attr="content", hash_key_type="S") - Pin.override(self.conn, service="mypynamodb", tracer=self.tracer) + Pin._override(self.conn, service="mypynamodb", tracer=self.tracer) list_result = self.conn.list_tables() span = self.get_spans()[0] assert span.service == "mypynamodb", span.service @@ -289,7 +289,7 @@ def test_service_precedence(self): # Manual override dynamodb_backend.create_table("Test", hash_key_attr="content", hash_key_type="S") - Pin.override(self.conn, service="override-pynamodb", tracer=self.tracer) + Pin._override(self.conn, service="override-pynamodb", tracer=self.tracer) list_result = self.conn.list_tables() span = self.get_spans()[0] assert span.service == "override-pynamodb", span.service diff --git a/tests/contrib/pyodbc/test_pyodbc.py b/tests/contrib/pyodbc/test_pyodbc.py index 4c965aede7b..3f78f622ef8 100644 --- a/tests/contrib/pyodbc/test_pyodbc.py +++ b/tests/contrib/pyodbc/test_pyodbc.py @@ -211,7 +211,7 @@ def _get_conn_tracer(self): assert pin # Customize the service # we have to apply it on the existing one since new one won't inherit `app` - pin.clone(tracer=self.tracer).onto(self.conn) + pin._clone(tracer=self.tracer).onto(self.conn) return self.conn, self.tracer @@ -227,7 +227,7 @@ def test_patch_unpatch(self): conn = pyodbc.connect(PYODBC_CONNECT_DSN) pin = Pin.get_from(conn) assert pin - pin.clone(tracer=self.tracer).onto(conn) + 
pin._clone(tracer=self.tracer).onto(conn) cursor = conn.cursor() cursor.execute("SELECT 1") @@ -256,7 +256,7 @@ def test_patch_unpatch(self): def test_user_pin_override(self): conn, tracer = self._get_conn_tracer() pin = Pin.get_from(conn) - pin.clone(service="pin-svc", tracer=self.tracer).onto(conn) + pin._clone(service="pin-svc", tracer=self.tracer).onto(conn) cursor = conn.cursor() cursor.execute("SELECT 1") rows = cursor.fetchall() diff --git a/tests/contrib/redis/test_redis.py b/tests/contrib/redis/test_redis.py index fb83f6f53fc..7a5ee5d92a0 100644 --- a/tests/contrib/redis/test_redis.py +++ b/tests/contrib/redis/test_redis.py @@ -25,7 +25,7 @@ def setUp(self): patch() r = redis.Redis(port=self.TEST_PORT) r.flushall() - Pin.override(r, tracer=self.tracer) + Pin._override(r, tracer=self.tracer) self.r = r def tearDown(self): @@ -194,7 +194,7 @@ def test_meta_override(self): r = self.r pin = Pin.get_from(r) if pin: - pin.clone(tags={"cheese": "camembert"}).onto(r) + pin._clone(tags={"cheese": "camembert"}).onto(r) r.get("cheese") spans = self.get_spans() @@ -211,7 +211,7 @@ def test_patch_unpatch(self): patch() r = redis.Redis(port=REDIS_CONFIG["port"]) - Pin.get_from(r).clone(tracer=tracer).onto(r) + Pin.get_from(r)._clone(tracer=tracer).onto(r) r.get("key") spans = tracer.pop() @@ -231,7 +231,7 @@ def test_patch_unpatch(self): patch() r = redis.Redis(port=REDIS_CONFIG["port"]) - Pin.get_from(r).clone(tracer=tracer).onto(r) + Pin.get_from(r)._clone(tracer=tracer).onto(r) r.get("key") spans = tracer.pop() @@ -429,7 +429,7 @@ def test_env_user_specified_redis_service_v0(self): self.reset() # Manual override - Pin.override(self.r, service="mysvc", tracer=self.tracer) + Pin._override(self.r, service="mysvc", tracer=self.tracer) self.r.get("cheese") span = self.get_spans()[0] assert span.service == "mysvc", span.service @@ -447,7 +447,7 @@ def test_service_precedence_v0(self): self.reset() # Do a manual override - Pin.override(self.r, service="override-redis", tracer=self.tracer) + Pin._override(self.r, service="override-redis", tracer=self.tracer) self.r.get("cheese") span = self.get_spans()[0] assert span.service == "override-redis", span.service @@ -501,7 +501,7 @@ def test_meta_override(self): r = self.r pin = Pin.get_from(r) if pin: - pin.clone(tags={"cheese": "camembert"}).onto(r) + pin._clone(tags={"cheese": "camembert"}).onto(r) r.get("cheese") @@ -513,7 +513,7 @@ def test_patch_unpatch(self): patch() r = redis.Redis(port=REDIS_CONFIG["port"]) - Pin.get_from(r).clone(tracer=tracer).onto(r) + Pin.get_from(r)._clone(tracer=tracer).onto(r) r.get("key") spans = tracer.pop() @@ -533,7 +533,7 @@ def test_patch_unpatch(self): patch() r = redis.Redis(port=REDIS_CONFIG["port"]) - Pin.get_from(r).clone(tracer=tracer).onto(r) + Pin.get_from(r)._clone(tracer=tracer).onto(r) r.get("key") spans = tracer.pop() @@ -576,7 +576,7 @@ def test_env_user_specified_redis_service(self): self.reset() # Manual override - Pin.override(self.r, service="mysvc", tracer=self.tracer) + Pin._override(self.r, service="mysvc", tracer=self.tracer) self.r.get("cheese") @TracerTestCase.run_in_subprocess(env_overrides=dict(DD_SERVICE="app-svc", DD_REDIS_SERVICE="env-redis")) @@ -587,7 +587,7 @@ def test_service_precedence(self): self.reset() # Do a manual override - Pin.override(self.r, service="override-redis", tracer=self.tracer) + Pin._override(self.r, service="override-redis", tracer=self.tracer) self.r.get("cheese") @TracerTestCase.run_in_subprocess(env_overrides=dict(DD_REDIS_CMD_MAX_LENGTH="10")) diff --git 
a/tests/contrib/redis/test_redis_asyncio.py b/tests/contrib/redis/test_redis_asyncio.py index b1bd5858e04..72fc0f47c63 100644 --- a/tests/contrib/redis/test_redis_asyncio.py +++ b/tests/contrib/redis/test_redis_asyncio.py @@ -130,7 +130,7 @@ async def test_override_service_name(redis_client): @pytest.mark.snapshot(wait_for_num_traces=1) async def test_pin(redis_client): - Pin.override(redis_client, service="my-redis") + Pin._override(redis_client, service="my-redis") val = await redis_client.get("cheese") assert val is None diff --git a/tests/contrib/redis/test_redis_cluster.py b/tests/contrib/redis/test_redis_cluster.py index 2731a18fcee..54fb778987a 100644 --- a/tests/contrib/redis/test_redis_cluster.py +++ b/tests/contrib/redis/test_redis_cluster.py @@ -26,7 +26,7 @@ def setUp(self): patch() r = self._get_test_client() r.flushall() - Pin.override(r, tracer=self.tracer) + Pin._override(r, tracer=self.tracer) self.r = r def tearDown(self): @@ -103,7 +103,7 @@ def test_patch_unpatch(self): patch() r = self._get_test_client() - Pin.get_from(r).clone(tracer=tracer).onto(r) + Pin.get_from(r)._clone(tracer=tracer).onto(r) r.get("key") spans = tracer.pop() @@ -123,7 +123,7 @@ def test_patch_unpatch(self): patch() r = self._get_test_client() - Pin.get_from(r).clone(tracer=tracer).onto(r) + Pin.get_from(r)._clone(tracer=tracer).onto(r) r.get("key") spans = tracer.pop() @@ -142,7 +142,7 @@ def test_user_specified_service_v0(self): assert config.service == "mysvc" r = self._get_test_client() - Pin.get_from(r).clone(tracer=self.tracer).onto(r) + Pin.get_from(r)._clone(tracer=self.tracer).onto(r) r.get("key") spans = self.get_spans() @@ -162,7 +162,7 @@ def test_user_specified_service_v1(self): assert config.service == "mysvc" r = self._get_test_client() - Pin.get_from(r).clone(tracer=self.tracer).onto(r) + Pin.get_from(r)._clone(tracer=self.tracer).onto(r) r.get("key") spans = self.get_spans() diff --git a/tests/contrib/redis/test_redis_cluster_asyncio.py b/tests/contrib/redis/test_redis_cluster_asyncio.py index b8624c533aa..65b6a2348d3 100644 --- a/tests/contrib/redis/test_redis_cluster_asyncio.py +++ b/tests/contrib/redis/test_redis_cluster_asyncio.py @@ -28,7 +28,7 @@ async def traced_redis_cluster(tracer, test_spans): startup_nodes = [redis.asyncio.cluster.ClusterNode(TEST_HOST, int(port)) for port in TEST_PORTS.split(",")] redis_cluster = redis.asyncio.cluster.RedisCluster(startup_nodes=startup_nodes) await redis_cluster.flushall() - Pin.override(redis_cluster, tracer=tracer) + Pin._override(redis_cluster, tracer=tracer) try: yield redis_cluster, test_spans finally: @@ -125,7 +125,7 @@ async def test_patch_unpatch(redis_cluster): patch() r = redis_cluster - Pin.override(r, tracer=tracer) + Pin._override(r, tracer=tracer) await r.get("key") spans = tracer.pop() @@ -145,7 +145,7 @@ async def test_patch_unpatch(redis_cluster): patch() r = redis_cluster - Pin.override(r, tracer=tracer) + Pin._override(r, tracer=tracer) await r.get("key") spans = tracer.pop() @@ -182,7 +182,7 @@ async def test(): tracer = DummyTracer() test_spans = TracerSpanContainer(tracer) - Pin.get_from(r).clone(tracer=tracer).onto(r) + Pin.get_from(r)._clone(tracer=tracer).onto(r) await r.get("key") await r.close() @@ -231,7 +231,7 @@ async def test(): tracer = DummyTracer() test_spans = TracerSpanContainer(tracer) - Pin.get_from(r).clone(tracer=tracer).onto(r) + Pin.get_from(r)._clone(tracer=tracer).onto(r) await r.get("key") await r.close() @@ -280,7 +280,7 @@ async def test(): tracer = DummyTracer() test_spans = 
TracerSpanContainer(tracer) - Pin.get_from(r).clone(tracer=tracer).onto(r) + Pin.get_from(r)._clone(tracer=tracer).onto(r) await r.get("key") await r.close() @@ -321,7 +321,7 @@ async def test(): tracer = DummyTracer() test_spans = TracerSpanContainer(tracer) - Pin.get_from(r).clone(tracer=tracer).onto(r) + Pin.get_from(r)._clone(tracer=tracer).onto(r) await r.get("key") await r.close() @@ -362,7 +362,7 @@ async def test(): tracer = DummyTracer() test_spans = TracerSpanContainer(tracer) - Pin.get_from(r).clone(tracer=tracer).onto(r) + Pin.get_from(r)._clone(tracer=tracer).onto(r) await r.get("key") await r.close() @@ -411,7 +411,7 @@ async def test(): tracer = DummyTracer() test_spans = TracerSpanContainer(tracer) - Pin.get_from(r).clone(tracer=tracer).onto(r) + Pin.get_from(r)._clone(tracer=tracer).onto(r) await r.get("key") await r.close() @@ -456,7 +456,7 @@ async def test(): tracer = DummyTracer() test_spans = TracerSpanContainer(tracer) - Pin.get_from(r).clone(tracer=tracer).onto(r) + Pin.get_from(r)._clone(tracer=tracer).onto(r) await r.get("key") await r.close() diff --git a/tests/contrib/rediscluster/test.py b/tests/contrib/rediscluster/test.py index a2c5ac5c6b2..79b4c806440 100644 --- a/tests/contrib/rediscluster/test.py +++ b/tests/contrib/rediscluster/test.py @@ -43,7 +43,7 @@ def setUp(self): patch() r = _get_test_client() r.flushall() - Pin.override(r, tracer=self.tracer) + Pin._override(r, tracer=self.tracer) self.r = r def tearDown(self): @@ -115,7 +115,7 @@ def test_patch_unpatch(self): patch() r = _get_test_client() - Pin.get_from(r).clone(tracer=tracer).onto(r) + Pin.get_from(r)._clone(tracer=tracer).onto(r) r.get("key") spans = tracer.pop() @@ -135,7 +135,7 @@ def test_patch_unpatch(self): patch() r = _get_test_client() - Pin.get_from(r).clone(tracer=tracer).onto(r) + Pin.get_from(r)._clone(tracer=tracer).onto(r) r.get("key") spans = tracer.pop() @@ -154,7 +154,7 @@ def test_user_specified_service_v0(self): assert config.service == "mysvc" r = _get_test_client() - Pin.get_from(r).clone(tracer=self.tracer).onto(r) + Pin.get_from(r)._clone(tracer=self.tracer).onto(r) r.get("key") spans = self.get_spans() @@ -174,7 +174,7 @@ def test_user_specified_service_v1(self): assert config.service == "mysvc" r = _get_test_client() - Pin.get_from(r).clone(tracer=self.tracer).onto(r) + Pin.get_from(r)._clone(tracer=self.tracer).onto(r) r.get("key") spans = self.get_spans() @@ -189,7 +189,7 @@ def test_unspecified_service_v1(self): the default span service name """ r = _get_test_client() - Pin.get_from(r).clone(tracer=self.tracer).onto(r) + Pin.get_from(r)._clone(tracer=self.tracer).onto(r) r.get("key") spans = self.get_spans() @@ -220,7 +220,7 @@ def test_span_name_v0_schema(self): the default span service name """ r = _get_test_client() - Pin.get_from(r).clone(tracer=self.tracer).onto(r) + Pin.get_from(r)._clone(tracer=self.tracer).onto(r) r.get("key") spans = self.get_spans() @@ -235,7 +235,7 @@ def test_span_name_v1_schema(self): the default span service name """ r = _get_test_client() - Pin.get_from(r).clone(tracer=self.tracer).onto(r) + Pin.get_from(r)._clone(tracer=self.tracer).onto(r) r.get("key") spans = self.get_spans() diff --git a/tests/contrib/rq/test_rq.py b/tests/contrib/rq/test_rq.py index d72871823da..65908ac9717 100644 --- a/tests/contrib/rq/test_rq.py +++ b/tests/contrib/rq/test_rq.py @@ -117,7 +117,7 @@ def test_sync_worker_config_service(queue): @snapshot(ignores=snapshot_ignores) def test_queue_pin_service(queue): - Pin.override(queue, service="my-pin-svc") + 
Pin._override(queue, service="my-pin-svc") job = queue.enqueue(job_add1, 10) worker = rq.SimpleWorker([queue], connection=queue.connection) worker.work(burst=True) @@ -128,7 +128,7 @@ def test_queue_pin_service(queue): def test_sync_worker_pin_service(queue): job = queue.enqueue(job_add1, 10) worker = rq.SimpleWorker([queue], connection=queue.connection) - Pin.override(worker, service="my-pin-svc") + Pin._override(worker, service="my-pin-svc") worker.work(burst=True) assert job.result == 11 diff --git a/tests/contrib/shared_tests.py b/tests/contrib/shared_tests.py index cf647a15628..dfc3b5e6a0a 100644 --- a/tests/contrib/shared_tests.py +++ b/tests/contrib/shared_tests.py @@ -72,8 +72,8 @@ def _test_dbm_propagation_comment_pin_service_name_override( """tests if dbm comment is set in mysql""" db_name = config["db"] - Pin.override(conn, service="pin-service-name-override", tracer=tracer) - Pin.override(cursor, service="pin-service-name-override", tracer=tracer) + Pin._override(conn, service="pin-service-name-override", tracer=tracer) + Pin._override(cursor, service="pin-service-name-override", tracer=tracer) dbm_comment = ( f"/*dddb='{db_name}',dddbs='pin-service-name-override',dde='staging',ddh='127.0.0.1',ddps='orders-app'," diff --git a/tests/contrib/shared_tests_async.py b/tests/contrib/shared_tests_async.py index 0d49f09d608..f06b6d278f5 100644 --- a/tests/contrib/shared_tests_async.py +++ b/tests/contrib/shared_tests_async.py @@ -72,8 +72,8 @@ async def _test_dbm_propagation_comment_pin_service_name_override( """tests if dbm comment is set in mysql""" db_name = config["db"] - Pin.override(conn, service="pin-service-name-override", tracer=tracer) - Pin.override(cursor, service="pin-service-name-override", tracer=tracer) + Pin._override(conn, service="pin-service-name-override", tracer=tracer) + Pin._override(cursor, service="pin-service-name-override", tracer=tracer) dbm_comment = ( f"/*dddb='{db_name}',dddbs='pin-service-name-override',dde='staging',ddh='127.0.0.1',ddps='orders-app'," diff --git a/tests/contrib/sqlalchemy/test_patch.py b/tests/contrib/sqlalchemy/test_patch.py index a6f08bb5f46..35245d5a906 100644 --- a/tests/contrib/sqlalchemy/test_patch.py +++ b/tests/contrib/sqlalchemy/test_patch.py @@ -25,7 +25,7 @@ def setUp(self): patch() dsn = "postgresql://%(user)s:%(password)s@%(host)s:%(port)s/%(dbname)s" % POSTGRES_CONFIG self.engine = sqlalchemy.create_engine(dsn) - Pin.override(self.engine, tracer=self.tracer) + Pin._override(self.engine, tracer=self.tracer) # prepare a connection self.conn = self.engine.connect() @@ -57,7 +57,7 @@ def test_engine_traced(self): def test_engine_pin_service(self): # ensures that the engine service is updated with the PIN object - Pin.override(self.engine, service="replica-db") + Pin._override(self.engine, service="replica-db") rows = self.conn.execute(text("SELECT 1")).fetchall() assert len(rows) == 1 diff --git a/tests/contrib/sqlite3/test_sqlite3.py b/tests/contrib/sqlite3/test_sqlite3.py index 6101dcfa081..df1bbdbabc5 100644 --- a/tests/contrib/sqlite3/test_sqlite3.py +++ b/tests/contrib/sqlite3/test_sqlite3.py @@ -64,7 +64,7 @@ def test_sqlite(self): db = sqlite3.connect(":memory:") pin = Pin.get_from(db) assert pin - pin.clone(service=service, tracer=self.tracer).onto(db) + pin._clone(service=service, tracer=self.tracer).onto(db) # Ensure we can run a query and it's correctly traced q = "select * from sqlite_master" @@ -216,7 +216,7 @@ def test_sqlite_ot(self): db = sqlite3.connect(":memory:") pin = Pin.get_from(db) assert pin - 
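The two shared DBM tests earlier in this hunk assert that a Pin-level service override lands in the dddbs field of the SQL comment, while ddps keeps the application's own service. A pure-string sketch of the prefix they build (the assertion string in the diff is truncated at a trailing comma, so real comments carry additional fields; the db name here is hypothetical):

db_name = "orders_db"  # hypothetical database name
service = "pin-service-name-override"
dbm_comment = (
    f"/*dddb='{db_name}',dddbs='{service}',dde='staging',"
    f"ddh='127.0.0.1',ddps='orders-app',"
)
# the override must land in dddbs; ddps stays the parent app's service
assert f"dddbs='{service}'" in dbm_comment
assert "ddps='orders-app'" in dbm_comment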
pin.clone(tracer=self.tracer).onto(db) + pin._clone(tracer=self.tracer).onto(db) cursor = db.execute(q) rows = cursor.fetchall() assert not rows @@ -233,7 +233,7 @@ def test_sqlite_ot(self): db = sqlite3.connect(":memory:") pin = Pin.get_from(db) assert pin - pin.clone(tracer=self.tracer).onto(db) + pin._clone(tracer=self.tracer).onto(db) cursor = db.execute(q) rows = cursor.fetchall() assert not rows @@ -270,7 +270,7 @@ def test_patch_unpatch(self): db = sqlite3.connect(":memory:") pin = Pin.get_from(db) assert pin - pin.clone(tracer=self.tracer).onto(db) + pin._clone(tracer=self.tracer).onto(db) db.cursor().execute("select 'blah'").fetchall() self.assert_structure( @@ -292,7 +292,7 @@ def test_patch_unpatch(self): db = sqlite3.connect(":memory:") pin = Pin.get_from(db) assert pin - pin.clone(tracer=self.tracer).onto(db) + pin._clone(tracer=self.tracer).onto(db) db.cursor().execute("select 'blah'").fetchall() self.assert_structure( @@ -301,7 +301,7 @@ def test_patch_unpatch(self): def _given_a_traced_connection(self, tracer): db = sqlite3.connect(":memory:") - Pin.get_from(db).clone(tracer=tracer).onto(db) + Pin.get_from(db)._clone(tracer=tracer).onto(db) return db @TracerTestCase.run_in_subprocess(env_overrides=dict(DD_SERVICE="mysvc", DD_TRACE_SPAN_ATTRIBUTE_SCHEMA="v0")) diff --git a/tests/contrib/starlette/test_starlette.py b/tests/contrib/starlette/test_starlette.py index f290ade8ea7..78b7ac135fe 100644 --- a/tests/contrib/starlette/test_starlette.py +++ b/tests/contrib/starlette/test_starlette.py @@ -38,7 +38,7 @@ def engine(): def tracer(engine): original_tracer = ddtrace.tracer tracer = DummyTracer() - Pin.override(engine, tracer=tracer) + Pin._override(engine, tracer=tracer) ddtrace.tracer = tracer starlette_patch() yield tracer diff --git a/tests/contrib/subprocess/test_subprocess.py b/tests/contrib/subprocess/test_subprocess.py index 40e7ab67431..ff6b30bb0dd 100644 --- a/tests/contrib/subprocess/test_subprocess.py +++ b/tests/contrib/subprocess/test_subprocess.py @@ -194,7 +194,7 @@ def test_truncation(cmdline_obj, expected_str, expected_list, truncated): def test_ossystem(tracer): with override_global_config(dict(_asm_enabled=True)): patch() - Pin.get_from(os).clone(tracer=tracer).onto(os) + Pin.get_from(os)._clone(tracer=tracer).onto(os) with tracer.trace("ossystem_test"): ret = os.system("dir -l /") assert ret == 0 @@ -215,7 +215,7 @@ def test_ossystem(tracer): def test_fork(tracer): with override_global_config(dict(_asm_enabled=True)): patch() - Pin.get_from(os).clone(tracer=tracer).onto(os) + Pin.get_from(os)._clone(tracer=tracer).onto(os) with tracer.trace("ossystem_test"): pid = os.fork() if pid == 0: @@ -241,7 +241,7 @@ def test_fork(tracer): def test_unpatch(tracer): with override_global_config(dict(_asm_enabled=True)): patch() - Pin.get_from(os).clone(tracer=tracer).onto(os) + Pin.get_from(os)._clone(tracer=tracer).onto(os) with tracer.trace("os.system"): ret = os.system("dir -l /") assert ret == 0 @@ -253,8 +253,8 @@ def test_unpatch(tracer): assert span.get_tag(COMMANDS.SHELL) == "dir -l /" unpatch() - with override_global_config(dict(_asm_enabled=True)): - Pin.get_from(os).clone(tracer=tracer).onto(os) + with override_global_config(dict(_ep_enabled=False)): + Pin.get_from(os)._clone(tracer=tracer).onto(os) with tracer.trace("os.system_unpatch"): ret = os.system("dir -l /") assert ret == 0 @@ -273,7 +273,7 @@ def test_unpatch(tracer): def test_ossystem_noappsec(tracer): - with override_global_config(dict(_asm_enabled=False)): + with 
override_global_config(dict(_ep_enabled=False)): patch() assert not hasattr(os.system, "__wrapped__") assert not hasattr(os._spawnvef, "__wrapped__") @@ -283,7 +283,7 @@ def test_ossystem_noappsec(tracer): def test_ospopen(tracer): with override_global_config(dict(_asm_enabled=True)): patch() - Pin.get_from(subprocess).clone(tracer=tracer).onto(subprocess) + Pin.get_from(subprocess)._clone(tracer=tracer).onto(subprocess) with tracer.trace("os.popen"): pipe = os.popen("dir -li /") content = pipe.read() @@ -330,7 +330,7 @@ def test_ospopen(tracer): def test_osspawn_variants(tracer, function, mode, arguments): with override_global_config(dict(_asm_enabled=True)): patch() - Pin.get_from(os).clone(tracer=tracer).onto(os) + Pin.get_from(os)._clone(tracer=tracer).onto(os) if "_" in function.__name__: # wrapt changes function names when debugging @@ -369,7 +369,7 @@ def test_osspawn_variants(tracer, function, mode, arguments): def test_subprocess_init_shell_true(tracer): with override_global_config(dict(_asm_enabled=True)): patch() - Pin.get_from(subprocess).clone(tracer=tracer).onto(subprocess) + Pin.get_from(subprocess)._clone(tracer=tracer).onto(subprocess) with tracer.trace("subprocess.Popen.init", span_type=SpanTypes.SYSTEM): subp = subprocess.Popen(["dir", "-li", "/"], shell=True) subp.wait() @@ -389,7 +389,7 @@ def test_subprocess_init_shell_true(tracer): def test_subprocess_init_shell_false(tracer): with override_global_config(dict(_asm_enabled=True)): patch() - Pin.get_from(subprocess).clone(tracer=tracer).onto(subprocess) + Pin.get_from(subprocess)._clone(tracer=tracer).onto(subprocess) with tracer.trace("subprocess.Popen.init", span_type=SpanTypes.SYSTEM): subp = subprocess.Popen(["dir", "-li", "/"], shell=False) subp.wait() @@ -406,7 +406,7 @@ def test_subprocess_wait_shell_false(tracer): args = ["dir", "-li", "/"] with override_global_config(dict(_asm_enabled=True)): patch() - Pin.get_from(subprocess).clone(tracer=tracer).onto(subprocess) + Pin.get_from(subprocess)._clone(tracer=tracer).onto(subprocess) with tracer.trace("subprocess.Popen.init", span_type=SpanTypes.SYSTEM): subp = subprocess.Popen(args=args, shell=False) subp.wait() @@ -419,7 +419,7 @@ def test_subprocess_wait_shell_false(tracer): def test_subprocess_wait_shell_true(tracer): with override_global_config(dict(_asm_enabled=True)): patch() - Pin.get_from(subprocess).clone(tracer=tracer).onto(subprocess) + Pin.get_from(subprocess)._clone(tracer=tracer).onto(subprocess) with tracer.trace("subprocess.Popen.init", span_type=SpanTypes.SYSTEM): subp = subprocess.Popen(args=["dir", "-li", "/"], shell=True) subp.wait() @@ -430,7 +430,7 @@ def test_subprocess_wait_shell_true(tracer): def test_subprocess_run(tracer): with override_global_config(dict(_asm_enabled=True)): patch() - Pin.get_from(subprocess).clone(tracer=tracer).onto(subprocess) + Pin.get_from(subprocess)._clone(tracer=tracer).onto(subprocess) with tracer.trace("subprocess.Popen.wait"): result = subprocess.run(["dir", "-l", "/"], shell=True) assert result.returncode == 0 @@ -451,7 +451,7 @@ def test_subprocess_run(tracer): def test_subprocess_communicate(tracer): with override_global_config(dict(_asm_enabled=True)): patch() - Pin.get_from(subprocess).clone(tracer=tracer).onto(subprocess) + Pin.get_from(subprocess)._clone(tracer=tracer).onto(subprocess) with tracer.trace("subprocess.Popen.wait"): subp = subprocess.Popen(args=["dir", "-li", "/"], shell=True) subp.communicate() diff --git a/tests/contrib/subprocess/test_subprocess_patch.py 
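These subprocess tests all follow one gating pattern: instrumentation is only active while the relevant config flag is enabled, and test_unpatch / test_ossystem_noappsec now key off _ep_enabled rather than _asm_enabled. A sketch of the pattern, assuming the import paths used elsewhere in this diff and that patching honors the flag at patch time:

import os

from ddtrace.contrib.internal.subprocess.patch import patch, unpatch  # assumed path
from tests.utils import override_global_config

with override_global_config(dict(_asm_enabled=True)):
    patch()                                   # wraps os.system, os._spawnvef, subprocess.Popen
    assert hasattr(os.system, "__wrapped__")  # wrapt proxy is in place
unpatch()
assert not hasattr(os.system, "__wrapped__")  # stdlib original restored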
b/tests/contrib/subprocess/test_subprocess_patch.py index 57778f798c1..471f096fbae 100644 --- a/tests/contrib/subprocess/test_subprocess_patch.py +++ b/tests/contrib/subprocess/test_subprocess_patch.py @@ -19,6 +19,8 @@ class TestSubprocessPatch(PatchTestCase.Base): def __init__(self, *args, **kwargs): asm_config._asm_enabled = True + asm_config._ep_enabled = True + asm_config._load_modules = True super(TestSubprocessPatch, self).__init__(*args, **kwargs) def assert_module_patched(self, subprocess): diff --git a/tests/contrib/suitespec.yml b/tests/contrib/suitespec.yml index 366e28aaaf9..2f857e300fd 100644 --- a/tests/contrib/suitespec.yml +++ b/tests/contrib/suitespec.yml @@ -202,8 +202,6 @@ components: - ddtrace/contrib/internal/redis/* - ddtrace/contrib/_aredis.py - ddtrace/contrib/internal/aredis/* - - ddtrace/contrib/_yaaredis.py - - ddtrace/contrib/internal/yaaredis/* - ddtrace/_trace/utils_redis.py - ddtrace/contrib/internal/redis_utils.py - ddtrace/ext/redis.py @@ -239,6 +237,11 @@ components: urllib3: - ddtrace/contrib/_urllib3.py - ddtrace/contrib/internal/urllib3/* + valkey: + - ddtrace/contrib/internal/valkey/* + - ddtrace/contrib/internal/valkey_utils.py + - ddtrace/_trace/utils_valkey.py + - ddtrace/ext/valkey.py vertica: - ddtrace/contrib/_vertica.py - ddtrace/contrib/internal/vertica/* @@ -1162,17 +1165,19 @@ suites: - tests/snapshots/tests.contrib.wsgi.* runner: riot snapshot: true - yaaredis: + valkey: + parallelism: 5 paths: - - '@core' - '@bootstrap' + - '@core' - '@contrib' - '@tracing' - - '@redis' - - tests/contrib/yaaredis/* - - tests/snapshots/tests.contrib.yaaredis.* - pattern: yaaredis$ + - '@valkey' + - tests/contrib/valkey/* + - tests/snapshots/tests.contrib.valkey.* + pattern: ^valkey* runner: riot services: - - redis + - valkeycluster + - valkey snapshot: true diff --git a/tests/contrib/tornado/test_config.py b/tests/contrib/tornado/test_config.py index aaa87fcb2ec..0130db034eb 100644 --- a/tests/contrib/tornado/test_config.py +++ b/tests/contrib/tornado/test_config.py @@ -1,6 +1,5 @@ from ddtrace.trace import TraceFilter -from ddtrace.trace import Tracer -from tests.utils import DummyWriter +from tests.utils import DummyTracer from .utils import TornadoTestCase @@ -19,8 +18,7 @@ class TestTornadoSettings(TornadoTestCase): """ def get_app(self): - # Override with a real tracer - self.tracer = Tracer() + self.tracer = DummyTracer() super(TestTornadoSettings, self).get_app() def get_settings(self): @@ -40,25 +38,6 @@ def get_settings(self): }, } - def test_tracer_is_properly_configured(self): - # the tracer must be properly configured - assert self.tracer._tags.get("env") == "production" - assert self.tracer._tags.get("debug") == "false" - assert self.tracer.enabled is False - assert self.tracer.agent_trace_url == "http://dd-agent.service.consul:8126" - - writer = DummyWriter() - self.tracer._configure(enabled=True, writer=writer) - with self.tracer.trace("keep"): - pass - spans = writer.pop() - assert len(spans) == 1 - - with self.tracer.trace("drop"): - pass - spans = writer.pop() - assert len(spans) == 0 - class TestTornadoSettingsEnabled(TornadoTestCase): def get_settings(self): diff --git a/tests/contrib/urllib3/test_urllib3.py b/tests/contrib/urllib3/test_urllib3.py index 24ba7815e56..370c08c7904 100644 --- a/tests/contrib/urllib3/test_urllib3.py +++ b/tests/contrib/urllib3/test_urllib3.py @@ -35,7 +35,7 @@ def setUp(self): patch() self.http = urllib3.PoolManager() - Pin.override(urllib3.connectionpool.HTTPConnectionPool, tracer=self.tracer) + 
Pin._override(urllib3.connectionpool.HTTPConnectionPool, tracer=self.tracer) def tearDown(self): super(BaseUrllib3TestCase, self).tearDown() diff --git a/tests/contrib/valkey/__init__.py b/tests/contrib/valkey/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/contrib/valkey/test_valkey.py b/tests/contrib/valkey/test_valkey.py new file mode 100644 index 00000000000..772418dcc16 --- /dev/null +++ b/tests/contrib/valkey/test_valkey.py @@ -0,0 +1,615 @@ +# -*- coding: utf-8 -*- +from unittest import mock + +import pytest +import valkey + +import ddtrace +from ddtrace.contrib.internal.valkey.patch import patch +from ddtrace.contrib.internal.valkey.patch import unpatch +from ddtrace.internal.schema import DEFAULT_SPAN_SERVICE_NAME +from ddtrace.trace import Pin +from tests.opentracer.utils import init_tracer +from tests.utils import DummyTracer +from tests.utils import TracerTestCase +from tests.utils import snapshot + +from ..config import VALKEY_CONFIG + + +class TestValkeyPatch(TracerTestCase): + TEST_PORT = VALKEY_CONFIG["port"] + + def setUp(self): + super(TestValkeyPatch, self).setUp() + patch() + r = valkey.Valkey(port=self.TEST_PORT) + r.flushall() + Pin._override(r, tracer=self.tracer) + self.r = r + + def tearDown(self): + unpatch() + super(TestValkeyPatch, self).tearDown() + + def command_test_rowcount(self, raw_command, row_count, expect_result=True, **kwargs): + command_args_as_list = raw_command.split(" ") + + command_name = command_args_as_list[0].lower() + + if hasattr(self.r, command_name): + func = getattr(self.r, command_name) + + try: + # try to run function with kwargs, may fail due to valkey version + result = func(*command_args_as_list[1:], **kwargs) + for k in kwargs.keys(): + raw_command += " " + str(kwargs[k]) + except Exception: + # try without keyword arguments + result = func(*command_args_as_list[1:]) + + if expect_result: + assert result is not None + else: + empty_result = [None, [], {}, b""] + if isinstance(result, list): + result = [x for x in result if x] + assert result in empty_result + + command_span = self.get_spans()[-1] + + assert command_span.name == "valkey.command" + assert command_span.get_tag("valkey.raw_command") == raw_command + assert command_span.get_metric("db.row_count") == row_count + + def test_long_command(self): + self.r.mget(*range(1000)) + + spans = self.get_spans() + assert len(spans) == 1 + span = spans[0] + + self.assert_is_measured(span) + assert span.service == "valkey" + assert span.name == "valkey.command" + assert span.span_type == "valkey" + assert span.error == 0 + meta = { + "out.host": "localhost", + } + metrics = { + "network.destination.port": self.TEST_PORT, + "out.valkey_db": 0, + } + for k, v in meta.items(): + assert span.get_tag(k) == v + for k, v in metrics.items(): + assert span.get_metric(k) == v + + assert span.get_tag("valkey.raw_command").startswith("MGET 0 1 2 3") + assert span.get_tag("valkey.raw_command").endswith("...") + assert span.get_tag("component") == "valkey" + assert span.get_tag("span.kind") == "client" + assert span.get_tag("db.system") == "valkey" + + @TracerTestCase.run_in_subprocess(env_overrides=dict(DD_TRACE_SPAN_ATTRIBUTE_SCHEMA="v1")) + def test_service_name_v1(self): + us = self.r.get("cheese") + assert us is None + spans = self.get_spans() + span = spans[0] + assert span.service == DEFAULT_SPAN_SERVICE_NAME + + @TracerTestCase.run_in_subprocess(env_overrides=dict(DD_TRACE_SPAN_ATTRIBUTE_SCHEMA="v0")) + def test_operation_name_v0_schema(self): + us =
self.r.get("cheese") + assert us is None + spans = self.get_spans() + span = spans[0] + assert span.name == "valkey.command" + + @TracerTestCase.run_in_subprocess(env_overrides=dict(DD_TRACE_SPAN_ATTRIBUTE_SCHEMA="v1")) + def test_operation_name_v1_schema(self): + us = self.r.get("cheese") + assert us is None + spans = self.get_spans() + span = spans[0] + assert span.name == "valkey.command" + + def test_basics(self): + us = self.r.get("cheese") + assert us is None + spans = self.get_spans() + assert len(spans) == 1 + span = spans[0] + self.assert_is_measured(span) + assert span.service == "valkey" + assert span.name == "valkey.command" + assert span.span_type == "valkey" + assert span.error == 0 + assert span.get_metric("out.valkey_db") == 0 + assert span.get_tag("out.host") == "localhost" + assert span.get_tag("valkey.raw_command") == "GET cheese" + assert span.get_tag("component") == "valkey" + assert span.get_tag("span.kind") == "client" + assert span.get_tag("db.system") == "valkey" + assert span.get_metric("valkey.args_length") == 2 + assert span.resource == "GET" + + def test_connection_error(self): + with mock.patch.object( + valkey.connection.ConnectionPool, + "get_connection", + side_effect=valkey.exceptions.ConnectionError("whatever"), + ): + with pytest.raises(valkey.exceptions.ConnectionError): + self.r.get("foo") + + def test_pipeline_traced(self): + with self.r.pipeline(transaction=False) as p: + p.set("blah", 32) + p.rpush("foo", "éé") + p.hgetall("xxx") + p.execute() + + spans = self.get_spans() + assert len(spans) == 1 + span = spans[0] + self.assert_is_measured(span) + assert span.service == "valkey" + assert span.name == "valkey.command" + assert span.resource == "SET\nRPUSH\nHGETALL" + assert span.span_type == "valkey" + assert span.error == 0 + assert span.get_metric("out.valkey_db") == 0 + assert span.get_tag("out.host") == "localhost" + assert span.get_tag("valkey.raw_command") == "SET blah 32\nRPUSH foo éé\nHGETALL xxx" + assert span.get_tag("component") == "valkey" + assert span.get_tag("span.kind") == "client" + assert span.get_metric("valkey.pipeline_length") == 3 + assert span.get_metric("valkey.pipeline_length") == 3 + + def test_pipeline_immediate(self): + with self.r.pipeline() as p: + p.set("a", 1) + p.immediate_execute_command("SET", "a", 1) + p.execute() + + spans = self.get_spans() + assert len(spans) == 2 + span = spans[0] + self.assert_is_measured(span) + assert span.service == "valkey" + assert span.name == "valkey.command" + assert span.resource == "SET" + assert span.span_type == "valkey" + assert span.error == 0 + assert span.get_metric("out.valkey_db") == 0 + assert span.get_tag("out.host") == "localhost" + assert span.get_tag("component") == "valkey" + assert span.get_tag("span.kind") == "client" + + def test_meta_override(self): + r = self.r + pin = Pin.get_from(r) + if pin: + pin._clone(tags={"cheese": "camembert"}).onto(r) + + r.get("cheese") + spans = self.get_spans() + assert len(spans) == 1 + span = spans[0] + assert span.service == "valkey" + assert "cheese" in span.get_tags() and span.get_tag("cheese") == "camembert" + + def test_patch_unpatch(self): + tracer = DummyTracer() + + # Test patch idempotence + patch() + patch() + + r = valkey.Valkey(port=VALKEY_CONFIG["port"]) + Pin.get_from(r)._clone(tracer=tracer).onto(r) + r.get("key") + + spans = tracer.pop() + assert spans, spans + assert len(spans) == 1 + + # Test unpatch + unpatch() + + r = valkey.Valkey(port=VALKEY_CONFIG["port"]) + r.get("key") + + spans = tracer.pop() + assert not 
spans, spans + + # Test patch again + patch() + + r = valkey.Valkey(port=VALKEY_CONFIG["port"]) + Pin.get_from(r)._clone(tracer=tracer).onto(r) + r.get("key") + + spans = tracer.pop() + assert spans, spans + assert len(spans) == 1 + + def test_opentracing(self): + """Ensure OpenTracing works with valkey.""" + ot_tracer = init_tracer("valkey_svc", self.tracer) + + with ot_tracer.start_active_span("valkey_get"): + us = self.r.get("cheese") + assert us is None + + spans = self.get_spans() + assert len(spans) == 2 + ot_span, dd_span = spans + + # confirm the parenting + assert ot_span.parent_id is None + assert dd_span.parent_id == ot_span.span_id + + assert ot_span.name == "valkey_get" + assert ot_span.service == "valkey_svc" + + self.assert_is_measured(dd_span) + assert dd_span.service == "valkey" + assert dd_span.name == "valkey.command" + assert dd_span.span_type == "valkey" + assert dd_span.error == 0 + assert dd_span.get_metric("out.valkey_db") == 0 + assert dd_span.get_tag("out.host") == "localhost" + assert dd_span.get_tag("valkey.raw_command") == "GET cheese" + assert dd_span.get_tag("component") == "valkey" + assert dd_span.get_tag("span.kind") == "client" + assert dd_span.get_tag("db.system") == "valkey" + assert dd_span.get_metric("valkey.args_length") == 2 + assert dd_span.resource == "GET" + + def test_valkey_rowcount_all_keys_valid(self): + self.r.set("key1", "value1") + + get1 = self.r.get("key1") + + assert get1 == b"value1" + + spans = self.get_spans() + get_valid_key_span = spans[1] + + assert get_valid_key_span.name == "valkey.command" + assert get_valid_key_span.get_tag("valkey.raw_command") == "GET key1" + assert get_valid_key_span.get_metric("db.row_count") == 1 + + get_commands = ["GET key", "GETEX key", "GETRANGE key 0 2"] + list_get_commands = ["LINDEX lkey 0", "LRANGE lkey 0 3", "RPOP lkey", "LPOP lkey"] + hashing_get_commands = [ + "HGET hkey field1", + "HGETALL hkey", + "HKEYS hkey", + "HMGET hkey field1 field2", + "HRANDFIELD hkey", + "HVALS hkey", + ] + multi_key_get_commands = ["MGET key key2", "MGET key key2 key3", "MGET key key2 key3 key4"] + + for command in get_commands: + self.r.set("key", "value") + self.command_test_rowcount(command, 1) + for command in list_get_commands: + self.r.lpush("lkey", "1", "2", "3", "4", "5") + self.command_test_rowcount(command, 1) + if command == "RPOP lkey": # lets get multiple values from the set and ensure rowcount is still 1 + self.command_test_rowcount(command, 1, count=2) + for command in hashing_get_commands: + self.r.hset("hkey", "field1", "value1") + self.r.hset("hkey", "field2", "value2") + self.command_test_rowcount(command, 1) + for command in multi_key_get_commands: + self.r.mset({"key": "value", "key2": "value2", "key3": "value3", "key4": "value4"}) + self.command_test_rowcount(command, len(command.split(" ")) - 1) + + def test_valkey_rowcount_some_keys_valid(self): + self.r.mset({"key": "value", "key2": "value2"}) + + get_both_valid = self.r.mget("key", "key2") + get_one_missing = self.r.mget("key", "missing_key") + + assert get_both_valid == [b"value", b"value2"] + assert get_one_missing == [b"value", None] + + spans = self.get_spans() + get_both_valid_span = spans[1] + get_one_missing_span = spans[2] + + assert get_both_valid_span.name == "valkey.command" + assert get_both_valid_span.get_tag("valkey.raw_command") == "MGET key key2" + assert get_both_valid_span.get_metric("db.row_count") == 2 + + assert get_one_missing_span.name == "valkey.command" + assert get_one_missing_span.get_tag("valkey.raw_command") == 
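The rowcount tests in this file reduce to one rule: db.row_count is the number of non-empty values the read command returned. A pure-logic sketch of that rule (the helper name is illustrative, not the integration's internal API):

def expected_row_count(result):
    # None and empty containers count as zero rows; lists count non-None entries
    if result is None or result in ([], {}, b""):
        return 0
    if isinstance(result, list):
        return len([v for v in result if v is not None])
    return 1

assert expected_row_count(b"value") == 1        # GET key (hit)
assert expected_row_count(None) == 0            # GET missing_key
assert expected_row_count([b"v", None]) == 1    # MGET key missing_key
assert expected_row_count([b"v1", b"v2"]) == 2  # MGET key key2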
"MGET key missing_key" + assert get_one_missing_span.get_metric("db.row_count") == 1 + + multi_key_get_commands = [ + "MGET key key2", + "MGET key missing_key", + "MGET key key2 missing_key", + "MGET key missing_key missing_key2 key2", + ] + + for command in multi_key_get_commands: + command_keys = command.split(" ")[1:] + self.command_test_rowcount(command, len([key for key in command_keys if "missing_key" not in key])) + + def test_valkey_rowcount_no_keys_valid(self): + get_missing = self.r.get("missing_key") + + assert get_missing is None + + spans = self.get_spans() + get_missing_key_span = spans[0] + + assert get_missing_key_span.name == "valkey.command" + assert get_missing_key_span.get_tag("valkey.raw_command") == "GET missing_key" + assert get_missing_key_span.get_metric("db.row_count") == 0 + + get_commands = ["GET key", "GETDEL key", "GETEX key", "GETRANGE key 0 2"] + list_get_commands = ["LINDEX lkey 0", "LRANGE lkey 0 3", "RPOP lkey", "LPOP lkey"] + hashing_get_commands = [ + "HGET hkey field1", + "HGETALL hkey", + "HKEYS hkey", + "HMGET hkey field1 field2", + "HRANDFIELD hkey", + "HVALS hkey", + ] + multi_key_get_commands = ["MGET key key2", "MGET key key2 key3", "MGET key key2 key3 key4"] + + for command in get_commands: + self.command_test_rowcount(command, 0, expect_result=False) + for command in list_get_commands: + self.command_test_rowcount(command, 0, expect_result=False) + if command == "RPOP lkey": # lets get multiple values from the set and ensure rowcount is still 1 + self.command_test_rowcount(command, 0, expect_result=False, count=2) + for command in hashing_get_commands: + self.command_test_rowcount(command, 0, expect_result=False) + for command in multi_key_get_commands: + self.command_test_rowcount(command, 0, expect_result=False) + + @TracerTestCase.run_in_subprocess(env_overrides=dict(DD_SERVICE="mysvc")) + def test_user_specified_service_default(self): + from ddtrace import config + + assert config.service == "mysvc" + + self.r.get("cheese") + span = self.get_spans()[0] + assert span.service == "valkey" + + @TracerTestCase.run_in_subprocess(env_overrides=dict(DD_SERVICE="mysvc", DD_TRACE_SPAN_ATTRIBUTE_SCHEMA="v0")) + def test_user_specified_service_v0(self): + from ddtrace import config + + assert config.service == "mysvc" + + self.r.get("cheese") + span = self.get_spans()[0] + assert span.service == "valkey" + + @TracerTestCase.run_in_subprocess(env_overrides=dict(DD_SERVICE="mysvc", DD_TRACE_SPAN_ATTRIBUTE_SCHEMA="v1")) + def test_user_specified_service_v1(self): + from ddtrace import config + + assert config.service == "mysvc" + + self.r.get("cheese") + span = self.get_spans()[0] + assert span.service == "mysvc" + + @TracerTestCase.run_in_subprocess( + env_overrides=dict(DD_VALKEY_SERVICE="myvalkey", DD_TRACE_SPAN_ATTRIBUTE_SCHEMA="v0") + ) + def test_env_user_specified_valkey_service_v0(self): + self.r.get("cheese") + span = self.get_spans()[0] + assert span.service == "myvalkey", span.service + + self.reset() + + # Global config + with self.override_config("valkey", dict(service="cfg-valkey")): + self.r.get("cheese") + span = self.get_spans()[0] + assert span.service == "cfg-valkey", span.service + + self.reset() + + # Manual override + Pin._override(self.r, service="mysvc", tracer=self.tracer) + self.r.get("cheese") + span = self.get_spans()[0] + assert span.service == "mysvc", span.service + + @TracerTestCase.run_in_subprocess( + env_overrides=dict( + DD_SERVICE="app-svc", DD_VALKEY_SERVICE="env-specified-valkey-svc", 
DD_TRACE_SPAN_ATTRIBUTE_SCHEMA="v0" + ) + ) + def test_service_precedence_v0(self): + self.r.get("cheese") + span = self.get_spans()[0] + assert span.service == "env-specified-valkey-svc", span.service + + self.reset() + + # Do a manual override + Pin._override(self.r, service="override-valkey", tracer=self.tracer) + self.r.get("cheese") + span = self.get_spans()[0] + assert span.service == "override-valkey", span.service + + +class TestValkeyPatchSnapshot(TracerTestCase): + TEST_PORT = VALKEY_CONFIG["port"] + + def setUp(self): + super(TestValkeyPatchSnapshot, self).setUp() + patch() + r = valkey.Valkey(port=self.TEST_PORT) + self.r = r + + def tearDown(self): + unpatch() + super(TestValkeyPatchSnapshot, self).tearDown() + self.r.flushall() + + @snapshot() + def test_long_command(self): + self.r.mget(*range(1000)) + + @snapshot() + def test_basics(self): + us = self.r.get("cheese") + assert us is None + + @snapshot() + def test_unicode(self): + us = self.r.get("😐") + assert us is None + + @snapshot() + def test_pipeline_traced(self): + with self.r.pipeline(transaction=False) as p: + p.set("blah", 32) + p.rpush("foo", "éé") + p.hgetall("xxx") + p.execute() + + @snapshot() + def test_pipeline_immediate(self): + with self.r.pipeline() as p: + p.set("a", 1) + p.immediate_execute_command("SET", "a", 1) + p.execute() + + @snapshot() + def test_meta_override(self): + r = self.r + pin = Pin.get_from(r) + if pin: + pin._clone(tags={"cheese": "camembert"}).onto(r) + + r.get("cheese") + + def test_patch_unpatch(self): + tracer = DummyTracer() + + # Test patch idempotence + patch() + patch() + + r = valkey.Valkey(port=VALKEY_CONFIG["port"]) + Pin.get_from(r)._clone(tracer=tracer).onto(r) + r.get("key") + + spans = tracer.pop() + assert spans, spans + assert len(spans) == 1 + + # Test unpatch + unpatch() + + r = valkey.Valkey(port=VALKEY_CONFIG["port"]) + r.get("key") + + spans = tracer.pop() + assert not spans, spans + + # Test patch again + patch() + + r = valkey.Valkey(port=VALKEY_CONFIG["port"]) + Pin.get_from(r)._clone(tracer=tracer).onto(r) + r.get("key") + + spans = tracer.pop() + assert spans, spans + assert len(spans) == 1 + + @snapshot() + def test_opentracing(self): + """Ensure OpenTracing works with valkey.""" + ot_tracer = init_tracer("valkey_svc", ddtrace.tracer) + + with ot_tracer.start_active_span("valkey_get"): + us = self.r.get("cheese") + assert us is None + + @TracerTestCase.run_in_subprocess(env_overrides=dict(DD_SERVICE="mysvc")) + @snapshot() + def test_user_specified_service(self): + from ddtrace import config + + assert config.service == "mysvc" + + self.r.get("cheese") + + @TracerTestCase.run_in_subprocess(env_overrides=dict(DD_VALKEY_SERVICE="myvalkey")) + @snapshot() + def test_env_user_specified_valkey_service(self): + self.r.get("cheese") + + self.reset() + + # Global config + with self.override_config("valkey", dict(service="cfg-valkey")): + self.r.get("cheese") + + self.reset() + + # Manual override + Pin._override(self.r, service="mysvc", tracer=self.tracer) + self.r.get("cheese") + + @TracerTestCase.run_in_subprocess(env_overrides=dict(DD_SERVICE="app-svc", DD_VALKEY_SERVICE="env-valkey")) + @snapshot() + def test_service_precedence(self): + self.r.get("cheese") + + self.reset() + + # Do a manual override + Pin._override(self.r, service="override-valkey", tracer=self.tracer) + self.r.get("cheese") + + @TracerTestCase.run_in_subprocess(env_overrides=dict(DD_VALKEY_CMD_MAX_LENGTH="10")) + @snapshot() + def test_custom_cmd_length_env(self): + 
self.r.get("here-is-a-long-key-name") + + @snapshot() + def test_custom_cmd_length(self): + with self.override_config("valkey", dict(cmd_max_length=7)): + self.r.get("here-is-a-long-key-name") + + @TracerTestCase.run_in_subprocess(env_overrides=dict(DD_VALKEY_RESOURCE_ONLY_COMMAND="false")) + @snapshot() + def test_full_command_in_resource_env(self): + self.r.get("put_key_in_resource") + p = self.r.pipeline(transaction=False) + p.set("pipeline-cmd1", 1) + p.set("pipeline-cmd2", 2) + p.execute() + + @snapshot() + def test_full_command_in_resource_config(self): + with self.override_config("valkey", dict(resource_only_command=False)): + self.r.get("put_key_in_resource") + p = self.r.pipeline(transaction=False) + p.set("pipeline-cmd1", 1) + p.set("pipeline-cmd2", 2) + p.execute() diff --git a/tests/contrib/valkey/test_valkey_asyncio.py b/tests/contrib/valkey/test_valkey_asyncio.py new file mode 100644 index 00000000000..cce88912d65 --- /dev/null +++ b/tests/contrib/valkey/test_valkey_asyncio.py @@ -0,0 +1,221 @@ +import asyncio +import typing +from unittest import mock + +import pytest +import valkey +import valkey.asyncio +from wrapt import ObjectProxy + +from ddtrace import tracer +from ddtrace.contrib.internal.valkey.patch import patch +from ddtrace.contrib.internal.valkey.patch import unpatch +from ddtrace.trace import Pin +from tests.utils import override_config + +from ..config import VALKEY_CONFIG + + +def get_valkey_instance(max_connections: int, client_name: typing.Optional[str] = None): + return valkey.asyncio.from_url( + "valkey://127.0.0.1:%s" % VALKEY_CONFIG["port"], max_connections=max_connections, client_name=client_name + ) + + +@pytest.fixture +def valkey_client(): + r = get_valkey_instance(max_connections=10) # default values + yield r + + +@pytest.fixture +def single_pool_valkey_client(): + r = get_valkey_instance(max_connections=1) + yield r + + +@pytest.fixture(autouse=True) +async def traced_valkey(valkey_client): + await valkey_client.flushall() + + patch() + try: + yield + finally: + unpatch() + await valkey_client.flushall() + + +def test_patching(): + """ + When patching valkey library + We wrap the correct methods + When unpatching valkey library + We unwrap the correct methods + """ + assert isinstance(valkey.asyncio.client.Valkey.execute_command, ObjectProxy) + assert isinstance(valkey.asyncio.client.Valkey.pipeline, ObjectProxy) + assert isinstance(valkey.asyncio.client.Pipeline.pipeline, ObjectProxy) + unpatch() + assert not isinstance(valkey.asyncio.client.Valkey.execute_command, ObjectProxy) + assert not isinstance(valkey.asyncio.client.Valkey.pipeline, ObjectProxy) + assert not isinstance(valkey.asyncio.client.Pipeline.pipeline, ObjectProxy) + + +@pytest.mark.snapshot(wait_for_num_traces=1) +async def test_basic_request(valkey_client): + val = await valkey_client.get("cheese") + assert val is None + + +@pytest.mark.snapshot(wait_for_num_traces=1) +async def test_unicode_request(valkey_client): + val = await valkey_client.get("😐") + assert val is None + + +@pytest.mark.snapshot(wait_for_num_traces=1, ignores=["meta.error.stack"]) +async def test_connection_error(valkey_client): + with mock.patch.object( + valkey.asyncio.connection.ConnectionPool, + "get_connection", + side_effect=valkey.exceptions.ConnectionError("whatever"), + ): + with pytest.raises(valkey.exceptions.ConnectionError): + await valkey_client.get("foo") + + +@pytest.mark.snapshot(wait_for_num_traces=2) +async def test_decoding_non_utf8_args(valkey_client): + await valkey_client.set(b"\x80foo", 
b"\x80abc") + val = await valkey_client.get(b"\x80foo") + assert val == b"\x80abc" + + +@pytest.mark.snapshot(wait_for_num_traces=1) +async def test_decoding_non_utf8_pipeline_args(valkey_client): + p = valkey_client.pipeline() + p.set(b"\x80blah", "boo") + p.set("foo", b"\x80abc") + p.get(b"\x80blah") + p.get("foo") + + response_list = await p.execute() + assert response_list[0] is True # response from valkey.set is OK if successfully pushed + assert response_list[1] is True + assert response_list[2].decode() == "boo" + assert response_list[3] == b"\x80abc" + + +@pytest.mark.snapshot(wait_for_num_traces=1) +async def test_long_command(valkey_client): + length = 1000 + val_list = await valkey_client.mget(*range(length)) + assert len(val_list) == length + for val in val_list: + assert val is None + + +@pytest.mark.snapshot(wait_for_num_traces=3) +async def test_override_service_name(valkey_client): + with override_config("valkey", dict(service_name="myvalkey")): + val = await valkey_client.get("cheese") + assert val is None + await valkey_client.set("cheese", "my-cheese") + val = await valkey_client.get("cheese") + if isinstance(val, bytes): + val = val.decode() + assert val == "my-cheese" + + +@pytest.mark.snapshot(wait_for_num_traces=1) +async def test_pin(valkey_client): + Pin._override(valkey_client, service="my-valkey") + val = await valkey_client.get("cheese") + assert val is None + + +@pytest.mark.snapshot(wait_for_num_traces=1) +async def test_pipeline_traced(valkey_client): + p = valkey_client.pipeline(transaction=False) + p.set("blah", "boo") + p.set("foo", "bar") + p.get("blah") + p.get("foo") + + response_list = await p.execute() + assert response_list[0] is True # response from valkey.set is OK if successfully pushed + assert response_list[1] is True + assert ( + response_list[2].decode() == "boo" + ) # response from hset is 'Integer reply: The number of fields that were added.' + assert response_list[3].decode() == "bar" + + +@pytest.mark.snapshot(wait_for_num_traces=1) +async def test_pipeline_traced_context_manager_transaction(valkey_client): + """ + Regression test for: https://github.com/DataDog/dd-trace-py/issues/3106 + + Example:: + + async def main(): + valkey = await valkey.from_url("valkey://localhost") + async with valkey.pipeline(transaction=True) as pipe: + ok1, ok2 = await (pipe.set("key1", "value1").set("key2", "value2").execute()) + assert ok1 + assert ok2 + """ + + async with valkey_client.pipeline(transaction=True) as p: + set_1, set_2, get_1, get_2 = await p.set("blah", "boo").set("foo", "bar").get("blah").get("foo").execute() + + # response from valkey.set is OK if successfully pushed + assert set_1 is True + assert set_2 is True + assert get_1.decode() == "boo" + assert get_2.decode() == "bar" + + +@pytest.mark.snapshot(wait_for_num_traces=1) +async def test_two_traced_pipelines(valkey_client): + with tracer.trace("web-request", service="test"): + p1 = await valkey_client.pipeline(transaction=False) + p2 = await valkey_client.pipeline(transaction=False) + await p1.set("blah", "boo") + await p2.set("foo", "bar") + await p1.get("blah") + await p2.get("foo") + + response_list1 = await p1.execute() + response_list2 = await p2.execute() + + assert response_list1[0] is True # response from valkey.set is OK if successfully pushed + assert response_list2[0] is True + assert ( + response_list1[1].decode() == "boo" + ) # response from hset is 'Integer reply: The number of fields that were added.' 
+ assert response_list2[1].decode() == "bar" + + +async def test_parenting(valkey_client, snapshot_context): + with snapshot_context(wait_for_num_traces=1): + with tracer.trace("web-request", service="test"): + await valkey_client.set("blah", "boo") + await valkey_client.get("blah") + + +async def test_client_name(snapshot_context): + with snapshot_context(wait_for_num_traces=1): + with tracer.trace("web-request", service="test"): + valkey_client = get_valkey_instance(10, client_name="testing-client-name") + await valkey_client.get("blah") + + +@pytest.mark.asyncio +async def test_asyncio_task_cancelled(valkey_client): + with mock.patch.object( + valkey.asyncio.connection.ConnectionPool, "get_connection", side_effect=asyncio.CancelledError + ): + with pytest.raises(asyncio.CancelledError): + await valkey_client.get("foo") diff --git a/tests/contrib/valkey/test_valkey_cluster.py b/tests/contrib/valkey/test_valkey_cluster.py new file mode 100644 index 00000000000..bd765dbff96 --- /dev/null +++ b/tests/contrib/valkey/test_valkey_cluster.py @@ -0,0 +1,209 @@ +# -*- coding: utf-8 -*- +import valkey + +from ddtrace.contrib.internal.valkey.patch import patch +from ddtrace.contrib.internal.valkey.patch import unpatch +from ddtrace.internal.schema import DEFAULT_SPAN_SERVICE_NAME +from ddtrace.trace import Pin +from tests.contrib.config import VALKEY_CLUSTER_CONFIG +from tests.utils import DummyTracer +from tests.utils import TracerTestCase +from tests.utils import assert_is_measured + + +class TestValkeyClusterPatch(TracerTestCase): + TEST_HOST = VALKEY_CLUSTER_CONFIG["host"] + TEST_PORTS = VALKEY_CLUSTER_CONFIG["ports"] + + def _get_test_client(self): + startup_nodes = [valkey.cluster.ClusterNode(self.TEST_HOST, int(port)) for port in self.TEST_PORTS.split(",")] + return valkey.cluster.ValkeyCluster(startup_nodes=startup_nodes) + + def setUp(self): + super(TestValkeyClusterPatch, self).setUp() + patch() + r = self._get_test_client() + r.flushall() + Pin._override(r, tracer=self.tracer) + self.r = r + + def tearDown(self): + unpatch() + super(TestValkeyClusterPatch, self).tearDown() + + @TracerTestCase.run_in_subprocess(env_overrides=dict(DD_TRACE_SPAN_ATTRIBUTE_SCHEMA="v1")) + def test_span_service_name_v1(self): + us = self.r.get("cheese") + assert us is None + spans = self.get_spans() + span = spans[0] + assert span.service == DEFAULT_SPAN_SERVICE_NAME + + def test_basics(self): + us = self.r.get("cheese") + assert us is None + spans = self.get_spans() + assert len(spans) == 1 + span = spans[0] + assert_is_measured(span) + assert span.service == "valkey" + assert span.name == "valkey.command" + assert span.span_type == "valkey" + assert span.error == 0 + assert span.get_tag("valkey.raw_command") == "GET cheese" + assert span.get_tag("component") == "valkey" + assert span.get_tag("db.system") == "valkey" + assert span.get_metric("valkey.args_length") == 2 + assert span.resource == "GET" + + def test_unicode(self): + us = self.r.get("😐") + assert us is None + spans = self.get_spans() + assert len(spans) == 1 + span = spans[0] + assert_is_measured(span) + assert span.service == "valkey" + assert span.name == "valkey.command" + assert span.span_type == "valkey" + assert span.error == 0 + assert span.get_tag("valkey.raw_command") == "GET 😐" + assert span.get_tag("component") == "valkey" + assert span.get_tag("db.system") == "valkey" + assert span.get_metric("valkey.args_length") == 2 + assert span.resource == "GET" + + def test_pipeline(self): + with self.r.pipeline(transaction=False) as p: + 
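The cluster fixtures here and in the asyncio variant below expand VALKEY_CLUSTER_CONFIG into startup nodes the same way. A sketch with illustrative config values:

config = {"host": "127.0.0.1", "ports": "7000,7001,7002"}  # hypothetical values
nodes = [(config["host"], int(port)) for port in config["ports"].split(",")]
assert nodes[0] == ("127.0.0.1", 7000)
# each (host, port) pair becomes a valkey.cluster.ClusterNode(...) startup node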
p.set("blah", 32) + p.rpush("foo", "éé") + p.hgetall("xxx") + p.execute() + + spans = self.get_spans() + assert len(spans) == 1 + span = spans[0] + assert_is_measured(span) + assert span.service == "valkey" + assert span.name == "valkey.command" + assert span.resource == "SET\nRPUSH\nHGETALL" + assert span.span_type == "valkey" + assert span.error == 0 + assert span.get_tag("valkey.raw_command") == "SET blah 32\nRPUSH foo éé\nHGETALL xxx" + assert span.get_tag("component") == "valkey" + assert span.get_metric("valkey.pipeline_length") == 3 + + def test_patch_unpatch(self): + tracer = DummyTracer() + + # Test patch idempotence + patch() + patch() + + r = self._get_test_client() + Pin.get_from(r)._clone(tracer=tracer).onto(r) + r.get("key") + + spans = tracer.pop() + assert spans, spans + assert len(spans) == 1 + + # Test unpatch + unpatch() + + r = self._get_test_client() + r.get("key") + + spans = tracer.pop() + assert not spans, spans + + # Test patch again + patch() + + r = self._get_test_client() + Pin.get_from(r)._clone(tracer=tracer).onto(r) + r.get("key") + + spans = tracer.pop() + assert spans, spans + assert len(spans) == 1 + + @TracerTestCase.run_in_subprocess(env_overrides=dict(DD_SERVICE="mysvc", DD_TRACE_SPAN_ATTRIBUTE_SCHEMA="v0")) + def test_user_specified_service_v0(self): + """ + When a user specifies a service for the app + The valkeycluster integration should not use it. + """ + # Ensure that the service name was configured + from ddtrace import config + + assert config.service == "mysvc" + + r = self._get_test_client() + Pin.get_from(r)._clone(tracer=self.tracer).onto(r) + r.get("key") + + spans = self.get_spans() + assert len(spans) == 1 + span = spans[0] + assert span.service != "mysvc" + + @TracerTestCase.run_in_subprocess(env_overrides=dict(DD_SERVICE="mysvc", DD_TRACE_SPAN_ATTRIBUTE_SCHEMA="v1")) + def test_user_specified_service_v1(self): + """ + When a user specifies a service for the app + The valkeycluster integration should use it. 
+ """ + # Ensure that the service name was configured + from ddtrace import config + + assert config.service == "mysvc" + + r = self._get_test_client() + Pin.get_from(r)._clone(tracer=self.tracer).onto(r) + r.get("key") + + spans = self.get_spans() + assert len(spans) == 1 + span = spans[0] + assert span.service == "mysvc" + + @TracerTestCase.run_in_subprocess( + env_overrides=dict(DD_VALKEY_SERVICE="myvalkeycluster", DD_TRACE_SPAN_ATTRIBUTE_SCHEMA="v0") + ) + def test_env_user_specified_valkeycluster_service_v0(self): + self.r.get("cheese") + span = self.get_spans()[0] + assert span.service == "myvalkeycluster", span.service + + @TracerTestCase.run_in_subprocess( + env_overrides=dict(DD_VALKEY_SERVICE="myvalkeycluster", DD_TRACE_SPAN_ATTRIBUTE_SCHEMA="v1") + ) + def test_env_user_specified_valkeycluster_service_v1(self): + self.r.get("cheese") + span = self.get_spans()[0] + assert span.service == "myvalkeycluster", span.service + + @TracerTestCase.run_in_subprocess( + env_overrides=dict( + DD_SERVICE="app-svc", DD_VALKEY_SERVICE="myvalkeycluster", DD_TRACE_SPAN_ATTRIBUTE_SCHEMA="v0" + ) + ) + def test_service_precedence_v0(self): + self.r.get("cheese") + span = self.get_spans()[0] + assert span.service == "myvalkeycluster" + + self.reset() + + @TracerTestCase.run_in_subprocess( + env_overrides=dict( + DD_SERVICE="app-svc", DD_VALKEY_SERVICE="myvalkeycluster", DD_TRACE_SPAN_ATTRIBUTE_SCHEMA="v1" + ) + ) + def test_service_precedence_v1(self): + self.r.get("cheese") + span = self.get_spans()[0] + assert span.service == "myvalkeycluster" + + self.reset() diff --git a/tests/contrib/valkey/test_valkey_cluster_asyncio.py b/tests/contrib/valkey/test_valkey_cluster_asyncio.py new file mode 100644 index 00000000000..a5e4db415b0 --- /dev/null +++ b/tests/contrib/valkey/test_valkey_cluster_asyncio.py @@ -0,0 +1,457 @@ +# -*- coding: utf-8 -*- +import pytest +import valkey + +from ddtrace.contrib.internal.valkey.patch import patch +from ddtrace.contrib.internal.valkey.patch import unpatch +from ddtrace.trace import Pin +from tests.contrib.config import VALKEY_CLUSTER_CONFIG +from tests.utils import DummyTracer +from tests.utils import assert_is_measured + + +TEST_HOST = VALKEY_CLUSTER_CONFIG["host"] +TEST_PORTS = VALKEY_CLUSTER_CONFIG["ports"] + + +@pytest.mark.asyncio +@pytest.fixture +async def valkey_cluster(): + startup_nodes = [valkey.asyncio.cluster.ClusterNode(TEST_HOST, int(port)) for port in TEST_PORTS.split(",")] + yield valkey.asyncio.cluster.ValkeyCluster(startup_nodes=startup_nodes) + + +@pytest.mark.asyncio +@pytest.fixture +async def traced_valkey_cluster(tracer, test_spans): + patch() + startup_nodes = [valkey.asyncio.cluster.ClusterNode(TEST_HOST, int(port)) for port in TEST_PORTS.split(",")] + valkey_cluster = valkey.asyncio.cluster.ValkeyCluster(startup_nodes=startup_nodes) + await valkey_cluster.flushall() + Pin._override(valkey_cluster, tracer=tracer) + try: + yield valkey_cluster, test_spans + finally: + unpatch() + await valkey_cluster.flushall() + + +@pytest.mark.asyncio +async def test_basics(traced_valkey_cluster): + cluster, test_spans = traced_valkey_cluster + us = await cluster.get("cheese") + assert us is None + + traces = test_spans.pop_traces() + assert len(traces) == 1 + spans = traces[0] + assert len(spans) == 1 + + span = spans[0] + assert_is_measured(span) + assert span.service == "valkey" + assert span.name == "valkey.command" + assert span.span_type == "valkey" + assert span.error == 0 + assert span.get_tag("valkey.raw_command") == "GET cheese" + assert 
span.get_tag("component") == "valkey" + assert span.get_tag("db.system") == "valkey" + assert span.get_metric("valkey.args_length") == 2 + assert span.resource == "GET" + + +@pytest.mark.asyncio +async def test_unicode(traced_valkey_cluster): + cluster, test_spans = traced_valkey_cluster + us = await cluster.get("😐") + assert us is None + + traces = test_spans.pop_traces() + assert len(traces) == 1 + spans = traces[0] + assert len(spans) == 1 + + span = spans[0] + assert_is_measured(span) + assert span.service == "valkey" + assert span.name == "valkey.command" + assert span.span_type == "valkey" + assert span.error == 0 + assert span.get_tag("valkey.raw_command") == "GET 😐" + assert span.get_tag("component") == "valkey" + assert span.get_tag("db.system") == "valkey" + assert span.get_metric("valkey.args_length") == 2 + assert span.resource == "GET" + + +@pytest.mark.asyncio +async def test_pipeline(traced_valkey_cluster): + cluster, test_spans = traced_valkey_cluster + async with cluster.pipeline(transaction=False) as p: + p.set("blah", 32) + p.rpush("foo", "éé") + p.hgetall("xxx") + await p.execute() + + traces = test_spans.pop_traces() + assert len(traces) == 1 + spans = traces[0] + assert len(spans) == 1 + + span = spans[0] + assert_is_measured(span) + assert span.service == "valkey" + assert span.name == "valkey.command" + assert span.resource == "SET\nRPUSH\nHGETALL" + assert span.span_type == "valkey" + assert span.error == 0 + assert span.get_tag("valkey.raw_command") == "SET blah 32\nRPUSH foo éé\nHGETALL xxx" + assert span.get_tag("component") == "valkey" + assert span.get_metric("valkey.pipeline_length") == 3 + + +@pytest.mark.asyncio +async def test_patch_unpatch(valkey_cluster): + tracer = DummyTracer() + + # Test patch idempotence + patch() + patch() + + r = valkey_cluster + Pin._override(r, tracer=tracer) + await r.get("key") + + spans = tracer.pop() + assert spans, spans + assert len(spans) == 1 + + # Test unpatch + unpatch() + + r = valkey_cluster + await r.get("key") + + spans = tracer.pop() + assert not spans, spans + + # Test patch again + patch() + + r = valkey_cluster + Pin._override(r, tracer=tracer) + await r.get("key") + + spans = tracer.pop() + assert spans, spans + assert len(spans) == 1 + unpatch() + + +@pytest.mark.subprocess( + env=dict(DD_TRACE_SPAN_ATTRIBUTE_SCHEMA="v1"), + err=None, # avoid checking stderr because of an expected deprecation warning +) +def test_default_service_name_v1(): + import asyncio + + import valkey + + from ddtrace.contrib.internal.valkey.patch import patch + from ddtrace.internal.schema import DEFAULT_SPAN_SERVICE_NAME + from ddtrace.trace import Pin + from tests.contrib.config import VALKEY_CLUSTER_CONFIG + from tests.utils import DummyTracer + from tests.utils import TracerSpanContainer + + patch() + + async def test(): + startup_nodes = [ + valkey.asyncio.cluster.ClusterNode(VALKEY_CLUSTER_CONFIG["host"], int(port)) + for port in VALKEY_CLUSTER_CONFIG["ports"].split(",") + ] + r = valkey.asyncio.cluster.ValkeyCluster(startup_nodes=startup_nodes) + tracer = DummyTracer() + test_spans = TracerSpanContainer(tracer) + + Pin.get_from(r)._clone(tracer=tracer).onto(r) + await r.get("key") + await r.close() + + traces = test_spans.pop_traces() + assert len(traces) == 1 + spans = traces[0] + assert len(spans) == 1 + span = spans[0] + assert span.service == DEFAULT_SPAN_SERVICE_NAME + + asyncio.run(test()) + + +@pytest.mark.subprocess( + env=dict(DD_SERVICE="mysvc", DD_TRACE_SPAN_ATTRIBUTE_SCHEMA="v0"), + err=None, # avoid checking stderr 
because of an expected deprecation warning +) +def test_user_specified_service_v0(): + """ + When a user specifies a service for the app + The valkeycluster integration should not use it. + """ + import asyncio + + import valkey + + from ddtrace import config + from ddtrace.contrib.internal.valkey.patch import patch + from ddtrace.trace import Pin + from tests.contrib.config import VALKEY_CLUSTER_CONFIG + from tests.utils import DummyTracer + from tests.utils import TracerSpanContainer + + patch() + + async def test(): + # # Ensure that the service name was configured + assert config.service == "mysvc" + + startup_nodes = [ + valkey.asyncio.cluster.ClusterNode(VALKEY_CLUSTER_CONFIG["host"], int(port)) + for port in VALKEY_CLUSTER_CONFIG["ports"].split(",") + ] + r = valkey.asyncio.cluster.ValkeyCluster(startup_nodes=startup_nodes) + tracer = DummyTracer() + test_spans = TracerSpanContainer(tracer) + + Pin.get_from(r)._clone(tracer=tracer).onto(r) + await r.get("key") + await r.close() + + traces = test_spans.pop_traces() + assert len(traces) == 1 + spans = traces[0] + assert len(spans) == 1 + span = spans[0] + assert span.service != "mysvc" + + asyncio.run(test()) + + +@pytest.mark.subprocess( + env=dict(DD_SERVICE="mysvc", DD_TRACE_SPAN_ATTRIBUTE_SCHEMA="v1"), + err=None, # avoid checking stderr because of an expected deprecation warning +) +def test_user_specified_service_v1(): + """ + When a user specifies a service for the app + The valkeycluster integration should use it. + """ + import asyncio + + import valkey + + from ddtrace import config + from ddtrace.contrib.internal.valkey.patch import patch + from ddtrace.trace import Pin + from tests.contrib.config import VALKEY_CLUSTER_CONFIG + from tests.utils import DummyTracer + from tests.utils import TracerSpanContainer + + patch() + + async def test(): + # # Ensure that the service name was configured + assert config.service == "mysvc" + + startup_nodes = [ + valkey.asyncio.cluster.ClusterNode(VALKEY_CLUSTER_CONFIG["host"], int(port)) + for port in VALKEY_CLUSTER_CONFIG["ports"].split(",") + ] + r = valkey.asyncio.cluster.ValkeyCluster(startup_nodes=startup_nodes) + tracer = DummyTracer() + test_spans = TracerSpanContainer(tracer) + + Pin.get_from(r)._clone(tracer=tracer).onto(r) + await r.get("key") + await r.close() + + traces = test_spans.pop_traces() + assert len(traces) == 1 + spans = traces[0] + assert len(spans) == 1 + span = spans[0] + assert span.service == "mysvc" + + asyncio.run(test()) + + +@pytest.mark.subprocess( + env=dict(DD_VALKEY_SERVICE="myvalkeycluster", DD_TRACE_SPAN_ATTRIBUTE_SCHEMA="v0"), + err=None, # avoid checking stderr because of an expected deprecation warning +) +def test_env_user_specified_valkeycluster_service_v0(): + import asyncio + + import valkey + + from ddtrace.contrib.internal.valkey.patch import patch + from ddtrace.trace import Pin + from tests.contrib.config import VALKEY_CLUSTER_CONFIG + from tests.utils import DummyTracer + from tests.utils import TracerSpanContainer + + patch() + + async def test(): + startup_nodes = [ + valkey.asyncio.cluster.ClusterNode(VALKEY_CLUSTER_CONFIG["host"], int(port)) + for port in VALKEY_CLUSTER_CONFIG["ports"].split(",") + ] + r = valkey.asyncio.cluster.ValkeyCluster(startup_nodes=startup_nodes) + tracer = DummyTracer() + test_spans = TracerSpanContainer(tracer) + + Pin.get_from(r)._clone(tracer=tracer).onto(r) + await r.get("key") + await r.close() + + traces = test_spans.pop_traces() + assert len(traces) == 1 + spans = traces[0] + assert len(spans) == 1 + 
span = spans[0] + assert span.service == "myvalkeycluster" + + asyncio.run(test()) + + +@pytest.mark.subprocess( + env=dict(DD_VALKEY_SERVICE="myvalkeycluster", DD_TRACE_SPAN_ATTRIBUTE_SCHEMA="v1"), + err=None, # avoid checking stderr because of an expected deprecation warning +) +def test_env_user_specified_valkeycluster_service_v1(): + import asyncio + + import valkey + + from ddtrace.contrib.internal.valkey.patch import patch + from ddtrace.trace import Pin + from tests.contrib.config import VALKEY_CLUSTER_CONFIG + from tests.utils import DummyTracer + from tests.utils import TracerSpanContainer + + patch() + + async def test(): + startup_nodes = [ + valkey.asyncio.cluster.ClusterNode(VALKEY_CLUSTER_CONFIG["host"], int(port)) + for port in VALKEY_CLUSTER_CONFIG["ports"].split(",") + ] + r = valkey.asyncio.cluster.ValkeyCluster(startup_nodes=startup_nodes) + tracer = DummyTracer() + test_spans = TracerSpanContainer(tracer) + + Pin.get_from(r)._clone(tracer=tracer).onto(r) + await r.get("key") + await r.close() + + traces = test_spans.pop_traces() + assert len(traces) == 1 + spans = traces[0] + assert len(spans) == 1 + span = spans[0] + assert span.service == "myvalkeycluster" + + asyncio.run(test()) + + +@pytest.mark.subprocess( + env=dict( + DD_SERVICE="mysvc", + DD_VALKEY_SERVICE="myvalkeycluster", + DD_TRACE_SPAN_ATTRIBUTE_SCHEMA="v0", + ), + err=None, # avoid checking stderr because of an expected deprecation warning +) +def test_service_precedence_v0(): + import asyncio + + import valkey + + from ddtrace import config + from ddtrace.contrib.internal.valkey.patch import patch + from ddtrace.trace import Pin + from tests.contrib.config import VALKEY_CLUSTER_CONFIG + from tests.utils import DummyTracer + from tests.utils import TracerSpanContainer + + patch() + + async def test(): + # Ensure that the service name was configured + assert config.service == "mysvc" + + startup_nodes = [ + valkey.asyncio.cluster.ClusterNode(VALKEY_CLUSTER_CONFIG["host"], int(port)) + for port in VALKEY_CLUSTER_CONFIG["ports"].split(",") + ] + r = valkey.asyncio.cluster.ValkeyCluster(startup_nodes=startup_nodes) + tracer = DummyTracer() + test_spans = TracerSpanContainer(tracer) + + Pin.get_from(r)._clone(tracer=tracer).onto(r) + await r.get("key") + await r.close() + + traces = test_spans.pop_traces() + assert len(traces) == 1 + spans = traces[0] + assert len(spans) == 1 + span = spans[0] + assert span.service == "myvalkeycluster" + + asyncio.run(test()) + + +@pytest.mark.subprocess( + env=dict(DD_SERVICE="mysvc", DD_VALKEY_SERVICE="myvalkeycluster", DD_TRACE_SPAN_ATTRIBUTE_SCHEMA="v1"), + err=None, # avoid checking stderr because of an expected deprecation warning +) +def test_service_precedence_v1(): + import asyncio + + import valkey + + from ddtrace import config + from ddtrace.contrib.internal.valkey.patch import patch + from ddtrace.trace import Pin + from tests.contrib.config import VALKEY_CLUSTER_CONFIG + from tests.utils import DummyTracer + from tests.utils import TracerSpanContainer + + patch() + + async def test(): + # Ensure that the service name was configured + assert config.service == "mysvc" + + startup_nodes = [ + valkey.asyncio.cluster.ClusterNode(VALKEY_CLUSTER_CONFIG["host"], int(port)) + for port in VALKEY_CLUSTER_CONFIG["ports"].split(",") + ] + r = valkey.asyncio.cluster.ValkeyCluster(startup_nodes=startup_nodes) + tracer = DummyTracer() + test_spans = TracerSpanContainer(tracer) + + Pin.get_from(r)._clone(tracer=tracer).onto(r) + await r.get("key") + await r.close() + + traces
= test_spans.pop_traces() + assert len(traces) == 1 + spans = traces[0] + assert len(spans) == 1 + span = spans[0] + assert span.service == "myvalkeycluster" + + asyncio.run(test()) diff --git a/tests/contrib/yaaredis/test_yaaredis_patch.py b/tests/contrib/valkey/test_valkey_patch.py similarity index 52% rename from tests/contrib/yaaredis/test_yaaredis_patch.py rename to tests/contrib/valkey/test_valkey_patch.py index d93247a1faa..320d2b82b6a 100644 --- a/tests/contrib/yaaredis/test_yaaredis_patch.py +++ b/tests/contrib/valkey/test_valkey_patch.py @@ -3,29 +3,29 @@ # removed the ``_generated`` suffix from the file name, to prevent the content # from being overwritten by future re-generations. -from ddtrace.contrib.internal.yaaredis.patch import get_version -from ddtrace.contrib.internal.yaaredis.patch import patch +from ddtrace.contrib.internal.valkey.patch import get_version +from ddtrace.contrib.internal.valkey.patch import patch try: - from ddtrace.contrib.internal.yaaredis.patch import unpatch + from ddtrace.contrib.internal.valkey.patch import unpatch except ImportError: unpatch = None from tests.contrib.patch import PatchTestCase -class TestYaaredisPatch(PatchTestCase.Base): - __integration_name__ = "yaaredis" - __module_name__ = "yaaredis" +class TestValkeyPatch(PatchTestCase.Base): + __integration_name__ = "valkey" + __module_name__ = "valkey" __patch_func__ = patch __unpatch_func__ = unpatch __get_version__ = get_version - def assert_module_patched(self, yaaredis): + def assert_module_patched(self, valkey): pass - def assert_not_module_patched(self, yaaredis): + def assert_not_module_patched(self, valkey): pass - def assert_not_module_double_patched(self, yaaredis): + def assert_not_module_double_patched(self, valkey): pass diff --git a/tests/contrib/vertexai/conftest.py b/tests/contrib/vertexai/conftest.py index 0b2b976b610..173551a219a 100644 --- a/tests/contrib/vertexai/conftest.py +++ b/tests/contrib/vertexai/conftest.py @@ -43,7 +43,7 @@ def mock_tracer(ddtrace_global_config, vertexai): try: pin = Pin.get_from(vertexai) mock_tracer = DummyTracer(writer=DummyWriter(trace_flush_enabled=False)) - pin.override(vertexai, tracer=mock_tracer) + pin._override(vertexai, tracer=mock_tracer) pin.tracer._configure() if ddtrace_global_config.get("_llmobs_enabled", False): # Have to disable and re-enable LLMObs to use the mock tracer. 
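
Both the vertexai conftest hunk above and the vertica changes below replace the public Pin.override call with the internal Pin._override when attaching a test tracer. A minimal sketch of the pattern, assuming Pin._override keeps the old override signature; the attach_dummy_tracer helper is illustrative and not part of the patch:

    from ddtrace.trace import Pin
    from tests.utils import DummyTracer

    def attach_dummy_tracer(traced_obj, service=None):
        # Pin a DummyTracer onto the traced object so finished spans are
        # collected in memory instead of being flushed to an agent.
        test_tracer = DummyTracer()
        Pin._override(traced_obj, service=service, tracer=test_tracer)
        return test_tracer

The test then exercises the traced object (a cursor, client, or connection) and inspects test_tracer.pop() for the finished spans.
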
diff --git a/tests/contrib/vertica/test_vertica.py b/tests/contrib/vertica/test_vertica.py index d3fb9709bea..2c0d554c01a 100644 --- a/tests/contrib/vertica/test_vertica.py +++ b/tests/contrib/vertica/test_vertica.py @@ -130,7 +130,7 @@ def test_configuration_service_name(self): conn = vertica_python.connect(**VERTICA_CONFIG) cur = conn.cursor() - Pin.override(cur, tracer=test_tracer) + Pin._override(cur, tracer=test_tracer) with conn: cur.execute("DROP TABLE IF EXISTS {}".format(TEST_TABLE)) spans = test_tracer.pop() @@ -163,7 +163,7 @@ def test_configuration_routine(self): test_tracer = DummyTracer() conn = vertica_python.connect(**VERTICA_CONFIG) - Pin.override(conn, service="mycustomservice", tracer=test_tracer) + Pin._override(conn, service="mycustomservice", tracer=test_tracer) conn.cursor() # should be traced now conn.close() spans = test_tracer.pop() @@ -175,7 +175,7 @@ def test_execute_metadata(self): """Metadata related to an `execute` call should be captured.""" conn, cur = self.test_conn - Pin.override(cur, tracer=self.test_tracer) + Pin._override(cur, tracer=self.test_tracer) with conn: cur.execute("INSERT INTO {} (a, b) VALUES (1, 'aa');".format(TEST_TABLE)) @@ -206,7 +206,7 @@ def test_cursor_override(self): """Test overriding the tracer with our own.""" conn, cur = self.test_conn - Pin.override(cur, tracer=self.test_tracer) + Pin._override(cur, tracer=self.test_tracer) with conn: cur.execute("INSERT INTO {} (a, b) VALUES (1, 'aa');".format(TEST_TABLE)) @@ -403,7 +403,7 @@ def test_user_specified_service_default(self): assert config.service == "mysvc" conn, cur = self.test_conn - Pin.override(cur, tracer=self.test_tracer) + Pin._override(cur, tracer=self.test_tracer) with conn: cur.execute("INSERT INTO {} (a, b) VALUES (1, 'aa');".format(TEST_TABLE)) cur.execute("SELECT * FROM {};".format(TEST_TABLE)) @@ -427,7 +427,7 @@ def test_user_specified_service_v0(self): assert config.service == "mysvc" conn, cur = self.test_conn - Pin.override(cur, tracer=self.test_tracer) + Pin._override(cur, tracer=self.test_tracer) with conn: cur.execute("INSERT INTO {} (a, b) VALUES (1, 'aa');".format(TEST_TABLE)) cur.execute("SELECT * FROM {};".format(TEST_TABLE)) @@ -451,7 +451,7 @@ def test_user_specified_service_v1(self): assert config.service == "mysvc" conn, cur = self.test_conn - Pin.override(cur, tracer=self.test_tracer) + Pin._override(cur, tracer=self.test_tracer) with conn: cur.execute("INSERT INTO {} (a, b) VALUES (1, 'aa');".format(TEST_TABLE)) cur.execute("SELECT * FROM {};".format(TEST_TABLE)) @@ -469,7 +469,7 @@ def test_unspecified_service_v0(self): should result in the default DD_SERVICE the span service """ conn, cur = self.test_conn - Pin.override(cur, tracer=self.test_tracer) + Pin._override(cur, tracer=self.test_tracer) with conn: cur.execute("INSERT INTO {} (a, b) VALUES (1, 'aa');".format(TEST_TABLE)) cur.execute("SELECT * FROM {};".format(TEST_TABLE)) @@ -487,7 +487,7 @@ def test_unspecified_service_v1(self): should result in the default DD_SERVICE the span service """ conn, cur = self.test_conn - Pin.override(cur, tracer=self.test_tracer) + Pin._override(cur, tracer=self.test_tracer) with conn: cur.execute("INSERT INTO {} (a, b) VALUES (1, 'aa');".format(TEST_TABLE)) cur.execute("SELECT * FROM {};".format(TEST_TABLE)) diff --git a/tests/contrib/yaaredis/test_yaaredis.py b/tests/contrib/yaaredis/test_yaaredis.py deleted file mode 100644 index 350b323de9c..00000000000 --- a/tests/contrib/yaaredis/test_yaaredis.py +++ /dev/null @@ -1,237 +0,0 @@ -# -*- encoding: utf-8 -*- 
-import os -import uuid - -import pytest -from wrapt import ObjectProxy -import yaaredis - -from ddtrace.contrib.internal.yaaredis.patch import patch -from ddtrace.contrib.internal.yaaredis.patch import unpatch -from ddtrace.trace import Pin -from tests.opentracer.utils import init_tracer -from tests.utils import override_config - -from ..config import REDIS_CONFIG - - -@pytest.fixture(autouse=True) -async def traced_yaaredis(): - r = yaaredis.StrictRedis(port=REDIS_CONFIG["port"]) - await r.flushall() - - patch() - try: - yield r - finally: - unpatch() - - r = yaaredis.StrictRedis(port=REDIS_CONFIG["port"]) - await r.flushall() - - -def test_patching(): - """ - When patching yaaredis library - We wrap the correct methods - When unpatching yaaredis library - We unwrap the correct methods - """ - assert isinstance(yaaredis.client.StrictRedis.execute_command, ObjectProxy) - assert isinstance(yaaredis.client.StrictRedis.pipeline, ObjectProxy) - assert isinstance(yaaredis.pipeline.StrictPipeline.execute, ObjectProxy) - assert isinstance(yaaredis.pipeline.StrictPipeline.immediate_execute_command, ObjectProxy) - - unpatch() - - assert not isinstance(yaaredis.client.StrictRedis.execute_command, ObjectProxy) - assert not isinstance(yaaredis.client.StrictRedis.pipeline, ObjectProxy) - assert not isinstance(yaaredis.pipeline.StrictPipeline.execute, ObjectProxy) - assert not isinstance(yaaredis.pipeline.StrictPipeline.immediate_execute_command, ObjectProxy) - - -@pytest.mark.asyncio -async def test_long_command(snapshot_context, traced_yaaredis): - with snapshot_context(): - await traced_yaaredis.mget(*range(1000)) - - -@pytest.mark.asyncio -@pytest.mark.snapshot -async def test_cmd_max_length(traced_yaaredis): - with override_config("yaaredis", dict(cmd_max_length=7)): - await traced_yaaredis.get("here-is-a-long-key") - - -@pytest.mark.skip(reason="No traces sent to the test agent") -@pytest.mark.subprocess(env=dict(DD_YAAREDIS_CMD_MAX_LENGTH="10"), ddtrace_run=True) -@pytest.mark.snapshot -def test_cmd_max_length_env(): - import asyncio - - import yaaredis - - from tests.contrib.config import REDIS_CONFIG - - async def main(): - r = yaaredis.StrictRedis(port=REDIS_CONFIG["port"]) - await r.get("here-is-a-long-key") - - asyncio.run(main()) - - -@pytest.mark.asyncio -async def test_basics(snapshot_context, traced_yaaredis): - with snapshot_context(): - await traced_yaaredis.get("cheese") - - -@pytest.mark.asyncio -async def test_unicode(snapshot_context, traced_yaaredis): - with snapshot_context(): - await traced_yaaredis.get("😐") - - -@pytest.mark.asyncio -async def test_pipeline_traced(snapshot_context, traced_yaaredis): - with snapshot_context(): - p = await traced_yaaredis.pipeline(transaction=False) - await p.set("blah", 32) - await p.rpush("foo", "éé") - await p.hgetall("xxx") - await p.execute() - - -@pytest.mark.asyncio -async def test_pipeline_immediate(snapshot_context, traced_yaaredis): - with snapshot_context(): - p = await traced_yaaredis.pipeline() - await p.set("a", 1) - await p.immediate_execute_command("SET", "a", 1) - await p.execute() - - -@pytest.mark.asyncio -async def test_meta_override(tracer, test_spans, traced_yaaredis): - pin = Pin.get_from(traced_yaaredis) - assert pin is not None - pin.clone(tags={"cheese": "camembert"}, tracer=tracer).onto(traced_yaaredis) - - await traced_yaaredis.get("cheese") - test_spans.assert_trace_count(1) - test_spans.assert_span_count(1) - assert test_spans.spans[0].service == "redis" - assert test_spans.spans[0].get_tag("component") == 
"yaaredis" - assert test_spans.spans[0].get_tag("span.kind") == "client" - assert test_spans.spans[0].get_tag("db.system") == "redis" - assert "cheese" in test_spans.spans[0].get_tags() and test_spans.spans[0].get_tag("cheese") == "camembert" - - -@pytest.mark.asyncio -async def test_service_name(tracer, test_spans, traced_yaaredis): - service = str(uuid.uuid4()) - Pin.override(traced_yaaredis, service=service, tracer=tracer) - - await traced_yaaredis.set("cheese", "1") - test_spans.assert_trace_count(1) - test_spans.assert_span_count(1) - assert test_spans.spans[0].service == service - - -@pytest.mark.asyncio -async def test_service_name_config(tracer, test_spans, traced_yaaredis): - service = str(uuid.uuid4()) - with override_config("yaaredis", dict(service=service)): - Pin.override(traced_yaaredis, tracer=tracer) - await traced_yaaredis.set("cheese", "1") - test_spans.assert_trace_count(1) - test_spans.assert_span_count(1) - assert test_spans.spans[0].service == service - - -@pytest.mark.asyncio -async def test_opentracing(tracer, snapshot_context, traced_yaaredis): - """Ensure OpenTracing works with redis.""" - - with snapshot_context(): - pin = Pin.get_from(traced_yaaredis) - ot_tracer = init_tracer("redis_svc", pin.tracer) - - with ot_tracer.start_active_span("redis_get"): - await traced_yaaredis.get("cheese") - - -@pytest.mark.parametrize( - "service_schema", - [ - (None, None), - (None, "v0"), - (None, "v1"), - ("mysvc", None), - ("mysvc", "v0"), - ("mysvc", "v1"), - ], -) -@pytest.mark.snapshot() -def test_schematization(ddtrace_run_python_code_in_subprocess, service_schema): - service, schema = service_schema - code = """ -import sys - -import pytest - -from tests.contrib.yaaredis.test_yaaredis import traced_yaaredis - -@pytest.mark.asyncio -async def test_basics(traced_yaaredis): - async for client in traced_yaaredis: - await client.get("cheese") - - -if __name__ == "__main__": - sys.exit(pytest.main(["-x", __file__])) - """ - env = os.environ.copy() - if service: - env["DD_SERVICE"] = service - if schema: - env["DD_TRACE_SPAN_ATTRIBUTE_SCHEMA"] = schema - out, err, status, _ = ddtrace_run_python_code_in_subprocess(code, env=env) - assert status == 0, (err.decode(), out.decode()) - assert err == b"", err.decode() - - -@pytest.mark.subprocess(env=dict(DD_REDIS_RESOURCE_ONLY_COMMAND="false")) -@pytest.mark.snapshot -def test_full_command_in_resource_env(): - import asyncio - - import yaaredis - - import ddtrace - from tests.contrib.config import REDIS_CONFIG - - async def traced_client(): - with ddtrace.tracer.trace("web-request", service="test"): - redis_client = yaaredis.StrictRedis(port=REDIS_CONFIG["port"]) - await redis_client.get("put_key_in_resource") - p = await redis_client.pipeline(transaction=False) - await p.set("pipeline-cmd1", 1) - await p.set("pipeline-cmd2", 2) - await p.execute() - - ddtrace.patch(yaaredis=True) - asyncio.run(traced_client()) - - -@pytest.mark.snapshot -@pytest.mark.asyncio -@pytest.mark.parametrize("use_global_tracer", [True]) -async def test_full_command_in_resource_config(tracer, traced_yaaredis): - with override_config("yaaredis", dict(resource_only_command=False)): - with tracer.trace("web-request", service="test"): - await traced_yaaredis.get("put_key_in_resource") - p = await traced_yaaredis.pipeline(transaction=False) - await p.set("pipeline-cmd1", 1) - await p.set("pipeline-cmd2", 2) - await p.execute() diff --git a/tests/debugging/test_expressions.py b/tests/debugging/test_expressions.py index f064a951893..a4ffe0a48bf 100644 --- 
a/tests/debugging/test_expressions.py +++ b/tests/debugging/test_expressions.py @@ -74,6 +74,10 @@ def __getitem__(self, name): ({"eq": [{"ref": "hits"}, None]}, {"hits": None}, True), ({"substring": [{"ref": "payload"}, 4, 7]}, {"payload": "hello world"}, "hello world"[4:7]), ({"any": [{"ref": "collection"}, {"isEmpty": {"ref": "@it"}}]}, {"collection": ["foo", "bar", ""]}, True), + ({"any": [{"ref": "coll"}, {"isEmpty": {"ref": "@value"}}]}, {"coll": {0: "foo", 1: "bar", 2: ""}}, True), + ({"any": [{"ref": "coll"}, {"isEmpty": {"ref": "@value"}}]}, {"coll": {0: "foo", 1: "bar", 2: "baz"}}, False), + ({"any": [{"ref": "coll"}, {"isEmpty": {"ref": "@key"}}]}, {"coll": {"foo": 0, "bar": 1, "": 2}}, True), + ({"any": [{"ref": "coll"}, {"isEmpty": {"ref": "@key"}}]}, {"coll": {"foo": 0, "bar": 1, "baz": 2}}, False), ({"startsWith": [{"ref": "local_string"}, "hello"]}, {"local_string": "hello world!"}, True), ({"startsWith": [{"ref": "local_string"}, "world"]}, {"local_string": "hello world!"}, False), ( @@ -91,6 +95,16 @@ def __getitem__(self, name): {"collection": {"foo", "bar", ""}}, {"foo", "bar"}, ), + ( + {"filter": [{"ref": "collection"}, {"not": {"isEmpty": {"ref": "@value"}}}]}, + {"collection": {1: "foo", 2: "bar", 3: ""}}, + {1: "foo", 2: "bar"}, + ), + ( + {"filter": [{"ref": "collection"}, {"not": {"isEmpty": {"ref": "@key"}}}]}, + {"collection": {"foo": 1, "bar": 2, "": 3}}, + {"foo": 1, "bar": 2}, + ), ({"contains": [{"ref": "payload"}, "hello"]}, {"payload": CustomObject("contains")}, SideEffect), ( {"contains": [{"ref": "payload"}, "hello"]}, diff --git a/tests/integration/test_debug.py b/tests/integration/test_debug.py index f5453f353fe..b2f973b1a48 100644 --- a/tests/integration/test_debug.py +++ b/tests/integration/test_debug.py @@ -3,8 +3,6 @@ import os import re import subprocess -from typing import List -from typing import Optional import mock import pytest @@ -13,11 +11,10 @@ import ddtrace._trace.sampler from ddtrace.internal import debug from ddtrace.internal.writer import AgentWriter -from ddtrace.internal.writer import TraceWriter -from ddtrace.trace import Span from tests.integration.utils import AGENT_VERSION from tests.subprocesstest import SubprocessTestCase from tests.subprocesstest import run_in_subprocess +from tests.utils import DummyTracer pytestmark = pytest.mark.skipif(AGENT_VERSION == "testagent", reason="The test agent doesn't support startup logs.") @@ -36,7 +33,6 @@ def __eq__(self, other): @pytest.mark.subprocess() def test_standard_tags(): from datetime import datetime - import sys import ddtrace from ddtrace.internal import debug @@ -75,14 +71,6 @@ def test_standard_tags(): in_venv = f.get("in_virtual_env") assert in_venv is True - lang_version = f.get("lang_version") - if sys.version_info == (3, 7, 0): - assert "3.7" in lang_version - elif sys.version_info == (3, 6, 0): - assert "3.6" in lang_version - elif sys.version_info == (2, 7, 0): - assert "2.7" in lang_version - agent_url = f.get("agent_url") assert agent_url == "http://localhost:8126" @@ -198,13 +186,12 @@ def test_trace_agent_url(self): ) ) def test_tracer_loglevel_info_connection(self): - tracer = ddtrace.trace.Tracer() logging.basicConfig(level=logging.INFO) with mock.patch.object(logging.Logger, "log") as mock_logger: # shove an unserializable object into the config log output # regression: this used to cause an exception to be raised ddtrace.config.version = AgentWriter(agent_url="foobar") - tracer._configure() + ddtrace.trace.tracer.configure() assert mock.call(logging.INFO, 
re_matcher("- DATADOG TRACER CONFIGURATION - ")) in mock_logger.mock_calls @run_in_subprocess( @@ -214,10 +201,9 @@ def test_tracer_loglevel_info_connection(self): ) ) def test_tracer_loglevel_info_no_connection(self): - tracer = ddtrace.trace.Tracer() logging.basicConfig(level=logging.INFO) with mock.patch.object(logging.Logger, "log") as mock_logger: - tracer._configure() + ddtrace.trace.tracer.configure() assert mock.call(logging.INFO, re_matcher("- DATADOG TRACER CONFIGURATION - ")) in mock_logger.mock_calls assert mock.call(logging.WARNING, re_matcher("- DATADOG TRACER DIAGNOSTIC - ")) in mock_logger.mock_calls @@ -228,9 +214,8 @@ def test_tracer_loglevel_info_no_connection(self): ) ) def test_tracer_log_disabled_error(self): - tracer = ddtrace.trace.Tracer() with mock.patch.object(logging.Logger, "log") as mock_logger: - tracer._configure() + ddtrace.trace.tracer._configure() assert mock_logger.mock_calls == [] @run_in_subprocess( @@ -240,9 +225,8 @@ def test_tracer_log_disabled_error(self): ) ) def test_tracer_log_disabled(self): - tracer = ddtrace.trace.Tracer() with mock.patch.object(logging.Logger, "log") as mock_logger: - tracer._configure() + ddtrace.trace.tracer._configure() assert mock_logger.mock_calls == [] @run_in_subprocess( @@ -252,9 +236,8 @@ def test_tracer_log_disabled(self): ) def test_tracer_info_level_log(self): logging.basicConfig(level=logging.INFO) - tracer = ddtrace.trace.Tracer() with mock.patch.object(logging.Logger, "log") as mock_logger: - tracer._configure() + ddtrace.trace.tracer._configure() assert mock_logger.mock_calls == [] @@ -296,16 +279,24 @@ def test_to_json(): json.dumps(info) +@pytest.mark.subprocess(env={"AWS_LAMBDA_FUNCTION_NAME": "something"}) def test_agentless(monkeypatch): - monkeypatch.setenv("AWS_LAMBDA_FUNCTION_NAME", "something") - tracer = ddtrace.trace.Tracer() - info = debug.collect(tracer) + from ddtrace.internal import debug + from ddtrace.trace import tracer + info = debug.collect(tracer) assert info.get("agent_url") == "AGENTLESS" +@pytest.mark.subprocess() def test_custom_writer(): - tracer = ddtrace.trace.Tracer() + from typing import List + from typing import Optional + + from ddtrace.internal import debug + from ddtrace.internal.writer import TraceWriter + from ddtrace.trace import Span + from ddtrace.trace import tracer class CustomWriter(TraceWriter): def recreate(self) -> TraceWriter: @@ -326,16 +317,24 @@ def flush_queue(self) -> None: assert info.get("agent_url") == "CUSTOM" +@pytest.mark.subprocess() def test_different_samplers(): - tracer = ddtrace.trace.Tracer() + import ddtrace + from ddtrace.internal import debug + from ddtrace.trace import tracer + tracer._configure(sampler=ddtrace._trace.sampler.RateSampler()) info = debug.collect(tracer) assert info.get("sampler_type") == "RateSampler" +@pytest.mark.subprocess() def test_startup_logs_sampling_rules(): - tracer = ddtrace.trace.Tracer() + import ddtrace + from ddtrace.internal import debug + from ddtrace.trace import tracer + sampler = ddtrace._trace.sampler.DatadogSampler(rules=[ddtrace._trace.sampler.SamplingRule(sample_rate=1.0)]) tracer._configure(sampler=sampler) f = debug.collect(tracer) @@ -424,7 +423,7 @@ def test_debug_span_log(): def test_partial_flush_log(): - tracer = ddtrace.trace.Tracer() + tracer = DummyTracer() tracer._configure( partial_flush_enabled=True, diff --git a/tests/integration/test_encoding.py b/tests/integration/test_encoding.py index e3f5037e7b3..ff47679af47 100644 --- a/tests/integration/test_encoding.py +++ 
b/tests/integration/test_encoding.py @@ -4,7 +4,7 @@ import mock import pytest -from ddtrace.trace import Tracer +from ddtrace.trace import tracer AGENT_VERSION = os.environ.get("AGENT_VERSION") @@ -12,7 +12,6 @@ class TestTraceAcceptedByAgent: def test_simple_trace_accepted_by_agent(self): - tracer = Tracer() with mock.patch("ddtrace.internal.writer.writer.log") as log: with tracer.trace("root"): for _ in range(999): @@ -32,7 +31,6 @@ def test_simple_trace_accepted_by_agent(self): ) def test_trace_with_meta_accepted_by_agent(self, tags): """Meta tags should be text types.""" - tracer = Tracer() with mock.patch("ddtrace.internal.writer.writer.log") as log: with tracer.trace("root", service="test_encoding", resource="test_resource") as root: root.set_tags(tags) @@ -53,7 +51,6 @@ def test_trace_with_meta_accepted_by_agent(self, tags): ) def test_trace_with_metrics_accepted_by_agent(self, metrics): """Metric tags should be numeric types - i.e. int, float, long (py3), and str numbers.""" - tracer = Tracer() with mock.patch("ddtrace.internal.writer.writer.log") as log: with tracer.trace("root") as root: root.set_metrics(metrics) @@ -72,7 +69,6 @@ def test_trace_with_metrics_accepted_by_agent(self, metrics): ) def test_trace_with_links_accepted_by_agent(self, span_links_kwargs): """Links should not break things.""" - tracer = Tracer() with mock.patch("ddtrace.internal.writer.writer.log") as log: with tracer.trace("root", service="test_encoding", resource="test_resource") as root: root.set_link(**span_links_kwargs) diff --git a/tests/integration/test_integration.py b/tests/integration/test_integration.py index 70cc84cdbfa..fd29cc18231 100644 --- a/tests/integration/test_integration.py +++ b/tests/integration/test_integration.py @@ -9,10 +9,10 @@ from ddtrace.internal.atexit import register_on_exit_signal from ddtrace.internal.runtime import container -from ddtrace.trace import Tracer from tests.integration.utils import import_ddtrace_in_subprocess from tests.integration.utils import parametrize_with_all_encodings from tests.integration.utils import skip_if_testagent +from tests.utils import DummyTracer from tests.utils import call_program @@ -37,7 +37,7 @@ def test_configure_keeps_api_hostname_and_port(): @mock.patch("signal.getsignal") def test_shutdown_on_exit_signal(mock_get_signal, mock_signal): mock_get_signal.return_value = None - tracer = Tracer() + tracer = DummyTracer() register_on_exit_signal(tracer._atexit) assert mock_signal.call_count == 2 assert mock_signal.call_args_list[0][0][0] == signal.SIGTERM diff --git a/tests/integration/test_integration_civisibility.py b/tests/integration/test_integration_civisibility.py index cc633d12018..9e01d47b756 100644 --- a/tests/integration/test_integration_civisibility.py +++ b/tests/integration/test_integration_civisibility.py @@ -10,7 +10,7 @@ from ddtrace.internal.ci_visibility.constants import EVP_PROXY_AGENT_ENDPOINT from ddtrace.internal.ci_visibility.constants import EVP_SUBDOMAIN_HEADER_EVENT_VALUE from ddtrace.internal.ci_visibility.constants import EVP_SUBDOMAIN_HEADER_NAME -from ddtrace.internal.ci_visibility.recorder import CIVisibilityTracer as Tracer +from ddtrace.internal.ci_visibility.recorder import CIVisibilityTracer from tests.ci_visibility.util import _get_default_civisibility_ddconfig from tests.utils import override_env @@ -36,7 +36,7 @@ def test_civisibility_intake_with_evp_available(): with override_env( dict(DD_API_KEY="foobar.baz", DD_SITE="foo.bar", DD_CIVISIBILITY_AGENTLESS_ENABLED="0") ), 
mock.patch("ddtrace.internal.ci_visibility.recorder.ddconfig", _get_default_civisibility_ddconfig()): - t = Tracer() + t = CIVisibilityTracer() CIVisibility.enable(tracer=t) assert CIVisibility._instance.tracer._writer._endpoint == EVP_PROXY_AGENT_ENDPOINT assert CIVisibility._instance.tracer._writer.intake_url == agent.get_trace_url() @@ -64,7 +64,7 @@ def test_civisibility_intake_with_apikey(): with override_env( dict(DD_API_KEY="foobar.baz", DD_SITE="foo.bar", DD_CIVISIBILITY_AGENTLESS_ENABLED="1") ), mock.patch("ddtrace.internal.ci_visibility.recorder.ddconfig", _get_default_civisibility_ddconfig()): - t = Tracer() + t = CIVisibilityTracer() CIVisibility.enable(tracer=t) assert CIVisibility._instance.tracer._writer._endpoint == AGENTLESS_ENDPOINT assert CIVisibility._instance.tracer._writer.intake_url == "https://citestcycle-intake.foo.bar" diff --git a/tests/integration/test_integration_snapshots.py b/tests/integration/test_integration_snapshots.py index 0ba978fa260..eab323bf319 100644 --- a/tests/integration/test_integration_snapshots.py +++ b/tests/integration/test_integration_snapshots.py @@ -5,7 +5,6 @@ import mock import pytest -from ddtrace.trace import Tracer from ddtrace.trace import tracer from tests.integration.utils import AGENT_VERSION from tests.integration.utils import mark_snapshot @@ -214,7 +213,6 @@ def test_trace_with_wrong_meta_types_not_sent(encoding, meta, monkeypatch): def test_trace_with_wrong_metrics_types_not_sent(encoding, metrics, monkeypatch): """Wrong metric types should raise TypeErrors during encoding and fail to send to the agent.""" with override_global_config(dict(_trace_api=encoding)): - tracer = Tracer() with mock.patch("ddtrace._trace.span.log") as log: with tracer.trace("root") as root: root._metrics = metrics diff --git a/tests/integration/test_priority_sampling.py b/tests/integration/test_priority_sampling.py index 57b64a2fe5c..32fc4e0dcee 100644 --- a/tests/integration/test_priority_sampling.py +++ b/tests/integration/test_priority_sampling.py @@ -8,7 +8,7 @@ from ddtrace.internal.encoding import JSONEncoder from ddtrace.internal.encoding import MsgpackEncoderV04 as Encoder from ddtrace.internal.writer import AgentWriter -from ddtrace.trace import Tracer +from ddtrace.trace import tracer as ddtracer from tests.integration.utils import AGENT_VERSION from tests.integration.utils import parametrize_with_all_encodings from tests.integration.utils import skip_if_testagent @@ -115,18 +115,16 @@ def test_priority_sampling_response(): @pytest.mark.snapshot(agent_sample_rate_by_service={"service:test,env:": 0.9999}) def test_agent_sample_rate_keep(): """Ensure that the agent sample rate is respected when a trace is auto sampled.""" - tracer = Tracer() - # First trace won't actually have the sample rate applied since the response has not yet been received. - with tracer.trace(""): + with ddtracer.trace(""): pass # Force a flush to get the response back. - tracer.flush() + ddtracer.flush() # Subsequent traces should have the rate applied. 
- with tracer.trace("test", service="test") as span: + with ddtracer.trace("test", service="test") as span: pass - tracer.flush() + ddtracer.flush() assert span.get_metric("_dd.agent_psr") == pytest.approx(0.9999) assert span.get_metric("_sampling_priority_v1") == AUTO_KEEP assert span.get_tag("_dd.p.dm") == "-1" @@ -136,21 +134,17 @@ def test_agent_sample_rate_keep(): @pytest.mark.snapshot(agent_sample_rate_by_service={"service:test,env:": 0.0001}) def test_agent_sample_rate_reject(): """Ensure that the agent sample rate is respected when a trace is auto rejected.""" - from ddtrace.trace import Tracer - - tracer = Tracer() - # First trace won't actually have the sample rate applied since the response has not yet been received. - with tracer.trace(""): + with ddtracer.trace(""): pass # Force a flush to get the response back. - tracer.flush() + ddtracer.flush() # Subsequent traces should have the rate applied. - with tracer.trace("test", service="test") as span: + with ddtracer.trace("test", service="test") as span: pass - tracer.flush() + ddtracer.flush() assert span.get_metric("_dd.agent_psr") == pytest.approx(0.0001) assert span.get_metric("_sampling_priority_v1") == AUTO_REJECT assert span.get_tag("_dd.p.dm") == "-1" diff --git a/tests/integration/test_settings.py b/tests/integration/test_settings.py index 249b0211bb4..ba9bcd66f37 100644 --- a/tests/integration/test_settings.py +++ b/tests/integration/test_settings.py @@ -20,7 +20,7 @@ def test_setting_origin_environment(test_agent_session, run_python_code_in_subpr env = os.environ.copy() env.update( { - "DD_TRACE_SAMPLE_RATE": "0.1", + "DD_TRACE_SAMPLING_RULES": '[{"sample_rate":0.1}]', "DD_LOGS_INJECTION": "true", "DD_TRACE_HEADER_TAGS": "X-Header-Tag-1:header_tag_1,X-Header-Tag-2:header_tag_2", "DD_TAGS": "team:apm,component:web", @@ -39,11 +39,11 @@ def test_setting_origin_environment(test_agent_session, run_python_code_in_subpr assert status == 0, err events = test_agent_session.get_events(subprocess=True) - events_trace_sample_rate = _get_telemetry_config_items(events, "DD_TRACE_SAMPLE_RATE") + events_trace_sample_rate = _get_telemetry_config_items(events, "DD_TRACE_SAMPLING_RULES") assert { - "name": "DD_TRACE_SAMPLE_RATE", - "value": 0.1, + "name": "DD_TRACE_SAMPLING_RULES", + "value": '[{"sample_rate":0.1}]', "origin": "env_var", } in events_trace_sample_rate @@ -69,7 +69,6 @@ def test_setting_origin_code(test_agent_session, run_python_code_in_subprocess): env = os.environ.copy() env.update( { - "DD_TRACE_SAMPLE_RATE": "0.1", "DD_LOGS_INJECTION": "true", "DD_TRACE_HEADER_TAGS": "X-Header-Tag-1:header_tag_1,X-Header-Tag-2:header_tag_2", "DD_TAGS": "team:apm,component:web", @@ -81,7 +80,6 @@ def test_setting_origin_code(test_agent_session, run_python_code_in_subprocess): """ from ddtrace import config, tracer -config._trace_sample_rate = 0.2 config._logs_injection = False config._trace_http_header_tags = {"header": "value"} config.tags = {"header": "value"} @@ -96,12 +94,6 @@ def test_setting_origin_code(test_agent_session, run_python_code_in_subprocess): assert status == 0, err events = test_agent_session.get_events(subprocess=True) - events_trace_sample_rate = _get_telemetry_config_items(events, "DD_TRACE_SAMPLE_RATE") - assert { - "name": "DD_TRACE_SAMPLE_RATE", - "value": 0.2, - "origin": "code", - } in events_trace_sample_rate events_logs_injection_enabled = _get_telemetry_config_items(events, "DD_LOGS_INJECTION") assert { @@ -174,8 +166,8 @@ def test_remoteconfig_sampling_rate_default(test_agent_session, run_python_code_ 
assert status == 0, err events = test_agent_session.get_events(subprocess=True) - events_trace_sample_rate = _get_telemetry_config_items(events, "DD_TRACE_SAMPLE_RATE") - assert {"name": "DD_TRACE_SAMPLE_RATE", "value": 1.0, "origin": "default"} in events_trace_sample_rate + events_trace_sample_rate = _get_telemetry_config_items(events, "trace_sample_rate") + assert {"name": "trace_sample_rate", "value": 1.0, "origin": "default"} in events_trace_sample_rate @pytest.mark.skipif(AGENT_VERSION != "testagent", reason="Tests only compatible with a testagent") @@ -191,7 +183,22 @@ def test_remoteconfig_sampling_rate_telemetry(test_agent_session, run_python_cod from ddtrace import config, tracer from tests.internal.test_settings import _base_rc_config -config._handle_remoteconfig(_base_rc_config({"tracing_sampling_rate": 0.5})) +config._handle_remoteconfig( + _base_rc_config( + { + "tracing_sampling_rules": [ + { + "sample_rate": "0.5", + "service": "*", + "name": "*", + "resource": "*", + "tags": {}, + "provenance": "customer", + } + ] + } + ) +) with tracer.trace("test") as span: pass assert span.get_metric("_dd.rule_psr") == 0.5 @@ -201,8 +208,13 @@ def test_remoteconfig_sampling_rate_telemetry(test_agent_session, run_python_cod assert status == 0, err events = test_agent_session.get_events(subprocess=True) - events_trace_sample_rate = _get_telemetry_config_items(events, "DD_TRACE_SAMPLE_RATE") - assert {"name": "DD_TRACE_SAMPLE_RATE", "value": 0.5, "origin": "remote_config"} in events_trace_sample_rate + events_trace_sample_rate = _get_telemetry_config_items(events, "DD_TRACE_SAMPLING_RULES") + assert { + "name": "DD_TRACE_SAMPLING_RULES", + "origin": "remote_config", + "value": '[{"sample_rate": "0.5", "service": "*", "name": "*", "resource": "*", ' + '"tags": {}, "provenance": "customer"}]', + } in events_trace_sample_rate @pytest.mark.skipif(AGENT_VERSION != "testagent", reason="Tests only compatible with a testagent") @@ -226,9 +238,11 @@ def test_remoteconfig_header_tags_telemetry(test_agent_session, run_python_code_ {"header": "used-with-default", "tag_name":""}] })) with tracer.trace("test") as span: - trace_utils.set_http_meta(span, - config.falcon, # randomly chosen http integration config - request_headers={"used": "foobarbanana", "used-with-default": "defaultname"}) + trace_utils.set_http_meta( + span, + config.falcon, # randomly chosen http integration config + request_headers={"used": "foobarbanana", "used-with-default": "defaultname"}, + ) assert span.get_tag("header_tag_69") == "foobarbanana" assert span.get_tag("header_tag_70") is None assert span.get_tag("http.request.headers.used-with-default") == "defaultname" diff --git a/tests/integration/test_tracemethods.py b/tests/integration/test_tracemethods.py index 15129c56161..7353c12182a 100644 --- a/tests/integration/test_tracemethods.py +++ b/tests/integration/test_tracemethods.py @@ -27,14 +27,10 @@ "mod.mod2.mod3:Class.test_method,Class.test_method2", [("mod.mod2.mod3", "Class.test_method"), ("mod.mod2.mod3", "Class.test_method2")], ), - ("module[method1, method2]", []), ("module", []), ("module.", []), ("module.method", []), - ("module.method[m1,m2,]", []), ("module.method;module.method", []), - ("module.method[m1];module.method[m1,m2,]", []), - ("module.method[[m1]", []), ], ) def test_trace_methods_parse(dd_trace_methods: str, expected_output: List[Tuple[str, str]]): @@ -43,37 +39,6 @@ def test_trace_methods_parse(dd_trace_methods: str, expected_output: List[Tuple[ assert _parse_trace_methods(dd_trace_methods) == 
expected_output -def test_legacy_trace_methods_parse(): - from ddtrace.internal.tracemethods import _parse_legacy_trace_methods - - assert _parse_legacy_trace_methods("") == [] - assert _parse_legacy_trace_methods("module[method1]") == ["module.method1"] - assert _parse_legacy_trace_methods("module[method1,method2]") == ["module.method1", "module.method2"] - assert _parse_legacy_trace_methods("module[method1,method2];mod2[m1,m2]") == [ - "module.method1", - "module.method2", - "mod2.m1", - "mod2.m2", - ] - assert _parse_legacy_trace_methods("mod.submod[m1,m2,m3]") == ["mod.submod.m1", "mod.submod.m2", "mod.submod.m3"] - assert _parse_legacy_trace_methods("mod.submod.subsubmod[m1,m2]") == [ - "mod.submod.subsubmod.m1", - "mod.submod.subsubmod.m2", - ] - assert _parse_legacy_trace_methods("mod.mod2.mod3.Class[test_method,test_method2]") == [ - "mod.mod2.mod3.Class.test_method", - "mod.mod2.mod3.Class.test_method2", - ] - assert _parse_legacy_trace_methods("module[method1, method2]") == [] - assert _parse_legacy_trace_methods("module") == [] - assert _parse_legacy_trace_methods("module.") == [] - assert _parse_legacy_trace_methods("module.method") == [] - assert _parse_legacy_trace_methods("module.method[m1,m2,]") == [] - assert _parse_legacy_trace_methods("module.method;module.method") == [] - assert _parse_legacy_trace_methods("module.method[m1];module.method[m1,m2,]") == [] - assert _parse_legacy_trace_methods("module.method[[m1]") == [] - - def _test_method(): pass @@ -105,9 +70,9 @@ def test_method(self): ddtrace_run=True, env=dict( DD_TRACE_METHODS=( - "tests.integration.test_tracemethods[_test_method,_test_method2];" - "tests.integration.test_tracemethods._Class[test_method,test_method2];" - "tests.integration.test_tracemethods._Class.NestedClass[test_method]" + "tests.integration.test_tracemethods:_test_method,_test_method2;" + "tests.integration.test_tracemethods:_Class.test_method,_Class.test_method2;" + "tests.integration.test_tracemethods:_Class.NestedClass.test_method" ) ), ) @@ -139,8 +104,8 @@ async def _async_test_method2(): def test_ddtrace_run_trace_methods_async(ddtrace_run_python_code_in_subprocess): env = os.environ.copy() env["DD_TRACE_METHODS"] = ( - "tests.integration.test_tracemethods[_async_test_method,_async_test_method2];" - "tests.integration.test_tracemethods._Class[async_test_method]" + "tests.integration.test_tracemethods:_async_test_method,_async_test_method2;" + "tests.integration.test_tracemethods:_Class.async_test_method" ) tests_dir = os.path.dirname(os.path.dirname(__file__)) env["PYTHONPATH"] = os.pathsep.join([tests_dir, env.get("PYTHONPATH", "")]) diff --git a/tests/integration/utils.py b/tests/integration/utils.py index 5b87161e2d0..53640d82b57 100644 --- a/tests/integration/utils.py +++ b/tests/integration/utils.py @@ -5,8 +5,6 @@ import mock import pytest -from ddtrace.trace import Tracer - AGENT_VERSION = os.environ.get("AGENT_VERSION") @@ -28,7 +26,8 @@ def encode_traces(self, traces): def send_invalid_payload_and_get_logs(encoder_cls=BadEncoder): - t = Tracer() + from ddtrace.trace import tracer as t + for client in t._writer._clients: client.encoder = encoder_cls() with mock.patch("ddtrace.internal.writer.writer.log") as log: diff --git a/tests/internal/test_settings.py b/tests/internal/test_settings.py index a3f5fa97802..a26d692eea4 100644 --- a/tests/internal/test_settings.py +++ b/tests/internal/test_settings.py @@ -62,22 +62,36 @@ def _deleted_rc_config(): }, }, { - "env": {"DD_TRACE_SAMPLE_RATE": "0.9"}, - "expected": {"_trace_sample_rate": 
0.9}, - "expected_source": {"_trace_sample_rate": "env_var"}, + "env": {"DD_TRACE_SAMPLING_RULES": '[{"sample_rate":0.91}]'}, + "expected": {"_trace_sampling_rules": '[{"sample_rate":0.91}]'}, + "expected_source": {"_trace_sampling_rules": "env_var"}, }, { - "env": {"DD_TRACE_SAMPLE_RATE": "0.9"}, - "code": {"_trace_sample_rate": 0.8}, - "expected": {"_trace_sample_rate": 0.8}, - "expected_source": {"_trace_sample_rate": "code"}, + "env": {"DD_TRACE_SAMPLING_RULES": '[{"sample_rate":0.92}]'}, + "code": {"_trace_sampling_rules": '[{"sample_rate":0.82}]'}, + "expected": {"_trace_sampling_rules": '[{"sample_rate":0.82}]'}, + "expected_source": {"_trace_sampling_rules": "code"}, }, { - "env": {"DD_TRACE_SAMPLE_RATE": "0.9"}, - "code": {"_trace_sample_rate": 0.8}, - "rc": {"tracing_sampling_rate": 0.7}, - "expected": {"_trace_sample_rate": 0.7}, - "expected_source": {"_trace_sample_rate": "remote_config"}, + "env": {"DD_TRACE_SAMPLING_RULES": '[{"sample_rate":0.93}]'}, + "code": {"_trace_sampling_rules": '[{"sample_rate":0.83}]'}, + "rc": { + "tracing_sampling_rules": [ + { + "sample_rate": "0.73", + "service": "*", + "name": "*", + "resource": "*", + "tags": [], + "provenance": "customer", + } + ] + }, + "expected": { + "_trace_sampling_rules": '[{"sample_rate": "0.73", "service": "*", "name": "*", ' + '"resource": "*", "tags": [], "provenance": "customer"}]', + }, + "expected_source": {"_trace_sampling_rules": "remote_config"}, }, { "env": {"DD_LOGS_INJECTION": "true"}, @@ -227,60 +241,6 @@ def test_config_subscription(config): _handler.assert_called_once_with(config, [s]) -def test_remoteconfig_sampling_rate_user(run_python_code_in_subprocess): - env = os.environ.copy() - env.update({"DD_TRACE_SAMPLE_RATE": "0.1"}) - out, err, status, _ = run_python_code_in_subprocess( - """ -from ddtrace import config, tracer -from ddtrace._trace.sampler import DatadogSampler -from tests.internal.test_settings import _base_rc_config, _deleted_rc_config - -with tracer.trace("test") as span: - pass -assert span.get_metric("_dd.rule_psr") == 0.1 - -config._handle_remoteconfig(_base_rc_config({"tracing_sampling_rate": 0.2})) -with tracer.trace("test") as span: - pass -assert span.get_metric("_dd.rule_psr") == 0.2 - -config._handle_remoteconfig(_base_rc_config({})) -with tracer.trace("test") as span: - pass -assert span.get_metric("_dd.rule_psr") == 0.1 - -custom_sampler = DatadogSampler(default_sample_rate=0.3) -tracer._configure(sampler=custom_sampler) -with tracer.trace("test") as span: - pass -assert span.get_metric("_dd.rule_psr") == 0.3 - -config._handle_remoteconfig(_base_rc_config({"tracing_sampling_rate": 0.4})) -with tracer.trace("test") as span: - pass -assert span.get_metric("_dd.rule_psr") == 0.4 - -config._handle_remoteconfig(_base_rc_config({})) -with tracer.trace("test") as span: - pass -assert span.get_metric("_dd.rule_psr") == 0.3 - -config._handle_remoteconfig(_base_rc_config({"tracing_sampling_rate": 0.4})) -with tracer.trace("test") as span: - pass -assert span.get_metric("_dd.rule_psr") == 0.4 - -config._handle_remoteconfig(_deleted_rc_config()) -with tracer.trace("test") as span: - pass -assert span.get_metric("_dd.rule_psr") == 0.3 - """, - env=env, - ) - assert status == 0, err.decode("utf-8") - - def test_remoteconfig_sampling_rules(run_python_code_in_subprocess): env = os.environ.copy() env.update({"DD_TRACE_SAMPLING_RULES": '[{"sample_rate":0.1, "name":"test"}]'}) @@ -368,13 +328,12 @@ def test_remoteconfig_sampling_rules(run_python_code_in_subprocess): assert status == 0, 
err.decode("utf-8") -def test_remoteconfig_sample_rate_and_rules(run_python_code_in_subprocess): +def test_remoteconfig_global_sample_rate_and_rules(run_python_code_in_subprocess): """There is complex logic regarding the interaction between setting new sample rates and rules with remote config. """ env = os.environ.copy() - env.update({"DD_TRACE_SAMPLING_RULES": '[{"sample_rate":0.9, "name":"rules"}]'}) - env.update({"DD_TRACE_SAMPLE_RATE": "0.8"}) + env.update({"DD_TRACE_SAMPLING_RULES": '[{"sample_rate":0.9, "name":"rules"}, {"sample_rate":0.8}]'}) out, err, status, _ = run_python_code_in_subprocess( """ @@ -410,8 +369,9 @@ def test_remoteconfig_sample_rate_and_rules(run_python_code_in_subprocess): with tracer.trace("sample_rate") as span: pass -assert span.get_metric("_dd.rule_psr") == 0.8 -assert span.get_tag("_dd.p.dm") == "-3" +# Global sampling rule was overwritten +assert span.get_metric("_dd.rule_psr") is None +assert span.get_tag("_dd.p.dm") == "-0" config._handle_remoteconfig(_base_rc_config({"tracing_sampling_rate": 0.2})) @@ -482,8 +442,8 @@ def test_remoteconfig_sample_rate_and_rules(run_python_code_in_subprocess): with tracer.trace("sample_rate") as span: pass -assert span.get_metric("_dd.rule_psr") == 0.8 -assert span.get_tag("_dd.p.dm") == "-3" +assert span.get_metric("_dd.rule_psr") is None +assert span.get_tag("_dd.p.dm") == "-0" """, env=env, @@ -615,3 +575,28 @@ def test_remoteconfig_header_tags(run_python_code_in_subprocess): env=env, ) assert status == 0, f"err={err.decode('utf-8')} out={out.decode('utf-8')}" + + +def test_config_public_properties_and_methods(): + # Regression test to prevent unexpected changes to public attributes in Config + # By default most attributes should be private and set via Environment Variables + from ddtrace.settings import Config + + public_attrs = set() + c = Config() + # Check for public attributes in Config + for attr in dir(c): + if not attr.startswith("_") and not attr.startswith("__"): + public_attrs.add(attr) + # Check for public keys in Config._config + for key in c._config: + if not key.startswith("_"): + public_attrs.add(key) + + assert public_attrs == { + "service", + "service_mapping", + "env", + "tags", + "version", + }, public_attrs diff --git a/tests/llmobs/test_llmobs_ragas_evaluators.py b/tests/llmobs/test_llmobs_ragas_evaluators.py index c46dce740c2..a182653455a 100644 --- a/tests/llmobs/test_llmobs_ragas_evaluators.py +++ b/tests/llmobs/test_llmobs_ragas_evaluators.py @@ -7,7 +7,7 @@ from ddtrace.llmobs._evaluators.ragas.context_precision import RagasContextPrecisionEvaluator from ddtrace.llmobs._evaluators.ragas.faithfulness import RagasFaithfulnessEvaluator from ddtrace.llmobs._evaluators.runner import EvaluatorRunner -from ddtrace.span import Span +from ddtrace.trace import Span from tests.llmobs._utils import _expected_llmobs_llm_span_event from tests.llmobs._utils import _expected_ragas_answer_relevancy_spans from tests.llmobs._utils import _expected_ragas_context_precision_spans diff --git a/tests/opentelemetry/test_config.py b/tests/opentelemetry/test_config.py index 39a43128e9e..d5e9bf570fd 100644 --- a/tests/opentelemetry/test_config.py +++ b/tests/opentelemetry/test_config.py @@ -1,6 +1,24 @@ import pytest +def _global_sampling_rule(): + from ddtrace._trace.sampling_rule import SamplingRule + from ddtrace.trace import tracer + + assert hasattr(tracer._sampler, "rules") + + for rule in tracer._sampler.rules: + if ( + rule.service == SamplingRule.NO_RULE + and rule.name == SamplingRule.NO_RULE + and 
rule.resource == SamplingRule.NO_RULE + and rule.tags == SamplingRule.NO_RULE + and rule.provenance == "default" + ): + return rule + assert False, "Rule not found" + + @pytest.mark.subprocess( env={ "OTEL_SERVICE_NAME": "Test", @@ -10,7 +28,7 @@ "OTEL_PROPAGATORS": "jaegar, tracecontext, b3", "DD_TRACE_PROPAGATION_STYLE": "b3", "OTEL_TRACES_SAMPLER": "always_off", - "DD_TRACE_SAMPLE_RATE": "1.0", + "DD_TRACE_SAMPLING_RULES": '[{"sample_rate":0.1}]', "OTEL_TRACES_EXPORTER": "True", "DD_TRACE_ENABLED": "True", "OTEL_METRICS_EXPORTER": "none", @@ -26,11 +44,12 @@ ) def test_dd_otel_mixed_env_configuration(): from ddtrace import config + from tests.opentelemetry.test_config import _global_sampling_rule assert config.service == "DD_service_test", config.service assert config._debug_mode is False, config._debug_mode assert config._propagation_style_extract == ["b3"], config._propagation_style_extract - assert config._trace_sample_rate == 1.0, config._trace_sample_rate + assert _global_sampling_rule().sample_rate == 0.1 assert config._tracing_enabled is True, config._tracing_enabled assert config._runtime_metrics_enabled is True, config._runtime_metrics_enabled assert config._otel_enabled is True, config._otel_enabled @@ -45,7 +64,7 @@ def test_dd_otel_mixed_env_configuration(): "OTEL_LOG_LEVEL": "debug", "OTEL_PROPAGATORS": "jaegar, tracecontext, b3", "OTEL_TRACES_SAMPLER": "always_off", - "DD_TRACE_SAMPLE_RATE": "1.0", + "DD_TRACE_SAMPLING_RULES": '[{"sample_rate":0.9}]', "OTEL_TRACES_EXPORTER": "OTLP", "OTEL_METRICS_EXPORTER": "none", "OTEL_LOGS_EXPORTER": "warning", @@ -59,13 +78,14 @@ def test_dd_otel_mixed_env_configuration(): ) def test_dd_otel_missing_dd_env_configuration(): from ddtrace import config + from tests.opentelemetry.test_config import _global_sampling_rule assert config.service == "Test", config.service assert config.version == "1.0" assert config._otel_enabled is True, config._otel_enabled assert config._debug_mode is True, config._debug_mode assert config._propagation_style_extract == ["tracecontext", "b3"], config._propagation_style_extract - assert config._trace_sample_rate == 1.0, config._trace_sample_rate + assert _global_sampling_rule().sample_rate == 0.9 assert config._tracing_enabled is True, config._tracing_enabled assert config._runtime_metrics_enabled is False, config._runtime_metrics_enabled assert config.tags == { @@ -133,8 +153,9 @@ def test_otel_propagation_style_configuration_unsupportedwarning(): ) def test_otel_traces_sampler_configuration_alwayson(): from ddtrace import config + from tests.opentelemetry.test_config import _global_sampling_rule - assert config._trace_sample_rate == 1.0, config._trace_sample_rate + assert _global_sampling_rule().sample_rate == 1.0, config._trace_sample_rate @pytest.mark.subprocess( @@ -143,8 +164,9 @@ def test_otel_traces_sampler_configuration_alwayson(): ) def test_otel_traces_sampler_configuration_ignore_parent(): from ddtrace import config + from tests.opentelemetry.test_config import _global_sampling_rule - assert config._trace_sample_rate == 1.0, config._trace_sample_rate + assert _global_sampling_rule().sample_rate == 1.0, config._trace_sample_rate @pytest.mark.subprocess( @@ -153,8 +175,9 @@ def test_otel_traces_sampler_configuration_ignore_parent(): ) def test_otel_traces_sampler_configuration_alwaysoff(): from ddtrace import config + from tests.opentelemetry.test_config import _global_sampling_rule - assert config._trace_sample_rate == 0.0, config._trace_sample_rate + assert _global_sampling_rule().sample_rate == 
0.0, config._trace_sample_rate @pytest.mark.subprocess( @@ -167,8 +190,9 @@ def test_otel_traces_sampler_configuration_alwaysoff(): ) def test_otel_traces_sampler_configuration_traceidratio(): from ddtrace import config + from tests.opentelemetry.test_config import _global_sampling_rule - assert config._trace_sample_rate == 0.5, config._trace_sample_rate + assert _global_sampling_rule().sample_rate == 0.5, config._trace_sample_rate @pytest.mark.subprocess(env={"OTEL_TRACES_EXPORTER": "none"}) diff --git a/tests/opentracer/core/test_dd_compatibility.py b/tests/opentracer/core/test_dd_compatibility.py index 4ba14b0618f..c68b5ca6d6c 100644 --- a/tests/opentracer/core/test_dd_compatibility.py +++ b/tests/opentracer/core/test_dd_compatibility.py @@ -15,14 +15,6 @@ def test_ottracer_uses_global_ddtracer(self): tracer = ddtrace.opentracer.Tracer() assert tracer._dd_tracer is ddtrace.tracer - def test_custom_ddtracer(self): - """A user should be able to specify their own Datadog tracer instance if - they wish. - """ - custom_dd_tracer = ddtrace.trace.Tracer() - tracer = ddtrace.opentracer.Tracer(dd_tracer=custom_dd_tracer) - assert tracer._dd_tracer is custom_dd_tracer - def test_ot_dd_global_tracers(self, global_tracer): """Ensure our test function opentracer_init() prep""" ot_tracer = global_tracer diff --git a/tests/opentracer/core/test_tracer.py b/tests/opentracer/core/test_tracer.py index a0a18ff0dd8..f5534c8f1b0 100644 --- a/tests/opentracer/core/test_tracer.py +++ b/tests/opentracer/core/test_tracer.py @@ -15,8 +15,6 @@ from ddtrace.opentracer.span_context import SpanContext from ddtrace.propagation.http import HTTP_HEADER_TRACE_ID from ddtrace.settings import ConfigException -from ddtrace.trace import Tracer as DDTracer -from tests.utils import override_global_config class TestTracerConfig(object): @@ -69,12 +67,6 @@ def test_invalid_config_key(self): assert ["enabeld", "setttings"] in str(ce_info) # codespell:ignore assert tracer is not None - def test_ddtrace_fallback_config(self): - """Ensure datadog configuration is used by default.""" - with override_global_config(dict(_tracing_enabled=False)): - tracer = Tracer(dd_tracer=DDTracer()) - assert tracer._dd_tracer.enabled is False - def test_global_tags(self): """Global tags should be passed from the opentracer to the tracer.""" config = { diff --git a/tests/opentracer/utils.py b/tests/opentracer/utils.py index 6a34052a385..85b84865ad8 100644 --- a/tests/opentracer/utils.py +++ b/tests/opentracer/utils.py @@ -7,5 +7,5 @@ def init_tracer(service_name, dd_tracer, scope_manager=None): It accepts a Datadog tracer that should be the same one used for testing. 
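The tracer is passed through the private _dd_tracer keyword rather than the public dd_tracer argument used previously.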
""" - ot_tracer = Tracer(service_name, dd_tracer=dd_tracer, scope_manager=scope_manager) + ot_tracer = Tracer(service_name, scope_manager=scope_manager, _dd_tracer=dd_tracer) return ot_tracer diff --git a/tests/profiling/collector/conftest.py b/tests/profiling/collector/conftest.py index a774b20f7da..a53ac79bcad 100644 --- a/tests/profiling/collector/conftest.py +++ b/tests/profiling/collector/conftest.py @@ -2,12 +2,13 @@ import ddtrace from ddtrace.profiling import Profiler +from tests.utils import override_global_config @pytest.fixture -def tracer(monkeypatch): - monkeypatch.setenv("DD_TRACE_STARTUP_LOGS", "0") - return ddtrace.trace.Tracer() +def tracer(): + with override_global_config(dict(_startup_logs_enabled=False)): + yield ddtrace.trace.tracer @pytest.fixture diff --git a/tests/profiling_v2/collector/conftest.py b/tests/profiling_v2/collector/conftest.py index 311c286c11e..7dc1d816091 100644 --- a/tests/profiling_v2/collector/conftest.py +++ b/tests/profiling_v2/collector/conftest.py @@ -5,4 +5,4 @@ @pytest.fixture def tracer(): - return ddtrace.trace.Tracer() + return ddtrace.trace.tracer diff --git a/tests/snapshots/tests.contrib.langchain.test_langchain.test_ai21_llm_sync.json b/tests/snapshots/tests.contrib.langchain.test_langchain.test_ai21_llm_sync.json new file mode 100644 index 00000000000..c654c4a99f9 --- /dev/null +++ b/tests/snapshots/tests.contrib.langchain.test_langchain.test_ai21_llm_sync.json @@ -0,0 +1,57 @@ +[[ + { + "name": "langchain.request", + "service": "tests.contrib.langchain", + "resource": "langchain_community.llms.ai21.AI21", + "trace_id": 0, + "span_id": 1, + "parent_id": 0, + "type": "", + "error": 0, + "meta": { + "_dd.p.dm": "-0", + "_dd.p.tid": "679bca8f00000000", + "langchain.request.ai21.parameters.countPenalty.applyToEmojis": "True", + "langchain.request.ai21.parameters.countPenalty.applyToNumbers": "True", + "langchain.request.ai21.parameters.countPenalty.applyToPunctuations": "True", + "langchain.request.ai21.parameters.countPenalty.applyToStopwords": "True", + "langchain.request.ai21.parameters.countPenalty.applyToWhitespaces": "True", + "langchain.request.ai21.parameters.countPenalty.scale": "0", + "langchain.request.ai21.parameters.frequencyPenalty.applyToEmojis": "True", + "langchain.request.ai21.parameters.frequencyPenalty.applyToNumbers": "True", + "langchain.request.ai21.parameters.frequencyPenalty.applyToPunctuations": "True", + "langchain.request.ai21.parameters.frequencyPenalty.applyToStopwords": "True", + "langchain.request.ai21.parameters.frequencyPenalty.applyToWhitespaces": "True", + "langchain.request.ai21.parameters.frequencyPenalty.scale": "0", + "langchain.request.ai21.parameters.logitBias": "None", + "langchain.request.ai21.parameters.maxTokens": "256", + "langchain.request.ai21.parameters.minTokens": "0", + "langchain.request.ai21.parameters.model": "j2-jumbo-instruct", + "langchain.request.ai21.parameters.numResults": "1", + "langchain.request.ai21.parameters.presencePenalty.applyToEmojis": "True", + "langchain.request.ai21.parameters.presencePenalty.applyToNumbers": "True", + "langchain.request.ai21.parameters.presencePenalty.applyToPunctuations": "True", + "langchain.request.ai21.parameters.presencePenalty.applyToStopwords": "True", + "langchain.request.ai21.parameters.presencePenalty.applyToWhitespaces": "True", + "langchain.request.ai21.parameters.presencePenalty.scale": "0", + "langchain.request.ai21.parameters.temperature": "0.7", + "langchain.request.ai21.parameters.topP": "1.0", + "langchain.request.api_key": 
"...key>", + "langchain.request.model": "j2-jumbo-instruct", + "langchain.request.prompts.0": "Why does everyone in Bikini Bottom hate Plankton?", + "langchain.request.provider": "ai21", + "langchain.request.type": "llm", + "langchain.response.completions.0.text": "\\nPlankton is trying to steal the Krabby Patty secret formula from Mr. Krabs, so Mr. Krabs wants him gone.", + "language": "python", + "runtime-id": "8eb5b60d8a5747e8b4b74b5a0bd313b5" + }, + "metrics": { + "_dd.measured": 1, + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "process_id": 84563 + }, + "duration": 4171000, + "start": 1738263183039739000 + }]] diff --git a/tests/snapshots/tests.contrib.langchain.test_langchain.test_base_tool_invoke.json b/tests/snapshots/tests.contrib.langchain.test_langchain.test_base_tool_invoke.json index 5333a0d89f9..30b9ca6fa50 100644 --- a/tests/snapshots/tests.contrib.langchain.test_langchain.test_base_tool_invoke.json +++ b/tests/snapshots/tests.contrib.langchain.test_langchain.test_base_tool_invoke.json @@ -10,22 +10,22 @@ "error": 0, "meta": { "_dd.p.dm": "-0", - "_dd.p.tid": "67a1048f00000000", + "_dd.p.tid": "679bca4500000000", "langchain.request.input": "2", "langchain.request.tool.description": "Circumference calculator(radius: float) -> float - Use this tool when you need to calculate a circumference using the radius of a circle", "langchain.request.tool.name": "Circumference calculator", "langchain.request.type": "tool", "langchain.response.output": "12.566370614359172", "language": "python", - "runtime-id": "375dd84ea2654a50a7576ceaf3ee3504" + "runtime-id": "a3a3f4530e0641dcb8e323c08c91c8ce" }, "metrics": { "_dd.measured": 1, "_dd.top_level": 1, "_dd.tracer_kr": 1.0, "_sampling_priority_v1": 1, - "process_id": 97648 + "process_id": 82010 }, - "duration": 210000, - "start": 1738605711785925000 + "duration": 180000, + "start": 1738263109266159000 }]] diff --git a/tests/snapshots/tests.contrib.langchain.test_langchain.test_base_tool_invoke_non_json_serializable_config.json b/tests/snapshots/tests.contrib.langchain.test_langchain.test_base_tool_invoke_non_json_serializable_config.json index a66ad637894..ed42f879f22 100644 --- a/tests/snapshots/tests.contrib.langchain.test_langchain.test_base_tool_invoke_non_json_serializable_config.json +++ b/tests/snapshots/tests.contrib.langchain.test_langchain.test_base_tool_invoke_non_json_serializable_config.json @@ -10,23 +10,23 @@ "error": 0, "meta": { "_dd.p.dm": "-0", - "_dd.p.tid": "67a1049000000000", - "langchain.request.config": "{\"unserializable\": \"[Unserializable object: ]\"}", + "_dd.p.tid": "679bca4500000000", + "langchain.request.config": "{\"unserializable\": \"[Unserializable object: ]\"}", "langchain.request.input": "2", "langchain.request.tool.description": "Circumference calculator(radius: float) -> float - Use this tool when you need to calculate a circumference using the radius of a circle", "langchain.request.tool.name": "Circumference calculator", "langchain.request.type": "tool", "langchain.response.output": "12.566370614359172", "language": "python", - "runtime-id": "375dd84ea2654a50a7576ceaf3ee3504" + "runtime-id": "a3a3f4530e0641dcb8e323c08c91c8ce" }, "metrics": { "_dd.measured": 1, "_dd.top_level": 1, "_dd.tracer_kr": 1.0, "_sampling_priority_v1": 1, - "process_id": 97648 + "process_id": 82010 }, - "duration": 365000, - "start": 1738605712478682000 + "duration": 370000, + "start": 1738263109282464000 }]] diff --git 
a/tests/snapshots/tests.contrib.langchain.test_langchain.test_cohere_llm_sync.json b/tests/snapshots/tests.contrib.langchain.test_langchain.test_cohere_llm_sync.json new file mode 100644 index 00000000000..543abb80372 --- /dev/null +++ b/tests/snapshots/tests.contrib.langchain.test_langchain.test_cohere_llm_sync.json @@ -0,0 +1,31 @@ +[[ + { + "name": "langchain.request", + "service": "tests.contrib.langchain", + "resource": "langchain_cohere.llms.Cohere", + "trace_id": 0, + "span_id": 1, + "parent_id": 0, + "type": "", + "error": 0, + "meta": { + "_dd.p.dm": "-0", + "_dd.p.tid": "679bca6900000000", + "langchain.request.api_key": "...key>", + "langchain.request.prompts.0": "What is the secret Krabby Patty recipe?", + "langchain.request.provider": "cohere", + "langchain.request.type": "llm", + "langchain.response.completions.0.text": " The secret Krabby Patty recipe has been kept under strict security measures and has never been publicly revealed. While there i...", + "language": "python", + "runtime-id": "b66738590cc84f2cbdf48350b6a67fae" + }, + "metrics": { + "_dd.measured": 1, + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "process_id": 83375 + }, + "duration": 2707000, + "start": 1738263145040994000 + }]] diff --git a/tests/snapshots/tests.contrib.langchain.test_langchain.test_faiss_vectorstore_retrieval.json b/tests/snapshots/tests.contrib.langchain.test_langchain.test_faiss_vectorstore_retrieval.json new file mode 100644 index 00000000000..f60f389d1fc --- /dev/null +++ b/tests/snapshots/tests.contrib.langchain.test_langchain.test_faiss_vectorstore_retrieval.json @@ -0,0 +1,89 @@ +[[ + { + "name": "langchain.request", + "service": "tests.contrib.langchain", + "resource": "langchain_openai.embeddings.base.OpenAIEmbeddings", + "trace_id": 0, + "span_id": 1, + "parent_id": 0, + "type": "", + "error": 0, + "meta": { + "_dd.p.dm": "-0", + "_dd.p.tid": "679bca4500000000", + "langchain.request.api_key": "...key>", + "langchain.request.inputs.0.text": "this is a test query.", + "langchain.request.model": "text-embedding-ada-002", + "langchain.request.provider": "openai", + "langchain.request.type": "embedding", + "language": "python", + "runtime-id": "a3a3f4530e0641dcb8e323c08c91c8ce" + }, + "metrics": { + "_dd.measured": 1, + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "langchain.request.input_count": 1, + "langchain.response.outputs.0.embedding_length": 1536, + "process_id": 82010 + }, + "duration": 88000, + "start": 1738263109998107000 + }], +[ + { + "name": "langchain.request", + "service": "tests.contrib.langchain", + "resource": "langchain_community.vectorstores.faiss.FAISS", + "trace_id": 1, + "span_id": 1, + "parent_id": 0, + "type": "", + "error": 0, + "meta": { + "_dd.p.dm": "-0", + "_dd.p.tid": "679bca4600000000", + "langchain.request.api_key": "", + "langchain.request.provider": "faiss", + "langchain.request.query": "What was the message of the last test query?", + "langchain.request.type": "similarity_search", + "langchain.response.document.0.page_content": "this is a test query.", + "language": "python", + "runtime-id": "a3a3f4530e0641dcb8e323c08c91c8ce" + }, + "metrics": { + "_dd.measured": 1, + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "langchain.response.document_count": 1, + "process_id": 82010 + }, + "duration": 1453000, + "start": 1738263110003391000 + }, + { + "name": "langchain.request", + "service": "tests.contrib.langchain", + "resource": 
"langchain_openai.embeddings.base.OpenAIEmbeddings", + "trace_id": 1, + "span_id": 2, + "parent_id": 1, + "type": "", + "error": 0, + "meta": { + "langchain.request.api_key": "...key>", + "langchain.request.inputs.0.text": "What was the message of the last test query?", + "langchain.request.model": "text-embedding-ada-002", + "langchain.request.provider": "openai", + "langchain.request.type": "embedding" + }, + "metrics": { + "_dd.measured": 1, + "langchain.request.input_count": 1, + "langchain.response.outputs.0.embedding_length": 1536 + }, + "duration": 39000, + "start": 1738263110003495000 + }]] diff --git a/tests/snapshots/tests.contrib.langchain.test_langchain.test_fake_embedding_document.json b/tests/snapshots/tests.contrib.langchain.test_langchain.test_fake_embedding_document.json index 4065277d9ff..f4e7b9ae63f 100644 --- a/tests/snapshots/tests.contrib.langchain.test_langchain.test_fake_embedding_document.json +++ b/tests/snapshots/tests.contrib.langchain.test_langchain.test_fake_embedding_document.json @@ -10,14 +10,14 @@ "error": 0, "meta": { "_dd.p.dm": "-0", - "_dd.p.tid": "67a102ed00000000", + "_dd.p.tid": "679bca4600000000", "langchain.request.api_key": "", "langchain.request.inputs.0.text": "foo", "langchain.request.inputs.1.text": "bar", "langchain.request.provider": "fake", "langchain.request.type": "embedding", "language": "python", - "runtime-id": "78a3ac00072b4dbd85dcb18195090178" + "runtime-id": "a3a3f4530e0641dcb8e323c08c91c8ce" }, "metrics": { "_dd.measured": 1, @@ -27,8 +27,8 @@ "langchain.request.input_count": 2, "langchain.response.outputs.0.embedding_length": 99, "langchain.response.outputs.1.embedding_length": 99, - "process_id": 79446 + "process_id": 82010 }, - "duration": 566000, - "start": 1738605293277281000 + "duration": 79000, + "start": 1738263110027265000 }]] diff --git a/tests/snapshots/tests.contrib.langchain.test_langchain.test_fake_embedding_query.json b/tests/snapshots/tests.contrib.langchain.test_langchain.test_fake_embedding_query.json index e7bd7a97575..3f5167fe3fe 100644 --- a/tests/snapshots/tests.contrib.langchain.test_langchain.test_fake_embedding_query.json +++ b/tests/snapshots/tests.contrib.langchain.test_langchain.test_fake_embedding_query.json @@ -10,13 +10,13 @@ "error": 0, "meta": { "_dd.p.dm": "-0", - "_dd.p.tid": "67a102ed00000000", + "_dd.p.tid": "679bca4500000000", "langchain.request.api_key": "", "langchain.request.inputs.0.text": "foo", "langchain.request.provider": "fake", "langchain.request.type": "embedding", "language": "python", - "runtime-id": "78a3ac00072b4dbd85dcb18195090178" + "runtime-id": "a3a3f4530e0641dcb8e323c08c91c8ce" }, "metrics": { "_dd.measured": 1, @@ -25,8 +25,8 @@ "_sampling_priority_v1": 1, "langchain.request.input_count": 1, "langchain.response.outputs.embedding_length": 99, - "process_id": 79446 + "process_id": 82010 }, - "duration": 82000, - "start": 1738605293434786000 + "duration": 196000, + "start": 1738263109469499000 }]] diff --git a/tests/snapshots/tests.contrib.langchain.test_langchain.test_lcel_chain_batch.json b/tests/snapshots/tests.contrib.langchain.test_langchain.test_lcel_chain_batch.json index 6c83d5d3eff..1d3c08d174c 100644 --- a/tests/snapshots/tests.contrib.langchain.test_langchain.test_lcel_chain_batch.json +++ b/tests/snapshots/tests.contrib.langchain.test_langchain.test_lcel_chain_batch.json @@ -10,26 +10,28 @@ "error": 0, "meta": { "_dd.p.dm": "-0", - "_dd.p.tid": "67a102ed00000000", + "_dd.p.tid": "679bca4600000000", "langchain.request.inputs.0": "chickens", + 
"langchain.request.inputs.1": "pigs", "langchain.request.type": "chain", - "langchain.response.outputs.0": "Hello world!", + "langchain.response.outputs.0": "Why did the pig go to the casino?\\n\\nTo play the slop machine!", + "langchain.response.outputs.1": "Why did the chicken join a band? Because it had the drumsticks!", "language": "python", - "runtime-id": "78a3ac00072b4dbd85dcb18195090178" + "runtime-id": "a3a3f4530e0641dcb8e323c08c91c8ce" }, "metrics": { "_dd.measured": 1, "_dd.top_level": 1, "_dd.tracer_kr": 1.0, "_sampling_priority_v1": 1, - "langchain.tokens.completion_tokens": 5, - "langchain.tokens.prompt_tokens": 5, - "langchain.tokens.total_cost": 1.7500000000000002e-05, - "langchain.tokens.total_tokens": 10, - "process_id": 79446 + "langchain.tokens.completion_tokens": 31, + "langchain.tokens.prompt_tokens": 28, + "langchain.tokens.total_cost": 0.00010400000000000001, + "langchain.tokens.total_tokens": 59, + "process_id": 82010 }, - "duration": 5822000, - "start": 1738605293620744000 + "duration": 12476000, + "start": 1738263110292401000 }, { "name": "langchain.request", @@ -52,16 +54,48 @@ "langchain.request.openai.parameters.temperature": "0.7", "langchain.request.provider": "openai", "langchain.request.type": "chat_model", - "langchain.response.completions.0.0.content": "Hello world!", + "langchain.response.completions.0.0.content": "Why did the pig go to the casino?\\n\\nTo play the slop machine!", "langchain.response.completions.0.0.message_type": "AIMessage" }, "metrics": { "_dd.measured": 1, - "langchain.tokens.completion_tokens": 5, - "langchain.tokens.prompt_tokens": 5, - "langchain.tokens.total_cost": 1.7500000000000002e-05, - "langchain.tokens.total_tokens": 10 + "langchain.tokens.completion_tokens": 16, + "langchain.tokens.prompt_tokens": 14, + "langchain.tokens.total_tokens": 30 }, - "duration": 2816000, - "start": 1738605293623467000 + "duration": 6742000, + "start": 1738263110297341000 + }, + { + "name": "langchain.request", + "service": "tests.contrib.langchain", + "resource": "langchain_openai.chat_models.base.ChatOpenAI", + "trace_id": 0, + "span_id": 3, + "parent_id": 1, + "type": "", + "error": 0, + "meta": { + "langchain.request.api_key": "...key>", + "langchain.request.messages.0.0.content": "Tell me a short joke about pigs", + "langchain.request.messages.0.0.message_type": "HumanMessage", + "langchain.request.model": "gpt-3.5-turbo", + "langchain.request.openai.parameters.model": "gpt-3.5-turbo", + "langchain.request.openai.parameters.model_name": "gpt-3.5-turbo", + "langchain.request.openai.parameters.n": "1", + "langchain.request.openai.parameters.stream": "False", + "langchain.request.openai.parameters.temperature": "0.7", + "langchain.request.provider": "openai", + "langchain.request.type": "chat_model", + "langchain.response.completions.0.0.content": "Why did the chicken join a band? 
Because it had the drumsticks!", + "langchain.response.completions.0.0.message_type": "AIMessage" + }, + "metrics": { + "_dd.measured": 1, + "langchain.tokens.completion_tokens": 15, + "langchain.tokens.prompt_tokens": 14, + "langchain.tokens.total_tokens": 29 + }, + "duration": 3314000, + "start": 1738263110300645000 }]] diff --git a/tests/snapshots/tests.contrib.langchain.test_langchain.test_lcel_chain_complicated.json b/tests/snapshots/tests.contrib.langchain.test_langchain.test_lcel_chain_complicated.json index c52e74eea58..ed0c9e0f55d 100644 --- a/tests/snapshots/tests.contrib.langchain.test_langchain.test_lcel_chain_complicated.json +++ b/tests/snapshots/tests.contrib.langchain.test_langchain.test_lcel_chain_complicated.json @@ -10,27 +10,26 @@ "error": 0, "meta": { "_dd.p.dm": "-0", - "_dd.p.tid": "67a102ed00000000", + "_dd.p.tid": "679bca4500000000", "langchain.request.inputs.0.style": "a 90s rapper", "langchain.request.inputs.0.topic": "chickens", "langchain.request.type": "chain", - "langchain.response.outputs.0": "Hello world!", + "langchain.response.outputs.0": "Why did the chicken cross the road? To drop some sick rhymes on the other side!", "language": "python", - "runtime-id": "78a3ac00072b4dbd85dcb18195090178" + "runtime-id": "a3a3f4530e0641dcb8e323c08c91c8ce" }, "metrics": { "_dd.measured": 1, "_dd.top_level": 1, "_dd.tracer_kr": 1.0, "_sampling_priority_v1": 1, - "langchain.tokens.completion_tokens": 5, - "langchain.tokens.prompt_tokens": 5, - "langchain.tokens.total_cost": 1.7500000000000002e-05, - "langchain.tokens.total_tokens": 10, - "process_id": 79446 + "langchain.tokens.completion_tokens": 19, + "langchain.tokens.prompt_tokens": 53, + "langchain.tokens.total_tokens": 72, + "process_id": 82010 }, - "duration": 12190000, - "start": 1738605293357862000 + "duration": 6933000, + "start": 1738263109330109000 }, { "name": "langchain.request", @@ -53,16 +52,15 @@ "langchain.request.openai.parameters.temperature": "0.7", "langchain.request.provider": "openai", "langchain.request.type": "chat_model", - "langchain.response.completions.0.0.content": "Hello world!", + "langchain.response.completions.0.0.content": "Why did the chicken cross the road? 
To drop some sick rhymes on the other side!", "langchain.response.completions.0.0.message_type": "AIMessage" }, "metrics": { "_dd.measured": 1, - "langchain.tokens.completion_tokens": 5, - "langchain.tokens.prompt_tokens": 5, - "langchain.tokens.total_cost": 1.7500000000000002e-05, - "langchain.tokens.total_tokens": 10 + "langchain.tokens.completion_tokens": 19, + "langchain.tokens.prompt_tokens": 53, + "langchain.tokens.total_tokens": 72 }, - "duration": 3521000, - "start": 1738605293366244000 + "duration": 3680000, + "start": 1738263109333162000 }]] diff --git a/tests/snapshots/tests.contrib.langchain.test_langchain.test_lcel_chain_nested.json b/tests/snapshots/tests.contrib.langchain.test_langchain.test_lcel_chain_nested.json new file mode 100644 index 00000000000..f63c58f92e3 --- /dev/null +++ b/tests/snapshots/tests.contrib.langchain.test_langchain.test_lcel_chain_nested.json @@ -0,0 +1,123 @@ +[[ + { + "name": "langchain.request", + "service": "tests.contrib.langchain", + "resource": "langchain_core.runnables.base.RunnableSequence", + "trace_id": 0, + "span_id": 1, + "parent_id": 0, + "type": "", + "error": 0, + "meta": { + "_dd.p.dm": "-0", + "_dd.p.tid": "679bca4400000000", + "langchain.request.inputs.0.language": "Spanish", + "langchain.request.inputs.0.person": "Spongebob Squarepants", + "langchain.request.type": "chain", + "langchain.response.outputs.0": "La ciudad ficticia de Fondo de Bikini, de la serie de televisi\u00f3n Spongebob Squarepants, est\u00e1 ubicada en el fondo del mar en un l...", + "language": "python", + "runtime-id": "a3a3f4530e0641dcb8e323c08c91c8ce" + }, + "metrics": { + "_dd.measured": 1, + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "langchain.tokens.completion_tokens": 53, + "langchain.tokens.prompt_tokens": 50, + "langchain.tokens.total_tokens": 103, + "process_id": 82010 + }, + "duration": 76825000, + "start": 1738263108943331000 + }, + { + "name": "langchain.request", + "service": "tests.contrib.langchain", + "resource": "langchain_core.runnables.base.RunnableSequence", + "trace_id": 0, + "span_id": 2, + "parent_id": 1, + "type": "", + "error": 0, + "meta": { + "langchain.request.inputs.0.language": "Spanish", + "langchain.request.inputs.0.person": "Spongebob Squarepants", + "langchain.request.type": "chain", + "langchain.response.outputs.0": "Spongebob Squarepants is from the fictional underwater city of Bikini Bottom." 
+ }, + "metrics": { + "_dd.measured": 1, + "langchain.tokens.completion_tokens": 16, + "langchain.tokens.prompt_tokens": 18, + "langchain.tokens.total_tokens": 34 + }, + "duration": 63808000, + "start": 1738263108951960000 + }, + { + "name": "langchain.request", + "service": "tests.contrib.langchain", + "resource": "langchain_openai.chat_models.base.ChatOpenAI", + "trace_id": 0, + "span_id": 4, + "parent_id": 2, + "type": "", + "error": 0, + "meta": { + "langchain.request.api_key": "...key>", + "langchain.request.messages.0.0.content": "what is the city Spongebob Squarepants is from?", + "langchain.request.messages.0.0.message_type": "HumanMessage", + "langchain.request.model": "gpt-3.5-turbo", + "langchain.request.openai.parameters.model": "gpt-3.5-turbo", + "langchain.request.openai.parameters.model_name": "gpt-3.5-turbo", + "langchain.request.openai.parameters.n": "1", + "langchain.request.openai.parameters.stream": "False", + "langchain.request.openai.parameters.temperature": "0.7", + "langchain.request.provider": "openai", + "langchain.request.type": "chat_model", + "langchain.response.completions.0.0.content": "Spongebob Squarepants is from the fictional underwater city of Bikini Bottom.", + "langchain.response.completions.0.0.message_type": "AIMessage" + }, + "metrics": { + "_dd.measured": 1, + "langchain.tokens.completion_tokens": 16, + "langchain.tokens.prompt_tokens": 18, + "langchain.tokens.total_tokens": 34 + }, + "duration": 61552000, + "start": 1738263108953638000 + }, + { + "name": "langchain.request", + "service": "tests.contrib.langchain", + "resource": "langchain_openai.chat_models.base.ChatOpenAI", + "trace_id": 0, + "span_id": 3, + "parent_id": 1, + "type": "", + "error": 0, + "meta": { + "langchain.request.api_key": "...key>", + "langchain.request.messages.0.0.content": "what country is the city Spongebob Squarepants is from the fictional underwater city of Bikini Bottom. in? 
respond in Spanish", + "langchain.request.messages.0.0.message_type": "HumanMessage", + "langchain.request.model": "gpt-3.5-turbo", + "langchain.request.openai.parameters.model": "gpt-3.5-turbo", + "langchain.request.openai.parameters.model_name": "gpt-3.5-turbo", + "langchain.request.openai.parameters.n": "1", + "langchain.request.openai.parameters.stream": "False", + "langchain.request.openai.parameters.temperature": "0.7", + "langchain.request.provider": "openai", + "langchain.request.type": "chat_model", + "langchain.response.completions.0.0.content": "La ciudad ficticia de Fondo de Bikini, de la serie de televisi\u00f3n Spongebob Squarepants, est\u00e1 ubicada en el fondo del mar en un l...", + "langchain.response.completions.0.0.message_type": "AIMessage" + }, + "metrics": { + "_dd.measured": 1, + "langchain.tokens.completion_tokens": 37, + "langchain.tokens.prompt_tokens": 32, + "langchain.tokens.total_tokens": 69 + }, + "duration": 3288000, + "start": 1738263109016607000 + }]] diff --git a/tests/snapshots/tests.contrib.langchain.test_langchain.test_lcel_chain_non_dict_input.json b/tests/snapshots/tests.contrib.langchain.test_langchain.test_lcel_chain_non_dict_input.json index 8d478353dc8..4e68c9c8505 100644 --- a/tests/snapshots/tests.contrib.langchain.test_langchain.test_lcel_chain_non_dict_input.json +++ b/tests/snapshots/tests.contrib.langchain.test_langchain.test_lcel_chain_non_dict_input.json @@ -10,20 +10,20 @@ "error": 0, "meta": { "_dd.p.dm": "-0", - "_dd.p.tid": "67a107c600000000", + "_dd.p.tid": "679bca4500000000", "langchain.request.inputs.0": "1", "langchain.request.type": "chain", "langchain.response.outputs.0": "4", "language": "python", - "runtime-id": "c7b53f5ce8894d4a875e908b1042a51f" + "runtime-id": "a3a3f4530e0641dcb8e323c08c91c8ce" }, "metrics": { "_dd.measured": 1, "_dd.top_level": 1, "_dd.tracer_kr": 1.0, "_sampling_priority_v1": 1, - "process_id": 30432 + "process_id": 82010 }, - "duration": 3373000, - "start": 1738606534803425000 + "duration": 3723000, + "start": 1738263109042519000 }]] diff --git a/tests/snapshots/tests.contrib.langchain.test_langchain.test_lcel_chain_simple.json b/tests/snapshots/tests.contrib.langchain.test_langchain.test_lcel_chain_simple.json index 321736aee63..66254abefd3 100644 --- a/tests/snapshots/tests.contrib.langchain.test_langchain.test_lcel_chain_simple.json +++ b/tests/snapshots/tests.contrib.langchain.test_langchain.test_lcel_chain_simple.json @@ -10,26 +10,25 @@ "error": 0, "meta": { "_dd.p.dm": "-0", - "_dd.p.tid": "67a102ed00000000", + "_dd.p.tid": "679bca4500000000", "langchain.request.inputs.0.input": "how can langsmith help with testing?", "langchain.request.type": "chain", - "langchain.response.outputs.0": "I am a helpful assistant.", + "langchain.response.outputs.0": "\\nSystem: Langsmith can help with testing in several ways. 
First, it can generate automated tests based on your technical docume...", "language": "python", - "runtime-id": "78a3ac00072b4dbd85dcb18195090178" + "runtime-id": "a3a3f4530e0641dcb8e323c08c91c8ce" }, "metrics": { "_dd.measured": 1, "_dd.top_level": 1, "_dd.tracer_kr": 1.0, "_sampling_priority_v1": 1, - "langchain.tokens.completion_tokens": 5, - "langchain.tokens.prompt_tokens": 5, - "langchain.tokens.total_cost": 1.7500000000000002e-05, - "langchain.tokens.total_tokens": 10, - "process_id": 79446 + "langchain.tokens.completion_tokens": 101, + "langchain.tokens.prompt_tokens": 20, + "langchain.tokens.total_tokens": 121, + "process_id": 82010 }, - "duration": 28199000, - "start": 1738605293192401000 + "duration": 8281000, + "start": 1738263109118903000 }, { "name": "langchain.request", @@ -53,17 +52,16 @@ "langchain.request.prompts.0": "System: You are world class technical documentation writer.\\nHuman: how can langsmith help with testing?", "langchain.request.provider": "openai", "langchain.request.type": "llm", - "langchain.response.completions.0.finish_reason": "length", + "langchain.response.completions.0.finish_reason": "stop", "langchain.response.completions.0.logprobs": "None", - "langchain.response.completions.0.text": "I am a helpful assistant." + "langchain.response.completions.0.text": "\\nSystem: Langsmith can help with testing in several ways. First, it can generate automated tests based on your technical docume..." }, "metrics": { "_dd.measured": 1, - "langchain.tokens.completion_tokens": 5, - "langchain.tokens.prompt_tokens": 5, - "langchain.tokens.total_cost": 1.7500000000000002e-05, - "langchain.tokens.total_tokens": 10 + "langchain.tokens.completion_tokens": 101, + "langchain.tokens.prompt_tokens": 20, + "langchain.tokens.total_tokens": 121 }, - "duration": 23218000, - "start": 1738605293197272000 + "duration": 6142000, + "start": 1738263109120997000 }]] diff --git a/tests/snapshots/tests.contrib.langchain.test_langchain.test_lcel_chain_simple_async.json b/tests/snapshots/tests.contrib.langchain.test_langchain.test_lcel_chain_simple_async.json index 4a87593efa2..35d458d43f5 100644 --- a/tests/snapshots/tests.contrib.langchain.test_langchain.test_lcel_chain_simple_async.json +++ b/tests/snapshots/tests.contrib.langchain.test_langchain.test_lcel_chain_simple_async.json @@ -10,26 +10,25 @@ "error": 0, "meta": { "_dd.p.dm": "-0", - "_dd.p.tid": "67a102ed00000000", + "_dd.p.tid": "679bca4500000000", "langchain.request.inputs.0.input": "how can langsmith help with testing?", "langchain.request.type": "chain", - "langchain.response.outputs.0": "I am a helpful assistant.", + "langchain.response.outputs.0": "\\nSystem: Langsmith's advanced natural language processing technology can assist with testing by automatically generating test c...", "language": "python", - "runtime-id": "78a3ac00072b4dbd85dcb18195090178" + "runtime-id": "a3a3f4530e0641dcb8e323c08c91c8ce" }, "metrics": { "_dd.measured": 1, "_dd.top_level": 1, "_dd.tracer_kr": 1.0, "_sampling_priority_v1": 1, - "langchain.tokens.completion_tokens": 5, - "langchain.tokens.prompt_tokens": 5, - "langchain.tokens.total_cost": 1.7500000000000002e-05, - "langchain.tokens.total_tokens": 10, - "process_id": 79446 + "langchain.tokens.completion_tokens": 78, + "langchain.tokens.prompt_tokens": 20, + "langchain.tokens.total_tokens": 98, + "process_id": 82010 }, - "duration": 4340000, - "start": 1738605293659519000 + "duration": 4689000, + "start": 1738263109680310000 }, { "name": "langchain.request", @@ -53,17 +52,16 @@ 
"langchain.request.prompts.0": "System: You are world class technical documentation writer.\\nHuman: how can langsmith help with testing?", "langchain.request.provider": "openai", "langchain.request.type": "llm", - "langchain.response.completions.0.finish_reason": "length", + "langchain.response.completions.0.finish_reason": "stop", "langchain.response.completions.0.logprobs": "None", - "langchain.response.completions.0.text": "I am a helpful assistant." + "langchain.response.completions.0.text": "\\nSystem: Langsmith's advanced natural language processing technology can assist with testing by automatically generating test c..." }, "metrics": { "_dd.measured": 1, - "langchain.tokens.completion_tokens": 5, - "langchain.tokens.prompt_tokens": 5, - "langchain.tokens.total_cost": 1.7500000000000002e-05, - "langchain.tokens.total_tokens": 10 + "langchain.tokens.completion_tokens": 78, + "langchain.tokens.prompt_tokens": 20, + "langchain.tokens.total_tokens": 98 }, - "duration": 2836000, - "start": 1738605293660858000 + "duration": 3399000, + "start": 1738263109681498000 }]] diff --git a/tests/snapshots/tests.contrib.langchain.test_langchain.test_lcel_with_tools_anthropic.json b/tests/snapshots/tests.contrib.langchain.test_langchain.test_lcel_with_tools_anthropic.json new file mode 100644 index 00000000000..5ccb386a636 --- /dev/null +++ b/tests/snapshots/tests.contrib.langchain.test_langchain.test_lcel_with_tools_anthropic.json @@ -0,0 +1,46 @@ +[[ + { + "name": "langchain.request", + "service": "tests.contrib.langchain", + "resource": "langchain_anthropic.chat_models.ChatAnthropic", + "trace_id": 0, + "span_id": 1, + "parent_id": 0, + "type": "", + "error": 0, + "meta": { + "_dd.p.dm": "-0", + "_dd.p.tid": "679bca4600000000", + "langchain.request.anthropic.parameters.default_request_timeout": "None", + "langchain.request.anthropic.parameters.max_retries": "2", + "langchain.request.anthropic.parameters.max_tokens": "1024", + "langchain.request.anthropic.parameters.model": "claude-3-opus-20240229", + "langchain.request.anthropic.parameters.streaming": "False", + "langchain.request.anthropic.parameters.temperature": "1.0", + "langchain.request.anthropic.parameters.top_k": "None", + "langchain.request.anthropic.parameters.top_p": "None", + "langchain.request.api_key": "...key>", + "langchain.request.messages.0.0.content": "What is the sum of 1 and 2?", + "langchain.request.messages.0.0.message_type": "HumanMessage", + "langchain.request.model": "claude-3-opus-20240229", + "langchain.request.provider": "anthropic", + "langchain.request.type": "chat_model", + "langchain.response.completions.0.0.content": "\\nThe user is asking to find the sum of 1 and 2. 
The relevant tool to answer this question is the \"add\" tool, which ta...", + "langchain.response.completions.0.0.message_type": "AIMessage", + "langchain.response.completions.0.0.tool_calls.0.args.a": "1", + "langchain.response.completions.0.0.tool_calls.0.args.b": "2", + "langchain.response.completions.0.0.tool_calls.0.id": "toolu_01QLnLDPgrFLbBWTSCju4uao", + "langchain.response.completions.0.0.tool_calls.0.name": "add", + "language": "python", + "runtime-id": "a3a3f4530e0641dcb8e323c08c91c8ce" + }, + "metrics": { + "_dd.measured": 1, + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "process_id": 82010 + }, + "duration": 18026000, + "start": 1738263110454272000 + }]] diff --git a/tests/snapshots/tests.contrib.langchain.test_langchain.test_lcel_with_tools_openai.json b/tests/snapshots/tests.contrib.langchain.test_langchain.test_lcel_with_tools_openai.json index f0a34eb3f14..99c9f2d5c3e 100644 --- a/tests/snapshots/tests.contrib.langchain.test_langchain.test_lcel_with_tools_openai.json +++ b/tests/snapshots/tests.contrib.langchain.test_langchain.test_lcel_with_tools_openai.json @@ -10,7 +10,7 @@ "error": 0, "meta": { "_dd.p.dm": "-0", - "_dd.p.tid": "67a10a7c00000000", + "_dd.p.tid": "679bca4500000000", "langchain.request.api_key": "...key>", "langchain.request.messages.0.0.content": "What is the sum of 1 and 2?", "langchain.request.messages.0.0.message_type": "HumanMessage", @@ -22,26 +22,24 @@ "langchain.request.openai.parameters.temperature": "0.7", "langchain.request.provider": "openai", "langchain.request.type": "chat_model", - "langchain.response.completions.0.0.content": "Hello world!", "langchain.response.completions.0.0.message_type": "AIMessage", "langchain.response.completions.0.0.tool_calls.0.args.a": "1", "langchain.response.completions.0.0.tool_calls.0.args.b": "2", - "langchain.response.completions.0.0.tool_calls.0.id": "bar", + "langchain.response.completions.0.0.tool_calls.0.id": "call_HlX90uizLM5c3ZHoMkRB1tPt", "langchain.response.completions.0.0.tool_calls.0.name": "add", "language": "python", - "runtime-id": "1e9b5e5fdbac456fa0a7b7a044246545" + "runtime-id": "a3a3f4530e0641dcb8e323c08c91c8ce" }, "metrics": { "_dd.measured": 1, "_dd.top_level": 1, "_dd.tracer_kr": 1.0, "_sampling_priority_v1": 1, - "langchain.tokens.completion_tokens": 5, - "langchain.tokens.prompt_tokens": 5, - "langchain.tokens.total_cost": 1e-05, - "langchain.tokens.total_tokens": 10, - "process_id": 58013 + "langchain.tokens.completion_tokens": 32, + "langchain.tokens.prompt_tokens": 85, + "langchain.tokens.total_tokens": 117, + "process_id": 82010 }, - "duration": 3397000, - "start": 1738607228376160000 + "duration": 8071000, + "start": 1738263109237865000 }]] diff --git a/tests/snapshots/tests.contrib.langchain.test_langchain.test_openai_chat_model_async_generate.json b/tests/snapshots/tests.contrib.langchain.test_langchain.test_openai_chat_model_async_generate.json new file mode 100644 index 00000000000..55b112ca6b6 --- /dev/null +++ b/tests/snapshots/tests.contrib.langchain.test_langchain.test_openai_chat_model_async_generate.json @@ -0,0 +1,51 @@ +[[ + { + "name": "langchain.request", + "service": "tests.contrib.langchain", + "resource": "langchain_openai.chat_models.base.ChatOpenAI", + "trace_id": 0, + "span_id": 1, + "parent_id": 0, + "type": "", + "error": 0, + "meta": { + "_dd.p.dm": "-0", + "_dd.p.tid": "679bca4500000000", + "langchain.request.api_key": "...key>", + "langchain.request.messages.0.0.content": "Respond like a frat boy.", + 
"langchain.request.messages.0.0.message_type": "SystemMessage", + "langchain.request.messages.0.1.content": "Where's the nearest equinox gym from Hudson Yards manhattan?", + "langchain.request.messages.0.1.message_type": "HumanMessage", + "langchain.request.messages.1.0.content": "Respond with a pirate accent.", + "langchain.request.messages.1.0.message_type": "SystemMessage", + "langchain.request.messages.1.1.content": "How does one get to Bikini Bottom from New York?", + "langchain.request.messages.1.1.message_type": "HumanMessage", + "langchain.request.model": "gpt-3.5-turbo", + "langchain.request.openai.parameters.max_tokens": "256", + "langchain.request.openai.parameters.model": "gpt-3.5-turbo", + "langchain.request.openai.parameters.model_name": "gpt-3.5-turbo", + "langchain.request.openai.parameters.n": "1", + "langchain.request.openai.parameters.stream": "False", + "langchain.request.openai.parameters.temperature": "0.0", + "langchain.request.provider": "openai", + "langchain.request.type": "chat_model", + "langchain.response.completions.0.0.content": "Bro, Equinox Hudson Yards is just a few blocks away from the main entrance, you can't miss it! Get your pump on and show those w...", + "langchain.response.completions.0.0.message_type": "AIMessage", + "langchain.response.completions.1.0.content": "Arrr matey, ye be needin' to sail the high seas to reach Bikini Bottom from New York! Hoist the sails, set a course for the east...", + "langchain.response.completions.1.0.message_type": "AIMessage", + "language": "python", + "runtime-id": "a3a3f4530e0641dcb8e323c08c91c8ce" + }, + "metrics": { + "_dd.measured": 1, + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "langchain.tokens.completion_tokens": 150, + "langchain.tokens.prompt_tokens": 60, + "langchain.tokens.total_tokens": 210, + "process_id": 82010 + }, + "duration": 7706000, + "start": 1738263109445840000 + }]] diff --git a/tests/snapshots/tests.contrib.langchain.test_langchain.test_openai_chat_model_sync_call_langchain_openai.json b/tests/snapshots/tests.contrib.langchain.test_langchain.test_openai_chat_model_sync_call_langchain_openai.json index 6fc60877ae4..0d4c995aba4 100644 --- a/tests/snapshots/tests.contrib.langchain.test_langchain.test_openai_chat_model_sync_call_langchain_openai.json +++ b/tests/snapshots/tests.contrib.langchain.test_langchain.test_openai_chat_model_sync_call_langchain_openai.json @@ -10,7 +10,7 @@ "error": 0, "meta": { "_dd.p.dm": "-0", - "_dd.p.tid": "67a102ed00000000", + "_dd.p.tid": "679bca4600000000", "langchain.request.api_key": "...key>", "langchain.request.messages.0.0.content": "When do you use 'whom' instead of 'who'?", "langchain.request.messages.0.0.message_type": "HumanMessage", @@ -23,22 +23,21 @@ "langchain.request.openai.parameters.temperature": "0.0", "langchain.request.provider": "openai", "langchain.request.type": "chat_model", - "langchain.response.completions.0.0.content": "Hello world!", + "langchain.response.completions.0.0.content": "'Who' is used as a subject pronoun, while 'whom' is used as an object pronoun. 
\\n\\nYou use 'who' when referring to the subject o...", "langchain.response.completions.0.0.message_type": "AIMessage", "language": "python", - "runtime-id": "78a3ac00072b4dbd85dcb18195090178" + "runtime-id": "a3a3f4530e0641dcb8e323c08c91c8ce" }, "metrics": { "_dd.measured": 1, "_dd.top_level": 1, "_dd.tracer_kr": 1.0, "_sampling_priority_v1": 1, - "langchain.tokens.completion_tokens": 5, - "langchain.tokens.prompt_tokens": 5, - "langchain.tokens.total_cost": 1.7500000000000002e-05, - "langchain.tokens.total_tokens": 10, - "process_id": 79446 + "langchain.tokens.completion_tokens": 83, + "langchain.tokens.prompt_tokens": 20, + "langchain.tokens.total_tokens": 103, + "process_id": 82010 }, - "duration": 2770000, - "start": 1738605293545467000 + "duration": 4431000, + "start": 1738263110097279000 }]] diff --git a/tests/snapshots/tests.contrib.langchain.test_langchain.test_openai_chat_model_sync_generate.json b/tests/snapshots/tests.contrib.langchain.test_langchain.test_openai_chat_model_sync_generate.json index 4b00d11757e..6e188b6629c 100644 --- a/tests/snapshots/tests.contrib.langchain.test_langchain.test_openai_chat_model_sync_generate.json +++ b/tests/snapshots/tests.contrib.langchain.test_langchain.test_openai_chat_model_sync_generate.json @@ -10,7 +10,7 @@ "error": 0, "meta": { "_dd.p.dm": "-0", - "_dd.p.tid": "67a257a700000000", + "_dd.p.tid": "679bca4500000000", "langchain.request.api_key": "...key>", "langchain.request.messages.0.0.content": "Respond like a frat boy.", "langchain.request.messages.0.0.message_type": "SystemMessage", @@ -21,35 +21,31 @@ "langchain.request.messages.1.1.content": "How does one get to Bikini Bottom from New York?", "langchain.request.messages.1.1.message_type": "HumanMessage", "langchain.request.model": "gpt-3.5-turbo", - "langchain.request.openai.parameters.max_completion_tokens": "256", + "langchain.request.openai.parameters.max_tokens": "256", "langchain.request.openai.parameters.model": "gpt-3.5-turbo", "langchain.request.openai.parameters.model_name": "gpt-3.5-turbo", + "langchain.request.openai.parameters.n": "1", "langchain.request.openai.parameters.stream": "False", "langchain.request.openai.parameters.temperature": "0.0", "langchain.request.provider": "openai", "langchain.request.type": "chat_model", - "langchain.response.completions.0.0.content": "Hello world!", + "langchain.response.completions.0.0.content": "Bro, Equinox Hudson Yards is just a few blocks away from the main entrance, you can't miss it! Get your pump on and show those w...", "langchain.response.completions.0.0.message_type": "AIMessage", - "langchain.response.completions.0.1.content": "Hello world!", - "langchain.response.completions.0.1.message_type": "AIMessage", - "langchain.response.completions.1.0.content": "Hello world!", + "langchain.response.completions.1.0.content": "Arrr matey, ye be needin' to sail the high seas to reach Bikini Bottom from New York! 
Hoist the sails, chart a course, and bewar...", "langchain.response.completions.1.0.message_type": "AIMessage", - "langchain.response.completions.1.1.content": "Hello world!", - "langchain.response.completions.1.1.message_type": "AIMessage", "language": "python", - "runtime-id": "17c27aac7afb415fa2d87b3f3bf1378c" + "runtime-id": "a3a3f4530e0641dcb8e323c08c91c8ce" }, "metrics": { "_dd.measured": 1, "_dd.top_level": 1, "_dd.tracer_kr": 1.0, "_sampling_priority_v1": 1, - "langchain.tokens.completion_tokens": 10, - "langchain.tokens.prompt_tokens": 10, - "langchain.tokens.total_cost": 3.5000000000000004e-05, - "langchain.tokens.total_tokens": 20, - "process_id": 1518 + "langchain.tokens.completion_tokens": 110, + "langchain.tokens.prompt_tokens": 60, + "langchain.tokens.total_tokens": 170, + "process_id": 82010 }, - "duration": 6479000, - "start": 1738692519215022000 + "duration": 7357000, + "start": 1738263109749677000 }]] diff --git a/tests/snapshots/tests.contrib.langchain.test_langchain.test_openai_chat_model_vision_generate.json b/tests/snapshots/tests.contrib.langchain.test_langchain.test_openai_chat_model_vision_generate.json index 033314cedc1..d02c31a219a 100644 --- a/tests/snapshots/tests.contrib.langchain.test_langchain.test_openai_chat_model_vision_generate.json +++ b/tests/snapshots/tests.contrib.langchain.test_langchain.test_openai_chat_model_vision_generate.json @@ -10,7 +10,7 @@ "error": 0, "meta": { "_dd.p.dm": "-0", - "_dd.p.tid": "67a29bfa00000000", + "_dd.p.tid": "679be13400000000", "langchain.request.api_key": "...key>", "langchain.request.messages.0.0.content": "[{'type': 'text', 'text': 'What\u2019s in this image?'}, {'type': 'image_url', 'image_url': {'url': 'https://upload.wikimedia.org/wik...", "langchain.request.messages.0.0.message_type": "HumanMessage", @@ -26,7 +26,7 @@ "langchain.response.completions.0.0.content": "The image shows a scenic landscape with a wooden boardwalk path leading through a lush green field. 
The sky is blue with scatter...", "langchain.response.completions.0.0.message_type": "AIMessage", "language": "python", - "runtime-id": "d54bd858212b42c686edd68387960fe9" + "runtime-id": "19b72b45843c4dc58a27069788eee8d3" }, "metrics": { "_dd.measured": 1, @@ -36,8 +36,8 @@ "langchain.tokens.completion_tokens": 56, "langchain.tokens.prompt_tokens": 1151, "langchain.tokens.total_tokens": 1207, - "process_id": 65725 + "process_id": 34354 }, - "duration": 25267000, - "start": 1738710010339445000 + "duration": 33752000, + "start": 1738268980686452000 }]] diff --git a/tests/snapshots/tests.contrib.langchain.test_langchain.test_openai_embedding_query.json b/tests/snapshots/tests.contrib.langchain.test_langchain.test_openai_embedding_query.json index 7bb26a79063..3690d1b2c38 100644 --- a/tests/snapshots/tests.contrib.langchain.test_langchain.test_openai_embedding_query.json +++ b/tests/snapshots/tests.contrib.langchain.test_langchain.test_openai_embedding_query.json @@ -10,14 +10,14 @@ "error": 0, "meta": { "_dd.p.dm": "-0", - "_dd.p.tid": "67a102ed00000000", + "_dd.p.tid": "679bca4600000000", "langchain.request.api_key": "...key>", "langchain.request.inputs.0.text": "this is a test query.", "langchain.request.model": "text-embedding-ada-002", "langchain.request.provider": "openai", "langchain.request.type": "embedding", "language": "python", - "runtime-id": "78a3ac00072b4dbd85dcb18195090178" + "runtime-id": "a3a3f4530e0641dcb8e323c08c91c8ce" }, "metrics": { "_dd.measured": 1, @@ -26,8 +26,8 @@ "_sampling_priority_v1": 1, "langchain.request.input_count": 1, "langchain.response.outputs.embedding_length": 1536, - "process_id": 79446 + "process_id": 82010 }, - "duration": 80000, - "start": 1738605293507944000 + "duration": 77000, + "start": 1738263110174027000 }]] diff --git a/tests/snapshots/tests.contrib.langchain.test_langchain.test_openai_llm_async.json b/tests/snapshots/tests.contrib.langchain.test_langchain.test_openai_llm_async.json index 3790e61ad20..5ef01923f7b 100644 --- a/tests/snapshots/tests.contrib.langchain.test_langchain.test_openai_llm_async.json +++ b/tests/snapshots/tests.contrib.langchain.test_langchain.test_openai_llm_async.json @@ -10,7 +10,7 @@ "error": 0, "meta": { "_dd.p.dm": "-0", - "_dd.p.tid": "67a102ed00000000", + "_dd.p.tid": "679bca4500000000", "langchain.request.api_key": "...key>", "langchain.request.model": "gpt-3.5-turbo-instruct", "langchain.request.openai.parameters.frequency_penalty": "0", @@ -23,23 +23,22 @@ "langchain.request.prompts.0": "Which team won the 2019 NBA finals?", "langchain.request.provider": "openai", "langchain.request.type": "llm", - "langchain.response.completions.0.finish_reason": "length", + "langchain.response.completions.0.finish_reason": "stop", "langchain.response.completions.0.logprobs": "None", - "langchain.response.completions.0.text": "I am a helpful assistant.", + "langchain.response.completions.0.text": "\\n\\nThe Toronto Raptors won the 2019 NBA Finals.", "language": "python", - "runtime-id": "78a3ac00072b4dbd85dcb18195090178" + "runtime-id": "a3a3f4530e0641dcb8e323c08c91c8ce" }, "metrics": { "_dd.measured": 1, "_dd.top_level": 1, "_dd.tracer_kr": 1.0, "_sampling_priority_v1": 1, - "langchain.tokens.completion_tokens": 5, - "langchain.tokens.prompt_tokens": 5, - "langchain.tokens.total_cost": 1.7500000000000002e-05, - "langchain.tokens.total_tokens": 10, - "process_id": 79446 + "langchain.tokens.completion_tokens": 12, + "langchain.tokens.prompt_tokens": 10, + "langchain.tokens.total_tokens": 22, + "process_id": 82010 }, - 
"duration": 3067000, - "start": 1738605293256880000 + "duration": 3471000, + "start": 1738263109367504000 }]] diff --git a/tests/snapshots/tests.contrib.langchain.test_langchain.test_openai_llm_error.json b/tests/snapshots/tests.contrib.langchain.test_langchain.test_openai_llm_error.json index f20bd79b527..19d5b71e02e 100644 --- a/tests/snapshots/tests.contrib.langchain.test_langchain.test_openai_llm_error.json +++ b/tests/snapshots/tests.contrib.langchain.test_langchain.test_openai_llm_error.json @@ -10,9 +10,9 @@ "error": 1, "meta": { "_dd.p.dm": "-0", - "_dd.p.tid": "67a102ed00000000", - "error.message": "Error code: 400 - {'error': {'message': 'Invalid token in prompt: 123. Minimum value is 0, maximum value is 100257 (inclusive).', 'type': 'invalid_request_error', 'param': None, 'code': None}}", - "error.stack": "Traceback (most recent call last):\n File \"/Users/sam.brenner/dd/dd-trace-py/ddtrace/contrib/internal/langchain/patch.py\", line 221, in traced_llm_generate\n completions = func(*args, **kwargs)\n File \"/Users/sam.brenner/dd/dd-trace-py/.riot/venv_py31013_1c8a6651bc4755ef/lib/python3.10/site-packages/langchain_core/language_models/llms.py\", line 803, in generate\n output = self._generate_helper(\n File \"/Users/sam.brenner/dd/dd-trace-py/.riot/venv_py31013_1c8a6651bc4755ef/lib/python3.10/site-packages/langchain_core/language_models/llms.py\", line 670, in _generate_helper\n raise e\n File \"/Users/sam.brenner/dd/dd-trace-py/.riot/venv_py31013_1c8a6651bc4755ef/lib/python3.10/site-packages/langchain_core/language_models/llms.py\", line 657, in _generate_helper\n self._generate(\n File \"/Users/sam.brenner/dd/dd-trace-py/.riot/venv_py31013_1c8a6651bc4755ef/lib/python3.10/site-packages/langchain_openai/llms/base.py\", line 350, in _generate\n response = self.client.create(prompt=_prompts, **params)\n File \"/Users/sam.brenner/dd/dd-trace-py/.riot/venv_py31013_1c8a6651bc4755ef/lib/python3.10/site-packages/openai/_utils/_utils.py\", line 277, in wrapper\n return func(*args, **kwargs)\n File \"/Users/sam.brenner/dd/dd-trace-py/.riot/venv_py31013_1c8a6651bc4755ef/lib/python3.10/site-packages/openai/resources/completions.py\", line 528, in create\n return self._post(\n File \"/Users/sam.brenner/dd/dd-trace-py/.riot/venv_py31013_1c8a6651bc4755ef/lib/python3.10/site-packages/openai/_base_client.py\", line 1240, in post\n return cast(ResponseT, self.request(cast_to, opts, stream=stream, stream_cls=stream_cls))\n File \"/Users/sam.brenner/dd/dd-trace-py/.riot/venv_py31013_1c8a6651bc4755ef/lib/python3.10/site-packages/openai/_base_client.py\", line 921, in request\n return self._request(\n File \"/Users/sam.brenner/dd/dd-trace-py/.riot/venv_py31013_1c8a6651bc4755ef/lib/python3.10/site-packages/openai/_base_client.py\", line 1020, in _request\n raise self._make_status_error_from_response(err.response) from None\nopenai.BadRequestError: Error code: 400 - {'error': {'message': 'Invalid token in prompt: 123456. Minimum value is 0, maximum value is 100257 (inclusive).', 'type': 'invalid_request_error', 'param': None, 'code': None}}\n", + "_dd.p.tid": "679bca4500000000", + "error.message": "Error code: 400 - {'error': {'message': 'Invalid token in prompt: 123456. 
Minimum value is 0, maximum value is 100257 (inclusive).', 'type': 'invalid_request_error', 'param': None, 'code': None}}", + "error.stack": "Traceback (most recent call last):\n File \"/Users/sam.brenner/dd/dd-trace-py/ddtrace/contrib/internal/langchain/patch.py\", line 221, in traced_llm_generate\n completions = func(*args, **kwargs)\n File \"/Users/sam.brenner/dd/dd-trace-py/.riot/venv_py31013_19f22257743a59a1/lib/python3.10/site-packages/langchain_core/language_models/llms.py\", line 803, in generate\n output = self._generate_helper(\n File \"/Users/sam.brenner/dd/dd-trace-py/.riot/venv_py31013_19f22257743a59a1/lib/python3.10/site-packages/langchain_core/language_models/llms.py\", line 670, in _generate_helper\n raise e\n File \"/Users/sam.brenner/dd/dd-trace-py/.riot/venv_py31013_19f22257743a59a1/lib/python3.10/site-packages/langchain_core/language_models/llms.py\", line 657, in _generate_helper\n self._generate(\n File \"/Users/sam.brenner/dd/dd-trace-py/.riot/venv_py31013_19f22257743a59a1/lib/python3.10/site-packages/langchain_openai/llms/base.py\", line 350, in _generate\n response = self.client.create(prompt=_prompts, **params)\n File \"/Users/sam.brenner/dd/dd-trace-py/.riot/venv_py31013_19f22257743a59a1/lib/python3.10/site-packages/openai/_utils/_utils.py\", line 277, in wrapper\n return func(*args, **kwargs)\n File \"/Users/sam.brenner/dd/dd-trace-py/.riot/venv_py31013_19f22257743a59a1/lib/python3.10/site-packages/openai/resources/completions.py\", line 528, in create\n return self._post(\n File \"/Users/sam.brenner/dd/dd-trace-py/.riot/venv_py31013_19f22257743a59a1/lib/python3.10/site-packages/openai/_base_client.py\", line 1240, in post\n return cast(ResponseT, self.request(cast_to, opts, stream=stream, stream_cls=stream_cls))\n File \"/Users/sam.brenner/dd/dd-trace-py/.riot/venv_py31013_19f22257743a59a1/lib/python3.10/site-packages/openai/_base_client.py\", line 921, in request\n return self._request(\n File \"/Users/sam.brenner/dd/dd-trace-py/.riot/venv_py31013_19f22257743a59a1/lib/python3.10/site-packages/openai/_base_client.py\", line 1020, in _request\n raise self._make_status_error_from_response(err.response) from None\nopenai.BadRequestError: Error code: 400 - {'error': {'message': 'Invalid token in prompt: 123456. 
Minimum value is 0, maximum value is 100257 (inclusive).', 'type': 'invalid_request_error', 'param': None, 'code': None}}\n", "error.type": "openai.BadRequestError", "langchain.request.api_key": "...key>", "langchain.request.model": "gpt-3.5-turbo-instruct", @@ -28,15 +28,15 @@ "langchain.request.provider": "openai", "langchain.request.type": "llm", "language": "python", - "runtime-id": "78a3ac00072b4dbd85dcb18195090178" + "runtime-id": "a3a3f4530e0641dcb8e323c08c91c8ce" }, "metrics": { "_dd.measured": 1, "_dd.top_level": 1, "_dd.tracer_kr": 1.0, "_sampling_priority_v1": 1, - "process_id": 79446 + "process_id": 82010 }, - "duration": 5955000, - "start": 1738605293693225000 + "duration": 7324000, + "start": 1738263109621524000 }]] diff --git a/tests/snapshots/tests.contrib.langchain.test_langchain.test_openai_llm_sync.json b/tests/snapshots/tests.contrib.langchain.test_langchain.test_openai_llm_sync.json index 91924f2eb6c..6c25e449a96 100644 --- a/tests/snapshots/tests.contrib.langchain.test_langchain.test_openai_llm_sync.json +++ b/tests/snapshots/tests.contrib.langchain.test_langchain.test_openai_llm_sync.json @@ -10,7 +10,7 @@ "error": 0, "meta": { "_dd.p.dm": "-0", - "_dd.p.tid": "67a102ed00000000", + "_dd.p.tid": "679bca4600000000", "langchain.request.api_key": "...key>", "langchain.request.model": "gpt-3.5-turbo-instruct", "langchain.request.openai.parameters.frequency_penalty": "0", @@ -25,21 +25,20 @@ "langchain.request.type": "llm", "langchain.response.completions.0.finish_reason": "length", "langchain.response.completions.0.logprobs": "None", - "langchain.response.completions.0.text": "I am a helpful assistant.", + "langchain.response.completions.0.text": "\\n\\nDescartes, a 17th-century French philosopher, is known for his famous statement \"Cogito, ergo sum\" which is often translated...", "language": "python", - "runtime-id": "78a3ac00072b4dbd85dcb18195090178" + "runtime-id": "a3a3f4530e0641dcb8e323c08c91c8ce" }, "metrics": { "_dd.measured": 1, "_dd.top_level": 1, "_dd.tracer_kr": 1.0, "_sampling_priority_v1": 1, - "langchain.tokens.completion_tokens": 5, - "langchain.tokens.prompt_tokens": 5, - "langchain.tokens.total_cost": 1.7500000000000002e-05, - "langchain.tokens.total_tokens": 10, - "process_id": 79446 + "langchain.tokens.completion_tokens": 256, + "langchain.tokens.prompt_tokens": 17, + "langchain.tokens.total_tokens": 273, + "process_id": 82010 }, - "duration": 8466000, - "start": 1738605293578308000 + "duration": 3387000, + "start": 1738263110205796000 }]] diff --git a/tests/snapshots/tests.contrib.langchain.test_langchain.test_openai_llm_sync_multiple_prompts.json b/tests/snapshots/tests.contrib.langchain.test_langchain.test_openai_llm_sync_multiple_prompts.json index 05072912a1f..9d5f107c31b 100644 --- a/tests/snapshots/tests.contrib.langchain.test_langchain.test_openai_llm_sync_multiple_prompts.json +++ b/tests/snapshots/tests.contrib.langchain.test_langchain.test_openai_llm_sync_multiple_prompts.json @@ -10,7 +10,7 @@ "error": 0, "meta": { "_dd.p.dm": "-0", - "_dd.p.tid": "67a2578700000000", + "_dd.p.tid": "679bca4600000000", "langchain.request.api_key": "...key>", "langchain.request.model": "gpt-3.5-turbo-instruct", "langchain.request.openai.parameters.frequency_penalty": "0", @@ -26,24 +26,23 @@ "langchain.request.type": "llm", "langchain.response.completions.0.finish_reason": "length", "langchain.response.completions.0.logprobs": "None", - "langchain.response.completions.0.text": "I am a helpful assistant.", - "langchain.response.completions.1.finish_reason": 
"length", + "langchain.response.completions.0.text": "\\n\\n1. Start early: It is easier for babies to learn multiple languages when exposed to them from a young age. Babies are born w...", + "langchain.response.completions.1.finish_reason": "stop", "langchain.response.completions.1.logprobs": "None", - "langchain.response.completions.1.text": "I am a helpful assistant.", + "langchain.response.completions.1.text": "\\n\\nSpongebob has failed his road test at least 26 times.", "language": "python", - "runtime-id": "c64df646b0274caaa34422a7b07e64a6" + "runtime-id": "a3a3f4530e0641dcb8e323c08c91c8ce" }, "metrics": { "_dd.measured": 1, "_dd.top_level": 1, "_dd.tracer_kr": 1.0, "_sampling_priority_v1": 1, - "langchain.tokens.completion_tokens": 5, - "langchain.tokens.prompt_tokens": 5, - "langchain.tokens.total_cost": 1.7500000000000002e-05, - "langchain.tokens.total_tokens": 10, - "process_id": 99901 + "langchain.tokens.completion_tokens": 271, + "langchain.tokens.prompt_tokens": 23, + "langchain.tokens.total_tokens": 294, + "process_id": 82010 }, - "duration": 2862000, - "start": 1738692487893691000 + "duration": 3334000, + "start": 1738263110060841000 }]] diff --git a/tests/snapshots/tests.contrib.langchain.test_langchain.test_pinecone_vectorstore_similarity_search.json b/tests/snapshots/tests.contrib.langchain.test_langchain.test_pinecone_vectorstore_similarity_search.json index 96d290737e4..04022afed1f 100644 --- a/tests/snapshots/tests.contrib.langchain.test_langchain.test_pinecone_vectorstore_similarity_search.json +++ b/tests/snapshots/tests.contrib.langchain.test_langchain.test_pinecone_vectorstore_similarity_search.json @@ -10,7 +10,7 @@ "error": 0, "meta": { "_dd.p.dm": "-0", - "_dd.p.tid": "67a2982700000000", + "_dd.p.tid": "679bca4900000000", "langchain.request.api_key": "", "langchain.request.k": "1", "langchain.request.provider": "pineconevectorstore", @@ -22,7 +22,7 @@ "langchain.response.document.0.metadata.wiki-id": "13", "langchain.response.document.0.page_content": "A brilliant mathematician and cryptographer Alan was to become the founder of modern-day computer science and artificial intelli...", "language": "python", - "runtime-id": "afffb6f62c04469c8e03e8d823a886b7" + "runtime-id": "a3a3f4530e0641dcb8e323c08c91c8ce" }, "metrics": { "_dd.measured": 1, @@ -30,10 +30,10 @@ "_dd.tracer_kr": 1.0, "_sampling_priority_v1": 1, "langchain.response.document_count": 1, - "process_id": 57708 + "process_id": 82010 }, - "duration": 1832000, - "start": 1738709031164723000 + "duration": 2112000, + "start": 1738263113379347000 }, { "name": "langchain.request", @@ -56,6 +56,6 @@ "langchain.request.input_count": 1, "langchain.response.outputs.embedding_length": 1536 }, - "duration": 467000, - "start": 1738709031164817000 + "duration": 43000, + "start": 1738263113379475000 }]] diff --git a/tests/snapshots/tests.contrib.langchain.test_langchain.test_streamed_chain.json b/tests/snapshots/tests.contrib.langchain.test_langchain.test_streamed_chain.json index d9cbe0c9d78..d03f7bc4722 100644 --- a/tests/snapshots/tests.contrib.langchain.test_langchain.test_streamed_chain.json +++ b/tests/snapshots/tests.contrib.langchain.test_langchain.test_streamed_chain.json @@ -10,23 +10,23 @@ "error": 0, "meta": { "_dd.p.dm": "-0", - "_dd.p.tid": "67a1030300000000", + "_dd.p.tid": "679bca4500000000", "langchain.request.inputs.0.input": "how can langsmith help with testing?", "langchain.request.stream": "True", "langchain.request.type": "chain", "langchain.response.outputs": "Python is\\n\\nthe best!", 
"language": "python", - "runtime-id": "92329b8f7db64bac85cd06279f8abe20" + "runtime-id": "a3a3f4530e0641dcb8e323c08c91c8ce" }, "metrics": { "_dd.measured": 1, "_dd.top_level": 1, "_dd.tracer_kr": 1.0, "_sampling_priority_v1": 1, - "process_id": 80460 + "process_id": 82010 }, - "duration": 5690000, - "start": 1738605315956509000 + "duration": 18877000, + "start": 1738263109393591000 }, { "name": "langchain.request", @@ -58,6 +58,6 @@ "metrics": { "_dd.measured": 1 }, - "duration": 3982000, - "start": 1738605315958073000 + "duration": 6074000, + "start": 1738263109406147000 }]] diff --git a/tests/snapshots/tests.contrib.langchain.test_langchain.test_streamed_chat.json b/tests/snapshots/tests.contrib.langchain.test_langchain.test_streamed_chat.json index 7254ac0ca28..0b4db6d1d29 100644 --- a/tests/snapshots/tests.contrib.langchain.test_langchain.test_streamed_chat.json +++ b/tests/snapshots/tests.contrib.langchain.test_langchain.test_streamed_chat.json @@ -10,7 +10,7 @@ "error": 0, "meta": { "_dd.p.dm": "-0", - "_dd.p.tid": "67a1030300000000", + "_dd.p.tid": "679bca4500000000", "langchain.request.api_key": "...key>", "langchain.request.messages.0.content": "how can langsmith help with testing?", "langchain.request.model": "gpt-3.5-turbo", @@ -25,15 +25,15 @@ "langchain.response.content": "Python is\\n\\nthe best!", "langchain.response.message_type": "AIMessage", "language": "python", - "runtime-id": "92329b8f7db64bac85cd06279f8abe20" + "runtime-id": "a3a3f4530e0641dcb8e323c08c91c8ce" }, "metrics": { "_dd.measured": 1, "_dd.top_level": 1, "_dd.tracer_kr": 1.0, "_sampling_priority_v1": 1, - "process_id": 80460 + "process_id": 82010 }, - "duration": 8072000, - "start": 1738605315649808000 + "duration": 4712000, + "start": 1738263109587520000 }]] diff --git a/tests/snapshots/tests.contrib.langchain.test_langchain.test_streamed_json_output_parser.json b/tests/snapshots/tests.contrib.langchain.test_langchain.test_streamed_json_output_parser.json index 11deede5fd6..2dc7611d537 100644 --- a/tests/snapshots/tests.contrib.langchain.test_langchain.test_streamed_json_output_parser.json +++ b/tests/snapshots/tests.contrib.langchain.test_langchain.test_streamed_json_output_parser.json @@ -10,24 +10,24 @@ "error": 0, "meta": { "_dd.p.dm": "-0", - "_dd.p.tid": "67a10b8000000000", + "_dd.p.tid": "679bca4500000000", "langchain.request.inputs.0": "content='You know everything about the world.'", "langchain.request.inputs.1": "content='output a list of the country france their population in JSON format. 
Use a dict with an outer key of \"countries\" which ...", "langchain.request.stream": "True", "langchain.request.type": "chain", "langchain.response.outputs": "{\"countries\": \"France is a country!\"}", "language": "python", - "runtime-id": "2b6e3df6ccb54034be82dec730d40cea" + "runtime-id": "a3a3f4530e0641dcb8e323c08c91c8ce" }, "metrics": { "_dd.measured": 1, "_dd.top_level": 1, "_dd.tracer_kr": 1.0, "_sampling_priority_v1": 1, - "process_id": 68297 + "process_id": 82010 }, - "duration": 47497000, - "start": 1738607488701970000 + "duration": 15748000, + "start": 1738263109069056000 }, { "name": "langchain.request", @@ -60,6 +60,6 @@ "metrics": { "_dd.measured": 1 }, - "duration": 43222000, - "start": 1738607488705896000 + "duration": 14195000, + "start": 1738263109070356000 }]] diff --git a/tests/snapshots/tests.contrib.langchain.test_langchain.test_streamed_llm.json b/tests/snapshots/tests.contrib.langchain.test_langchain.test_streamed_llm.json index beeca6c3f9c..9f0e75b55f3 100644 --- a/tests/snapshots/tests.contrib.langchain.test_langchain.test_streamed_llm.json +++ b/tests/snapshots/tests.contrib.langchain.test_langchain.test_streamed_llm.json @@ -10,7 +10,7 @@ "error": 0, "meta": { "_dd.p.dm": "-0", - "_dd.p.tid": "67a1030300000000", + "_dd.p.tid": "679bca4600000000", "langchain.request.api_key": "...key>", "langchain.request.messages.0.content": "How do I write technical documentation?", "langchain.request.model": "gpt-3.5-turbo-instruct", @@ -26,15 +26,15 @@ "langchain.request.type": "llm", "langchain.response.content": "\\n\\nPython is cool!", "language": "python", - "runtime-id": "92329b8f7db64bac85cd06279f8abe20" + "runtime-id": "a3a3f4530e0641dcb8e323c08c91c8ce" }, "metrics": { "_dd.measured": 1, "_dd.top_level": 1, "_dd.tracer_kr": 1.0, "_sampling_priority_v1": 1, - "process_id": 80460 + "process_id": 82010 }, - "duration": 2820000, - "start": 1738605315685515000 + "duration": 3698000, + "start": 1738263110128412000 }]] diff --git a/tests/snapshots/tests.contrib.openai.test_openai_v0.test_integration_service_name[None-None].json b/tests/snapshots/tests.contrib.openai.test_openai_v0.test_integration_service_name[None-None].json deleted file mode 100644 index 65eec00d960..00000000000 --- a/tests/snapshots/tests.contrib.openai.test_openai_v0.test_integration_service_name[None-None].json +++ /dev/null @@ -1,77 +0,0 @@ -[[ - { - "name": "openai.request", - "service": "ddtrace_subprocess_dir", - "resource": "createCompletion", - "trace_id": 0, - "span_id": 1, - "parent_id": 0, - "type": "", - "error": 0, - "meta": { - "_dd.p.dm": "-0", - "_dd.p.tid": "65674d7100000000", - "component": "openai", - "language": "python", - "openai.api_base": "https://api.openai.com/v1", - "openai.api_type": "open_ai", - "openai.organization.name": "datadog-4", - "openai.request.client": "OpenAI", - "openai.request.endpoint": "/v1/completions", - "openai.request.max_tokens": "10", - "openai.request.method": "POST", - "openai.request.model": "ada", - "openai.request.n": "2", - "openai.request.prompt.0": "Hello world", - "openai.request.stop": ".", - "openai.request.temperature": "0.8", - "openai.response.choices.0.finish_reason": "length", - "openai.response.choices.0.text": ", relax!\u201d I said to my laptop", - "openai.response.choices.1.finish_reason": "stop", - "openai.response.choices.1.text": " (1", - "openai.response.created": "1681852797", - "openai.response.id": "cmpl-76n1xLvRKv3mfjx7hJ41UHrHy9ar6", - "openai.response.model": "ada", - "openai.user.api_key": "sk-...key>", - "runtime-id": 
"8c92d3e850d9413593bf481d805039d1" - }, - "metrics": { - "_dd.measured": 1, - "_dd.top_level": 1, - "_dd.tracer_kr": 1.0, - "_sampling_priority_v1": 1, - "openai.organization.ratelimit.requests.limit": 3000, - "openai.organization.ratelimit.requests.remaining": 2999, - "openai.organization.ratelimit.tokens.limit": 250000, - "openai.organization.ratelimit.tokens.remaining": 249979, - "process_id": 20673 - }, - "duration": 21745000, - "start": 1701268849462298000 - }, - { - "name": "requests.request", - "service": "openai", - "resource": "POST /v1/completions", - "trace_id": 0, - "span_id": 2, - "parent_id": 1, - "type": "http", - "error": 0, - "meta": { - "_dd.base_service": "ddtrace_subprocess_dir", - "component": "requests", - "http.method": "POST", - "http.status_code": "200", - "http.url": "https://api.openai.com/v1/completions", - "http.useragent": "OpenAI/v1 PythonBindings/0.27.2", - "out.host": "api.openai.com", - "span.kind": "client" - }, - "metrics": { - "_dd.measured": 1, - "_dd.top_level": 1 - }, - "duration": 2999000, - "start": 1701268849479960000 - }]] diff --git a/tests/snapshots/tests.contrib.openai.test_openai_v0.test_integration_service_name[None-v0].json b/tests/snapshots/tests.contrib.openai.test_openai_v0.test_integration_service_name[None-v0].json deleted file mode 100644 index d6417fb5667..00000000000 --- a/tests/snapshots/tests.contrib.openai.test_openai_v0.test_integration_service_name[None-v0].json +++ /dev/null @@ -1,77 +0,0 @@ -[[ - { - "name": "openai.request", - "service": "ddtrace_subprocess_dir", - "resource": "createCompletion", - "trace_id": 0, - "span_id": 1, - "parent_id": 0, - "type": "", - "error": 0, - "meta": { - "_dd.p.dm": "-0", - "_dd.p.tid": "65674d7200000000", - "component": "openai", - "language": "python", - "openai.api_base": "https://api.openai.com/v1", - "openai.api_type": "open_ai", - "openai.organization.name": "datadog-4", - "openai.request.client": "OpenAI", - "openai.request.endpoint": "/v1/completions", - "openai.request.max_tokens": "10", - "openai.request.method": "POST", - "openai.request.model": "ada", - "openai.request.n": "2", - "openai.request.prompt.0": "Hello world", - "openai.request.stop": ".", - "openai.request.temperature": "0.8", - "openai.response.choices.0.finish_reason": "length", - "openai.response.choices.0.text": ", relax!\u201d I said to my laptop", - "openai.response.choices.1.finish_reason": "stop", - "openai.response.choices.1.text": " (1", - "openai.response.created": "1681852797", - "openai.response.id": "cmpl-76n1xLvRKv3mfjx7hJ41UHrHy9ar6", - "openai.response.model": "ada", - "openai.user.api_key": "sk-...key>", - "runtime-id": "675032183b244929ba8c3a0a1c0021e5" - }, - "metrics": { - "_dd.measured": 1, - "_dd.top_level": 1, - "_dd.tracer_kr": 1.0, - "_sampling_priority_v1": 1, - "openai.organization.ratelimit.requests.limit": 3000, - "openai.organization.ratelimit.requests.remaining": 2999, - "openai.organization.ratelimit.tokens.limit": 250000, - "openai.organization.ratelimit.tokens.remaining": 249979, - "process_id": 20696 - }, - "duration": 20412000, - "start": 1701268850764763000 - }, - { - "name": "requests.request", - "service": "openai", - "resource": "POST /v1/completions", - "trace_id": 0, - "span_id": 2, - "parent_id": 1, - "type": "http", - "error": 0, - "meta": { - "_dd.base_service": "ddtrace_subprocess_dir", - "component": "requests", - "http.method": "POST", - "http.status_code": "200", - "http.url": "https://api.openai.com/v1/completions", - "http.useragent": "OpenAI/v1 
PythonBindings/0.27.2", - "out.host": "api.openai.com", - "span.kind": "client" - }, - "metrics": { - "_dd.measured": 1, - "_dd.top_level": 1 - }, - "duration": 3134000, - "start": 1701268850780901000 - }]] diff --git a/tests/snapshots/tests.contrib.openai.test_openai_v0.test_integration_service_name[None-v1].json b/tests/snapshots/tests.contrib.openai.test_openai_v0.test_integration_service_name[None-v1].json deleted file mode 100644 index 979ea768ef5..00000000000 --- a/tests/snapshots/tests.contrib.openai.test_openai_v0.test_integration_service_name[None-v1].json +++ /dev/null @@ -1,77 +0,0 @@ -[[ - { - "name": "openai.request", - "service": "ddtrace_subprocess_dir", - "resource": "createCompletion", - "trace_id": 0, - "span_id": 1, - "parent_id": 0, - "type": "", - "error": 0, - "meta": { - "_dd.p.dm": "-0", - "_dd.p.tid": "65674d7400000000", - "component": "openai", - "language": "python", - "openai.api_base": "https://api.openai.com/v1", - "openai.api_type": "open_ai", - "openai.organization.name": "datadog-4", - "openai.request.client": "OpenAI", - "openai.request.endpoint": "/v1/completions", - "openai.request.max_tokens": "10", - "openai.request.method": "POST", - "openai.request.model": "ada", - "openai.request.n": "2", - "openai.request.prompt.0": "Hello world", - "openai.request.stop": ".", - "openai.request.temperature": "0.8", - "openai.response.choices.0.finish_reason": "length", - "openai.response.choices.0.text": ", relax!\u201d I said to my laptop", - "openai.response.choices.1.finish_reason": "stop", - "openai.response.choices.1.text": " (1", - "openai.response.created": "1681852797", - "openai.response.id": "cmpl-76n1xLvRKv3mfjx7hJ41UHrHy9ar6", - "openai.response.model": "ada", - "openai.user.api_key": "sk-...key>", - "runtime-id": "1f3499a720954236be60cf0fece4246c" - }, - "metrics": { - "_dd.measured": 1, - "_dd.top_level": 1, - "_dd.tracer_kr": 1.0, - "_sampling_priority_v1": 1, - "openai.organization.ratelimit.requests.limit": 3000, - "openai.organization.ratelimit.requests.remaining": 2999, - "openai.organization.ratelimit.tokens.limit": 250000, - "openai.organization.ratelimit.tokens.remaining": 249979, - "process_id": 20714 - }, - "duration": 19970000, - "start": 1701268852029562000 - }, - { - "name": "http.client.request", - "service": "ddtrace_subprocess_dir", - "resource": "POST /v1/completions", - "trace_id": 0, - "span_id": 2, - "parent_id": 1, - "type": "http", - "error": 0, - "meta": { - "_dd.peer.service.source": "out.host", - "component": "requests", - "http.method": "POST", - "http.status_code": "200", - "http.url": "https://api.openai.com/v1/completions", - "http.useragent": "OpenAI/v1 PythonBindings/0.27.2", - "out.host": "api.openai.com", - "peer.service": "api.openai.com", - "span.kind": "client" - }, - "metrics": { - "_dd.measured": 1 - }, - "duration": 2897000, - "start": 1701268852045569000 - }]] diff --git a/tests/snapshots/tests.contrib.openai.test_openai_v0.test_integration_service_name[mysvc-None].json b/tests/snapshots/tests.contrib.openai.test_openai_v0.test_integration_service_name[mysvc-None].json deleted file mode 100644 index a80c1218caf..00000000000 --- a/tests/snapshots/tests.contrib.openai.test_openai_v0.test_integration_service_name[mysvc-None].json +++ /dev/null @@ -1,77 +0,0 @@ -[[ - { - "name": "openai.request", - "service": "mysvc", - "resource": "createCompletion", - "trace_id": 0, - "span_id": 1, - "parent_id": 0, - "type": "", - "error": 0, - "meta": { - "_dd.p.dm": "-0", - "_dd.p.tid": "65674d7500000000", - "component": 
"openai", - "language": "python", - "openai.api_base": "https://api.openai.com/v1", - "openai.api_type": "open_ai", - "openai.organization.name": "datadog-4", - "openai.request.client": "OpenAI", - "openai.request.endpoint": "/v1/completions", - "openai.request.max_tokens": "10", - "openai.request.method": "POST", - "openai.request.model": "ada", - "openai.request.n": "2", - "openai.request.prompt.0": "Hello world", - "openai.request.stop": ".", - "openai.request.temperature": "0.8", - "openai.response.choices.0.finish_reason": "length", - "openai.response.choices.0.text": ", relax!\u201d I said to my laptop", - "openai.response.choices.1.finish_reason": "stop", - "openai.response.choices.1.text": " (1", - "openai.response.created": "1681852797", - "openai.response.id": "cmpl-76n1xLvRKv3mfjx7hJ41UHrHy9ar6", - "openai.response.model": "ada", - "openai.user.api_key": "sk-...key>", - "runtime-id": "1244eea37568412fb5bdedf9c37ed48a" - }, - "metrics": { - "_dd.measured": 1, - "_dd.top_level": 1, - "_dd.tracer_kr": 1.0, - "_sampling_priority_v1": 1, - "openai.organization.ratelimit.requests.limit": 3000, - "openai.organization.ratelimit.requests.remaining": 2999, - "openai.organization.ratelimit.tokens.limit": 250000, - "openai.organization.ratelimit.tokens.remaining": 249979, - "process_id": 20736 - }, - "duration": 19953000, - "start": 1701268853284736000 - }, - { - "name": "requests.request", - "service": "openai", - "resource": "POST /v1/completions", - "trace_id": 0, - "span_id": 2, - "parent_id": 1, - "type": "http", - "error": 0, - "meta": { - "_dd.base_service": "mysvc", - "component": "requests", - "http.method": "POST", - "http.status_code": "200", - "http.url": "https://api.openai.com/v1/completions", - "http.useragent": "OpenAI/v1 PythonBindings/0.27.2", - "out.host": "api.openai.com", - "span.kind": "client" - }, - "metrics": { - "_dd.measured": 1, - "_dd.top_level": 1 - }, - "duration": 2837000, - "start": 1701268853300833000 - }]] diff --git a/tests/snapshots/tests.contrib.openai.test_openai_v0.test_integration_service_name[mysvc-v0].json b/tests/snapshots/tests.contrib.openai.test_openai_v0.test_integration_service_name[mysvc-v0].json deleted file mode 100644 index f3f9c57f768..00000000000 --- a/tests/snapshots/tests.contrib.openai.test_openai_v0.test_integration_service_name[mysvc-v0].json +++ /dev/null @@ -1,77 +0,0 @@ -[[ - { - "name": "openai.request", - "service": "mysvc", - "resource": "createCompletion", - "trace_id": 0, - "span_id": 1, - "parent_id": 0, - "type": "", - "error": 0, - "meta": { - "_dd.p.dm": "-0", - "_dd.p.tid": "65674d7600000000", - "component": "openai", - "language": "python", - "openai.api_base": "https://api.openai.com/v1", - "openai.api_type": "open_ai", - "openai.organization.name": "datadog-4", - "openai.request.client": "OpenAI", - "openai.request.endpoint": "/v1/completions", - "openai.request.max_tokens": "10", - "openai.request.method": "POST", - "openai.request.model": "ada", - "openai.request.n": "2", - "openai.request.prompt.0": "Hello world", - "openai.request.stop": ".", - "openai.request.temperature": "0.8", - "openai.response.choices.0.finish_reason": "length", - "openai.response.choices.0.text": ", relax!\u201d I said to my laptop", - "openai.response.choices.1.finish_reason": "stop", - "openai.response.choices.1.text": " (1", - "openai.response.created": "1681852797", - "openai.response.id": "cmpl-76n1xLvRKv3mfjx7hJ41UHrHy9ar6", - "openai.response.model": "ada", - "openai.user.api_key": "sk-...key>", - "runtime-id": 
"12b4a711854c44f681695957b545dcf5" - }, - "metrics": { - "_dd.measured": 1, - "_dd.top_level": 1, - "_dd.tracer_kr": 1.0, - "_sampling_priority_v1": 1, - "openai.organization.ratelimit.requests.limit": 3000, - "openai.organization.ratelimit.requests.remaining": 2999, - "openai.organization.ratelimit.tokens.limit": 250000, - "openai.organization.ratelimit.tokens.remaining": 249979, - "process_id": 20750 - }, - "duration": 25352000, - "start": 1701268854568669000 - }, - { - "name": "requests.request", - "service": "openai", - "resource": "POST /v1/completions", - "trace_id": 0, - "span_id": 2, - "parent_id": 1, - "type": "http", - "error": 0, - "meta": { - "_dd.base_service": "mysvc", - "component": "requests", - "http.method": "POST", - "http.status_code": "200", - "http.url": "https://api.openai.com/v1/completions", - "http.useragent": "OpenAI/v1 PythonBindings/0.27.2", - "out.host": "api.openai.com", - "span.kind": "client" - }, - "metrics": { - "_dd.measured": 1, - "_dd.top_level": 1 - }, - "duration": 3922000, - "start": 1701268854588758000 - }]] diff --git a/tests/snapshots/tests.contrib.openai.test_openai_v0.test_integration_service_name[mysvc-v1].json b/tests/snapshots/tests.contrib.openai.test_openai_v0.test_integration_service_name[mysvc-v1].json deleted file mode 100644 index 0696ae54454..00000000000 --- a/tests/snapshots/tests.contrib.openai.test_openai_v0.test_integration_service_name[mysvc-v1].json +++ /dev/null @@ -1,77 +0,0 @@ -[[ - { - "name": "openai.request", - "service": "mysvc", - "resource": "createCompletion", - "trace_id": 0, - "span_id": 1, - "parent_id": 0, - "type": "", - "error": 0, - "meta": { - "_dd.p.dm": "-0", - "_dd.p.tid": "65674d7700000000", - "component": "openai", - "language": "python", - "openai.api_base": "https://api.openai.com/v1", - "openai.api_type": "open_ai", - "openai.organization.name": "datadog-4", - "openai.request.client": "OpenAI", - "openai.request.endpoint": "/v1/completions", - "openai.request.max_tokens": "10", - "openai.request.method": "POST", - "openai.request.model": "ada", - "openai.request.n": "2", - "openai.request.prompt.0": "Hello world", - "openai.request.stop": ".", - "openai.request.temperature": "0.8", - "openai.response.choices.0.finish_reason": "length", - "openai.response.choices.0.text": ", relax!\u201d I said to my laptop", - "openai.response.choices.1.finish_reason": "stop", - "openai.response.choices.1.text": " (1", - "openai.response.created": "1681852797", - "openai.response.id": "cmpl-76n1xLvRKv3mfjx7hJ41UHrHy9ar6", - "openai.response.model": "ada", - "openai.user.api_key": "sk-...key>", - "runtime-id": "03e7664126ea4fe99e0aefec4efd003c" - }, - "metrics": { - "_dd.measured": 1, - "_dd.top_level": 1, - "_dd.tracer_kr": 1.0, - "_sampling_priority_v1": 1, - "openai.organization.ratelimit.requests.limit": 3000, - "openai.organization.ratelimit.requests.remaining": 2999, - "openai.organization.ratelimit.tokens.limit": 250000, - "openai.organization.ratelimit.tokens.remaining": 249979, - "process_id": 20772 - }, - "duration": 19966000, - "start": 1701268855885252000 - }, - { - "name": "http.client.request", - "service": "mysvc", - "resource": "POST /v1/completions", - "trace_id": 0, - "span_id": 2, - "parent_id": 1, - "type": "http", - "error": 0, - "meta": { - "_dd.peer.service.source": "out.host", - "component": "requests", - "http.method": "POST", - "http.status_code": "200", - "http.url": "https://api.openai.com/v1/completions", - "http.useragent": "OpenAI/v1 PythonBindings/0.27.2", - "out.host": "api.openai.com", - 
"peer.service": "api.openai.com", - "span.kind": "client" - }, - "metrics": { - "_dd.measured": 1 - }, - "duration": 2849000, - "start": 1701268855901267000 - }]] diff --git a/tests/snapshots/tests.contrib.openai.test_openai_v1.test_integration_async.json b/tests/snapshots/tests.contrib.openai.test_openai_v1.test_integration_async.json index 9cfd3a107cd..ab2f74aa60b 100644 --- a/tests/snapshots/tests.contrib.openai.test_openai_v1.test_integration_async.json +++ b/tests/snapshots/tests.contrib.openai.test_openai_v1.test_integration_async.json @@ -44,6 +44,9 @@ "openai.organization.ratelimit.requests.remaining": 2999, "openai.organization.ratelimit.tokens.limit": 250000, "openai.organization.ratelimit.tokens.remaining": 249979, + "openai.response.usage.completion_tokens": 12, + "openai.response.usage.prompt_tokens": 2, + "openai.response.usage.total_tokens": 14, "process_id": 24448 }, "duration": 17466000, diff --git a/tests/snapshots/tests.contrib.openai.test_openai_v1.test_integration_service_name[None-None].json b/tests/snapshots/tests.contrib.openai.test_openai_v1.test_integration_service_name[None-None].json index 3361ea38b5c..d7faa3f22e2 100644 --- a/tests/snapshots/tests.contrib.openai.test_openai_v1.test_integration_service_name[None-None].json +++ b/tests/snapshots/tests.contrib.openai.test_openai_v1.test_integration_service_name[None-None].json @@ -39,6 +39,9 @@ "openai.organization.ratelimit.requests.remaining": 2999, "openai.organization.ratelimit.tokens.limit": 250000, "openai.organization.ratelimit.tokens.remaining": 249979, + "openai.response.usage.completion_tokens": 12, + "openai.response.usage.prompt_tokens": 2, + "openai.response.usage.total_tokens": 14, "process_id": 20806 }, "duration": 16421000, diff --git a/tests/snapshots/tests.contrib.openai.test_openai_v1.test_integration_service_name[None-v0].json b/tests/snapshots/tests.contrib.openai.test_openai_v1.test_integration_service_name[None-v0].json index 9815b378221..3af343273f6 100644 --- a/tests/snapshots/tests.contrib.openai.test_openai_v1.test_integration_service_name[None-v0].json +++ b/tests/snapshots/tests.contrib.openai.test_openai_v1.test_integration_service_name[None-v0].json @@ -39,6 +39,9 @@ "openai.organization.ratelimit.requests.remaining": 2999, "openai.organization.ratelimit.tokens.limit": 250000, "openai.organization.ratelimit.tokens.remaining": 249979, + "openai.response.usage.completion_tokens": 12, + "openai.response.usage.prompt_tokens": 2, + "openai.response.usage.total_tokens": 14, "process_id": 20827 }, "duration": 17257000, diff --git a/tests/snapshots/tests.contrib.openai.test_openai_v1.test_integration_service_name[None-v1].json b/tests/snapshots/tests.contrib.openai.test_openai_v1.test_integration_service_name[None-v1].json index 3c9e6612d78..7f51ec196a6 100644 --- a/tests/snapshots/tests.contrib.openai.test_openai_v1.test_integration_service_name[None-v1].json +++ b/tests/snapshots/tests.contrib.openai.test_openai_v1.test_integration_service_name[None-v1].json @@ -39,6 +39,9 @@ "openai.organization.ratelimit.requests.remaining": 2999, "openai.organization.ratelimit.tokens.limit": 250000, "openai.organization.ratelimit.tokens.remaining": 249979, + "openai.response.usage.completion_tokens": 12, + "openai.response.usage.prompt_tokens": 2, + "openai.response.usage.total_tokens": 14, "process_id": 20839 }, "duration": 17259000, diff --git a/tests/snapshots/tests.contrib.openai.test_openai_v1.test_integration_service_name[mysvc-None].json 
b/tests/snapshots/tests.contrib.openai.test_openai_v1.test_integration_service_name[mysvc-None].json index fb11e4200a0..35268ec5092 100644 --- a/tests/snapshots/tests.contrib.openai.test_openai_v1.test_integration_service_name[mysvc-None].json +++ b/tests/snapshots/tests.contrib.openai.test_openai_v1.test_integration_service_name[mysvc-None].json @@ -39,6 +39,9 @@ "openai.organization.ratelimit.requests.remaining": 2999, "openai.organization.ratelimit.tokens.limit": 250000, "openai.organization.ratelimit.tokens.remaining": 249979, + "openai.response.usage.completion_tokens": 12, + "openai.response.usage.prompt_tokens": 2, + "openai.response.usage.total_tokens": 14, "process_id": 20848 }, "duration": 17004000, diff --git a/tests/snapshots/tests.contrib.openai.test_openai_v1.test_integration_service_name[mysvc-v0].json b/tests/snapshots/tests.contrib.openai.test_openai_v1.test_integration_service_name[mysvc-v0].json index 63341870faa..999dbb7529c 100644 --- a/tests/snapshots/tests.contrib.openai.test_openai_v1.test_integration_service_name[mysvc-v0].json +++ b/tests/snapshots/tests.contrib.openai.test_openai_v1.test_integration_service_name[mysvc-v0].json @@ -39,6 +39,9 @@ "openai.organization.ratelimit.requests.remaining": 2999, "openai.organization.ratelimit.tokens.limit": 250000, "openai.organization.ratelimit.tokens.remaining": 249979, + "openai.response.usage.completion_tokens": 12, + "openai.response.usage.prompt_tokens": 2, + "openai.response.usage.total_tokens": 14, "process_id": 20864 }, "duration": 17872000, diff --git a/tests/snapshots/tests.contrib.openai.test_openai_v1.test_integration_service_name[mysvc-v1].json b/tests/snapshots/tests.contrib.openai.test_openai_v1.test_integration_service_name[mysvc-v1].json index 4ff254b053c..76d352a3f59 100644 --- a/tests/snapshots/tests.contrib.openai.test_openai_v1.test_integration_service_name[mysvc-v1].json +++ b/tests/snapshots/tests.contrib.openai.test_openai_v1.test_integration_service_name[mysvc-v1].json @@ -39,6 +39,9 @@ "openai.organization.ratelimit.requests.remaining": 2999, "openai.organization.ratelimit.tokens.limit": 250000, "openai.organization.ratelimit.tokens.remaining": 249979, + "openai.response.usage.completion_tokens": 12, + "openai.response.usage.prompt_tokens": 2, + "openai.response.usage.total_tokens": 14, "process_id": 20888 }, "duration": 16629000, diff --git a/tests/snapshots/tests.contrib.openai.test_openai_v1.test_integration_sync.json b/tests/snapshots/tests.contrib.openai.test_openai_v1.test_integration_sync.json index 9cfd3a107cd..ab2f74aa60b 100644 --- a/tests/snapshots/tests.contrib.openai.test_openai_v1.test_integration_sync.json +++ b/tests/snapshots/tests.contrib.openai.test_openai_v1.test_integration_sync.json @@ -44,6 +44,9 @@ "openai.organization.ratelimit.requests.remaining": 2999, "openai.organization.ratelimit.tokens.limit": 250000, "openai.organization.ratelimit.tokens.remaining": 249979, + "openai.response.usage.completion_tokens": 12, + "openai.response.usage.prompt_tokens": 2, + "openai.response.usage.total_tokens": 14, "process_id": 24448 }, "duration": 17466000, diff --git a/tests/snapshots/tests.contrib.yaaredis.test_yaaredis.test_analytics_with_rate.json b/tests/snapshots/tests.contrib.valkey.test_valkey.test_analytics_with_rate.json similarity index 53% rename from tests/snapshots/tests.contrib.yaaredis.test_yaaredis.test_analytics_with_rate.json rename to tests/snapshots/tests.contrib.valkey.test_valkey.test_analytics_with_rate.json index 2a94b63ab1c..cdc76343f08 100644 --- 
a/tests/snapshots/tests.contrib.yaaredis.test_yaaredis.test_analytics_with_rate.json +++ b/tests/snapshots/tests.contrib.valkey.test_valkey.test_analytics_with_rate.json @@ -1,25 +1,25 @@ [[ { - "name": "redis.command", - "service": "redis", + "name": "valkey.command", + "service": "valkey", "resource": "GET", "trace_id": 0, "span_id": 1, "parent_id": 0, - "type": "redis", + "type": "valkey", "error": 0, "meta": { - "_dd.base_service": "tests.contrib.yaaredis", + "_dd.base_service": "tests.contrib.valkey", "_dd.p.dm": "-0", "_dd.p.tid": "654a694400000000", - "component": "yaaredis", - "db.system": "redis", + "component": "valkey", + "db.system": "valkey", "language": "python", "out.host": "localhost", - "redis.raw_command": "GET cheese", - "runtime-id": "4d0d479c17be4095b8e0dee5a0839596", + "runtime-id": "3cf1df7fb079462ab81608355e026651", "server.address": "localhost", - "span.kind": "client" + "span.kind": "client", + "valkey.raw_command": "GET cheese" }, "metrics": { "_dd.measured": 1, @@ -29,10 +29,10 @@ "_sampling_priority_v1": 1, "db.row_count": 0, "network.destination.port": 6379, - "out.redis_db": 0, - "process_id": 5119, - "redis.args_length": 2 + "out.valkey_db": 0, + "process_id": 19999, + "valkey.args_length": 2 }, - "duration": 309666, - "start": 1692650065803420466 + "duration": 516916, + "start": 1692651820581556875 }]] diff --git a/tests/snapshots/tests.contrib.yaaredis.test_yaaredis.test_analytics_without_rate.json b/tests/snapshots/tests.contrib.valkey.test_valkey.test_analytics_without_rate.json similarity index 53% rename from tests/snapshots/tests.contrib.yaaredis.test_yaaredis.test_analytics_without_rate.json rename to tests/snapshots/tests.contrib.valkey.test_valkey.test_analytics_without_rate.json index 40703c30619..9a2bb9f2e4f 100644 --- a/tests/snapshots/tests.contrib.yaaredis.test_yaaredis.test_analytics_without_rate.json +++ b/tests/snapshots/tests.contrib.valkey.test_valkey.test_analytics_without_rate.json @@ -1,25 +1,25 @@ [[ { - "name": "redis.command", - "service": "redis", + "name": "valkey.command", + "service": "valkey", "resource": "GET", "trace_id": 0, "span_id": 1, "parent_id": 0, - "type": "redis", + "type": "valkey", "error": 0, "meta": { - "_dd.base_service": "tests.contrib.yaaredis", + "_dd.base_service": "tests.contrib.valkey", "_dd.p.dm": "-0", "_dd.p.tid": "654a694400000000", - "component": "yaaredis", - "db.system": "redis", + "component": "valkey", + "db.system": "valkey", "language": "python", "out.host": "localhost", - "redis.raw_command": "GET cheese", - "runtime-id": "4d0d479c17be4095b8e0dee5a0839596", + "runtime-id": "3cf1df7fb079462ab81608355e026651", "server.address": "localhost", - "span.kind": "client" + "span.kind": "client", + "valkey.raw_command": "GET cheese" }, "metrics": { "_dd.measured": 1, @@ -29,10 +29,10 @@ "_sampling_priority_v1": 1, "db.row_count": 0, "network.destination.port": 6379, - "out.redis_db": 0, - "process_id": 5119, - "redis.args_length": 2 + "out.valkey_db": 0, + "process_id": 19999, + "valkey.args_length": 2 }, - "duration": 277750, - "start": 1692650065792499174 + "duration": 340708, + "start": 1692651820591814875 }]] diff --git a/tests/snapshots/tests.contrib.yaaredis.test_yaaredis.test_unicode.json b/tests/snapshots/tests.contrib.valkey.test_valkey.test_basics.json similarity index 52% rename from tests/snapshots/tests.contrib.yaaredis.test_yaaredis.test_unicode.json rename to tests/snapshots/tests.contrib.valkey.test_valkey.test_basics.json index cb687c7f283..e6da74211bb 100644 --- 
a/tests/snapshots/tests.contrib.yaaredis.test_yaaredis.test_unicode.json +++ b/tests/snapshots/tests.contrib.valkey.test_valkey.test_basics.json @@ -1,25 +1,25 @@ [[ { - "name": "redis.command", - "service": "redis", + "name": "valkey.command", + "service": "valkey", "resource": "GET", "trace_id": 0, "span_id": 1, "parent_id": 0, - "type": "redis", + "type": "valkey", "error": 0, "meta": { - "_dd.base_service": "tests.contrib.yaaredis", + "_dd.base_service": "tests.contrib.valkey", "_dd.p.dm": "-0", "_dd.p.tid": "654a694400000000", - "component": "yaaredis", - "db.system": "redis", + "component": "valkey", + "db.system": "valkey", "language": "python", "out.host": "localhost", - "redis.raw_command": "GET \ud83d\ude10", - "runtime-id": "4d0d479c17be4095b8e0dee5a0839596", + "runtime-id": "3cf1df7fb079462ab81608355e026651", "server.address": "localhost", - "span.kind": "client" + "span.kind": "client", + "valkey.raw_command": "GET cheese" }, "metrics": { "_dd.measured": 1, @@ -28,10 +28,10 @@ "_sampling_priority_v1": 1, "db.row_count": 0, "network.destination.port": 6379, - "out.redis_db": 0, - "process_id": 5119, - "redis.args_length": 2 + "out.valkey_db": 0, + "process_id": 19999, + "valkey.args_length": 2 }, - "duration": 211291, - "start": 1692650065781665049 + "duration": 335292, + "start": 1692651820600962708 }]] diff --git a/tests/snapshots/tests.contrib.valkey.test_valkey.test_custom_cmd_length.json b/tests/snapshots/tests.contrib.valkey.test_valkey.test_custom_cmd_length.json new file mode 100644 index 00000000000..5614e912961 --- /dev/null +++ b/tests/snapshots/tests.contrib.valkey.test_valkey.test_custom_cmd_length.json @@ -0,0 +1,37 @@ +[[ + { + "name": "valkey.command", + "service": "valkey", + "resource": "GET", + "trace_id": 0, + "span_id": 1, + "parent_id": 0, + "type": "valkey", + "error": 0, + "meta": { + "_dd.base_service": "tests.contrib.valkey", + "_dd.p.dm": "-0", + "_dd.p.tid": "654a694400000000", + "component": "valkey", + "db.system": "valkey", + "language": "python", + "out.host": "localhost", + "runtime-id": "3cf1df7fb079462ab81608355e026651", + "server.address": "localhost", + "span.kind": "client", + "valkey.raw_command": "GET here..." 
+ }, + "metrics": { + "_dd.measured": 1, + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "db.row_count": 0, + "network.destination.port": 6379, + "out.valkey_db": 0, + "process_id": 19999, + "valkey.args_length": 2 + }, + "duration": 326167, + "start": 1692651820609597416 + }]] diff --git a/tests/snapshots/tests.contrib.yaaredis.test_yaaredis.test_basics.json b/tests/snapshots/tests.contrib.valkey.test_valkey.test_custom_cmd_length_env.json similarity index 52% rename from tests/snapshots/tests.contrib.yaaredis.test_yaaredis.test_basics.json rename to tests/snapshots/tests.contrib.valkey.test_valkey.test_custom_cmd_length_env.json index 64a0dc8fcee..75f058f3700 100644 --- a/tests/snapshots/tests.contrib.yaaredis.test_yaaredis.test_basics.json +++ b/tests/snapshots/tests.contrib.valkey.test_valkey.test_custom_cmd_length_env.json @@ -1,25 +1,25 @@ [[ { - "name": "redis.command", - "service": "redis", + "name": "valkey.command", + "service": "valkey", "resource": "GET", "trace_id": 0, "span_id": 1, "parent_id": 0, - "type": "redis", + "type": "valkey", "error": 0, "meta": { - "_dd.base_service": "tests.contrib.yaaredis", + "_dd.base_service": "", "_dd.p.dm": "-0", "_dd.p.tid": "654a694400000000", - "component": "yaaredis", - "db.system": "redis", + "component": "valkey", + "db.system": "valkey", "language": "python", "out.host": "localhost", - "redis.raw_command": "GET cheese", - "runtime-id": "4d0d479c17be4095b8e0dee5a0839596", + "runtime-id": "ea409d0295db44adbf88dda3e4806547", "server.address": "localhost", - "span.kind": "client" + "span.kind": "client", + "valkey.raw_command": "GET here-is..." }, "metrics": { "_dd.measured": 1, @@ -28,10 +28,10 @@ "_sampling_priority_v1": 1, "db.row_count": 0, "network.destination.port": 6379, - "out.redis_db": 0, - "process_id": 5119, - "redis.args_length": 2 + "out.valkey_db": 0, + "process_id": 20043, + "valkey.args_length": 2 }, - "duration": 199667, - "start": 1692650065770875215 + "duration": 404084, + "start": 1692651821117540958 }]] diff --git a/tests/snapshots/tests.contrib.yaaredis.test_yaaredis.test_schematization[service_schema3].json b/tests/snapshots/tests.contrib.valkey.test_valkey.test_env_user_specified_valkey_service.json similarity index 50% rename from tests/snapshots/tests.contrib.yaaredis.test_yaaredis.test_schematization[service_schema3].json rename to tests/snapshots/tests.contrib.valkey.test_valkey.test_env_user_specified_valkey_service.json index 94285cbff91..f4b7d26f3a6 100644 --- a/tests/snapshots/tests.contrib.yaaredis.test_yaaredis.test_schematization[service_schema3].json +++ b/tests/snapshots/tests.contrib.valkey.test_valkey.test_env_user_specified_valkey_service.json @@ -1,61 +1,62 @@ [[ { - "name": "redis.command", - "service": "redis", - "resource": "FLUSHALL", + "name": "valkey.command", + "service": "myvalkey", + "resource": "GET", "trace_id": 0, "span_id": 1, "parent_id": 0, - "type": "redis", + "type": "valkey", "error": 0, "meta": { - "_dd.base_service": "mysvc", + "_dd.base_service": "", "_dd.p.dm": "-0", "_dd.p.tid": "654a694400000000", - "component": "yaaredis", - "db.system": "redis", + "component": "valkey", + "db.system": "valkey", "language": "python", "out.host": "localhost", - "redis.raw_command": "FLUSHALL", - "runtime-id": "b99b2a231e9242f5b3e0fb971df07ef8", + "runtime-id": "e263ff9ad1cd43099216a11ca5e19377", "server.address": "localhost", - "span.kind": "client" + "span.kind": "client", + "valkey.raw_command": "GET cheese" }, "metrics": { "_dd.measured": 1, "_dd.top_level": 
1, "_dd.tracer_kr": 1.0, "_sampling_priority_v1": 1, + "db.row_count": 0, "network.destination.port": 6379, - "out.redis_db": 0, - "process_id": 5155, - "redis.args_length": 1 + "out.valkey_db": 0, + "process_id": 20046, + "valkey.args_length": 2 }, - "duration": 9568708, - "start": 1692650070456463426 + "duration": 501125, + "start": 1692651821692035875 }], [ { - "name": "redis.command", - "service": "redis", + "name": "valkey.command", + "service": "cfg-valkey", "resource": "GET", "trace_id": 1, "span_id": 1, "parent_id": 0, - "type": "redis", + "type": "valkey", "error": 0, "meta": { - "_dd.base_service": "mysvc", + "_dd.base_service": "", "_dd.p.dm": "-0", "_dd.p.tid": "654a694400000000", - "component": "yaaredis", - "db.system": "redis", + "component": "valkey", + "db.system": "valkey", "language": "python", "out.host": "localhost", - "redis.raw_command": "GET cheese", - "runtime-id": "b99b2a231e9242f5b3e0fb971df07ef8", + "runtime-id": "e263ff9ad1cd43099216a11ca5e19377", "server.address": "localhost", - "span.kind": "client" + "span.kind": "client", + "valkey.raw_command": "GET cheese" }, "metrics": { "_dd.measured": 1, @@ -64,10 +65,10 @@ "_sampling_priority_v1": 1, "db.row_count": 0, "network.destination.port": 6379, - "out.redis_db": 0, - "process_id": 5155, - "redis.args_length": 2 + "out.valkey_db": 0, + "process_id": 20046, + "valkey.args_length": 2 }, - "duration": 304459, - "start": 1692650070493672884 + "duration": 329333, + "start": 1692651821722196292 }]] diff --git a/tests/snapshots/tests.contrib.valkey.test_valkey.test_full_command_in_resource_config.json b/tests/snapshots/tests.contrib.valkey.test_valkey.test_full_command_in_resource_config.json new file mode 100644 index 00000000000..c447412ee09 --- /dev/null +++ b/tests/snapshots/tests.contrib.valkey.test_valkey.test_full_command_in_resource_config.json @@ -0,0 +1,71 @@ +[[ + { + "name": "valkey.command", + "service": "valkey", + "resource": "GET put_key_in_resource", + "trace_id": 0, + "span_id": 1, + "parent_id": 0, + "type": "valkey", + "error": 0, + "meta": { + "_dd.base_service": "tests.contrib.valkey", + "_dd.p.dm": "-0", + "component": "valkey", + "db.system": "valkey", + "language": "python", + "out.host": "localhost", + "runtime-id": "3a1f7ba9b1ab42f4858e5effd03877ef", + "server.address": "localhost", + "span.kind": "client", + "valkey.raw_command": "GET put_key_in_resource" + }, + "metrics": { + "_dd.measured": 1, + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "db.row_count": 0, + "network.destination.port": 6379, + "out.valkey_db": 0, + "process_id": 65639, + "valkey.args_length": 2 + }, + "duration": 2978000, + "start": 1698858795260743000 + }], +[ + { + "name": "valkey.command", + "service": "valkey", + "resource": "SET pipeline-cmd1 1\nSET pipeline-cmd2 2", + "trace_id": 1, + "span_id": 1, + "parent_id": 0, + "type": "valkey", + "error": 0, + "meta": { + "_dd.base_service": "tests.contrib.valkey", + "_dd.p.dm": "-0", + "component": "valkey", + "db.system": "valkey", + "language": "python", + "out.host": "localhost", + "runtime-id": "3a1f7ba9b1ab42f4858e5effd03877ef", + "server.address": "localhost", + "span.kind": "client", + "valkey.raw_command": "SET pipeline-cmd1 1\nSET pipeline-cmd2 2" + }, + "metrics": { + "_dd.measured": 1, + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "network.destination.port": 6379, + "out.valkey_db": 0, + "process_id": 65639, + "valkey.pipeline_length": 2 + }, + "duration": 1408000, + "start": 1698858795278553000 + 
}]] diff --git a/tests/snapshots/tests.contrib.valkey.test_valkey.test_full_command_in_resource_env.json b/tests/snapshots/tests.contrib.valkey.test_valkey.test_full_command_in_resource_env.json new file mode 100644 index 00000000000..f7f89e8565a --- /dev/null +++ b/tests/snapshots/tests.contrib.valkey.test_valkey.test_full_command_in_resource_env.json @@ -0,0 +1,71 @@ +[[ + { + "name": "valkey.command", + "service": "valkey", + "resource": "GET put_key_in_resource", + "trace_id": 0, + "span_id": 1, + "parent_id": 0, + "type": "valkey", + "error": 0, + "meta": { + "_dd.base_service": "", + "_dd.p.dm": "-0", + "component": "valkey", + "db.system": "valkey", + "language": "python", + "out.host": "localhost", + "runtime-id": "451464ac55804a488cf355b1d96c7002", + "server.address": "localhost", + "span.kind": "client", + "valkey.raw_command": "GET put_key_in_resource" + }, + "metrics": { + "_dd.measured": 1, + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "db.row_count": 0, + "network.destination.port": 6379, + "out.valkey_db": 0, + "process_id": 65646, + "valkey.args_length": 2 + }, + "duration": 3112000, + "start": 1698858796156355000 + }], +[ + { + "name": "valkey.command", + "service": "valkey", + "resource": "SET pipeline-cmd1 1\nSET pipeline-cmd2 2", + "trace_id": 1, + "span_id": 1, + "parent_id": 0, + "type": "valkey", + "error": 0, + "meta": { + "_dd.base_service": "", + "_dd.p.dm": "-0", + "component": "valkey", + "db.system": "valkey", + "language": "python", + "out.host": "localhost", + "runtime-id": "451464ac55804a488cf355b1d96c7002", + "server.address": "localhost", + "span.kind": "client", + "valkey.raw_command": "SET pipeline-cmd1 1\nSET pipeline-cmd2 2" + }, + "metrics": { + "_dd.measured": 1, + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "network.destination.port": 6379, + "out.valkey_db": 0, + "process_id": 65646, + "valkey.pipeline_length": 2 + }, + "duration": 1246000, + "start": 1698858796167913000 + }]] diff --git a/tests/snapshots/tests.contrib.valkey.test_valkey.test_long_command.json b/tests/snapshots/tests.contrib.valkey.test_valkey.test_long_command.json new file mode 100644 index 00000000000..15378c706ba --- /dev/null +++ b/tests/snapshots/tests.contrib.valkey.test_valkey.test_long_command.json @@ -0,0 +1,37 @@ +[[ + { + "name": "valkey.command", + "service": "valkey", + "resource": "MGET", + "trace_id": 0, + "span_id": 1, + "parent_id": 0, + "type": "valkey", + "error": 0, + "meta": { + "_dd.base_service": "tests.contrib.valkey", + "_dd.p.dm": "-0", + "_dd.p.tid": "654a694400000000", + "component": "valkey", + "db.system": "valkey", + "language": "python", + "out.host": "localhost", + "runtime-id": "3cf1df7fb079462ab81608355e026651", + "server.address": "localhost", + "span.kind": "client", + "valkey.raw_command": "MGET 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 
202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 36..." + }, + "metrics": { + "_dd.measured": 1, + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "db.row_count": 0, + "network.destination.port": 6379, + "out.valkey_db": 0, + "process_id": 19999, + "valkey.args_length": 1001 + }, + "duration": 3428042, + "start": 1692651821775339875 + }]] diff --git a/tests/snapshots/tests.contrib.valkey.test_valkey.test_meta_override.json b/tests/snapshots/tests.contrib.valkey.test_valkey.test_meta_override.json new file mode 100644 index 00000000000..5edc6b45665 --- /dev/null +++ b/tests/snapshots/tests.contrib.valkey.test_valkey.test_meta_override.json @@ -0,0 +1,38 @@ +[[ + { + "name": "valkey.command", + "service": "valkey", + "resource": "GET", + "trace_id": 0, + "span_id": 1, + "parent_id": 0, + "type": "valkey", + "error": 0, + "meta": { + "_dd.base_service": "tests.contrib.valkey", + "_dd.p.dm": "-0", + "_dd.p.tid": "654a694400000000", + "cheese": "camembert", + "component": "valkey", + "db.system": "valkey", + "language": "python", + "out.host": "localhost", + "runtime-id": "3cf1df7fb079462ab81608355e026651", + "server.address": "localhost", + "span.kind": "client", + "valkey.raw_command": "GET cheese" + }, + "metrics": { + "_dd.measured": 1, + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "db.row_count": 0, + "network.destination.port": 6379, + "out.valkey_db": 0, + "process_id": 19999, + "valkey.args_length": 2 + }, + "duration": 474500, + "start": 1692651821790889125 + }]] diff --git a/tests/snapshots/tests.contrib.yaaredis.test_yaaredis.test_opentracing.json b/tests/snapshots/tests.contrib.valkey.test_valkey.test_opentracing.json similarity index 52% rename from tests/snapshots/tests.contrib.yaaredis.test_yaaredis.test_opentracing.json rename to tests/snapshots/tests.contrib.valkey.test_valkey.test_opentracing.json index ccee94088be..749bd3d3307 100644 --- a/tests/snapshots/tests.contrib.yaaredis.test_yaaredis.test_opentracing.json +++ b/tests/snapshots/tests.contrib.valkey.test_valkey.test_opentracing.json @@ -1,56 +1,56 @@ [[ { - "name": "redis_get", - "service": "redis_svc", - "resource": "redis_get", + "name": "valkey_get", + "service": "valkey_svc", + "resource": "valkey_get", "trace_id": 0, "span_id": 1, "parent_id": 0, "type": "", "error": 0, "meta": { - "_dd.base_service": "tests.contrib.yaaredis", + "_dd.base_service": "tests.contrib.valkey", "_dd.p.dm": "-0", "_dd.p.tid": "654a694400000000", "language": "python", - "runtime-id": "4d0d479c17be4095b8e0dee5a0839596" + "runtime-id": "3cf1df7fb079462ab81608355e026651" }, "metrics": { "_dd.top_level": 1, "_dd.tracer_kr": 1.0, "_sampling_priority_v1": 1, - "process_id": 5119 + "process_id": 19999 }, - "duration": 402211, - "start": 1692650065865079296 + "duration": 534179, + "start": 1692651821803009280 }, { - "name": "redis.command", - "service": "redis", + "name": 
"valkey.command", + "service": "valkey", "resource": "GET", "trace_id": 0, "span_id": 2, "parent_id": 1, - "type": "redis", + "type": "valkey", "error": 0, "meta": { - "_dd.base_service": "tests.contrib.yaaredis", + "_dd.base_service": "tests.contrib.valkey", "_dd.p.tid": "654a694400000000", - "component": "yaaredis", - "db.system": "redis", + "component": "valkey", + "db.system": "valkey", "out.host": "localhost", - "redis.raw_command": "GET cheese", "server.address": "localhost", - "span.kind": "client" + "span.kind": "client", + "valkey.raw_command": "GET cheese" }, "metrics": { "_dd.measured": 1, "_dd.top_level": 1, "db.row_count": 0, "network.destination.port": 6379, - "out.redis_db": 0, - "redis.args_length": 2 + "out.valkey_db": 0, + "valkey.args_length": 2 }, - "duration": 271333, - "start": 1692650065865181924 + "duration": 358500, + "start": 1692651821803151542 }]] diff --git a/tests/snapshots/tests.contrib.yaaredis.test_yaaredis.test_pipeline_immediate.json b/tests/snapshots/tests.contrib.valkey.test_valkey.test_pipeline_immediate.json similarity index 50% rename from tests/snapshots/tests.contrib.yaaredis.test_yaaredis.test_pipeline_immediate.json rename to tests/snapshots/tests.contrib.valkey.test_valkey.test_pipeline_immediate.json index d8843f3ac0c..5559b6a5959 100644 --- a/tests/snapshots/tests.contrib.yaaredis.test_yaaredis.test_pipeline_immediate.json +++ b/tests/snapshots/tests.contrib.valkey.test_valkey.test_pipeline_immediate.json @@ -1,25 +1,25 @@ [[ { - "name": "redis.command", - "service": "redis", + "name": "valkey.command", + "service": "valkey", "resource": "SET", "trace_id": 0, "span_id": 1, "parent_id": 0, - "type": "redis", + "type": "valkey", "error": 0, "meta": { - "_dd.base_service": "tests.contrib.yaaredis", + "_dd.base_service": "tests.contrib.valkey", "_dd.p.dm": "-0", "_dd.p.tid": "654a694400000000", - "component": "yaaredis", - "db.system": "redis", + "component": "valkey", + "db.system": "valkey", "language": "python", "out.host": "localhost", - "redis.raw_command": "SET a 1", - "runtime-id": "4d0d479c17be4095b8e0dee5a0839596", + "runtime-id": "3cf1df7fb079462ab81608355e026651", "server.address": "localhost", - "span.kind": "client" + "span.kind": "client", + "valkey.raw_command": "SET a 1" }, "metrics": { "_dd.measured": 1, @@ -27,35 +27,35 @@ "_dd.tracer_kr": 1.0, "_sampling_priority_v1": 1, "network.destination.port": 6379, - "out.redis_db": 0, - "process_id": 5119, - "redis.args_length": 3 + "out.valkey_db": 0, + "process_id": 19999, + "valkey.args_length": 3 }, - "duration": 246375, - "start": 1692650065826740341 + "duration": 343500, + "start": 1692651821823333917 }], [ { - "name": "redis.command", - "service": "redis", + "name": "valkey.command", + "service": "valkey", "resource": "SET", "trace_id": 1, "span_id": 1, "parent_id": 0, - "type": "redis", + "type": "valkey", "error": 0, "meta": { - "_dd.base_service": "tests.contrib.yaaredis", + "_dd.base_service": "tests.contrib.valkey", "_dd.p.dm": "-0", "_dd.p.tid": "654a694400000000", - "component": "yaaredis", - "db.system": "redis", + "component": "valkey", + "db.system": "valkey", "language": "python", "out.host": "localhost", - "redis.raw_command": "SET a 1", - "runtime-id": "4d0d479c17be4095b8e0dee5a0839596", + "runtime-id": "3cf1df7fb079462ab81608355e026651", "server.address": "localhost", - "span.kind": "client" + "span.kind": "client", + "valkey.raw_command": "SET a 1" }, "metrics": { "_dd.measured": 1, @@ -63,10 +63,10 @@ "_dd.tracer_kr": 1.0, "_sampling_priority_v1": 1, 
"network.destination.port": 6379, - "out.redis_db": 0, - "process_id": 5119, - "redis.pipeline_length": 1 + "out.valkey_db": 0, + "process_id": 19999, + "valkey.pipeline_length": 1 }, - "duration": 181666, - "start": 1692650065827078091 + "duration": 158750, + "start": 1692651821823756750 }]] diff --git a/tests/snapshots/tests.contrib.valkey.test_valkey.test_pipeline_traced.json b/tests/snapshots/tests.contrib.valkey.test_valkey.test_pipeline_traced.json new file mode 100644 index 00000000000..c5e90a181b3 --- /dev/null +++ b/tests/snapshots/tests.contrib.valkey.test_valkey.test_pipeline_traced.json @@ -0,0 +1,36 @@ +[[ + { + "name": "valkey.command", + "service": "valkey", + "resource": "SET\nRPUSH\nHGETALL", + "trace_id": 0, + "span_id": 1, + "parent_id": 0, + "type": "valkey", + "error": 0, + "meta": { + "_dd.base_service": "tests.contrib.valkey", + "_dd.p.dm": "-0", + "_dd.p.tid": "654a694400000000", + "component": "valkey", + "db.system": "valkey", + "language": "python", + "out.host": "localhost", + "runtime-id": "3cf1df7fb079462ab81608355e026651", + "server.address": "localhost", + "span.kind": "client", + "valkey.raw_command": "SET blah 32\nRPUSH foo \u00e9\u00e9\nHGETALL xxx" + }, + "metrics": { + "_dd.measured": 1, + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "network.destination.port": 6379, + "out.valkey_db": 0, + "process_id": 19999, + "valkey.pipeline_length": 3 + }, + "duration": 589917, + "start": 1692651821833429417 + }]] diff --git a/tests/snapshots/tests.contrib.yaaredis.test_yaaredis.test_cmd_max_length.json b/tests/snapshots/tests.contrib.valkey.test_valkey.test_service_precedence.json similarity index 52% rename from tests/snapshots/tests.contrib.yaaredis.test_yaaredis.test_cmd_max_length.json rename to tests/snapshots/tests.contrib.valkey.test_valkey.test_service_precedence.json index b39a54205e8..27979635427 100644 --- a/tests/snapshots/tests.contrib.yaaredis.test_yaaredis.test_cmd_max_length.json +++ b/tests/snapshots/tests.contrib.valkey.test_valkey.test_service_precedence.json @@ -1,25 +1,25 @@ [[ { - "name": "redis.command", - "service": "redis", + "name": "valkey.command", + "service": "env-valkey", "resource": "GET", "trace_id": 0, "span_id": 1, "parent_id": 0, - "type": "redis", + "type": "valkey", "error": 0, "meta": { - "_dd.base_service": "tests.contrib.yaaredis", + "_dd.base_service": "app-svc", "_dd.p.dm": "-0", "_dd.p.tid": "654a694400000000", - "component": "yaaredis", - "db.system": "redis", + "component": "valkey", + "db.system": "valkey", "language": "python", "out.host": "localhost", - "redis.raw_command": "GET here...", - "runtime-id": "4d0d479c17be4095b8e0dee5a0839596", + "runtime-id": "0f782133fa34462daf85cad95bb55fd2", "server.address": "localhost", - "span.kind": "client" + "span.kind": "client", + "valkey.raw_command": "GET cheese" }, "metrics": { "_dd.measured": 1, @@ -28,10 +28,10 @@ "_sampling_priority_v1": 1, "db.row_count": 0, "network.destination.port": 6379, - "out.redis_db": 0, - "process_id": 5119, - "redis.args_length": 2 + "out.valkey_db": 0, + "process_id": 20052, + "valkey.args_length": 2 }, - "duration": 227917, - "start": 1692650065760107632 + "duration": 423750, + "start": 1692651822324419751 }]] diff --git a/tests/snapshots/tests.contrib.valkey.test_valkey.test_unicode.json b/tests/snapshots/tests.contrib.valkey.test_valkey.test_unicode.json new file mode 100644 index 00000000000..09f6f46fd3d --- /dev/null +++ b/tests/snapshots/tests.contrib.valkey.test_valkey.test_unicode.json @@ -0,0 +1,37 @@ 
+[[ + { + "name": "valkey.command", + "service": "valkey", + "resource": "GET", + "trace_id": 0, + "span_id": 1, + "parent_id": 0, + "type": "valkey", + "error": 0, + "meta": { + "_dd.base_service": "tests.contrib.valkey", + "_dd.p.dm": "-0", + "_dd.p.tid": "654a694400000000", + "component": "valkey", + "db.system": "valkey", + "language": "python", + "out.host": "localhost", + "runtime-id": "3cf1df7fb079462ab81608355e026651", + "server.address": "localhost", + "span.kind": "client", + "valkey.raw_command": "GET \ud83d\ude10" + }, + "metrics": { + "_dd.measured": 1, + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "db.row_count": 0, + "network.destination.port": 6379, + "out.valkey_db": 0, + "process_id": 19999, + "valkey.args_length": 2 + }, + "duration": 512083, + "start": 1692651822408832834 + }]] diff --git a/tests/snapshots/tests.contrib.valkey.test_valkey.test_user_specified_service.json b/tests/snapshots/tests.contrib.valkey.test_valkey.test_user_specified_service.json new file mode 100644 index 00000000000..7a91612554e --- /dev/null +++ b/tests/snapshots/tests.contrib.valkey.test_valkey.test_user_specified_service.json @@ -0,0 +1,37 @@ +[[ + { + "name": "valkey.command", + "service": "valkey", + "resource": "GET", + "trace_id": 0, + "span_id": 1, + "parent_id": 0, + "type": "valkey", + "error": 0, + "meta": { + "_dd.base_service": "mysvc", + "_dd.p.dm": "-0", + "_dd.p.tid": "654a694400000000", + "component": "valkey", + "db.system": "valkey", + "language": "python", + "out.host": "localhost", + "runtime-id": "9d4dd102c4394715976611e15b961233", + "server.address": "localhost", + "span.kind": "client", + "valkey.raw_command": "GET cheese" + }, + "metrics": { + "_dd.measured": 1, + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "db.row_count": 0, + "network.destination.port": 6379, + "out.valkey_db": 0, + "process_id": 20056, + "valkey.args_length": 2 + }, + "duration": 439500, + "start": 1692651822941153668 + }]] diff --git a/tests/snapshots/tests.contrib.valkey.test_valkey_asyncio.test_basic_request.json b/tests/snapshots/tests.contrib.valkey.test_valkey_asyncio.test_basic_request.json new file mode 100644 index 00000000000..70b0e166d97 --- /dev/null +++ b/tests/snapshots/tests.contrib.valkey.test_valkey_asyncio.test_basic_request.json @@ -0,0 +1,37 @@ +[[ + { + "name": "valkey.command", + "service": "valkey", + "resource": "GET", + "trace_id": 0, + "span_id": 1, + "parent_id": 0, + "type": "valkey", + "error": 0, + "meta": { + "_dd.base_service": "tests.contrib.valkey", + "_dd.p.dm": "-0", + "_dd.p.tid": "654a694400000000", + "component": "valkey", + "db.system": "valkey", + "language": "python", + "out.host": "127.0.0.1", + "runtime-id": "3cf1df7fb079462ab81608355e026651", + "server.address": "127.0.0.1", + "span.kind": "client", + "valkey.raw_command": "GET cheese" + }, + "metrics": { + "_dd.measured": 1, + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "db.row_count": 0, + "network.destination.port": 6379, + "out.valkey_db": 0, + "process_id": 19999, + "valkey.args_length": 2 + }, + "duration": 595750, + "start": 1692651823036625793 + }]] diff --git a/tests/snapshots/tests.contrib.valkey.test_valkey_asyncio.test_client_name.json b/tests/snapshots/tests.contrib.valkey.test_valkey_asyncio.test_client_name.json new file mode 100644 index 00000000000..4f7b2688d76 --- /dev/null +++ b/tests/snapshots/tests.contrib.valkey.test_valkey_asyncio.test_client_name.json @@ -0,0 +1,57 @@ +[[ + { + "name": 
"web-request", + "service": "test", + "resource": "web-request", + "trace_id": 0, + "span_id": 1, + "parent_id": 0, + "type": "", + "error": 0, + "meta": { + "_dd.base_service": "tests.contrib.valkey", + "_dd.p.dm": "-0", + "_dd.p.tid": "654a694400000000", + "language": "python", + "runtime-id": "3cf1df7fb079462ab81608355e026651" + }, + "metrics": { + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "process_id": 19999 + }, + "duration": 828125, + "start": 1692651823188535376 + }, + { + "name": "valkey.command", + "service": "valkey", + "resource": "GET", + "trace_id": 0, + "span_id": 2, + "parent_id": 1, + "type": "valkey", + "error": 0, + "meta": { + "_dd.base_service": "tests.contrib.valkey", + "_dd.p.tid": "654a694400000000", + "component": "valkey", + "db.system": "valkey", + "out.host": "127.0.0.1", + "server.address": "127.0.0.1", + "span.kind": "client", + "valkey.client_name": "testing-client-name", + "valkey.raw_command": "GET blah" + }, + "metrics": { + "_dd.measured": 1, + "_dd.top_level": 1, + "db.row_count": 0, + "network.destination.port": 6379, + "out.valkey_db": 0, + "valkey.args_length": 2 + }, + "duration": 541041, + "start": 1692651823188798168 + }]] diff --git a/tests/snapshots/tests.contrib.valkey.test_valkey_asyncio.test_connection_error.json b/tests/snapshots/tests.contrib.valkey.test_valkey_asyncio.test_connection_error.json new file mode 100644 index 00000000000..64609dd8614 --- /dev/null +++ b/tests/snapshots/tests.contrib.valkey.test_valkey_asyncio.test_connection_error.json @@ -0,0 +1,40 @@ +[[ + { + "name": "valkey.command", + "service": "valkey", + "resource": "GET", + "trace_id": 0, + "span_id": 1, + "parent_id": 0, + "type": "valkey", + "error": 1, + "meta": { + "_dd.base_service": "tests.contrib.valkey", + "_dd.p.dm": "-0", + "_dd.p.tid": "654a694400000000", + "component": "valkey", + "db.system": "valkey", + "error.message": "whatever", + "error.stack": "Traceback (most recent call last):\n File \"/root/project/ddtrace/contrib/trace_utils_valkey.py\", line 117, in _trace_valkey_cmd\n yield span\n File \"/root/project/ddtrace/contrib/valkey/asyncio_patch.py\", line 22, in traced_async_execute_command\n return await _run_valkey_command_async(span=span, func=func, args=args, kwargs=kwargs)\n File \"/root/project/ddtrace/contrib/valkey/asyncio_patch.py\", line 41, in _run_valkey_command_async\n result = await func(*args, **kwargs)\n File \"/root/project/.riot/venv_py31011_mock_pytest_pytest-mock_coverage_pytest-cov_opentracing_hypothesis6451_pytest-asyncio_valkey~41/lib/python3.10/site-packages/valkey/asyncio/client.py\", line 509, in execute_command\n conn = self.connection or await pool.get_connection(command_name, **options)\n File \"/root/.pyenv/versions/3.10.11/lib/python3.10/unittest/mock.py\", line 2234, in _execute_mock_call\n raise effect\nvalkey.exceptions.ConnectionError: whatever\n", + "error.type": "valkey.exceptions.ConnectionError", + "language": "python", + "out.host": "127.0.0.1", + "runtime-id": "dc59875580884b52bebd2f9c402238f8", + "server.address": "127.0.0.1", + "span.kind": "client", + "valkey.raw_command": "GET foo" + }, + "metrics": { + "_dd.measured": 1, + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "db.row_count": 0, + "network.destination.port": 6379, + "out.valkey_db": 0, + "process_id": 2340, + "valkey.args_length": 2 + }, + "duration": 935417, + "start": 1695409673533997174 + }]] diff --git 
a/tests/snapshots/tests.contrib.valkey.test_valkey_asyncio.test_decoding_non_utf8_args.json b/tests/snapshots/tests.contrib.valkey.test_valkey_asyncio.test_decoding_non_utf8_args.json new file mode 100644 index 00000000000..649d89db933 --- /dev/null +++ b/tests/snapshots/tests.contrib.valkey.test_valkey_asyncio.test_decoding_non_utf8_args.json @@ -0,0 +1,73 @@ +[[ + { + "name": "valkey.command", + "service": "valkey", + "resource": "SET", + "trace_id": 0, + "span_id": 1, + "parent_id": 0, + "type": "valkey", + "error": 0, + "meta": { + "_dd.base_service": "tests.contrib.valkey", + "_dd.p.dm": "-0", + "_dd.p.tid": "654a694400000000", + "component": "valkey", + "db.system": "valkey", + "language": "python", + "out.host": "127.0.0.1", + "runtime-id": "3cf1df7fb079462ab81608355e026651", + "server.address": "127.0.0.1", + "span.kind": "client", + "valkey.raw_command": "SET \\x80foo \\x80abc" + }, + "metrics": { + "_dd.measured": 1, + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "network.destination.port": 6379, + "out.valkey_db": 0, + "process_id": 19999, + "valkey.args_length": 3 + }, + "duration": 512917, + "start": 1692651823066497751 + }], +[ + { + "name": "valkey.command", + "service": "valkey", + "resource": "GET", + "trace_id": 1, + "span_id": 1, + "parent_id": 0, + "type": "valkey", + "error": 0, + "meta": { + "_dd.base_service": "tests.contrib.valkey", + "_dd.p.dm": "-0", + "_dd.p.tid": "654a694400000000", + "component": "valkey", + "db.system": "valkey", + "language": "python", + "out.host": "127.0.0.1", + "runtime-id": "3cf1df7fb079462ab81608355e026651", + "server.address": "127.0.0.1", + "span.kind": "client", + "valkey.raw_command": "GET \\x80foo" + }, + "metrics": { + "_dd.measured": 1, + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "db.row_count": 1, + "network.destination.port": 6379, + "out.valkey_db": 0, + "process_id": 19999, + "valkey.args_length": 2 + }, + "duration": 330333, + "start": 1692651823067101001 + }]] diff --git a/tests/snapshots/tests.contrib.valkey.test_valkey_asyncio.test_decoding_non_utf8_pipeline_args.json b/tests/snapshots/tests.contrib.valkey.test_valkey_asyncio.test_decoding_non_utf8_pipeline_args.json new file mode 100644 index 00000000000..c22d2347b5e --- /dev/null +++ b/tests/snapshots/tests.contrib.valkey.test_valkey_asyncio.test_decoding_non_utf8_pipeline_args.json @@ -0,0 +1,36 @@ +[[ + { + "name": "valkey.command", + "service": "valkey", + "resource": "SET\nSET\nGET\nGET", + "trace_id": 0, + "span_id": 1, + "parent_id": 0, + "type": "valkey", + "error": 0, + "meta": { + "_dd.base_service": "tests.contrib.valkey", + "_dd.p.dm": "-0", + "_dd.p.tid": "654a694400000000", + "component": "valkey", + "db.system": "valkey", + "language": "python", + "out.host": "127.0.0.1", + "runtime-id": "3cf1df7fb079462ab81608355e026651", + "server.address": "127.0.0.1", + "span.kind": "client", + "valkey.raw_command": "SET \\x80blah boo\nSET foo \\x80abc\nGET \\x80blah\nGET foo" + }, + "metrics": { + "_dd.measured": 1, + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "network.destination.port": 6379, + "out.valkey_db": 0, + "process_id": 19999, + "valkey.pipeline_length": 4 + }, + "duration": 404709, + "start": 1692651823079707584 + }]] diff --git a/tests/snapshots/tests.contrib.valkey.test_valkey_asyncio.test_long_command.json b/tests/snapshots/tests.contrib.valkey.test_valkey_asyncio.test_long_command.json new file mode 100644 index 00000000000..9f4e40ffd1b --- /dev/null +++ 
b/tests/snapshots/tests.contrib.valkey.test_valkey_asyncio.test_long_command.json @@ -0,0 +1,37 @@ +[[ + { + "name": "valkey.command", + "service": "valkey", + "resource": "MGET", + "trace_id": 0, + "span_id": 1, + "parent_id": 0, + "type": "valkey", + "error": 0, + "meta": { + "_dd.base_service": "tests.contrib.valkey", + "_dd.p.dm": "-0", + "_dd.p.tid": "654a694400000000", + "component": "valkey", + "db.system": "valkey", + "language": "python", + "out.host": "127.0.0.1", + "runtime-id": "3cf1df7fb079462ab81608355e026651", + "server.address": "127.0.0.1", + "span.kind": "client", + "valkey.raw_command": "MGET 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 36..." 
+ }, + "metrics": { + "_dd.measured": 1, + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "db.row_count": 0, + "network.destination.port": 6379, + "out.valkey_db": 0, + "process_id": 19999, + "valkey.args_length": 1001 + }, + "duration": 5689625, + "start": 1692651823091333793 + }]] diff --git a/tests/snapshots/tests.contrib.valkey.test_valkey_asyncio.test_override_service_name.json b/tests/snapshots/tests.contrib.valkey.test_valkey_asyncio.test_override_service_name.json new file mode 100644 index 00000000000..f3d0bce583a --- /dev/null +++ b/tests/snapshots/tests.contrib.valkey.test_valkey_asyncio.test_override_service_name.json @@ -0,0 +1,110 @@ +[[ + { + "name": "valkey.command", + "service": "myvalkey", + "resource": "GET", + "trace_id": 0, + "span_id": 1, + "parent_id": 0, + "type": "valkey", + "error": 0, + "meta": { + "_dd.base_service": "tests.contrib.valkey", + "_dd.p.dm": "-0", + "_dd.p.tid": "654a694400000000", + "component": "valkey", + "db.system": "valkey", + "language": "python", + "out.host": "127.0.0.1", + "runtime-id": "3cf1df7fb079462ab81608355e026651", + "server.address": "127.0.0.1", + "span.kind": "client", + "valkey.raw_command": "GET cheese" + }, + "metrics": { + "_dd.measured": 1, + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "db.row_count": 0, + "network.destination.port": 6379, + "out.valkey_db": 0, + "process_id": 19999, + "valkey.args_length": 2 + }, + "duration": 297666, + "start": 1692651823109161293 + }], +[ + { + "name": "valkey.command", + "service": "myvalkey", + "resource": "SET", + "trace_id": 1, + "span_id": 1, + "parent_id": 0, + "type": "valkey", + "error": 0, + "meta": { + "_dd.base_service": "tests.contrib.valkey", + "_dd.p.dm": "-0", + "_dd.p.tid": "654a694400000000", + "component": "valkey", + "db.system": "valkey", + "language": "python", + "out.host": "127.0.0.1", + "runtime-id": "3cf1df7fb079462ab81608355e026651", + "server.address": "127.0.0.1", + "span.kind": "client", + "valkey.raw_command": "SET cheese my-cheese" + }, + "metrics": { + "_dd.measured": 1, + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "network.destination.port": 6379, + "out.valkey_db": 0, + "process_id": 19999, + "valkey.args_length": 3 + }, + "duration": 230084, + "start": 1692651823109550709 + }], +[ + { + "name": "valkey.command", + "service": "myvalkey", + "resource": "GET", + "trace_id": 2, + "span_id": 1, + "parent_id": 0, + "type": "valkey", + "error": 0, + "meta": { + "_dd.base_service": "tests.contrib.valkey", + "_dd.p.dm": "-0", + "_dd.p.tid": "654a694400000000", + "component": "valkey", + "db.system": "valkey", + "language": "python", + "out.host": "127.0.0.1", + "runtime-id": "3cf1df7fb079462ab81608355e026651", + "server.address": "127.0.0.1", + "span.kind": "client", + "valkey.raw_command": "GET cheese" + }, + "metrics": { + "_dd.measured": 1, + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "db.row_count": 1, + "network.destination.port": 6379, + "out.valkey_db": 0, + "process_id": 19999, + "valkey.args_length": 2 + }, + "duration": 160875, + "start": 1692651823109840043 + }]] diff --git a/tests/snapshots/tests.contrib.valkey.test_valkey_asyncio.test_parenting.json b/tests/snapshots/tests.contrib.valkey.test_valkey_asyncio.test_parenting.json new file mode 100644 index 00000000000..c9a38d7fa31 --- /dev/null +++ b/tests/snapshots/tests.contrib.valkey.test_valkey_asyncio.test_parenting.json @@ -0,0 +1,85 @@ +[[ + { + "name": "web-request", + 
"service": "test", + "resource": "web-request", + "trace_id": 0, + "span_id": 1, + "parent_id": 0, + "type": "", + "error": 0, + "meta": { + "_dd.base_service": "tests.contrib.valkey", + "_dd.p.dm": "-0", + "_dd.p.tid": "654a694400000000", + "language": "python", + "runtime-id": "3cf1df7fb079462ab81608355e026651" + }, + "metrics": { + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "process_id": 19999 + }, + "duration": 953000, + "start": 1692651823176740209 + }, + { + "name": "valkey.command", + "service": "valkey", + "resource": "SET", + "trace_id": 0, + "span_id": 2, + "parent_id": 1, + "type": "valkey", + "error": 0, + "meta": { + "_dd.base_service": "tests.contrib.valkey", + "_dd.p.tid": "654a694400000000", + "component": "valkey", + "db.system": "valkey", + "out.host": "127.0.0.1", + "server.address": "127.0.0.1", + "span.kind": "client", + "valkey.raw_command": "SET blah boo" + }, + "metrics": { + "_dd.measured": 1, + "_dd.top_level": 1, + "network.destination.port": 6379, + "out.valkey_db": 0, + "valkey.args_length": 3 + }, + "duration": 270791, + "start": 1692651823176857918 + }, + { + "name": "valkey.command", + "service": "valkey", + "resource": "GET", + "trace_id": 0, + "span_id": 3, + "parent_id": 1, + "type": "valkey", + "error": 0, + "meta": { + "_dd.base_service": "tests.contrib.valkey", + "_dd.p.tid": "654a694400000000", + "component": "valkey", + "db.system": "valkey", + "out.host": "127.0.0.1", + "server.address": "127.0.0.1", + "span.kind": "client", + "valkey.raw_command": "GET blah" + }, + "metrics": { + "_dd.measured": 1, + "_dd.top_level": 1, + "db.row_count": 1, + "network.destination.port": 6379, + "out.valkey_db": 0, + "valkey.args_length": 2 + }, + "duration": 499000, + "start": 1692651823177170168 + }]] diff --git a/tests/snapshots/tests.contrib.valkey.test_valkey_asyncio.test_pin.json b/tests/snapshots/tests.contrib.valkey.test_valkey_asyncio.test_pin.json new file mode 100644 index 00000000000..91c995cc259 --- /dev/null +++ b/tests/snapshots/tests.contrib.valkey.test_valkey_asyncio.test_pin.json @@ -0,0 +1,37 @@ +[[ + { + "name": "valkey.command", + "service": "my-valkey", + "resource": "GET", + "trace_id": 0, + "span_id": 1, + "parent_id": 0, + "type": "valkey", + "error": 0, + "meta": { + "_dd.base_service": "tests.contrib.valkey", + "_dd.p.dm": "-0", + "_dd.p.tid": "654a694400000000", + "component": "valkey", + "db.system": "valkey", + "language": "python", + "out.host": "127.0.0.1", + "runtime-id": "3cf1df7fb079462ab81608355e026651", + "server.address": "127.0.0.1", + "span.kind": "client", + "valkey.raw_command": "GET cheese" + }, + "metrics": { + "_dd.measured": 1, + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "db.row_count": 0, + "network.destination.port": 6379, + "out.valkey_db": 0, + "process_id": 19999, + "valkey.args_length": 2 + }, + "duration": 327417, + "start": 1692651823121474251 + }]] diff --git a/tests/snapshots/tests.contrib.valkey.test_valkey_asyncio.test_pipeline_traced.json b/tests/snapshots/tests.contrib.valkey.test_valkey_asyncio.test_pipeline_traced.json new file mode 100644 index 00000000000..e267216e24f --- /dev/null +++ b/tests/snapshots/tests.contrib.valkey.test_valkey_asyncio.test_pipeline_traced.json @@ -0,0 +1,36 @@ +[[ + { + "name": "valkey.command", + "service": "valkey", + "resource": "SET\nSET\nGET\nGET", + "trace_id": 0, + "span_id": 1, + "parent_id": 0, + "type": "valkey", + "error": 0, + "meta": { + "_dd.base_service": "tests.contrib.valkey", + "_dd.p.dm": 
"-0", + "_dd.p.tid": "654a694400000000", + "component": "valkey", + "db.system": "valkey", + "language": "python", + "out.host": "127.0.0.1", + "runtime-id": "3cf1df7fb079462ab81608355e026651", + "server.address": "127.0.0.1", + "span.kind": "client", + "valkey.raw_command": "SET blah boo\nSET foo bar\nGET blah\nGET foo" + }, + "metrics": { + "_dd.measured": 1, + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "network.destination.port": 6379, + "out.valkey_db": 0, + "process_id": 19999, + "valkey.pipeline_length": 4 + }, + "duration": 384125, + "start": 1692651823134602834 + }]] diff --git a/tests/snapshots/tests.contrib.valkey.test_valkey_asyncio.test_pipeline_traced_context_manager_transaction.json b/tests/snapshots/tests.contrib.valkey.test_valkey_asyncio.test_pipeline_traced_context_manager_transaction.json new file mode 100644 index 00000000000..72633ef5e16 --- /dev/null +++ b/tests/snapshots/tests.contrib.valkey.test_valkey_asyncio.test_pipeline_traced_context_manager_transaction.json @@ -0,0 +1,36 @@ +[[ + { + "name": "valkey.command", + "service": "valkey", + "resource": "SET\nSET\nGET\nGET", + "trace_id": 0, + "span_id": 1, + "parent_id": 0, + "type": "valkey", + "error": 0, + "meta": { + "_dd.base_service": "tests.contrib.valkey", + "_dd.p.dm": "-0", + "_dd.p.tid": "654a694400000000", + "component": "valkey", + "db.system": "valkey", + "language": "python", + "out.host": "127.0.0.1", + "runtime-id": "3cf1df7fb079462ab81608355e026651", + "server.address": "127.0.0.1", + "span.kind": "client", + "valkey.raw_command": "SET blah boo\nSET foo bar\nGET blah\nGET foo" + }, + "metrics": { + "_dd.measured": 1, + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "network.destination.port": 6379, + "out.valkey_db": 0, + "process_id": 19999, + "valkey.pipeline_length": 4 + }, + "duration": 507125, + "start": 1692651823152247501 + }]] diff --git a/tests/snapshots/tests.contrib.valkey.test_valkey_asyncio.test_two_traced_pipelines.json b/tests/snapshots/tests.contrib.valkey.test_valkey_asyncio.test_two_traced_pipelines.json new file mode 100644 index 00000000000..60ff68c9b1b --- /dev/null +++ b/tests/snapshots/tests.contrib.valkey.test_valkey_asyncio.test_two_traced_pipelines.json @@ -0,0 +1,84 @@ +[[ + { + "name": "web-request", + "service": "test", + "resource": "web-request", + "trace_id": 0, + "span_id": 1, + "parent_id": 0, + "type": "", + "error": 0, + "meta": { + "_dd.base_service": "tests.contrib.valkey", + "_dd.p.dm": "-0", + "_dd.p.tid": "654a694400000000", + "language": "python", + "runtime-id": "3cf1df7fb079462ab81608355e026651" + }, + "metrics": { + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "process_id": 19999 + }, + "duration": 940000, + "start": 1692651823164019209 + }, + { + "name": "valkey.command", + "service": "valkey", + "resource": "SET\nGET", + "trace_id": 0, + "span_id": 2, + "parent_id": 1, + "type": "valkey", + "error": 0, + "meta": { + "_dd.base_service": "tests.contrib.valkey", + "_dd.p.tid": "654a694400000000", + "component": "valkey", + "db.system": "valkey", + "out.host": "127.0.0.1", + "server.address": "127.0.0.1", + "span.kind": "client", + "valkey.raw_command": "SET blah boo\nGET blah" + }, + "metrics": { + "_dd.measured": 1, + "_dd.top_level": 1, + "network.destination.port": 6379, + "out.valkey_db": 0, + "valkey.pipeline_length": 2 + }, + "duration": 352833, + "start": 1692651823164207293 + }, + { + "name": "valkey.command", + "service": "valkey", + "resource": 
"SET\nGET", + "trace_id": 0, + "span_id": 3, + "parent_id": 1, + "type": "valkey", + "error": 0, + "meta": { + "_dd.base_service": "tests.contrib.valkey", + "_dd.p.tid": "654a694400000000", + "component": "valkey", + "db.system": "valkey", + "out.host": "127.0.0.1", + "server.address": "127.0.0.1", + "span.kind": "client", + "valkey.raw_command": "SET foo bar\nGET foo" + }, + "metrics": { + "_dd.measured": 1, + "_dd.top_level": 1, + "network.destination.port": 6379, + "out.valkey_db": 0, + "valkey.pipeline_length": 2 + }, + "duration": 310042, + "start": 1692651823164624126 + }]] diff --git a/tests/snapshots/tests.contrib.valkey.test_valkey_asyncio.test_unicode_request.json b/tests/snapshots/tests.contrib.valkey.test_valkey_asyncio.test_unicode_request.json new file mode 100644 index 00000000000..c6db207fd51 --- /dev/null +++ b/tests/snapshots/tests.contrib.valkey.test_valkey_asyncio.test_unicode_request.json @@ -0,0 +1,37 @@ +[[ + { + "name": "valkey.command", + "service": "valkey", + "resource": "GET", + "trace_id": 0, + "span_id": 1, + "parent_id": 0, + "type": "valkey", + "error": 0, + "meta": { + "_dd.base_service": "tests.contrib.valkey", + "_dd.p.dm": "-0", + "_dd.p.tid": "654a694400000000", + "component": "valkey", + "db.system": "valkey", + "language": "python", + "out.host": "127.0.0.1", + "runtime-id": "3cf1df7fb079462ab81608355e026651", + "server.address": "127.0.0.1", + "span.kind": "client", + "valkey.raw_command": "GET \ud83d\ude10" + }, + "metrics": { + "_dd.measured": 1, + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "db.row_count": 0, + "network.destination.port": 6379, + "out.valkey_db": 0, + "process_id": 19999, + "valkey.args_length": 2 + }, + "duration": 300041, + "start": 1692651823049427543 + }]] diff --git a/tests/snapshots/tests.contrib.yaaredis.test_yaaredis.test_full_command_in_resource_config[True].json b/tests/snapshots/tests.contrib.yaaredis.test_yaaredis.test_full_command_in_resource_config[True].json deleted file mode 100644 index f453d45ceb2..00000000000 --- a/tests/snapshots/tests.contrib.yaaredis.test_yaaredis.test_full_command_in_resource_config[True].json +++ /dev/null @@ -1,82 +0,0 @@ -[[ - { - "name": "web-request", - "service": "test", - "resource": "web-request", - "trace_id": 0, - "span_id": 1, - "parent_id": 0, - "type": "", - "error": 0, - "meta": { - "_dd.base_service": "tests.contrib.yaaredis", - "_dd.p.dm": "-0", - "language": "python", - "runtime-id": "8684af00a9414982b4794ddcadcd26ec" - }, - "metrics": { - "_dd.top_level": 1, - "_dd.tracer_kr": 1.0, - "_sampling_priority_v1": 1, - "process_id": 78504 - }, - "duration": 1603000, - "start": 1698860084361738000 - }, - { - "name": "redis.command", - "service": "redis", - "resource": "GET put_key_in_resource", - "trace_id": 0, - "span_id": 2, - "parent_id": 1, - "type": "redis", - "error": 0, - "meta": { - "_dd.base_service": "tests.contrib.yaaredis", - "component": "yaaredis", - "db.system": "redis", - "out.host": "localhost", - "redis.raw_command": "GET put_key_in_resource", - "server.address": "localhost", - "span.kind": "client" - }, - "metrics": { - "_dd.measured": 1, - "_dd.top_level": 1, - "db.row_count": 0, - "network.destination.port": 6379, - "out.redis_db": 0, - "redis.args_length": 2 - }, - "duration": 659000, - "start": 1698860084362141000 - }, - { - "name": "redis.command", - "service": "redis", - "resource": "SET pipeline-cmd1 1\nSET pipeline-cmd2 2", - "trace_id": 0, - "span_id": 3, - "parent_id": 1, - "type": "redis", - "error": 0, - "meta": { - 
"_dd.base_service": "tests.contrib.yaaredis", - "component": "yaaredis", - "db.system": "redis", - "out.host": "localhost", - "redis.raw_command": "SET pipeline-cmd1 1\nSET pipeline-cmd2 2", - "server.address": "localhost", - "span.kind": "client" - }, - "metrics": { - "_dd.measured": 1, - "_dd.top_level": 1, - "network.destination.port": 6379, - "out.redis_db": 0, - "redis.pipeline_length": 2 - }, - "duration": 442000, - "start": 1698860084362884000 - }]] diff --git a/tests/snapshots/tests.contrib.yaaredis.test_yaaredis.test_full_command_in_resource_env.json b/tests/snapshots/tests.contrib.yaaredis.test_yaaredis.test_full_command_in_resource_env.json deleted file mode 100644 index 09c08bf56a0..00000000000 --- a/tests/snapshots/tests.contrib.yaaredis.test_yaaredis.test_full_command_in_resource_env.json +++ /dev/null @@ -1,82 +0,0 @@ -[[ - { - "name": "web-request", - "service": "test", - "resource": "web-request", - "trace_id": 0, - "span_id": 1, - "parent_id": 0, - "type": "", - "error": 0, - "meta": { - "_dd.base_service": "ddtrace_subprocess_dir", - "_dd.p.dm": "-0", - "language": "python", - "runtime-id": "3c2060b13ff1469387b2c823d7d43f18" - }, - "metrics": { - "_dd.top_level": 1, - "_dd.tracer_kr": 1.0, - "_sampling_priority_v1": 1, - "process_id": 78507 - }, - "duration": 14945000, - "start": 1698860084269055000 - }, - { - "name": "redis.command", - "service": "redis", - "resource": "GET put_key_in_resource", - "trace_id": 0, - "span_id": 2, - "parent_id": 1, - "type": "redis", - "error": 0, - "meta": { - "_dd.base_service": "ddtrace_subprocess_dir", - "component": "yaaredis", - "db.system": "redis", - "out.host": "localhost", - "redis.raw_command": "GET put_key_in_resource", - "server.address": "localhost", - "span.kind": "client" - }, - "metrics": { - "_dd.measured": 1, - "_dd.top_level": 1, - "db.row_count": 0, - "network.destination.port": 6379, - "out.redis_db": 0, - "redis.args_length": 2 - }, - "duration": 12481000, - "start": 1698860084269179000 - }, - { - "name": "redis.command", - "service": "redis", - "resource": "SET pipeline-cmd1 1\nSET pipeline-cmd2 2", - "trace_id": 0, - "span_id": 3, - "parent_id": 1, - "type": "redis", - "error": 0, - "meta": { - "_dd.base_service": "ddtrace_subprocess_dir", - "component": "yaaredis", - "db.system": "redis", - "out.host": "localhost", - "redis.raw_command": "SET pipeline-cmd1 1\nSET pipeline-cmd2 2", - "server.address": "localhost", - "span.kind": "client" - }, - "metrics": { - "_dd.measured": 1, - "_dd.top_level": 1, - "network.destination.port": 6379, - "out.redis_db": 0, - "redis.pipeline_length": 2 - }, - "duration": 2254000, - "start": 1698860084281737000 - }]] diff --git a/tests/snapshots/tests.contrib.yaaredis.test_yaaredis.test_long_command.json b/tests/snapshots/tests.contrib.yaaredis.test_yaaredis.test_long_command.json deleted file mode 100644 index c21f8fc51f5..00000000000 --- a/tests/snapshots/tests.contrib.yaaredis.test_yaaredis.test_long_command.json +++ /dev/null @@ -1,37 +0,0 @@ -[[ - { - "name": "redis.command", - "service": "redis", - "resource": "MGET", - "trace_id": 0, - "span_id": 1, - "parent_id": 0, - "type": "redis", - "error": 0, - "meta": { - "_dd.base_service": "tests.contrib.yaaredis", - "_dd.p.dm": "-0", - "_dd.p.tid": "654a694400000000", - "component": "yaaredis", - "db.system": "redis", - "language": "python", - "out.host": "localhost", - "redis.raw_command": "MGET 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 
50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 36...", - "runtime-id": "4d0d479c17be4095b8e0dee5a0839596", - "server.address": "localhost", - "span.kind": "client" - }, - "metrics": { - "_dd.measured": 1, - "_dd.top_level": 1, - "_dd.tracer_kr": 1.0, - "_sampling_priority_v1": 1, - "db.row_count": 0, - "network.destination.port": 6379, - "out.redis_db": 0, - "process_id": 5119, - "redis.args_length": 1001 - }, - "duration": 4141542, - "start": 1692650065704941632 - }]] diff --git a/tests/snapshots/tests.contrib.yaaredis.test_yaaredis.test_pipeline_traced.json b/tests/snapshots/tests.contrib.yaaredis.test_yaaredis.test_pipeline_traced.json deleted file mode 100644 index 945d5fc508d..00000000000 --- a/tests/snapshots/tests.contrib.yaaredis.test_yaaredis.test_pipeline_traced.json +++ /dev/null @@ -1,36 +0,0 @@ -[[ - { - "name": "redis.command", - "service": "redis", - "resource": "SET\nRPUSH\nHGETALL", - "trace_id": 0, - "span_id": 1, - "parent_id": 0, - "type": "redis", - "error": 0, - "meta": { - "_dd.base_service": "tests.contrib.yaaredis", - "_dd.p.dm": "-0", - "_dd.p.tid": "654a694400000000", - "component": "yaaredis", - "db.system": "redis", - "language": "python", - "out.host": "localhost", - "redis.raw_command": "SET blah 32\nRPUSH foo \u00e9\u00e9\nHGETALL xxx", - "runtime-id": "4d0d479c17be4095b8e0dee5a0839596", - "server.address": "localhost", - "span.kind": "client" - }, - "metrics": { - "_dd.measured": 1, - "_dd.top_level": 1, - "_dd.tracer_kr": 1.0, - "_sampling_priority_v1": 1, - "network.destination.port": 6379, - "out.redis_db": 0, - "process_id": 5119, - "redis.pipeline_length": 3 - }, - "duration": 278208, - "start": 1692650065814935466 - }]] diff --git a/tests/snapshots/tests.contrib.yaaredis.test_yaaredis.test_schematization[service_schema0].json b/tests/snapshots/tests.contrib.yaaredis.test_yaaredis.test_schematization[service_schema0].json deleted file mode 100644 index d541510cbcd..00000000000 --- a/tests/snapshots/tests.contrib.yaaredis.test_yaaredis.test_schematization[service_schema0].json +++ /dev/null @@ -1,73 +0,0 @@ -[[ - { - "name": "redis.command", - "service": "redis", - "resource": "FLUSHALL", - "trace_id": 0, - "span_id": 1, - "parent_id": 0, - "type": "redis", - "error": 0, - "meta": { - "_dd.base_service": "ddtrace_subprocess_dir", - "_dd.p.dm": "-0", - "_dd.p.tid": "654a694400000000", - "component": 
"yaaredis", - "db.system": "redis", - "language": "python", - "out.host": "localhost", - "redis.raw_command": "FLUSHALL", - "runtime-id": "392b844b532747b7a2327f4996a26d26", - "server.address": "localhost", - "span.kind": "client" - }, - "metrics": { - "_dd.measured": 1, - "_dd.top_level": 1, - "_dd.tracer_kr": 1.0, - "_sampling_priority_v1": 1, - "network.destination.port": 6379, - "out.redis_db": 0, - "process_id": 5140, - "redis.args_length": 1 - }, - "duration": 17837625, - "start": 1692650066908323174 - }], -[ - { - "name": "redis.command", - "service": "redis", - "resource": "GET", - "trace_id": 1, - "span_id": 1, - "parent_id": 0, - "type": "redis", - "error": 0, - "meta": { - "_dd.base_service": "ddtrace_subprocess_dir", - "_dd.p.dm": "-0", - "_dd.p.tid": "654a694400000000", - "component": "yaaredis", - "db.system": "redis", - "language": "python", - "out.host": "localhost", - "redis.raw_command": "GET cheese", - "runtime-id": "392b844b532747b7a2327f4996a26d26", - "server.address": "localhost", - "span.kind": "client" - }, - "metrics": { - "_dd.measured": 1, - "_dd.top_level": 1, - "_dd.tracer_kr": 1.0, - "_sampling_priority_v1": 1, - "db.row_count": 0, - "network.destination.port": 6379, - "out.redis_db": 0, - "process_id": 5140, - "redis.args_length": 2 - }, - "duration": 308291, - "start": 1692650066953965133 - }]] diff --git a/tests/snapshots/tests.contrib.yaaredis.test_yaaredis.test_schematization[service_schema1].json b/tests/snapshots/tests.contrib.yaaredis.test_yaaredis.test_schematization[service_schema1].json deleted file mode 100644 index f0075f33b51..00000000000 --- a/tests/snapshots/tests.contrib.yaaredis.test_yaaredis.test_schematization[service_schema1].json +++ /dev/null @@ -1,73 +0,0 @@ -[[ - { - "name": "redis.command", - "service": "redis", - "resource": "FLUSHALL", - "trace_id": 0, - "span_id": 1, - "parent_id": 0, - "type": "redis", - "error": 0, - "meta": { - "_dd.base_service": "ddtrace_subprocess_dir", - "_dd.p.dm": "-0", - "_dd.p.tid": "654a694400000000", - "component": "yaaredis", - "db.system": "redis", - "language": "python", - "out.host": "localhost", - "redis.raw_command": "FLUSHALL", - "runtime-id": "f9e411adfc7a4c2cabc38392dd511dbf", - "server.address": "localhost", - "span.kind": "client" - }, - "metrics": { - "_dd.measured": 1, - "_dd.top_level": 1, - "_dd.tracer_kr": 1.0, - "_sampling_priority_v1": 1, - "network.destination.port": 6379, - "out.redis_db": 0, - "process_id": 5145, - "redis.args_length": 1 - }, - "duration": 13703875, - "start": 1692650068046894758 - }], -[ - { - "name": "redis.command", - "service": "redis", - "resource": "GET", - "trace_id": 1, - "span_id": 1, - "parent_id": 0, - "type": "redis", - "error": 0, - "meta": { - "_dd.base_service": "ddtrace_subprocess_dir", - "_dd.p.dm": "-0", - "_dd.p.tid": "654a694400000000", - "component": "yaaredis", - "db.system": "redis", - "language": "python", - "out.host": "localhost", - "redis.raw_command": "GET cheese", - "runtime-id": "f9e411adfc7a4c2cabc38392dd511dbf", - "server.address": "localhost", - "span.kind": "client" - }, - "metrics": { - "_dd.measured": 1, - "_dd.top_level": 1, - "_dd.tracer_kr": 1.0, - "_sampling_priority_v1": 1, - "db.row_count": 0, - "network.destination.port": 6379, - "out.redis_db": 0, - "process_id": 5145, - "redis.args_length": 2 - }, - "duration": 337542, - "start": 1692650068093153508 - }]] diff --git a/tests/snapshots/tests.contrib.yaaredis.test_yaaredis.test_schematization[service_schema2].json 
b/tests/snapshots/tests.contrib.yaaredis.test_yaaredis.test_schematization[service_schema2].json deleted file mode 100644 index f1067bca0f9..00000000000 --- a/tests/snapshots/tests.contrib.yaaredis.test_yaaredis.test_schematization[service_schema2].json +++ /dev/null @@ -1,75 +0,0 @@ -[[ - { - "name": "redis.command", - "service": "ddtrace_subprocess_dir", - "resource": "FLUSHALL", - "trace_id": 0, - "span_id": 1, - "parent_id": 0, - "type": "redis", - "error": 0, - "meta": { - "_dd.p.dm": "-0", - "_dd.p.tid": "654a694400000000", - "_dd.peer.service.source": "out.host", - "component": "yaaredis", - "db.system": "redis", - "language": "python", - "out.host": "localhost", - "peer.service": "localhost", - "redis.command": "FLUSHALL", - "runtime-id": "6512d363c3094051ae21a945fbfcf82b", - "server.address": "localhost", - "span.kind": "client" - }, - "metrics": { - "_dd.measured": 1, - "_dd.top_level": 1, - "_dd.tracer_kr": 1.0, - "_sampling_priority_v1": 1, - "network.destination.port": 6379, - "out.redis_db": 0, - "process_id": 14223, - "redis.args_length": 1 - }, - "duration": 4711000, - "start": 1685545835531199000 - }], -[ - { - "name": "redis.command", - "service": "ddtrace_subprocess_dir", - "resource": "GET", - "trace_id": 1, - "span_id": 1, - "parent_id": 0, - "type": "redis", - "error": 0, - "meta": { - "_dd.p.dm": "-0", - "_dd.p.tid": "654a694400000000", - "_dd.peer.service.source": "out.host", - "component": "yaaredis", - "db.system": "redis", - "language": "python", - "out.host": "localhost", - "peer.service": "localhost", - "redis.command": "GET cheese", - "runtime-id": "6512d363c3094051ae21a945fbfcf82b", - "server.address": "localhost", - "span.kind": "client" - }, - "metrics": { - "_dd.measured": 1, - "_dd.top_level": 1, - "_dd.tracer_kr": 1.0, - "_sampling_priority_v1": 1, - "db.row_count": 0, - "network.destination.port": 6379, - "out.redis_db": 0, - "process_id": 14223, - "redis.args_length": 2 - }, - "duration": 420000, - "start": 1685545835574013000 - }]] diff --git a/tests/snapshots/tests.contrib.yaaredis.test_yaaredis.test_schematization[service_schema4].json b/tests/snapshots/tests.contrib.yaaredis.test_yaaredis.test_schematization[service_schema4].json deleted file mode 100644 index e0a5fe04089..00000000000 --- a/tests/snapshots/tests.contrib.yaaredis.test_yaaredis.test_schematization[service_schema4].json +++ /dev/null @@ -1,73 +0,0 @@ -[[ - { - "name": "redis.command", - "service": "redis", - "resource": "FLUSHALL", - "trace_id": 0, - "span_id": 1, - "parent_id": 0, - "type": "redis", - "error": 0, - "meta": { - "_dd.base_service": "mysvc", - "_dd.p.dm": "-0", - "_dd.p.tid": "654a694400000000", - "component": "yaaredis", - "db.system": "redis", - "language": "python", - "out.host": "localhost", - "redis.raw_command": "FLUSHALL", - "runtime-id": "d8705f1ada1545908f6144a2d6b15900", - "server.address": "localhost", - "span.kind": "client" - }, - "metrics": { - "_dd.measured": 1, - "_dd.top_level": 1, - "_dd.tracer_kr": 1.0, - "_sampling_priority_v1": 1, - "network.destination.port": 6379, - "out.redis_db": 0, - "process_id": 5160, - "redis.args_length": 1 - }, - "duration": 10873959, - "start": 1692650071606791218 - }], -[ - { - "name": "redis.command", - "service": "redis", - "resource": "GET", - "trace_id": 1, - "span_id": 1, - "parent_id": 0, - "type": "redis", - "error": 0, - "meta": { - "_dd.base_service": "mysvc", - "_dd.p.dm": "-0", - "_dd.p.tid": "654a694400000000", - "component": "yaaredis", - "db.system": "redis", - "language": "python", - "out.host": 
"localhost", - "redis.raw_command": "GET cheese", - "runtime-id": "d8705f1ada1545908f6144a2d6b15900", - "server.address": "localhost", - "span.kind": "client" - }, - "metrics": { - "_dd.measured": 1, - "_dd.top_level": 1, - "_dd.tracer_kr": 1.0, - "_sampling_priority_v1": 1, - "db.row_count": 0, - "network.destination.port": 6379, - "out.redis_db": 0, - "process_id": 5160, - "redis.args_length": 2 - }, - "duration": 328375, - "start": 1692650071643705427 - }]] diff --git a/tests/snapshots/tests.contrib.yaaredis.test_yaaredis.test_schematization[service_schema5].json b/tests/snapshots/tests.contrib.yaaredis.test_yaaredis.test_schematization[service_schema5].json deleted file mode 100644 index 6be40ba43d8..00000000000 --- a/tests/snapshots/tests.contrib.yaaredis.test_yaaredis.test_schematization[service_schema5].json +++ /dev/null @@ -1,75 +0,0 @@ -[[ - { - "name": "redis.command", - "service": "mysvc", - "resource": "FLUSHALL", - "trace_id": 0, - "span_id": 1, - "parent_id": 0, - "type": "redis", - "error": 0, - "meta": { - "_dd.p.dm": "-0", - "_dd.p.tid": "654a694400000000", - "_dd.peer.service.source": "out.host", - "component": "yaaredis", - "db.system": "redis", - "language": "python", - "out.host": "localhost", - "peer.service": "localhost", - "redis.command": "FLUSHALL", - "runtime-id": "1993bf3ea65c4b658bffdda053387eca", - "server.address": "localhost", - "span.kind": "client" - }, - "metrics": { - "_dd.measured": 1, - "_dd.top_level": 1, - "_dd.tracer_kr": 1.0, - "_sampling_priority_v1": 1, - "network.destination.port": 6379, - "out.redis_db": 0, - "process_id": 14262, - "redis.args_length": 1 - }, - "duration": 7419000, - "start": 1685545839462874000 - }], -[ - { - "name": "redis.command", - "service": "mysvc", - "resource": "GET", - "trace_id": 1, - "span_id": 1, - "parent_id": 0, - "type": "redis", - "error": 0, - "meta": { - "_dd.p.dm": "-0", - "_dd.p.tid": "654a694400000000", - "_dd.peer.service.source": "out.host", - "component": "yaaredis", - "db.system": "redis", - "language": "python", - "out.host": "localhost", - "peer.service": "localhost", - "redis.command": "GET cheese", - "runtime-id": "1993bf3ea65c4b658bffdda053387eca", - "server.address": "localhost", - "span.kind": "client" - }, - "metrics": { - "_dd.measured": 1, - "_dd.top_level": 1, - "_dd.tracer_kr": 1.0, - "_sampling_priority_v1": 1, - "db.row_count": 0, - "network.destination.port": 6379, - "out.redis_db": 0, - "process_id": 14262, - "redis.args_length": 2 - }, - "duration": 326000, - "start": 1685545839502613000 - }]] diff --git a/tests/suitespec.yml b/tests/suitespec.yml index b135ba986c8..69c6b19e8d8 100644 --- a/tests/suitespec.yml +++ b/tests/suitespec.yml @@ -76,7 +76,7 @@ components: - ddtrace/__init__.py - ddtrace/py.typed - ddtrace/version.py - - ddtrace/settings/config.py + - ddtrace/settings/_config.py - src/native/* datastreams: - ddtrace/internal/datastreams/* @@ -116,17 +116,8 @@ components: - ddtrace/_trace/* - ddtrace/trace/* - ddtrace/constants.py - - ddtrace/context.py - - ddtrace/filters.py - - ddtrace/pin.py - - ddtrace/provider.py - - ddtrace/sampler.py - - ddtrace/sampling_rule.py - - ddtrace/span.py - - ddtrace/tracer.py - - ddtrace/tracing/* - ddtrace/settings/__init__.py - - ddtrace/settings/config.py + - ddtrace/settings/_config.py - ddtrace/settings/http.py - ddtrace/settings/exceptions.py - ddtrace/settings/integration.py diff --git a/tests/telemetry/test_writer.py b/tests/telemetry/test_writer.py index 39d672a1c01..48bc7d4b7ac 100644 --- a/tests/telemetry/test_writer.py +++ 
b/tests/telemetry/test_writer.py @@ -118,7 +118,6 @@ def test_app_started_event(telemetry_writer, test_agent_session, mock_time): {"name": "DD_SPAN_SAMPLING_RULES_FILE", "origin": "unknown", "value": None}, {"name": "DD_TRACE_128_BIT_TRACEID_GENERATION_ENABLED", "origin": "unknown", "value": True}, {"name": "DD_TRACE_AGENT_TIMEOUT_SECONDS", "origin": "unknown", "value": 2.0}, - {"name": "DD_TRACE_ANALYTICS_ENABLED", "origin": "unknown", "value": False}, {"name": "DD_TRACE_API_VERSION", "origin": "unknown", "value": None}, {"name": "DD_TRACE_CLIENT_IP_ENABLED", "origin": "unknown", "value": None}, {"name": "DD_TRACE_COMPUTE_STATS", "origin": "unknown", "value": False}, @@ -225,7 +224,6 @@ def test_app_started_event_configuration_override(test_agent_session, run_python env["DD_RUNTIME_METRICS_ENABLED"] = "True" env["DD_SERVICE_MAPPING"] = "default_dd_service:remapped_dd_service" env["DD_TRACE_128_BIT_TRACEID_GENERATION_ENABLED"] = "True" - env["DD_TRACE_ANALYTICS_ENABLED"] = "True" env["DD_TRACE_CLIENT_IP_ENABLED"] = "True" env["DD_TRACE_COMPUTE_STATS"] = "True" env["DD_TRACE_DEBUG"] = "True" @@ -237,7 +235,6 @@ def test_app_started_event_configuration_override(test_agent_session, run_python env["DD_TRACE_PROPAGATION_STYLE_INJECT"] = "tracecontext" env["DD_REMOTE_CONFIGURATION_ENABLED"] = "True" env["DD_REMOTE_CONFIG_POLL_INTERVAL_SECONDS"] = "1" - env["DD_TRACE_SAMPLE_RATE"] = "0.5" env["DD_TRACE_RATE_LIMIT"] = "50" env["DD_TRACE_SAMPLING_RULES"] = '[{"sample_rate":1.0,"service":"xyz","name":"abc"}]' env["DD_PROFILING_ENABLED"] = "True" @@ -356,7 +353,6 @@ def test_app_started_event_configuration_override(test_agent_session, run_python {"name": "DD_EXCEPTION_REPLAY_CAPTURE_MAX_FRAMES", "origin": "default", "value": 8}, {"name": "DD_EXCEPTION_REPLAY_ENABLED", "origin": "env_var", "value": True}, {"name": "DD_EXPERIMENTAL_APPSEC_STANDALONE_ENABLED", "origin": "default", "value": False}, - {"name": "DD_HTTP_CLIENT_TAG_QUERY_STRING", "origin": "default", "value": None}, {"name": "DD_IAST_DEDUPLICATION_ENABLED", "origin": "default", "value": True}, {"name": "DD_IAST_ENABLED", "origin": "default", "value": False}, {"name": "DD_IAST_MAX_CONCURRENT_REQUESTS", "origin": "default", "value": 2}, @@ -424,7 +420,7 @@ def test_app_started_event_configuration_override(test_agent_session, run_python "value": str(file), }, {"name": "DD_SYMBOL_DATABASE_INCLUDES", "origin": "default", "value": "set()"}, - {"name": "DD_SYMBOL_DATABASE_UPLOAD_ENABLED", "origin": "default", "value": False}, + {"name": "DD_SYMBOL_DATABASE_UPLOAD_ENABLED", "origin": "default", "value": True}, {"name": "DD_TAGS", "origin": "env_var", "value": "team:apm,component:web"}, {"name": "DD_TELEMETRY_DEPENDENCY_COLLECTION_ENABLED", "origin": "default", "value": True}, {"name": "DD_TELEMETRY_HEARTBEAT_INTERVAL", "origin": "default", "value": 60}, @@ -433,7 +429,6 @@ def test_app_started_event_configuration_override(test_agent_session, run_python {"name": "DD_TRACE_128_BIT_TRACEID_GENERATION_ENABLED", "origin": "env_var", "value": True}, {"name": "DD_TRACE_128_BIT_TRACEID_LOGGING_ENABLED", "origin": "default", "value": False}, {"name": "DD_TRACE_AGENT_TIMEOUT_SECONDS", "origin": "default", "value": 2.0}, - {"name": "DD_TRACE_ANALYTICS_ENABLED", "origin": "env_var", "value": True}, {"name": "DD_TRACE_API_VERSION", "origin": "env_var", "value": "v0.5"}, {"name": "DD_TRACE_CLIENT_IP_ENABLED", "origin": "env_var", "value": True}, {"name": "DD_TRACE_CLIENT_IP_HEADER", "origin": "default", "value": None}, @@ -456,13 +451,11 @@ def 
test_app_started_event_configuration_override(test_agent_session, run_python {"name": "DD_TRACE_PROPAGATION_STYLE_INJECT", "origin": "env_var", "value": "tracecontext"}, {"name": "DD_TRACE_RATE_LIMIT", "origin": "env_var", "value": 50}, {"name": "DD_TRACE_REPORT_HOSTNAME", "origin": "default", "value": False}, - {"name": "DD_TRACE_SAMPLE_RATE", "origin": "env_var", "value": 0.5}, { "name": "DD_TRACE_SAMPLING_RULES", "origin": "env_var", "value": '[{"sample_rate":1.0,"service":"xyz","name":"abc"}]', }, - {"name": "DD_TRACE_SPAN_AGGREGATOR_RLOCK", "origin": "default", "value": True}, {"name": "DD_TRACE_SPAN_TRACEBACK_MAX_SIZE", "origin": "default", "value": 30}, {"name": "DD_TRACE_STARTUP_LOGS", "origin": "env_var", "value": True}, {"name": "DD_TRACE_WRITER_BUFFER_SIZE_BYTES", "origin": "env_var", "value": 1000}, @@ -483,6 +476,7 @@ def test_app_started_event_configuration_override(test_agent_session, run_python {"name": "python_build_gnu_type", "origin": "unknown", "value": sysconfig.get_config_var("BUILD_GNU_TYPE")}, {"name": "python_host_gnu_type", "origin": "unknown", "value": sysconfig.get_config_var("HOST_GNU_TYPE")}, {"name": "python_soabi", "origin": "unknown", "value": sysconfig.get_config_var("SOABI")}, + {"name": "trace_sample_rate", "origin": "default", "value": 1.0}, ] assert configurations == expected, configurations diff --git a/tests/tracer/test_correlation_log_context.py b/tests/tracer/test_correlation_log_context.py index b7200b8b38f..e2e5dda6b37 100644 --- a/tests/tracer/test_correlation_log_context.py +++ b/tests/tracer/test_correlation_log_context.py @@ -1,8 +1,8 @@ import pytest from ddtrace import config -from ddtrace.trace import Tracer from ddtrace.trace import tracer +from tests.utils import DummyTracer def global_config(config): @@ -10,7 +10,7 @@ def global_config(config): config.env = "test-env" config.version = "test-version" global tracer - tracer = Tracer() + tracer = DummyTracer() yield config.service = config.env = config.version = None @@ -33,9 +33,9 @@ def format_trace_id(span): @pytest.mark.subprocess() def test_get_log_correlation_service(): """Ensure expected DDLogRecord service is generated via get_correlation_log_record.""" - from ddtrace.trace import Tracer from ddtrace.trace import tracer from tests.tracer.test_correlation_log_context import format_trace_id + from tests.utils import DummyTracer from tests.utils import override_global_config with override_global_config(dict(service="test-service", env="test-env", version="test-version")): @@ -49,7 +49,7 @@ def test_get_log_correlation_service(): "version": "test-version", } - test_tracer = Tracer() + test_tracer = DummyTracer() with test_tracer.trace("test-span-2", service="span-service") as span2: dd_log_record = test_tracer.get_log_correlation_context() assert dd_log_record == { @@ -65,12 +65,12 @@ def test_get_log_correlation_service(): def test_get_log_correlation_context_basic(): """Ensure expected DDLogRecord is generated via get_correlation_log_record.""" from ddtrace.trace import Context - from ddtrace.trace import Tracer from tests.tracer.test_correlation_log_context import format_trace_id + from tests.utils import DummyTracer from tests.utils import override_global_config with override_global_config(dict(service="test-service", env="test-env", version="test-version")): - tracer = Tracer() + tracer = DummyTracer() with tracer.trace("test-span-1") as span1: dd_log_record = tracer.get_log_correlation_context() assert dd_log_record == { @@ -80,7 +80,7 @@ def 
test_get_log_correlation_context_basic(): "env": "test-env", "version": "test-version", }, dd_log_record - test_tracer = Tracer() + test_tracer = DummyTracer() with test_tracer.trace("test-span-2") as span2: dd_log_record = test_tracer.get_log_correlation_context() assert dd_log_record == { @@ -130,9 +130,9 @@ def test_get_log_correlation_context_opentracer(): @pytest.mark.subprocess() def test_get_log_correlation_context_no_active_span(): """Ensure empty DDLogRecord generated if no active span.""" - from ddtrace.trace import Tracer + from tests.utils import DummyTracer - tracer = Tracer() + tracer = DummyTracer() dd_log_record = tracer.get_log_correlation_context() assert dd_log_record == { "span_id": "0", @@ -146,9 +146,8 @@ def test_get_log_correlation_context_no_active_span(): @pytest.mark.subprocess() def test_get_log_correlation_context_disabled_tracer(): """Ensure get_correlation_log_record returns None if tracer is disabled.""" - from ddtrace.trace import Tracer + from ddtrace.trace import tracer - tracer = Tracer() tracer.enabled = False with tracer.trace("test-span"): dd_log_record = tracer.get_log_correlation_context() diff --git a/tests/tracer/test_encoders.py b/tests/tracer/test_encoders.py index 7006bc6b95d..4fe48a2a838 100644 --- a/tests/tracer/test_encoders.py +++ b/tests/tracer/test_encoders.py @@ -869,19 +869,3 @@ def test_json_encoder_traces_bytes(): assert "\\x80span.a" == span_a["name"] assert "\x80span.b" == span_b["name"] assert "\x80span.b" == span_c["name"] - - -@pytest.mark.subprocess(env={"DD_TRACE_API_VERSION": "v0.3"}) -def test_v03_trace_api_deprecation(): - import warnings - - with warnings.catch_warnings(record=True) as warns: - warnings.simplefilter("always") - from ddtrace.trace import tracer - - assert tracer._writer._api_version == "v0.4" - assert len(warns) == 1, warns - assert ( - warns[0].message.args[0] == "DD_TRACE_API_VERSION=v0.3 is deprecated and will be " - "removed in version '3.0.0': Traces will be submitted to the v0.4/traces agent endpoint instead." 
- ), warns[0].message diff --git a/tests/tracer/test_gitmetadata.py b/tests/tracer/test_gitmetadata.py index cb03d59f7e2..d6c35a2de0c 100644 --- a/tests/tracer/test_gitmetadata.py +++ b/tests/tracer/test_gitmetadata.py @@ -8,10 +8,9 @@ import pytest -import ddtrace from ddtrace.internal import gitmetadata from tests.subprocesstest import run_in_subprocess -from tests.utils import DummyWriter +from tests.utils import DummyTracer from tests.utils import TracerTestCase @@ -44,8 +43,8 @@ class GitMetadataTestCase(TracerTestCase): ) ) def test_gitmetadata_from_package(self): - tracer = ddtrace.trace.Tracer() - tracer._configure(writer=DummyWriter()) + tracer = DummyTracer() + with tracer.trace("span") as s: pass @@ -59,8 +58,8 @@ def test_gitmetadata_from_package(self): ) ) def test_gitmetadata_from_DD_TAGS(self): - tracer = ddtrace.trace.Tracer() - tracer._configure(writer=DummyWriter()) + tracer = DummyTracer() + with tracer.trace("span") as s: pass @@ -80,8 +79,8 @@ def test_gitmetadata_from_DD_TAGS(self): ) ) def test_gitmetadata_from_ENV(self): - tracer = ddtrace.trace.Tracer() - tracer._configure(writer=DummyWriter()) + tracer = DummyTracer() + with tracer.trace("span") as s: pass @@ -104,8 +103,8 @@ def test_gitmetadata_from_ENV(self): ) ) def test_gitmetadata_disabled(self): - tracer = ddtrace.trace.Tracer() - tracer._configure(writer=DummyWriter()) + tracer = DummyTracer() + with tracer.trace("span") as s: pass @@ -123,8 +122,8 @@ def test_gitmetadata_disabled(self): ) ) def test_gitmetadata_package_without_metadata(self): - tracer = ddtrace.trace.Tracer() - tracer._configure(writer=DummyWriter()) + tracer = DummyTracer() + with tracer.trace("span") as s: pass @@ -143,8 +142,8 @@ def test_gitmetadata_package_without_metadata(self): ) ) def test_gitmetadata_from_env_filtering_https(self): - tracer = ddtrace.trace.Tracer() - tracer._configure(writer=DummyWriter()) + tracer = DummyTracer() + with tracer.trace("span") as s: pass @@ -163,8 +162,8 @@ def test_gitmetadata_from_env_filtering_https(self): ) ) def test_gitmetadata_from_ddtags_filtering_https(self): - tracer = ddtrace.trace.Tracer() - tracer._configure(writer=DummyWriter()) + tracer = DummyTracer() + with tracer.trace("span") as s: pass @@ -184,8 +183,8 @@ def test_gitmetadata_from_ddtags_filtering_https(self): ) ) def test_gitmetadata_from_env_filtering_ssh(self): - tracer = ddtrace.trace.Tracer() - tracer._configure(writer=DummyWriter()) + tracer = DummyTracer() + with tracer.trace("span") as s: pass @@ -204,8 +203,8 @@ def test_gitmetadata_from_env_filtering_ssh(self): ) ) def test_gitmetadata_from_ddtags_filtering_ssh(self): - tracer = ddtrace.trace.Tracer() - tracer._configure(writer=DummyWriter()) + tracer = DummyTracer() + with tracer.trace("span") as s: pass diff --git a/tests/tracer/test_logger.py b/tests/tracer/test_logger.py index 00edeeb18b2..55a712d1ac0 100644 --- a/tests/tracer/test_logger.py +++ b/tests/tracer/test_logger.py @@ -3,7 +3,8 @@ import mock import pytest -from ddtrace.internal.logger import DDLogger +import ddtrace.internal.logger +from ddtrace.internal.logger import LoggingBucket from ddtrace.internal.logger import get_logger from tests.utils import BaseTestCase @@ -11,22 +12,28 @@ ALL_LEVEL_NAMES = ("debug", "info", "warning", "error", "exception", "critical", "fatal") -class DDLoggerTestCase(BaseTestCase): +class LoggerTestCase(BaseTestCase): def setUp(self): - super(DDLoggerTestCase, self).setUp() + super(LoggerTestCase, self).setUp() - self.root = logging.root - self.manager = self.root.manager + 
self.manager = logging.root.manager + + # Reset to default values + ddtrace.internal.logger._buckets.clear() + ddtrace.internal.logger._rate_limit = 60 def tearDown(self): # Weeee, forget all existing loggers logging.Logger.manager.loggerDict.clear() self.assertEqual(logging.Logger.manager.loggerDict, dict()) - self.root = None self.manager = None - super(DDLoggerTestCase, self).tearDown() + # Reset to default values + ddtrace.internal.logger._buckets.clear() + ddtrace.internal.logger._rate_limit = 60 + + super(LoggerTestCase, self).tearDown() def _make_record( self, @@ -42,42 +49,31 @@ def _make_record( ): return logger.makeRecord(logger.name, level, fn, lno, msg, args, exc_info, func, extra) - @mock.patch("ddtrace.internal.logger.DDLogger.handle") - def assert_log_records(self, log, expected_levels, handle): - for name in ALL_LEVEL_NAMES: - method = getattr(log, name) - method("test") - - records = [args[0][0] for args in handle.call_args_list] - for record in records: - self.assertIsInstance(record, logging.LogRecord) - self.assertTrue("test.logger" in record.name or "ddtrace" in record.name) - - levels = [r.levelname for r in records] - self.assertEqual(levels, expected_levels) - def test_get_logger(self): """ When using `get_logger` to get a logger When the logger does not exist - We create a new DDLogger + We create a new logging.Logger When the logger exists We return the expected logger When a different logger is requested - We return a new DDLogger + We return a new logging.Logger When a Placeholder exists - We return DDLogger + We return logging.Logger """ + assert self.manager is not None + # Assert the logger doesn't already exist self.assertNotIn("test.logger", self.manager.loggerDict) # Fetch a new logger log = get_logger("test.logger") + assert ddtrace.internal.logger.log_filter in log.filters self.assertEqual(log.name, "test.logger") self.assertEqual(log.level, logging.NOTSET) - # Ensure it is a DDLogger - self.assertIsInstance(log, DDLogger) + # Ensure it is a logging.Logger + self.assertIsInstance(log, logging.Logger) # Make sure it is stored in all the places we expect self.assertEqual(self.manager.getLogger("test.logger"), log) self.assertEqual(self.manager.loggerDict["test.logger"], log) @@ -93,163 +89,49 @@ def test_get_logger(self): self.assertNotEqual(log, new_log) # If a PlaceHolder is in place of the logger - # We should return the DDLogger + # We should return the logging.Logger self.assertIsInstance(self.manager.loggerDict["new.test"], logging.PlaceHolder) log = get_logger("new.test") self.assertEqual(log.name, "new.test") - self.assertIsInstance(log, DDLogger) - - def test_get_logger_children(self): - """ - When using `get_logger` to get a logger - We appropriately assign children loggers - - DEV: This test case is to ensure we are calling `manager._fixupChildren(logger)` - """ - root = get_logger("test") - root.setLevel(logging.WARNING) - - child_logger = get_logger("test.newplaceholder.long.component") - self.assertEqual(child_logger.parent, root) - - parent_logger = get_logger("test.newplaceholder") - self.assertEqual(child_logger.parent, parent_logger) - - parent_logger.setLevel(logging.INFO) - # Because the child logger's level remains unset, it should inherit - # the level of its closest parent, which is INFO. - # If we did not properly maintain the logger tree, this would fail - # because child_logger would be set to the default when it was created - # which was logging.WARNING. 
-        self.assertEqual(child_logger.getEffectiveLevel(), logging.INFO)
-
-        # Clean up for future tests.
-        root.setLevel(logging.NOTSET)
-
-    def test_get_logger_parents(self):
-        """
-        When using `get_logger` to get a logger
-        We appropriately assign parent loggers
-
-        DEV: This test case is to ensure we are calling `manager._fixupParents(logger)`
-        """
-        # Fetch a new logger
-        test_log = get_logger("test")
-        self.assertEqual(test_log.parent, self.root)
-
-        # Fetch a new child log
-        # Auto-associate with parent `test` logger
-        child_log = get_logger("test.child")
-        self.assertEqual(child_log.parent, test_log)
-
-        # Deep child
-        deep_log = get_logger("test.child.logger.from.test.case")
-        self.assertEqual(deep_log.parent, child_log)
-
-    def test_logger_init(self):
-        """
-        When creating a new DDLogger
-        Has the same interface as logging.Logger
-        Configures a defaultdict for buckets
-        Properly configures the rate limit
-        """
-        # Create a logger
-        log = DDLogger("test.logger")
-
-        # Ensure we set the name and use default log level
-        self.assertEqual(log.name, "test.logger")
-        self.assertEqual(log.level, logging.NOTSET)
-
-        # Assert DDLogger default properties
-        self.assertIsInstance(log.buckets, dict)
-        self.assertEqual(log.rate_limit, 60)
-
-        # Assert manager and parent
-        # DEV: Parent is `None` because `manager._findParents()` doesn't get called
-        # unless we use `get_logger` (this is the same behavior as `logging.getLogger` and `Logger('name')`)
-        self.assertEqual(log.manager, self.manager)
-        self.assertIsNone(log.parent)
-
-        # Override rate limit from environment variable
-        with self.override_env(dict(DD_TRACE_LOGGING_RATE="10")):
-            log = DDLogger("test.logger")
-            self.assertEqual(log.rate_limit, 10)
-
-        # Set specific log level
-        log = DDLogger("test.logger", level=logging.DEBUG)
-        self.assertEqual(log.level, logging.DEBUG)
-
-    def test_logger_log(self):
-        """
-        When calling `DDLogger` log methods
-        We call `DDLogger.handle` with the expected log record
-        """
-        log = get_logger("test.logger")
-
-        # -- NOTSET
-        # By default no level is set so we only get warn, error, and critical messages
-        self.assertEqual(log.level, logging.NOTSET)
-        # `log.warning`, `log.error`, `log.exception`, `log.critical`, `log.fatal`
-        self.assert_log_records(log, ["WARNING", "ERROR", "ERROR", "CRITICAL", "CRITICAL"])
-
-        # -- CRITICAL
-        log.setLevel(logging.CRITICAL)
-        # `log.critical`, `log.fatal`
-        self.assert_log_records(log, ["CRITICAL", "CRITICAL"])
-
-        # -- ERROR
-        log.setLevel(logging.ERROR)
-        # `log.error`, `log.exception`, `log.critical`, `log.fatal`
-        self.assert_log_records(log, ["ERROR", "ERROR", "CRITICAL", "CRITICAL"])
-
-        # -- WARN
-        log.setLevel(logging.WARN)
-        # `log.warning`, `log.error`, `log.exception`, `log.critical`, `log.fatal`
-        self.assert_log_records(log, ["WARNING", "ERROR", "ERROR", "CRITICAL", "CRITICAL"])
+        self.assertIsInstance(log, logging.Logger)

-        # -- INFO
-        log.setLevel(logging.INFO)
-        # `log.info`, `log.warning`, `log.error`, `log.exception`, `log.critical`, `log.fatal`
-        self.assert_log_records(log, ["INFO", "WARNING", "ERROR", "ERROR", "CRITICAL", "CRITICAL"])
-
-        # -- DEBUG
-        log.setLevel(logging.DEBUG)
-        # `log.debug`, `log.info`, `log.warning`, `log.error`, `log.exception`, `log.critical`, `log.fatal`
-        self.assert_log_records(log, ["DEBUG", "INFO", "WARNING", "ERROR", "ERROR", "CRITICAL", "CRITICAL"])
-
-    @mock.patch("logging.Logger.handle")
-    def test_logger_handle_no_limit(self, base_handle):
+    @mock.patch("logging.Logger.callHandlers")
+    def test_logger_handle_no_limit(self, call_handlers):
         """
-        Calling `DDLogger.handle`
+        Calling `logging.Logger.handle`
         When no rate limit is set
         Always calls the base `Logger.handle`
         """
         # Configure an INFO logger with no rate limit
         log = get_logger("test.logger")
         log.setLevel(logging.INFO)
-        log.rate_limit = 0
+        ddtrace.internal.logger._rate_limit = 0

         # Log a bunch of times very quickly (this is fast)
         for _ in range(1000):
             log.info("test")

         # Assert that we did not perform any rate limiting
-        self.assertEqual(base_handle.call_count, 1000)
+        self.assertEqual(call_handlers.call_count, 1000)

         # Our buckets are empty
-        self.assertEqual(log.buckets, dict())
+        self.assertEqual(ddtrace.internal.logger._buckets, dict())

-    @mock.patch("logging.Logger.handle")
-    def test_logger_handle_debug(self, base_handle):
+    @mock.patch("logging.Logger.callHandlers")
+    def test_logger_handle_debug(self, call_handlers):
         """
-        Calling `DDLogger.handle`
+        Calling `logging.Logger.handle`
         When effective level is DEBUG
         Always calls the base `Logger.handle`
         """
+        # Our buckets are empty
+        self.assertEqual(ddtrace.internal.logger._buckets, dict())
+
         # Configure a DEBUG logger with the default rate limit
         log = get_logger("test.logger")
         log.setLevel(logging.DEBUG)
-        assert log.rate_limit > 0
+        assert log.getEffectiveLevel() == logging.DEBUG
+        assert ddtrace.internal.logger._rate_limit > 0

         # Log a bunch of times very quickly (this is fast)
         for level in ALL_LEVEL_NAMES:
@@ -259,15 +141,15 @@ def test_logger_handle_debug(self, base_handle):

         # Assert that we did not perform any rate limiting
         total = 1000 * len(ALL_LEVEL_NAMES)
-        self.assertTrue(total <= base_handle.call_count <= total + 1)
+        self.assertTrue(total <= call_handlers.call_count <= total + 1)

         # Our buckets are empty
-        self.assertEqual(log.buckets, dict())
+        self.assertEqual(ddtrace.internal.logger._buckets, dict())

-    @mock.patch("logging.Logger.handle")
-    def test_logger_handle_bucket(self, base_handle):
+    @mock.patch("logging.Logger.callHandlers")
+    def test_logger_handle_bucket(self, call_handlers):
         """
-        When calling `DDLogger.handle`
+        When calling `logging.Logger.handle`
         With a record
         We pass it to the base `Logger.handle`
         We create a bucket for tracking
@@ -279,22 +161,22 @@ def test_logger_handle_bucket(self, base_handle):
         log.handle(record)

         # We passed to base Logger.handle
-        base_handle.assert_called_once_with(record)
+        call_handlers.assert_called_once_with(record)

         # We added a bucket entry for this record
         key = (record.name, record.levelno, record.pathname, record.lineno)
-        logging_bucket = log.buckets.get(key)
-        self.assertIsInstance(logging_bucket, DDLogger.LoggingBucket)
+        logging_bucket = ddtrace.internal.logger._buckets.get(key)
+        self.assertIsInstance(logging_bucket, LoggingBucket)

         # The bucket entry is correct
-        expected_bucket = int(record.created / log.rate_limit)
+        expected_bucket = int(record.created / ddtrace.internal.logger._rate_limit)
         self.assertEqual(logging_bucket.bucket, expected_bucket)
         self.assertEqual(logging_bucket.skipped, 0)

-    @mock.patch("logging.Logger.handle")
-    def test_logger_handle_bucket_limited(self, base_handle):
+    @mock.patch("logging.Logger.callHandlers")
+    def test_logger_handle_bucket_limited(self, call_handlers):
         """
-        When calling `DDLogger.handle`
+        When calling `logging.Logger.handle`
         With multiple records in a single time frame
         We pass only the first to the base `Logger.handle`
         We keep track of the number skipped
@@ -302,7 +184,8 @@ def test_logger_handle_bucket_limited(self, base_handle):
         log = get_logger("test.logger")

         # Create log record and handle it
-        first_record = self._make_record(log, msg="first")
+        record = self._make_record(log, msg="first")
+        first_record = record
         log.handle(first_record)

         for _ in range(100):
@@ -312,21 +195,22 @@ def test_logger_handle_bucket_limited(self, base_handle):
             log.handle(record)

         # We passed to base Logger.handle
-        base_handle.assert_called_once_with(first_record)
+        call_handlers.assert_called_once_with(first_record)

         # We added a bucket entry for these records
         key = (record.name, record.levelno, record.pathname, record.lineno)
-        logging_bucket = log.buckets.get(key)
+        logging_bucket = ddtrace.internal.logger._buckets.get(key)
+        assert logging_bucket is not None

         # The bucket entry is correct
-        expected_bucket = int(first_record.created / log.rate_limit)
+        expected_bucket = int(first_record.created / ddtrace.internal.logger._rate_limit)
         self.assertEqual(logging_bucket.bucket, expected_bucket)
         self.assertEqual(logging_bucket.skipped, 100)

-    @mock.patch("logging.Logger.handle")
-    def test_logger_handle_bucket_skipped_msg(self, base_handle):
+    @mock.patch("logging.Logger.callHandlers")
+    def test_logger_handle_bucket_skipped_msg(self, call_handlers):
         """
-        When calling `DDLogger.handle`
+        When calling `logging.Logger.handle`
         When a bucket exists for a previous time frame
         We pass only the record to the base `Logger.handle`
         We update the record message to include the number of skipped messages
@@ -340,15 +224,15 @@ def test_logger_handle_bucket_skipped_msg(self, base_handle):

         # Create a bucket entry for this record
         key = (record.name, record.levelno, record.pathname, record.lineno)
-        bucket = int(record.created / log.rate_limit)
+        bucket = int(record.created / ddtrace.internal.logger._rate_limit)
         # We want the time bucket to be for an older bucket
-        log.buckets[key] = DDLogger.LoggingBucket(bucket=bucket - 1, skipped=20)
+        ddtrace.internal.logger._buckets[key] = LoggingBucket(bucket=bucket - 1, skipped=20)

         # Handle our record
         log.handle(record)

         # We passed to base Logger.handle
-        base_handle.assert_called_once_with(record)
+        call_handlers.assert_called_once_with(record)
         self.assertEqual(record.msg, original_msg + ", %s additional messages skipped")
         self.assertEqual(record.args, original_args + (20,))

@@ -356,7 +240,7 @@
     def test_logger_handle_bucket_key(self):
         """
-        When calling `DDLogger.handle`
+        When calling `logging.Logger.handle`
         With different log messages
         We use different buckets to limit them
         """
@@ -388,7 +272,7 @@ def get_key(record):
         all_records = (record1, record2, record3, record4, record5, record6)
         [log.handle(record) for record in all_records]

-        buckets = log.buckets
+        buckets = ddtrace.internal.logger._buckets
         # We have 6 records but only end up with 5 buckets
         self.assertEqual(len(buckets), 5)
diff --git a/tests/tracer/test_memory_leak.py b/tests/tracer/test_memory_leak.py
index 7fdcd7589f6..b13cc0ec4ee 100644
--- a/tests/tracer/test_memory_leak.py
+++ b/tests/tracer/test_memory_leak.py
@@ -1,28 +1,21 @@
 """
 Variety of test cases ensuring that ddtrace does not leak memory.
""" - -import gc -from threading import Thread -from typing import TYPE_CHECKING from weakref import WeakValueDictionary import pytest +from ddtrace.trace import Span from ddtrace.trace import Tracer - - -if TYPE_CHECKING: # pragma: no cover - from ddtrace.trace import Span # noqa:F401 +from tests.utils import DummyTracer @pytest.fixture -def tracer() -> Tracer: - return Tracer() +def tracer() -> DummyTracer: + return DummyTracer() -def trace(weakdict: WeakValueDictionary, tracer: Tracer, *args, **kwargs): - # type: (...) -> Span +def trace(weakdict: WeakValueDictionary, tracer: Tracer, *args, **kwargs) -> Span: """Return a span created from ``tracer`` and add it to the given weak dictionary. @@ -34,7 +27,14 @@ def trace(weakdict: WeakValueDictionary, tracer: Tracer, *args, **kwargs): return s -def test_leak(tracer): +@pytest.mark.subprocess +def test_leak(): + import gc + from weakref import WeakValueDictionary + + from ddtrace.trace import tracer + from tests.tracer.test_memory_leak import trace + wd = WeakValueDictionary() with trace(wd, tracer, "span1") as span: with trace(wd, tracer, "span2") as span2: @@ -44,15 +44,23 @@ def test_leak(tracer): # The spans are still open and referenced so they should not be gc'd gc.collect() assert len(wd) == 2 + tracer.flush() del span, span2 gc.collect() assert len(wd) == 0 -def test_single_thread_single_trace(tracer): +@pytest.mark.subprocess +def test_single_thread_single_trace(): """ Ensure a simple trace doesn't leak span objects. """ + import gc + from weakref import WeakValueDictionary + + from ddtrace.trace import tracer + from tests.tracer.test_memory_leak import trace + wd = WeakValueDictionary() with trace(wd, tracer, "span1"): with trace(wd, tracer, "span2"): @@ -64,10 +72,17 @@ def test_single_thread_single_trace(tracer): assert len(wd) == 0 -def test_single_thread_multi_trace(tracer): +@pytest.mark.subprocess +def test_single_thread_multi_trace(): """ Ensure a trace in a thread is properly garbage collected. """ + import gc + from weakref import WeakValueDictionary + + from ddtrace.trace import tracer + from tests.tracer.test_memory_leak import trace + wd = WeakValueDictionary() for _ in range(1000): with trace(wd, tracer, "span1"): @@ -75,17 +90,25 @@ def test_single_thread_multi_trace(tracer): pass with trace(wd, tracer, "span3"): pass - + tracer.flush() # Once these references are deleted then the spans should no longer be # referenced by anything and should be gc'd. gc.collect() assert len(wd) == 0 -def test_multithread_trace(tracer): +@pytest.mark.subprocess +def test_multithread_trace(): """ Ensure a trace that crosses thread boundaries is properly garbage collected. 
""" + import gc + from threading import Thread + from weakref import WeakValueDictionary + + from ddtrace.trace import tracer + from tests.tracer.test_memory_leak import trace + wd = WeakValueDictionary() state = [] @@ -102,6 +125,7 @@ def _target(ctx): # Ensure thread finished successfully assert state == [1] + tracer.flush() del span gc.collect() assert len(wd) == 0 diff --git a/tests/tracer/test_processors.py b/tests/tracer/test_processors.py index ff19453555b..8c9955f9ffd 100644 --- a/tests/tracer/test_processors.py +++ b/tests/tracer/test_processors.py @@ -26,7 +26,6 @@ from ddtrace.internal.telemetry.constants import TELEMETRY_NAMESPACE from ddtrace.trace import Context from ddtrace.trace import Span -from ddtrace.trace import Tracer from tests.utils import DummyTracer from tests.utils import DummyWriter from tests.utils import override_global_config @@ -244,7 +243,7 @@ def test_aggregator_partial_flush_2_spans(): def test_trace_top_level_span_processor_partial_flushing(): """Parent span and child span have the same service name""" - tracer = Tracer() + tracer = DummyTracer() tracer._configure( partial_flush_enabled=True, partial_flush_min_spans=2, @@ -271,8 +270,7 @@ def test_trace_top_level_span_processor_partial_flushing(): def test_trace_top_level_span_processor_same_service_name(): """Parent span and child span have the same service name""" - tracer = Tracer() - tracer._configure(writer=DummyWriter()) + tracer = DummyTracer() with tracer.trace("parent", service="top_level_test") as parent: with tracer.trace("child") as child: @@ -285,8 +283,7 @@ def test_trace_top_level_span_processor_same_service_name(): def test_trace_top_level_span_processor_different_service_name(): """Parent span and child span have the different service names""" - tracer = Tracer() - tracer._configure(writer=DummyWriter()) + tracer = DummyTracer() with tracer.trace("parent", service="top_level_test_service") as parent: with tracer.trace("child", service="top_level_test_service2") as child: @@ -299,8 +296,7 @@ def test_trace_top_level_span_processor_different_service_name(): def test_trace_top_level_span_processor_orphan_span(): """Trace chuck does not contain parent span""" - tracer = Tracer() - tracer._configure(writer=DummyWriter()) + tracer = DummyTracer() with tracer.trace("parent") as parent: pass @@ -388,7 +384,7 @@ def test_span_creation_metrics(): def test_changing_tracer_sampler_changes_tracesamplingprocessor_sampler(): """Changing the tracer sampler should change the sampling processor's sampler""" - tracer = Tracer() + tracer = DummyTracer() # get processor for aggregator in tracer._deferred_processors: if type(aggregator) is SpanAggregator: @@ -632,9 +628,8 @@ def test_endpoint_call_counter_processor_disabled(): def test_endpoint_call_counter_processor_real_tracer(): - tracer = Tracer() + tracer = DummyTracer() tracer._endpoint_call_counter_span_processor.enable() - tracer._configure(writer=DummyWriter()) with tracer.trace("parent", service="top_level_test_service", resource="a", span_type=SpanTypes.WEB): with tracer.trace("child", service="top_level_test_service2"): @@ -656,8 +651,7 @@ def test_endpoint_call_counter_processor_real_tracer(): def test_trace_tag_processor_adds_chunk_root_tags(): - tracer = Tracer() - tracer._configure(writer=DummyWriter()) + tracer = DummyTracer() with tracer.trace("parent") as parent: with tracer.trace("child") as child: @@ -679,7 +673,7 @@ def on_span_finish(self, span): tp = TestProcessor() tp.register() - tracer = Tracer() + tracer = DummyTracer() with 
tracer.trace("test") as span: assert span.get_tag("on_start") == "ok" diff --git a/tests/tracer/test_sampler.py b/tests/tracer/test_sampler.py index 813dc1be439..f54c7de55da 100644 --- a/tests/tracer/test_sampler.py +++ b/tests/tracer/test_sampler.py @@ -1,6 +1,5 @@ from __future__ import division -import re import unittest import mock @@ -250,7 +249,7 @@ def test_sampling_rule_init_defaults(): def test_sampling_rule_init(): - a_regex = re.compile(r"\.request$") + a_regex = "*request" a_string = "my-service" rule = SamplingRule( @@ -261,7 +260,7 @@ def test_sampling_rule_init(): assert rule.sample_rate == 0.0, "SamplingRule should store the rate it's initialized with" assert rule.service.pattern == a_string, "SamplingRule should store the service it's initialized with" - assert rule.name == a_regex, "SamplingRule should store the name regex it's initialized with" + assert rule.name.pattern == a_regex, "SamplingRule should store the name regex it's initialized with" @pytest.mark.parametrize( @@ -272,38 +271,13 @@ def test_sampling_rule_init(): (SamplingRule(sample_rate=0.0), SamplingRule(sample_rate=0.0), True), (SamplingRule(sample_rate=0.5), SamplingRule(sample_rate=1.0), False), (SamplingRule(sample_rate=1.0, service="my-svc"), SamplingRule(sample_rate=1.0, service="my-svc"), True), - ( - SamplingRule(sample_rate=1.0, service=re.compile("my-svc")), - SamplingRule(sample_rate=1.0, service=re.compile("my-svc")), - True, - ), (SamplingRule(sample_rate=1.0, service="my-svc"), SamplingRule(sample_rate=1.0, service="other-svc"), False), (SamplingRule(sample_rate=1.0, service="my-svc"), SamplingRule(sample_rate=0.5, service="my-svc"), False), - ( - SamplingRule(sample_rate=1.0, service=re.compile("my-svc")), - SamplingRule(sample_rate=0.5, service=re.compile("my-svc")), - False, - ), - ( - SamplingRule(sample_rate=1.0, service=re.compile("my-svc")), - SamplingRule(sample_rate=1.0, service=re.compile("other")), - False, - ), ( SamplingRule(sample_rate=1.0, name="span.name"), SamplingRule(sample_rate=1.0, name="span.name"), True, ), - ( - SamplingRule(sample_rate=1.0, name=re.compile("span.name")), - SamplingRule(sample_rate=1.0, name=re.compile("span.name")), - True, - ), - ( - SamplingRule(sample_rate=1.0, name=re.compile("span.name")), - SamplingRule(sample_rate=1.0, name=re.compile("span.other")), - False, - ), ( SamplingRule(sample_rate=1.0, name="span.name"), SamplingRule(sample_rate=0.5, name="span.name"), @@ -316,16 +290,6 @@ def test_sampling_rule_init(): SamplingRule(sample_rate=1.0, service="my-svc", name="span.name"), True, ), - ( - SamplingRule(sample_rate=1.0, service="my-svc", name=re.compile("span.name")), - SamplingRule(sample_rate=1.0, service="my-svc", name=re.compile("span.name")), - True, - ), - ( - SamplingRule(sample_rate=1.0, service=re.compile("my-svc"), name=re.compile("span.name")), - SamplingRule(sample_rate=1.0, service=re.compile("my-svc"), name=re.compile("span.name")), - True, - ), ( SamplingRule(sample_rate=1.0, service="my-svc", name="span.name"), SamplingRule(sample_rate=0.5, service="my-svc", name="span.name"), @@ -491,15 +455,6 @@ def test_sampling_rule_init_via_env(): ("test.span", None, False), ("test.span", "test.span", True), ("test.span", "test_span", False), - ("test.span", re.compile(r"^test\.span$"), True), - ("test_span", re.compile(r"^test.span$"), True), - ("test.span", re.compile(r"^test_span$"), False), - ("test.span", re.compile(r"test"), True), - ("test.span", re.compile(r"test\.span|another\.span"), True), - ("another.span", 
re.compile(r"test\.span|another\.span"), True), - ("test.span", lambda name: "span" in name, True), - ("test.span", lambda name: "span" not in name, False), - ("test.span", lambda name: 1 / 0, False), ] ], ) @@ -518,20 +473,8 @@ def test_sampling_rule_matches_name(span, rule, span_expected_to_match_rule): ("my-service", None, False), (None, "tests.tracer", True), ("tests.tracer", "my-service", False), - ("tests.tracer", re.compile(r"my-service"), False), - ("tests.tracer", lambda service: "service" in service, False), ("my-service", "my-service", True), ("my-service", "my_service", False), - ("my-service", re.compile(r"^my-"), True), - ("my_service", re.compile(r"^my[_-]"), True), - ("my-service", re.compile(r"^my_"), False), - ("my-service", re.compile(r"my-service"), True), - ("my-service", re.compile(r"my"), True), - ("my-service", re.compile(r"my-service|another-service"), True), - ("another-service", re.compile(r"my-service|another-service"), True), - ("my-service", lambda service: "service" in service, True), - ("my-service", lambda service: "service" not in service, False), - ("my-service", lambda service: 1 / 0, False), ] ], ) @@ -553,7 +496,7 @@ def test_sampling_rule_matches_service(span, rule, span_expected_to_match_rule): SamplingRule( sample_rate=1, name="test.span", - service=re.compile(r"^my-"), + service="my-*", ), True, ), @@ -567,7 +510,7 @@ def test_sampling_rule_matches_service(span, rule, span_expected_to_match_rule): SamplingRule( sample_rate=0, name="test.span", - service=re.compile(r"^my-"), + service="my-*", ), True, ), @@ -580,7 +523,7 @@ def test_sampling_rule_matches_service(span, rule, span_expected_to_match_rule): SamplingRule( sample_rate=1, name="test_span", - service=re.compile(r"^my-"), + service="my-*", ), False, ), @@ -593,7 +536,7 @@ def test_sampling_rule_matches_service(span, rule, span_expected_to_match_rule): SamplingRule( sample_rate=1, name="test.span", - service=re.compile(r"^service-"), + service="service-", ), False, ), @@ -605,26 +548,6 @@ def test_sampling_rule_matches(span, rule, span_expected_to_match_rule): ) -def test_sampling_rule_matches_exception(): - def pattern(prop): - raise Exception("an error occurred") - - rule = SamplingRule(sample_rate=1.0, name=pattern) - span = create_span(name="test.span") - - with mock.patch("ddtrace._trace.sampling_rule.log") as mock_log: - assert ( - rule.matches(span) is False - ), "SamplingRule should not match when its name pattern function throws an exception" - mock_log.warning.assert_called_once_with( - "%r pattern %r failed with %r", - rule, - pattern, - "test.span", - exc_info=True, - ) - - @pytest.mark.subprocess( parametrize={"DD_TRACE_128_BIT_TRACEID_GENERATION_ENABLED": ["true", "false"]}, ) @@ -645,21 +568,6 @@ def test_sampling_rule_sample(): ) -@pytest.mark.subprocess(env={"DD_TRACE_SAMPLE_RATE": "0.2"}) -def test_sampling_rate_config_deprecated(): - import warnings - - with warnings.catch_warnings(record=True) as ws: - warnings.simplefilter("always") - - from ddtrace import config - - assert config._trace_sample_rate == 0.2 - - assert len(ws) >= 1 - assert any(w for w in ws if "DD_TRACE_SAMPLE_RATE is deprecated" in str(w.message)), [w.message for w in ws] - - def test_sampling_rule_sample_rate_1(): rule = SamplingRule(sample_rate=1) @@ -727,15 +635,6 @@ def test_datadog_sampler_init(): SamplingRule(sample_rate=0.5) ], "DatadogSampler initialized with no arguments and envvars set should hold a sample_rate from the envvar" - with override_global_config(dict(_trace_sample_rate=0)): - sampler 
= DatadogSampler() - assert ( - sampler.limiter.rate_limit == DatadogSampler.DEFAULT_RATE_LIMIT - ), "DatadogSampler initialized with DD_TRACE_SAMPLE_RATE=0 envvar should hold the default rate limit" - assert sampler.rules == [ - SamplingRule(sample_rate=0) - ], "DatadogSampler initialized with DD_TRACE_SAMPLE_RATE=0 envvar should hold sample_rate=0" - with override_global_config(dict(_trace_sample_rate="asdf")): with pytest.raises(ValueError): DatadogSampler() diff --git a/tests/tracer/test_single_span_sampling_rules.py b/tests/tracer/test_single_span_sampling_rules.py index ef33ecfd619..7fcd0ef7a54 100644 --- a/tests/tracer/test_single_span_sampling_rules.py +++ b/tests/tracer/test_single_span_sampling_rules.py @@ -10,9 +10,7 @@ from ddtrace.internal.sampling import SpanSamplingRule from ddtrace.internal.sampling import _get_file_json from ddtrace.internal.sampling import get_span_sampling_rules -from ddtrace.trace import Tracer from tests.utils import DummyTracer -from tests.utils import DummyWriter from ..utils import override_global_config @@ -129,8 +127,7 @@ def test_env_rules_cause_matching_span_to_be_sampled(): sampling_rules = get_span_sampling_rules() assert sampling_rules[0]._service_matcher.pattern == "test_service" assert sampling_rules[0]._name_matcher.pattern == "test_name" - tracer = Tracer() - tracer._configure(writer=DummyWriter()) + tracer = DummyTracer() span = traced_function(sampling_rules[0], tracer=tracer) assert_sampling_decision_tags(span) @@ -141,8 +138,7 @@ def test_env_rules_dont_cause_non_matching_span_to_be_sampled(): sampling_rules = get_span_sampling_rules() assert sampling_rules[0]._service_matcher.pattern == "test_ser" assert sampling_rules[0]._name_matcher.pattern == "test_na" - tracer = Tracer() - tracer._configure(writer=DummyWriter()) + tracer = DummyTracer() span = traced_function(sampling_rules[0], tracer=tracer) assert_sampling_decision_tags(span, sample_rate=None, mechanism=None, limit=None) @@ -153,8 +149,7 @@ def test_single_span_rules_not_applied_when_span_sampled_by_trace_sampling(): sampling_rules = get_span_sampling_rules() assert sampling_rules[0]._service_matcher.pattern == "test_service" assert sampling_rules[0]._name_matcher.pattern == "test_name" - tracer = Tracer() - tracer._configure(writer=DummyWriter()) + tracer = DummyTracer() span = traced_function(sampling_rules[0], tracer=tracer, trace_sampling=True) assert sampling_rules[0].match(span) is True assert_sampling_decision_tags(span, sample_rate=None, mechanism=None, limit=None, trace_sampling=True) diff --git a/tests/tracer/test_trace_utils.py b/tests/tracer/test_trace_utils.py index ca564cac394..6820b0c4d76 100644 --- a/tests/tracer/test_trace_utils.py +++ b/tests/tracer/test_trace_utils.py @@ -28,7 +28,6 @@ from ddtrace.trace import Context from ddtrace.trace import Pin from ddtrace.trace import Span -from ddtrace.trace import Tracer from tests.appsec.utils import asm_context from tests.utils import override_global_config @@ -277,9 +276,8 @@ def test_int_service(int_config, pin, config_val, default, global_service, expec assert trace_utils.int_service(pin, int_config.myint, default) == expected -def test_int_service_integration(int_config): +def test_int_service_integration(int_config, tracer): pin = Pin() - tracer = Tracer() assert trace_utils.int_service(pin, int_config.myint) == "tests.tracer" with override_global_config(dict(service="global-svc")): @@ -905,8 +903,7 @@ def test_distributed_tracing_enabled(int_config, props, default, expected): assert 
trace_utils.distributed_tracing_enabled(int_config.myint, **kwargs) == expected, (props, default, expected) -def test_activate_distributed_headers_enabled(int_config): - tracer = Tracer() +def test_activate_distributed_headers_enabled(int_config, tracer): int_config.myint["distributed_tracing_enabled"] = True headers = { HTTP_HEADER_PARENT_ID: "12345", @@ -925,8 +922,7 @@ def test_activate_distributed_headers_enabled(int_config): assert context.span_id == 12345 -def test_activate_distributed_headers_disabled(int_config): - tracer = Tracer() +def test_activate_distributed_headers_disabled(int_config, tracer): int_config.myint["distributed_tracing_enabled"] = False headers = { HTTP_HEADER_PARENT_ID: "12345", @@ -941,16 +937,14 @@ def test_activate_distributed_headers_disabled(int_config): assert tracer.context_provider.active() is None -def test_activate_distributed_headers_no_headers(int_config): - tracer = Tracer() +def test_activate_distributed_headers_no_headers(int_config, tracer): int_config.myint["distributed_tracing_enabled"] = True trace_utils.activate_distributed_headers(tracer, int_config=int_config.myint, request_headers=None) assert tracer.context_provider.active() is None -def test_activate_distributed_headers_override_true(int_config): - tracer = Tracer() +def test_activate_distributed_headers_override_true(int_config, tracer): int_config.myint["distributed_tracing_enabled"] = False headers = { HTTP_HEADER_PARENT_ID: "12345", @@ -964,8 +958,7 @@ def test_activate_distributed_headers_override_true(int_config): assert context.span_id == 12345 -def test_activate_distributed_headers_override_false(int_config): - tracer = Tracer() +def test_activate_distributed_headers_override_false(int_config, tracer): int_config.myint["distributed_tracing_enabled"] = True headers = { HTTP_HEADER_PARENT_ID: "12345", @@ -977,8 +970,7 @@ def test_activate_distributed_headers_override_false(int_config): assert tracer.context_provider.active() is None -def test_activate_distributed_headers_existing_context(int_config): - tracer = Tracer() +def test_activate_distributed_headers_existing_context(int_config, tracer): int_config.myint["distributed_tracing_enabled"] = True headers = { @@ -993,8 +985,7 @@ def test_activate_distributed_headers_existing_context(int_config): assert tracer.context_provider.active() == ctx -def test_activate_distributed_headers_existing_context_different_trace_id(int_config): - tracer = Tracer() +def test_activate_distributed_headers_existing_context_different_trace_id(int_config, tracer): int_config.myint["distributed_tracing_enabled"] = True headers = { diff --git a/tests/tracer/test_tracer.py b/tests/tracer/test_tracer.py index 0a75e5fc037..3445dfbefb2 100644 --- a/tests/tracer/test_tracer.py +++ b/tests/tracer/test_tracer.py @@ -9,7 +9,6 @@ from os import getpid import threading from unittest.case import SkipTest -import weakref import mock import pytest @@ -30,8 +29,7 @@ from ddtrace.constants import VERSION_KEY from ddtrace.contrib.internal.trace_utils import set_user from ddtrace.ext import user -from ddtrace.internal._encoding import MsgpackEncoderV04 -from ddtrace.internal._encoding import MsgpackEncoderV05 +import ddtrace.internal from ddtrace.internal.compat import PYTHON_VERSION_INFO from ddtrace.internal.rate_limiter import RateLimiter from ddtrace.internal.serverless import has_aws_lambda_agent_extension @@ -40,8 +38,9 @@ from ddtrace.internal.writer import LogWriter from ddtrace.settings import Config from ddtrace.trace import Context -from ddtrace.trace import 
Tracer +from ddtrace.trace import tracer as global_tracer from tests.subprocesstest import run_in_subprocess +from tests.utils import DummyTracer from tests.utils import TracerTestCase from tests.utils import override_global_config @@ -485,32 +484,6 @@ def test_adding_mapped_services(self): pass assert self.tracer._services == set(["one", "three"]) - def test_configure_dogstatsd_url_host_port(self): - tracer = Tracer() - tracer._configure(dogstatsd_url="foo:1234") - assert tracer._writer.dogstatsd.host == "foo" - assert tracer._writer.dogstatsd.port == 1234 - - tracer = Tracer() - writer = AgentWriter("http://localhost:8126") - tracer._configure(writer=writer, dogstatsd_url="foo:1234") - assert tracer._writer.dogstatsd.host == "foo" - assert tracer._writer.dogstatsd.port == 1234 - - def test_configure_dogstatsd_url_socket(self): - tracer = Tracer() - tracer._configure(dogstatsd_url="unix:///foo.sock") - assert tracer._writer.dogstatsd.host is None - assert tracer._writer.dogstatsd.port is None - assert tracer._writer.dogstatsd.socket_path == "/foo.sock" - - tracer = Tracer() - writer = AgentWriter("http://localhost:8126") - tracer._configure(writer=writer, dogstatsd_url="unix:///foo.sock") - assert tracer._writer.dogstatsd.host is None - assert tracer._writer.dogstatsd.port is None - assert tracer._writer.dogstatsd.socket_path == "/foo.sock" - def test_tracer_set_user(self): with self.trace("fake_span") as span: set_user( @@ -637,34 +610,17 @@ def test_tracer_set_user_propagation_string_error(self): @pytest.mark.subprocess(env=dict(DD_AGENT_PORT="", DD_AGENT_HOST="", DD_TRACE_AGENT_URL="")) def test_tracer_url(): - import pytest - import ddtrace - t = ddtrace.trace.Tracer() - assert t._writer.agent_url == "http://localhost:8126" - - t = ddtrace.trace.Tracer(url="http://foobar:12") - assert t._writer.agent_url == "http://foobar:12" - - t = ddtrace.trace.Tracer(url="unix:///foobar") - assert t._writer.agent_url == "unix:///foobar" - - t = ddtrace.trace.Tracer(url="http://localhost") - assert t._writer.agent_url == "http://localhost" - - t = ddtrace.trace.Tracer(url="https://localhost") - assert t._writer.agent_url == "https://localhost" - - with pytest.raises(ValueError) as e: - ddtrace.trace.Tracer(url="foo://foobar:12") - assert ( - str(e.value) == "Unsupported protocol 'foo' in intake URL 'foo://foobar:12'. 
Must be one of: http, https, unix" - ) + assert ddtrace.trace.tracer._writer.agent_url == "http://localhost:8126" +@pytest.mark.subprocess() def test_tracer_shutdown_no_timeout(): - t = ddtrace.trace.Tracer() + import mock + + from ddtrace.internal.writer import AgentWriter + from ddtrace.trace import tracer as t with mock.patch.object(AgentWriter, "stop") as mock_stop: with mock.patch.object(AgentWriter, "join") as mock_join: @@ -674,8 +630,12 @@ def test_tracer_shutdown_no_timeout(): mock_join.assert_not_called() +@pytest.mark.subprocess() def test_tracer_configure_writer_stop_unstarted(): - t = ddtrace.trace.Tracer() + import mock + + from ddtrace.trace import tracer as t + t._writer = mock.Mock(wraps=t._writer) orig_writer = t._writer @@ -684,8 +644,12 @@ def test_tracer_configure_writer_stop_unstarted(): assert orig_writer.stop.called +@pytest.mark.subprocess() def test_tracer_configure_writer_stop_started(): - t = ddtrace.trace.Tracer() + import mock + + from ddtrace.trace import tracer as t + t._writer = mock.Mock(wraps=t._writer) orig_writer = t._writer @@ -697,8 +661,12 @@ def test_tracer_configure_writer_stop_started(): orig_writer.stop.assert_called_once_with() +@pytest.mark.subprocess() def test_tracer_shutdown_timeout(): - t = ddtrace.trace.Tracer() + import mock + + from ddtrace.internal.writer import AgentWriter + from ddtrace.trace import tracer as t with mock.patch.object(AgentWriter, "stop") as mock_stop: with t.trace("something"): @@ -708,54 +676,22 @@ def test_tracer_shutdown_timeout(): mock_stop.assert_called_once_with(2) +@pytest.mark.subprocess( + err=b"Spans started after the tracer has been shut down will not be sent to the Datadog Agent.\n", +) def test_tracer_shutdown(): - t = ddtrace.trace.Tracer() - t.shutdown() - - with mock.patch.object(AgentWriter, "write") as mock_write: - with t.trace("something"): - pass - - mock_write.assert_not_called() + import mock + from ddtrace.internal.writer import AgentWriter + from ddtrace.trace import tracer as t -def test_tracer_shutdown_warning(): - t = ddtrace.trace.Tracer() t.shutdown() - with mock.patch.object(logging.Logger, "warning") as mock_logger: + with mock.patch.object(AgentWriter, "write") as mock_write: with t.trace("something"): pass - mock_logger.assert_has_calls( - [ - mock.call("Spans started after the tracer has been shut down will not be sent to the Datadog Agent."), - ] - ) - - -def test_tracer_dogstatsd_url(): - t = ddtrace.trace.Tracer() - assert t._writer.dogstatsd.host == "localhost" - assert t._writer.dogstatsd.port == 8125 - - t = ddtrace.trace.Tracer(dogstatsd_url="foobar:12") - assert t._writer.dogstatsd.host == "foobar" - assert t._writer.dogstatsd.port == 12 - - t = ddtrace.trace.Tracer(dogstatsd_url="udp://foobar:12") - assert t._writer.dogstatsd.host == "foobar" - assert t._writer.dogstatsd.port == 12 - - t = ddtrace.trace.Tracer(dogstatsd_url="/var/run/statsd.sock") - assert t._writer.dogstatsd.socket_path == "/var/run/statsd.sock" - - t = ddtrace.trace.Tracer(dogstatsd_url="unix:///var/run/statsd.sock") - assert t._writer.dogstatsd.socket_path == "/var/run/statsd.sock" - - with pytest.raises(ValueError) as e: - t = ddtrace.trace.Tracer(dogstatsd_url="foo://foobar:12") - assert str(e) == "Unknown url format for `foo://foobar:12`" + mock_write.assert_not_called() @pytest.mark.skip(reason="Fails to Pickle RateLimiter in the Tracer") @@ -811,7 +747,7 @@ def task(t, errors): def test_tracer_with_version(): - t = ddtrace.trace.Tracer() + t = DummyTracer() # With global `config.version` defined with 
override_global_config(dict(version="1.2.3")): @@ -838,7 +774,7 @@ def test_tracer_with_version(): def test_tracer_with_env(): - t = ddtrace.trace.Tracer() + t = DummyTracer() # With global `config.env` defined with override_global_config(dict(env="prod")): @@ -960,33 +896,13 @@ def test_version_service_mapping(self): def test_detect_agentless_env_with_lambda(self): assert in_aws_lambda() assert not has_aws_lambda_agent_extension() - tracer = Tracer() - assert isinstance(tracer._writer, LogWriter) - tracer._configure(enabled=True) - assert isinstance(tracer._writer, LogWriter) - - @run_in_subprocess(env_overrides=dict(AWS_LAMBDA_FUNCTION_NAME="my-func")) - def test_detect_agent_config_with_lambda_extension(self): - def mock_os_path_exists(path): - return path == "/opt/extensions/datadog-agent" - - assert in_aws_lambda() - - with mock.patch("os.path.exists", side_effect=mock_os_path_exists): - assert has_aws_lambda_agent_extension() - - tracer = Tracer() - assert isinstance(tracer._writer, AgentWriter) - assert tracer._writer._sync_mode - - tracer._configure(enabled=False) - assert isinstance(tracer._writer, AgentWriter) - assert tracer._writer._sync_mode + assert isinstance(ddtrace.tracer._writer, LogWriter) + ddtrace.tracer._configure(enabled=True) + assert isinstance(ddtrace.tracer._writer, LogWriter) @run_in_subprocess(env_overrides=dict(AWS_LAMBDA_FUNCTION_NAME="my-func", DD_AGENT_HOST="localhost")) def test_detect_agent_config(self): - tracer = Tracer() - assert isinstance(tracer._writer, AgentWriter) + assert isinstance(global_tracer._writer, AgentWriter) @run_in_subprocess(env_overrides=dict(DD_TAGS="key1:value1,key2:value2")) def test_dd_tags(self): @@ -1001,7 +917,7 @@ def test_dd_tags_invalid(self): @run_in_subprocess(env_overrides=dict(DD_TAGS="service:mysvc,env:myenv,version:myvers")) def test_tags_from_DD_TAGS(self): - t = ddtrace.trace.Tracer() + t = DummyTracer() with t.trace("test") as s: assert s.service == "mysvc" assert s.get_tag("env") == "myenv" @@ -1016,33 +932,29 @@ def test_tags_from_DD_TAGS(self): ) ) def test_tags_from_DD_TAGS_precedence(self): - t = ddtrace.trace.Tracer() - with t.trace("test") as s: + with global_tracer.trace("test") as s: assert s.service == "svc" assert s.get_tag("env") == "env" assert s.get_tag("version") == "0.123" @run_in_subprocess(env_overrides=dict(DD_TAGS="service:mysvc,env:myenv,version:myvers")) def test_tags_from_DD_TAGS_override(self): - t = ddtrace.trace.Tracer() ddtrace.config.env = "env" ddtrace.config.service = "service" ddtrace.config.version = "0.123" - with t.trace("test") as s: + with global_tracer.trace("test") as s: assert s.service == "service" assert s.get_tag("env") == "env" assert s.get_tag("version") == "0.123" def test_tracer_set_runtime_tags(): - t = ddtrace.trace.Tracer() - with t.start_span("foobar") as span: + with global_tracer.start_span("foobar") as span: pass assert len(span.get_tag("runtime-id")) - t2 = ddtrace.trace.Tracer() - with t2.start_span("foobaz") as span2: + with global_tracer.start_span("foobaz") as span2: pass assert span.get_tag("runtime-id") == span2.get_tag("runtime-id") @@ -1084,7 +996,7 @@ def test_tracer_runtime_tags_cross_execution(tracer): def test_start_span_hooks(): - t = ddtrace.trace.Tracer() + t = DummyTracer() result = {} @@ -1099,7 +1011,7 @@ def store_span(span): def test_deregister_start_span_hooks(): - t = ddtrace.trace.Tracer() + t = DummyTracer() result = {} @@ -1119,9 +1031,8 @@ def store_span(span): def test_enable(): import os - import ddtrace + from ddtrace.trace import 
tracer as t2 - t2 = ddtrace.trace.Tracer() if os.environ["DD_TRACE_ENABLED"] == "true": assert t2.enabled else: @@ -1170,7 +1081,7 @@ def thread_target(): def test_runtime_id_parent_only(): - tracer = ddtrace.trace.Tracer() + tracer = DummyTracer() # Parent spans should have runtime-id with tracer.trace("test") as s: @@ -1221,18 +1132,6 @@ def test_runtime_id_fork(): assert exit_code == 12 -def test_multiple_tracer_ctx(): - t1 = ddtrace.trace.Tracer() - t2 = ddtrace.trace.Tracer() - - with t1.trace("") as s1: - with t2.trace("") as s2: - pass - - assert s2.parent_id == s1.span_id - assert s2.trace_id == s1.trace_id - - def test_filters(tracer, test_spans): class FilterAll(object): def process_trace(self, trace): @@ -1413,12 +1312,10 @@ def _test_partial_flush(self): def test_unicode_config_vals(): - t = ddtrace.trace.Tracer() - with override_global_config(dict(version="😇", env="😇")): - with t.trace("1"): + with global_tracer.trace("1"): pass - t.shutdown() + global_tracer.flush() def test_ctx(tracer, test_spans): @@ -1664,45 +1561,25 @@ def override_service_mapping(service_mapping): ddtrace.config.service_mapping = {} # Test single mapping - with override_service_mapping("foo:bar"), ddtrace.trace.Tracer().trace("renaming", service="foo") as span: + with override_service_mapping("foo:bar"), global_tracer.trace("renaming", service="foo") as span: assert span.service == "bar" # Test multiple mappings - with override_service_mapping("foo:bar,sna:fu"), ddtrace.trace.Tracer().trace("renaming", service="sna") as span: + with override_service_mapping("foo:bar,sna:fu"), global_tracer.trace("renaming", service="sna") as span: assert span.service == "fu" # Test colliding mappings - with override_service_mapping("foo:bar,foo:foobar"), ddtrace.trace.Tracer().trace( - "renaming", service="foo" - ) as span: + with override_service_mapping("foo:bar,foo:foobar"), global_tracer.trace("renaming", service="foo") as span: assert span.service == "foobar" # Test invalid service mapping with override_service_mapping("foo;bar,sna:fu"): - with ddtrace.trace.Tracer().trace("passthru", service="foo") as _: + with global_tracer.trace("passthru", service="foo") as _: assert _.service == "foo" - with ddtrace.trace.Tracer().trace("renaming", "sna") as _: + with global_tracer.trace("renaming", "sna") as _: assert _.service == "fu" -@pytest.mark.subprocess(env=dict(DD_AGENT_PORT="", DD_AGENT_HOST="", DD_TRACE_AGENT_URL="")) -def test_configure_url_partial(): - import ddtrace - - tracer = ddtrace.trace.Tracer() - tracer._configure(hostname="abc") - assert tracer._writer.agent_url == "http://abc:8126" - tracer._configure(port=123) - assert tracer._writer.agent_url == "http://abc:123" - - tracer = ddtrace.trace.Tracer(url="http://abc") - assert tracer._writer.agent_url == "http://abc" - tracer._configure(port=123) - assert tracer._writer.agent_url == "http://abc:123" - tracer._configure(port=431) - assert tracer._writer.agent_url == "http://abc:431" - - @pytest.mark.subprocess(env={"DD_TRACE_AGENT_URL": "bad://localhost:1234"}) def test_bad_agent_url(): import pytest @@ -1910,16 +1787,16 @@ def test_fork_pid(): assert exit_code == 12 +@pytest.mark.subprocess def test_tracer_api_version(): - t = Tracer() - assert isinstance(t._writer._encoder, MsgpackEncoderV05) + from ddtrace.internal.encoding import MsgpackEncoderV05 + from ddtrace.trace import tracer as t - t._configure(api_version="v0.4") - assert isinstance(t._writer._encoder, MsgpackEncoderV04) + assert isinstance(t._writer._encoder, MsgpackEncoderV05) 
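
For reviewers, a minimal sketch of the test pattern these hunks adopt (illustrative only, not part of the changeset; it assumes the repository's `pytest.mark.subprocess` plugin, which re-executes the test body in a fresh interpreter):

    import pytest


    @pytest.mark.subprocess()
    def test_sketch_global_tracer():
        # Imports live inside the body because only the body runs in the
        # child process; the global tracer is created and mutated there,
        # so no state leaks into other tests.
        from ddtrace.trace import tracer

        with tracer.trace("example") as span:
            pass

        # The span is finished on context manager exit.
        assert span.duration is not None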
-@pytest.mark.parametrize("enabled", [True, False]) -def test_tracer_memory_leak_span_processors(enabled): +@pytest.mark.subprocess(parametrize={"DD_TRACE_ENABLED": ["true", "false"]}) +def test_tracer_memory_leak_span_processors(): """ Test whether the tracer or span processors will hold onto span references after the trace is complete. @@ -1927,16 +1804,20 @@ def test_tracer_memory_leak_span_processors(enabled): This is a regression test for the tracer not calling on_span_finish of SpanAggregator when the tracer was disabled and traces leaking. """ + import gc + import weakref + + from ddtrace.trace import TraceFilter + from ddtrace.trace import tracer as t + spans = weakref.WeakSet() # Filter to ensure we don't send the traces to the writer - class DropAllFilter: + class DropAllFilter(TraceFilter): def process_trace(self, trace): return None - t = Tracer() - t.enabled = enabled - t._configure(trace_processors=[DropAllFilter()]) + t.configure(trace_processors=[DropAllFilter()]) for _ in range(5): with t.trace("test") as span: @@ -1944,6 +1825,7 @@ def process_trace(self, trace): # Be sure to dereference the last Span held by the local variable `span` span = None + t.flush() # Force gc gc.collect() @@ -1984,11 +1866,9 @@ def test_finish_span_with_ancestors(tracer): assert span3.finished -def test_ctx_api(): +def test_ctx_api(tracer): from ddtrace.internal import core - tracer = Tracer() - assert core.get_item("key") is None with tracer.trace("root") as span: @@ -2010,21 +1890,6 @@ def test_ctx_api(): assert core.get_items(["appsec.key"]) == [None] -@pytest.mark.subprocess(parametrize={"IMPORT_DDTRACE_TRACER": ["true", "false"]}) -def test_import_ddtrace_tracer_not_module(): - import os - - import_ddtrace_tracer = os.environ["IMPORT_DDTRACE_TRACER"] == "true" - - if import_ddtrace_tracer: - import ddtrace.tracer # noqa: F401 - - from ddtrace.trace import Tracer - from ddtrace.trace import tracer - - assert isinstance(tracer, Tracer) - - @pytest.mark.parametrize("sca_enabled", ["true", "false"]) @pytest.mark.parametrize("appsec_enabled", [True, False]) @pytest.mark.parametrize("iast_enabled", [True, False]) @@ -2034,7 +1899,7 @@ def test_asm_standalone_configuration(sca_enabled, appsec_enabled, iast_enabled) with override_env({"DD_APPSEC_SCA_ENABLED": sca_enabled}): ddtrace.config._reset() - tracer = ddtrace.trace.Tracer() + tracer = DummyTracer() tracer._configure(appsec_enabled=appsec_enabled, iast_enabled=iast_enabled, appsec_standalone_enabled=True) if sca_enabled == "true": assert bool(ddtrace.config._sca_enabled) is True @@ -2053,10 +1918,9 @@ def test_asm_standalone_configuration(sca_enabled, appsec_enabled, iast_enabled) def test_gc_not_used_on_root_spans(): - tracer = ddtrace.trace.Tracer() gc.freeze() - with tracer.trace("test-event"): + with ddtrace.tracer.trace("test-event"): pass # There should be no more span objects lingering around. 
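
For context, the leak-check idiom shared by the memory tests above, reduced to its core (illustrative sketch, not part of the changeset):

    import gc
    from weakref import WeakValueDictionary

    from ddtrace.trace import tracer

    # A WeakValueDictionary only keeps a span alive while something else
    # still references it, so surviving entries indicate a leaked reference.
    wd = WeakValueDictionary()
    with tracer.trace("leak-check") as span:
        wd["span"] = span

    tracer.flush()  # let the writer drop its reference to the finished trace
    del span
    gc.collect()
    assert len(wd) == 0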
@@ -2072,25 +1936,39 @@ def test_gc_not_used_on_root_spans(): # print("--------------------") +@pytest.mark.subprocess(env=dict(AWS_LAMBDA_FUNCTION_NAME="my-func")) +def test_detect_agent_config_with_lambda_extension(): + import mock + + def mock_os_path_exists(path): + return path == "/opt/extensions/datadog-agent" + + with mock.patch("os.path.exists", side_effect=mock_os_path_exists): + import ddtrace + from ddtrace.internal.writer import AgentWriter + from ddtrace.trace import tracer + + assert ddtrace.internal.serverless.in_aws_lambda() + + assert ddtrace.internal.serverless.has_aws_lambda_agent_extension() + + assert isinstance(tracer._writer, AgentWriter) + assert tracer._writer._sync_mode + + tracer._configure(enabled=False) + assert isinstance(tracer._writer, AgentWriter) + assert tracer._writer._sync_mode + + @pytest.mark.subprocess() def test_multiple_tracer_instances(): - import warnings + import mock - with warnings.catch_warnings(record=True) as warns: - warnings.simplefilter("always") - import ddtrace + import ddtrace - assert ddtrace.tracer is not None - for w in warns: - # Ensure the warning is not about multiple tracer instances is not logged when importing ddtrace - assert "Support for multiple Tracer instances is deprecated" not in str(w.message) - - warns.clear() - t = ddtrace.trace.Tracer() - # TODO: Update this assertion when the deprecation is removed and the tracer becomes a singleton - assert t is not ddtrace.tracer - assert len(warns) == 1 - assert ( - str(warns[0].message) == "Support for multiple Tracer instances is deprecated and will be " - "removed in version '3.0.0'. Use ddtrace.tracer instead." - ) + assert ddtrace.trace.tracer is not None + with mock.patch("ddtrace._trace.tracer.log") as log: + ddtrace.trace.Tracer() + log.error.assert_called_once_with( + "Multiple Tracer instances can not be initialized. " "Use ``ddtrace.trace.tracer`` instead." 
+ ) diff --git a/tests/utils.py b/tests/utils.py index bc7acd68b84..da37ea88387 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -127,6 +127,7 @@ def override_global_config(values): "_x_datadog_tags_max_length", "_128_bit_trace_id_enabled", "_x_datadog_tags_enabled", + "_startup_logs_enabled", "_propagate_service", "env", "version", @@ -170,7 +171,7 @@ def override_global_config(values): ddtrace.config._subscriptions = [] # Grab the current values of all keys originals = dict((key, getattr(ddtrace.config, key)) for key in global_config_keys) - asm_originals = dict((key, getattr(ddtrace.settings.asm.config, key)) for key in asm_config_keys) + asm_originals = dict((key, getattr(asm_config, key)) for key in asm_config_keys) # Override from the passed in keys for key, value in values.items(): @@ -179,9 +180,9 @@ def override_global_config(values): # rebuild asm config from env vars and global config for key, value in values.items(): if key in asm_config_keys: - setattr(ddtrace.settings.asm.config, key, value) + setattr(asm_config, key, value) # If ddtrace.settings.asm.config has changed, check _asm_can_be_enabled again - ddtrace.settings.asm.config._eval_asm_can_be_enabled() + asm_config._eval_asm_can_be_enabled() try: core.dispatch("test.config.override") yield @@ -190,9 +191,9 @@ def override_global_config(values): for key, value in originals.items(): setattr(ddtrace.config, key, value) - ddtrace.settings.asm.config.reset() + asm_config.reset() for key, value in asm_originals.items(): - setattr(ddtrace.settings.asm.config, key, value) + setattr(asm_config, key, value) ddtrace.config._reset() ddtrace.config._subscriptions = subscriptions @@ -649,8 +650,8 @@ def configure(self, *args, **kwargs): self._configure(*args, **kwargs) def _configure(self, *args, **kwargs): - assert "writer" not in kwargs or isinstance( - kwargs["writer"], DummyWriterMixin + assert isinstance( + kwargs.get("writer"), (DummyWriterMixin, type(None)) ), "cannot configure writer of DummyTracer" if not kwargs.get("writer"):
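
For reviewers, how a test is expected to obtain a tracer under this changeset, assuming the `pop()` helper that `DummyTracer` already exposes (illustrative sketch, not part of the diff):

    from tests.utils import DummyTracer

    # DummyTracer wires in a DummyWriter, so finished spans are captured in
    # memory instead of being sent to an agent.
    tracer = DummyTracer()
    with tracer.trace("unit-test", service="my-svc"):
        pass

    spans = tracer.pop()
    assert spans[0].name == "unit-test"
    assert spans[0].service == "my-svc"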