diff --git a/benchmarks/bm/utils.py b/benchmarks/bm/utils.py index dd7b4991c57..13e99e8be74 100644 --- a/benchmarks/bm/utils.py +++ b/benchmarks/bm/utils.py @@ -65,7 +65,7 @@ def process_trace(self, trace): def drop_traces(tracer): - tracer.configure(settings={"FILTERS": [_DropTraces()]}) + tracer.configure(trace_processors=[_DropTraces()]) def drop_telemetry_events(): diff --git a/benchmarks/rate_limiter/scenario.py b/benchmarks/rate_limiter/scenario.py index 5210647ef89..3388af1cfb8 100644 --- a/benchmarks/rate_limiter/scenario.py +++ b/benchmarks/rate_limiter/scenario.py @@ -23,8 +23,8 @@ def _(loops): windows = [start + (i * self.time_window) for i in range(self.num_windows)] per_window = math.floor(loops / self.num_windows) - for window in windows: + for _ in windows: for _ in range(per_window): - rate_limiter.is_allowed(window) + rate_limiter.is_allowed() yield _ diff --git a/ddtrace/_trace/sampling_rule.py b/ddtrace/_trace/sampling_rule.py index 532a0b71f51..482a95d403a 100644 --- a/ddtrace/_trace/sampling_rule.py +++ b/ddtrace/_trace/sampling_rule.py @@ -8,8 +8,6 @@ from ddtrace.internal.glob_matching import GlobMatcher from ddtrace.internal.logger import get_logger from ddtrace.internal.utils.cache import cachedmethod -from ddtrace.internal.utils.deprecations import DDTraceDeprecationWarning -from ddtrace.vendor.debtcollector import deprecate if TYPE_CHECKING: # pragma: no cover @@ -210,14 +208,12 @@ def choose_matcher(self, prop): # We currently support the ability to pass in a function, a regular expression, or a string # If a string is passed in we create a GlobMatcher to handle the matching if callable(prop) or isinstance(prop, pattern_type): - # deprecated: passing a function or a regular expression' - deprecate( - "Using methods or regular expressions for SamplingRule matching is deprecated. ", - message="Please move to passing in a string for Glob matching.", - removal_version="3.0.0", - category=DDTraceDeprecationWarning, + log.error( + "Using methods or regular expressions for SamplingRule matching is not supported: %s ." + "Please move to passing in a string for Glob matching.", + str(prop), ) - return prop + return "None" # Name and Resource will never be None, but service can be, since we str() # whatever we pass into the GlobMatcher, we can just use its matching elif prop is None: diff --git a/ddtrace/_trace/span.py b/ddtrace/_trace/span.py index 446239a8091..c6eb4d4b72a 100644 --- a/ddtrace/_trace/span.py +++ b/ddtrace/_trace/span.py @@ -52,8 +52,6 @@ from ddtrace.internal.logger import get_logger from ddtrace.internal.sampling import SamplingMechanism from ddtrace.internal.sampling import set_sampling_decision_maker -from ddtrace.internal.utils.deprecations import DDTraceDeprecationWarning -from ddtrace.vendor.debtcollector import deprecate _NUMERIC_TAGS = (_ANALYTICS_SAMPLE_RATE_KEY,) @@ -279,29 +277,6 @@ def duration(self) -> Optional[float]: def duration(self, value: float) -> None: self.duration_ns = int(value * 1e9) - @property - def sampled(self) -> Optional[bool]: - deprecate( - "span.sampled is deprecated and will be removed in a future version of the tracer.", - message="""span.sampled references the state of span.context.sampling_priority. 
- Please use span.context.sampling_priority instead to check if a span is sampled.""", - category=DDTraceDeprecationWarning, - ) - if self.context.sampling_priority is None: - # this maintains original span.sampled behavior, where all spans would start - # with span.sampled = True until sampling runs - return True - return self.context.sampling_priority > 0 - - @sampled.setter - def sampled(self, value: bool) -> None: - deprecate( - "span.sampled is deprecated and will be removed in a future version of the tracer.", - message="""span.sampled has a no-op setter. - Please use span.set_tag('manual.keep'/'manual.drop') to keep or drop spans.""", - category=DDTraceDeprecationWarning, - ) - def finish(self, finish_time: Optional[float] = None) -> None: """Mark the end time of the span and submit it to the tracer. If the span has already been finished don't do anything. diff --git a/ddtrace/_trace/tracer.py b/ddtrace/_trace/tracer.py index 87f312bb18c..7030ec823d6 100644 --- a/ddtrace/_trace/tracer.py +++ b/ddtrace/_trace/tracer.py @@ -24,6 +24,7 @@ from ddtrace._trace.processor import TraceProcessor from ddtrace._trace.processor import TraceSamplingProcessor from ddtrace._trace.processor import TraceTagsProcessor +from ddtrace._trace.provider import BaseContextProvider from ddtrace._trace.provider import DefaultContextProvider from ddtrace._trace.sampler import BasePrioritySampler from ddtrace._trace.sampler import BaseSampler @@ -200,7 +201,7 @@ def __init__( self, url: Optional[str] = None, dogstatsd_url: Optional[str] = None, - context_provider: Optional[DefaultContextProvider] = None, + context_provider: Optional[BaseContextProvider] = None, ) -> None: """ Create a new ``Tracer`` instance. A global tracer is already initialized @@ -328,28 +329,6 @@ def sample(self, span): else: log.error("No sampler available to sample span") - @property - def sampler(self): - deprecate( - "tracer.sampler is deprecated and will be removed.", - message="To manually sample call tracer.sample(span) instead.", - category=DDTraceDeprecationWarning, - ) - return self._sampler - - @sampler.setter - def sampler(self, value): - deprecate( - "Setting a custom sampler is deprecated and will be removed.", - message="""Please use DD_TRACE_SAMPLING_RULES to configure the sampler instead: - https://ddtrace.readthedocs.io/en/stable/configuration.html#DD_TRACE_SAMPLING_RULES""", - category=DDTraceDeprecationWarning, - ) - if asm_config._apm_opt_out: - log.warning("Cannot set a custom sampler with Standalone ASM mode") - return - self._sampler = value - def on_start_span(self, func: Callable) -> Callable: """Register a function to execute when a span start. 
@@ -441,21 +420,7 @@ def get_log_correlation_context(self, active: Optional[Union[Context, Span]] = N def configure( self, - enabled: Optional[bool] = None, - hostname: Optional[str] = None, - port: Optional[int] = None, - uds_path: Optional[str] = None, - https: Optional[bool] = None, - sampler: Optional[BaseSampler] = None, - context_provider: Optional[DefaultContextProvider] = None, - wrap_executor: Optional[Callable] = None, - priority_sampling: Optional[bool] = None, - settings: Optional[Dict[str, Any]] = None, - dogstatsd_url: Optional[str] = None, - writer: Optional[TraceWriter] = None, - partial_flush_enabled: Optional[bool] = None, - partial_flush_min_spans: Optional[int] = None, - api_version: Optional[str] = None, + context_provider: Optional[BaseContextProvider] = None, compute_stats_enabled: Optional[bool] = None, appsec_enabled: Optional[bool] = None, iast_enabled: Optional[bool] = None, @@ -472,58 +437,14 @@ def configure( :param bool appsec_standalone_enabled: When tracing is disabled ensures ASM support is still enabled. :param List[TraceProcessor] trace_processors: This parameter sets TraceProcessor (ex: TraceFilters). Trace processors are used to modify and filter traces based on certain criteria. - - :param bool enabled: If True, finished traces will be submitted to the API, else they'll be dropped. - This parameter is deprecated and will be removed. - :param str hostname: Hostname running the Trace Agent. This parameter is deprecated and will be removed. - :param int port: Port of the Trace Agent. This parameter is deprecated and will be removed. - :param str uds_path: The Unix Domain Socket path of the agent. This parameter is deprecated and will be removed. - :param bool https: Whether to use HTTPS or HTTP. This parameter is deprecated and will be removed. - :param object sampler: A custom Sampler instance, locally deciding to totally drop the trace or not. - This parameter is deprecated and will be removed. - :param object wrap_executor: callable that is used when a function is decorated with - ``Tracer.wrap()``. This is an advanced option that usually doesn't need to be changed - from the default value. This parameter is deprecated and will be removed. - :param priority_sampling: This parameter is deprecated and will be removed in a future version. - :param bool settings: This parameter is deprecated and will be removed. - :param str dogstatsd_url: URL for UDP or Unix socket connection to DogStatsD - This parameter is deprecated and will be removed. - :param TraceWriter writer: This parameter is deprecated and will be removed. - :param bool partial_flush_enabled: This parameter is deprecated and will be removed. - :param bool partial_flush_min_spans: This parameter is deprecated and will be removed. - :param str api_version: This parameter is deprecated and will be removed. - :param bool compute_stats_enabled: This parameter is deprecated and will be removed. 
""" - if settings is not None: - deprecate( - "Support for ``tracer.configure(...)`` with the settings parameter is deprecated", - message="Please use the trace_processors parameter instead of settings['FILTERS'].", - version="3.0.0", - category=DDTraceDeprecationWarning, - ) - trace_processors = (trace_processors or []) + (settings.get("FILTERS") or []) - return self._configure( - enabled, - hostname, - port, - uds_path, - https, - sampler, - context_provider, - wrap_executor, - priority_sampling, - trace_processors, - dogstatsd_url, - writer, - partial_flush_enabled, - partial_flush_min_spans, - api_version, - compute_stats_enabled, - appsec_enabled, - iast_enabled, - appsec_standalone_enabled, - True, + context_provider=context_provider, + trace_processors=trace_processors, + compute_stats_enabled=compute_stats_enabled, + appsec_enabled=appsec_enabled, + iast_enabled=iast_enabled, + appsec_standalone_enabled=appsec_standalone_enabled, ) def _configure( @@ -534,7 +455,7 @@ def _configure( uds_path: Optional[str] = None, https: Optional[bool] = None, sampler: Optional[BaseSampler] = None, - context_provider: Optional[DefaultContextProvider] = None, + context_provider: Optional[BaseContextProvider] = None, wrap_executor: Optional[Callable] = None, priority_sampling: Optional[bool] = None, trace_processors: Optional[List[TraceProcessor]] = None, @@ -547,48 +468,18 @@ def _configure( appsec_enabled: Optional[bool] = None, iast_enabled: Optional[bool] = None, appsec_standalone_enabled: Optional[bool] = None, - log_deprecations: bool = False, ) -> None: if enabled is not None: self.enabled = enabled - if log_deprecations: - deprecate( - "Enabling/Disabling tracing after application start is deprecated", - message="Please use DD_TRACE_ENABLED instead.", - version="3.0.0", - category=DDTraceDeprecationWarning, - ) - - if priority_sampling is not None and log_deprecations: - deprecate( - "Disabling priority sampling is deprecated", - message="Calling `tracer.configure(priority_sampling=....) 
has no effect", - version="3.0.0", - category=DDTraceDeprecationWarning, - ) if trace_processors is not None: self._user_trace_processors = trace_processors if partial_flush_enabled is not None: self._partial_flush_enabled = partial_flush_enabled - if log_deprecations: - deprecate( - "Configuring partial flushing after application start is deprecated", - message="Please use DD_TRACE_PARTIAL_FLUSH_ENABLED to enable/disable the partial flushing instead.", - version="3.0.0", - category=DDTraceDeprecationWarning, - ) if partial_flush_min_spans is not None: self._partial_flush_min_spans = partial_flush_min_spans - if log_deprecations: - deprecate( - "Configuring partial flushing after application start is deprecated", - message="Please use DD_TRACE_PARTIAL_FLUSH_MIN_SPANS to set the flushing threshold instead.", - version="3.0.0", - category=DDTraceDeprecationWarning, - ) if appsec_enabled is not None: asm_config._asm_enabled = appsec_enabled @@ -620,33 +511,11 @@ def _configure( if sampler is not None: self._sampler = sampler self._user_sampler = self._sampler - if log_deprecations: - deprecate( - "Configuring custom samplers is deprecated", - message="Please use DD_TRACE_SAMPLING_RULES to configure the sample rates instead", - category=DDTraceDeprecationWarning, - removal_version="3.0.0", - ) if dogstatsd_url is not None: - if log_deprecations: - deprecate( - "Configuring dogstatsd_url after application start is deprecated", - message="Please use DD_DOGSTATSD_URL instead.", - version="3.0.0", - category=DDTraceDeprecationWarning, - ) self._dogstatsd_url = dogstatsd_url if any(x is not None for x in [hostname, port, uds_path, https]): - if log_deprecations: - deprecate( - "Configuring tracer agent connection after application start is deprecated", - message="Please use DD_TRACE_AGENT_URL instead.", - version="3.0.0", - category=DDTraceDeprecationWarning, - ) - # If any of the parts of the URL have updated, merge them with # the previous writer values. prev_url_parsed = compat.parse.urlparse(self._agent_url) @@ -670,13 +539,6 @@ def _configure( new_url = None if compute_stats_enabled is not None: - if log_deprecations: - deprecate( - "Configuring tracer stats computation after application start is deprecated", - message="Please use DD_TRACE_STATS_COMPUTATION_ENABLED instead.", - version="3.0.0", - category=DDTraceDeprecationWarning, - ) self._compute_stats = compute_stats_enabled try: @@ -685,14 +547,6 @@ def _configure( # It's possible the writer never got started pass - if api_version is not None and log_deprecations: - deprecate( - "Configuring Tracer API version after application start is deprecated", - message="Please use DD_TRACE_API_VERSION instead.", - version="3.0.0", - category=DDTraceDeprecationWarning, - ) - if writer is not None: self._writer = writer elif any(x is not None for x in [new_url, api_version, sampler, dogstatsd_url, appsec_enabled]): @@ -754,12 +608,6 @@ def _configure( if wrap_executor is not None: self._wrap_executor = wrap_executor - if log_deprecations: - deprecate( - "Support for tracer.configure(...) 
with the wrap_executor parameter is deprecated", - version="3.0.0", - category=DDTraceDeprecationWarning, - ) self._generate_diagnostic_logs() @@ -1344,7 +1192,7 @@ def _handle_sampler_update(self, cfg: Config) -> None: and self._user_sampler ): # if we get empty configs from rc for both sample rate and rules, we should revert to the user sampler - self.sampler = self._user_sampler + self._sampler = self._user_sampler return if cfg._get_source("_trace_sample_rate") != "remote_config" and self._user_sampler: diff --git a/ddtrace/internal/datadog/profiling/stack_v2/include/sampler.hpp b/ddtrace/internal/datadog/profiling/stack_v2/include/sampler.hpp index 7050b6fcaa4..2493b35ca25 100644 --- a/ddtrace/internal/datadog/profiling/stack_v2/include/sampler.hpp +++ b/ddtrace/internal/datadog/profiling/stack_v2/include/sampler.hpp @@ -25,9 +25,6 @@ class Sampler // Parameters uint64_t echion_frame_cache_size = g_default_echion_frame_cache_size; - // Helper function; implementation of the echion sampling thread - void sampling_thread(const uint64_t seq_num); - // This is a singleton, so no public constructor Sampler(); @@ -37,7 +34,7 @@ class Sampler public: // Singleton instance static Sampler& get(); - void start(); + bool start(); void stop(); void register_thread(uint64_t id, uint64_t native_id, const char* name); void unregister_thread(uint64_t id); @@ -46,6 +43,7 @@ class Sampler PyObject* _asyncio_scheduled_tasks, PyObject* _asyncio_eager_tasks); void link_tasks(PyObject* parent, PyObject* child); + void sampling_thread(const uint64_t seq_num); // The Python side dynamically adjusts the sampling rate based on overhead, so we need to be able to update our // own intervals accordingly. Rather than a preemptive measure, we assume the rate is ~fairly stable and just diff --git a/ddtrace/internal/datadog/profiling/stack_v2/src/sampler.cpp b/ddtrace/internal/datadog/profiling/stack_v2/src/sampler.cpp index 7ad9ad692b2..656e9c05c22 100644 --- a/ddtrace/internal/datadog/profiling/stack_v2/src/sampler.cpp +++ b/ddtrace/internal/datadog/profiling/stack_v2/src/sampler.cpp @@ -10,6 +10,52 @@ using namespace Datadog; +// Helper class for spawning a std::thread with control over its default stack size +#ifdef __linux__ +#include <pthread.h> +#include <sys/resource.h> + +struct ThreadArgs +{ + Sampler* sampler; + uint64_t seq_num; +}; + +void* +call_sampling_thread(void* args) +{ + ThreadArgs thread_args = *static_cast<ThreadArgs*>(args); + delete static_cast<ThreadArgs*>(args); // no longer needed, dynamic alloc + Sampler* sampler = thread_args.sampler; + sampler->sampling_thread(thread_args.seq_num); + return nullptr; +} + +pthread_t +create_thread_with_stack(size_t stack_size, Sampler* sampler, uint64_t seq_num) +{ + pthread_attr_t attr; + if (pthread_attr_init(&attr) != 0) { + return 0; + } + if (stack_size > 0) { + pthread_attr_setstacksize(&attr, stack_size); + } + + pthread_t thread_id; + ThreadArgs* thread_args = new ThreadArgs{ sampler, seq_num }; + int ret = pthread_create(&thread_id, &attr, call_sampling_thread, thread_args); + + pthread_attr_destroy(&attr); + + if (ret != 0) { + delete thread_args; // usually deleted in the thread, but need to clean it up here + return 0; + } + return thread_id; +} +#endif + void Sampler::sampling_thread(const uint64_t seq_num) { @@ -134,7 +180,7 @@ Sampler::unregister_thread(uint64_t id) thread_info_map.erase(id); } -void +bool Sampler::start() { static std::once_flag once; @@ -143,8 +189,22 @@ Sampler::start() // Launch the sampling thread. // Thread lifetime is bounded by the value of the sequence number.
When it is changed from the value the thread was // launched with, the thread will exit. - std::thread t(&Sampler::sampling_thread, this, ++thread_seq_num); - t.detach(); +#ifdef __linux__ + // We might as well get the default stack size and use that + rlimit stack_sz = {}; + getrlimit(RLIMIT_STACK, &stack_sz); + if (create_thread_with_stack(stack_sz.rlim_cur, this, ++thread_seq_num) == 0) { + return false; + } +#else + try { + std::thread t(&Sampler::sampling_thread, this, ++thread_seq_num); + t.detach(); + } catch (const std::exception& e) { + return false; + } +#endif + return true; } void diff --git a/ddtrace/internal/datadog/profiling/stack_v2/src/stack_v2.cpp b/ddtrace/internal/datadog/profiling/stack_v2/src/stack_v2.cpp index c56b5524bcd..2e562943333 100644 --- a/ddtrace/internal/datadog/profiling/stack_v2/src/stack_v2.cpp +++ b/ddtrace/internal/datadog/profiling/stack_v2/src/stack_v2.cpp @@ -21,8 +21,10 @@ _stack_v2_start(PyObject* self, PyObject* args, PyObject* kwargs) } Sampler::get().set_interval(min_interval_s); - Sampler::get().start(); - Py_RETURN_NONE; + if (Sampler::get().start()) { + Py_RETURN_TRUE; + } + Py_RETURN_FALSE; } // Bypasses the old-style cast warning with an unchecked helper function diff --git a/ddtrace/internal/rate_limiter.py b/ddtrace/internal/rate_limiter.py index 0a97a6a7abc..9b514e5ff32 100644 --- a/ddtrace/internal/rate_limiter.py +++ b/ddtrace/internal/rate_limiter.py @@ -9,9 +9,6 @@ from typing import Callable # noqa:F401 from typing import Optional # noqa:F401 -from ddtrace.internal.utils.deprecations import DDTraceDeprecationWarning -from ddtrace.vendor.debtcollector import deprecate - class RateLimiter(object): """ @@ -57,26 +54,18 @@ def __init__(self, rate_limit: int, time_window: float = 1e9): self._lock = threading.Lock() - def is_allowed(self, timestamp_ns: Optional[int] = None) -> bool: + def is_allowed(self) -> bool: """ Check whether the current request is allowed or not This method will also reduce the number of available tokens by 1 - :param int timestamp_ns: timestamp in nanoseconds for the current request. :returns: Whether the current request is allowed or not :rtype: :obj:`bool` """ - if timestamp_ns is not None: - deprecate( - "The `timestamp_ns` parameter is deprecated and will be removed in a future version." - "Ratelimiter will use the current time.", - category=DDTraceDeprecationWarning, - ) - # rate limits are tested and mocked in pytest so we need to compute the timestamp here # (or move the unit tests to rust) - timestamp_ns = timestamp_ns or time.monotonic_ns() + timestamp_ns = time.monotonic_ns() allowed = self._is_allowed(timestamp_ns) # Update counts used to determine effective rate self._update_rate_counts(allowed, timestamp_ns) diff --git a/ddtrace/internal/tracemethods.py b/ddtrace/internal/tracemethods.py index 5328797c09f..456cca597e1 100644 --- a/ddtrace/internal/tracemethods.py +++ b/ddtrace/internal/tracemethods.py @@ -4,8 +4,6 @@ import wrapt from ddtrace.internal.logger import get_logger -from ddtrace.internal.utils.deprecations import DDTraceDeprecationWarning -from ddtrace.vendor.debtcollector import deprecate log = get_logger(__name__) @@ -65,102 +63,10 @@ def _parse_trace_methods(raw_dd_trace_methods: str) -> List[Tuple[str, str]]: return dd_trace_methods -def _parse_legacy_trace_methods(raw_dd_trace_methods: str) -> List[str]: - """ - Return a list of method names to trace based on the specification of - DD_TRACE_METHODS. - - Note that support for wildcard methods with [*] is not implemented. 
- - This square bracket notation will be deprecated in favor of the new ':' notation - TODO: This method can be deleted once the legacy syntax is officially deprecated - """ - if not raw_dd_trace_methods: - return [] - dd_trace_methods = [] - for qualified_methods in raw_dd_trace_methods.split(";"): - # Validate that methods are specified - if "[" not in qualified_methods or "]" not in qualified_methods: - log.warning( - ( - "Invalid DD_TRACE_METHODS: %s. " - "Methods must be specified in square brackets following the fully qualified module or class name." - ), - qualified_methods, - ) - return [] - - # Store the prefix of the qualified method name (eg. for "foo.bar.baz[qux,quux]", this is "foo.bar.baz") - qualified_method_prefix = qualified_methods.split("[")[0] - - if qualified_method_prefix == "__main__": - # __main__ cannot be used since the __main__ that exists now is not the same as the __main__ that the user - # application will have. __main__ when sitecustomize module is run is the builtin __main__. - log.warning( - "Invalid DD_TRACE_METHODS: %s. Methods cannot be traced on the __main__ module.", qualified_methods - ) - return [] - - # Get the class or module name of the method (eg. for "foo.bar.baz[qux,quux]", this is "baz[qux,quux]") - class_or_module_with_methods = qualified_methods.split(".")[-1] - - # Strip off the leading 'moduleOrClass[' and trailing ']' - methods = class_or_module_with_methods.split("[")[1] - methods = methods[:-1] - - # Add the methods to the list of methods to trace - for method in methods.split(","): - if not str.isidentifier(method): - log.warning( - "Invalid method name: %r. %s", - method, - ( - "You might have a trailing comma." - if method == "" - else "Method names must be valid Python identifiers." - ), - ) - return [] - dd_trace_methods.append("%s.%s" % (qualified_method_prefix, method)) - return dd_trace_methods - - def _install_trace_methods(raw_dd_trace_methods: str) -> None: """Install tracing on the given methods.""" - if "[" in raw_dd_trace_methods: - deprecate( - "Using DD_TRACE_METHODS with the '[]' notation is deprecated", - message="Please use DD_TRACE_METHODS with the new ':' notation instead", - removal_version="3.0.0", - category=DDTraceDeprecationWarning, - ) - # Using legacy syntax - for qualified_method in _parse_legacy_trace_methods(raw_dd_trace_methods): - # We don't know if the method is a class method or a module method, so we need to assume it's a module - # and if the import fails then go a level up and try again. 
- base_module_guess = ".".join(qualified_method.split(".")[:-1]) - method_name = qualified_method.split(".")[-1] - module = None - - while base_module_guess: - try: - module = __import__(base_module_guess) - except ImportError: - # Add the class to the method name - method_name = "%s.%s" % (base_module_guess.split(".")[-1], method_name) - base_module_guess = ".".join(base_module_guess.split(".")[:-1]) - else: - break - - if module is None: - log.warning("Could not import module for %r", qualified_method) - continue - - trace_method(base_module_guess, method_name) - else: - # Using updated syntax, no need to try to import - for module_name, method_name in _parse_trace_methods(raw_dd_trace_methods): - trace_method(module_name, method_name) + for module_name, method_name in _parse_trace_methods(raw_dd_trace_methods): + trace_method(module_name, method_name) def trace_method(module, method_name): diff --git a/ddtrace/opentracer/tracer.py b/ddtrace/opentracer/tracer.py index ca10cb8125a..65d1b95b314 100644 --- a/ddtrace/opentracer/tracer.py +++ b/ddtrace/opentracer/tracer.py @@ -18,7 +18,6 @@ from ddtrace.trace import Context as DatadogContext # noqa:F401 from ddtrace.trace import Span as DatadogSpan from ddtrace.trace import Tracer as DatadogTracer -from ddtrace.vendor.debtcollector import deprecate from ..internal.logger import get_logger from .propagation import HTTPPropagator @@ -55,7 +54,7 @@ def __init__( service_name: Optional[str] = None, config: Optional[Dict[str, Any]] = None, scope_manager: Optional[ScopeManager] = None, - dd_tracer: Optional[DatadogTracer] = None, + _dd_tracer: Optional[DatadogTracer] = None, ) -> None: """Initialize a new Datadog opentracer. @@ -70,9 +69,6 @@ def __init__( here: https://github.com/opentracing/opentracing-python#scope-managers. If ``None`` is provided, defaults to :class:`opentracing.scope_managers.ThreadLocalScopeManager`. - :param dd_tracer: (optional) the Datadog tracer for this tracer to use. This - parameter is deprecated and will be removed in v3.0.0. The - to the global tracer (``ddtrace.tracer``) should always be used. 
""" # Merge the given config with the default into a new dict self._config = DEFAULT_CONFIG.copy() @@ -100,14 +96,7 @@ def __init__( self._scope_manager = scope_manager or ThreadLocalScopeManager() dd_context_provider = get_context_provider_for_scope_manager(self._scope_manager) - if dd_tracer is not None: - deprecate( - "The ``dd_tracer`` parameter is deprecated", - message="The global tracer (``ddtrace.tracer``) will be used instead.", - removal_version="3.0.0", - ) - - self._dd_tracer = dd_tracer or ddtrace.tracer + self._dd_tracer = _dd_tracer or ddtrace.tracer self._dd_tracer.set_tags(self._config.get(keys.GLOBAL_TAGS)) # type: ignore[arg-type] trace_processors = None if keys.SETTINGS in self._config: @@ -121,7 +110,7 @@ def __init__( trace_processors=trace_processors, priority_sampling=self._config.get(keys.PRIORITY_SAMPLING), uds_path=self._config.get(keys.UDS_PATH), - context_provider=dd_context_provider, # type: ignore[arg-type] + context_provider=dd_context_provider, ) self._propagators = { Format.HTTP_HEADERS: HTTPPropagator, diff --git a/docs/releasenotes/notes/fix-stackv2-musl-stack-size-7c265de9939ce2ce.yaml b/docs/releasenotes/notes/fix-stackv2-musl-stack-size-7c265de9939ce2ce.yaml new file mode 100644 index 00000000000..cb226c73c34 --- /dev/null +++ b/docs/releasenotes/notes/fix-stackv2-musl-stack-size-7c265de9939ce2ce.yaml @@ -0,0 +1,6 @@ +--- +fixes: + - | + Fixes an issue where Profiling native threads would respect the musl libc + default stack size, which could cause stack overflows in certain + configurations. diff --git a/releasenotes/notes/remove-tracing-attrs-3-0-5743fa668289d5bc.yaml b/releasenotes/notes/remove-tracing-attrs-3-0-5743fa668289d5bc.yaml new file mode 100644 index 00000000000..35ee9378801 --- /dev/null +++ b/releasenotes/notes/remove-tracing-attrs-3-0-5743fa668289d5bc.yaml @@ -0,0 +1,15 @@ +--- +upgrade: + - | + tracer: Removes deprecated parameters from ``Tracer.configure(...)`` method and removes the ``Tracer.sampler`` attribute. + - | + tracing: Drops support for multiple tracer instances, ``ddtrace.trace.Tracer`` can not be reinitialized. + - | + span: Removes the deprecated ``Span.sampled`` property + - | + sampling: Drops support for configuring sampling rules using functions and regex in the ``ddtrace.tracer.sampler.rules[].choose_matcher(...)`` method + and removes the ``timestamp_ns`` parameter from ``ddtrace.internal.rate_limiter.RateLimiter.is_allowed()``. + - | + configurations: Drops support for configuring ``DD_TRACE_METHODS`` with the '[]' notation. Ensure DD_TRACE_METHODS use the ':' notation instead". + - | + opentracing: Removes the deprecated ``ddtracer`` parameter from ``ddtrace.opentracer.tracer.Tracer()``. \ No newline at end of file diff --git a/src/native/Cargo.lock b/src/native/Cargo.lock index bbf189de0c2..a37cf6f4254 100644 --- a/src/native/Cargo.lock +++ b/src/native/Cargo.lock @@ -1,6 +1,6 @@ # This file is automatically @generated by Cargo. # It is not intended for manual editing. 
-version = 3 +version = 4 [[package]] name = "anyhow" @@ -35,7 +35,7 @@ dependencies = [ ] [[package]] -name = "ddtrace-core" +name = "ddtrace-native" version = "0.1.0" dependencies = [ "datadog-ddsketch", diff --git a/tests/integration/test_tracemethods.py b/tests/integration/test_tracemethods.py index 15129c56161..7353c12182a 100644 --- a/tests/integration/test_tracemethods.py +++ b/tests/integration/test_tracemethods.py @@ -27,14 +27,10 @@ "mod.mod2.mod3:Class.test_method,Class.test_method2", [("mod.mod2.mod3", "Class.test_method"), ("mod.mod2.mod3", "Class.test_method2")], ), - ("module[method1, method2]", []), ("module", []), ("module.", []), ("module.method", []), - ("module.method[m1,m2,]", []), ("module.method;module.method", []), - ("module.method[m1];module.method[m1,m2,]", []), - ("module.method[[m1]", []), ], ) def test_trace_methods_parse(dd_trace_methods: str, expected_output: List[Tuple[str, str]]): @@ -43,37 +39,6 @@ def test_trace_methods_parse(dd_trace_methods: str, expected_output: List[Tuple[ assert _parse_trace_methods(dd_trace_methods) == expected_output -def test_legacy_trace_methods_parse(): - from ddtrace.internal.tracemethods import _parse_legacy_trace_methods - - assert _parse_legacy_trace_methods("") == [] - assert _parse_legacy_trace_methods("module[method1]") == ["module.method1"] - assert _parse_legacy_trace_methods("module[method1,method2]") == ["module.method1", "module.method2"] - assert _parse_legacy_trace_methods("module[method1,method2];mod2[m1,m2]") == [ - "module.method1", - "module.method2", - "mod2.m1", - "mod2.m2", - ] - assert _parse_legacy_trace_methods("mod.submod[m1,m2,m3]") == ["mod.submod.m1", "mod.submod.m2", "mod.submod.m3"] - assert _parse_legacy_trace_methods("mod.submod.subsubmod[m1,m2]") == [ - "mod.submod.subsubmod.m1", - "mod.submod.subsubmod.m2", - ] - assert _parse_legacy_trace_methods("mod.mod2.mod3.Class[test_method,test_method2]") == [ - "mod.mod2.mod3.Class.test_method", - "mod.mod2.mod3.Class.test_method2", - ] - assert _parse_legacy_trace_methods("module[method1, method2]") == [] - assert _parse_legacy_trace_methods("module") == [] - assert _parse_legacy_trace_methods("module.") == [] - assert _parse_legacy_trace_methods("module.method") == [] - assert _parse_legacy_trace_methods("module.method[m1,m2,]") == [] - assert _parse_legacy_trace_methods("module.method;module.method") == [] - assert _parse_legacy_trace_methods("module.method[m1];module.method[m1,m2,]") == [] - assert _parse_legacy_trace_methods("module.method[[m1]") == [] - - def _test_method(): pass @@ -105,9 +70,9 @@ def test_method(self): ddtrace_run=True, env=dict( DD_TRACE_METHODS=( - "tests.integration.test_tracemethods[_test_method,_test_method2];" - "tests.integration.test_tracemethods._Class[test_method,test_method2];" - "tests.integration.test_tracemethods._Class.NestedClass[test_method]" + "tests.integration.test_tracemethods:_test_method,_test_method2;" + "tests.integration.test_tracemethods:_Class.test_method,_Class.test_method2;" + "tests.integration.test_tracemethods:_Class.NestedClass.test_method" ) ), ) @@ -139,8 +104,8 @@ async def _async_test_method2(): def test_ddtrace_run_trace_methods_async(ddtrace_run_python_code_in_subprocess): env = os.environ.copy() env["DD_TRACE_METHODS"] = ( - "tests.integration.test_tracemethods[_async_test_method,_async_test_method2];" - "tests.integration.test_tracemethods._Class[async_test_method]" + "tests.integration.test_tracemethods:_async_test_method,_async_test_method2;" + 
"tests.integration.test_tracemethods:_Class.async_test_method" ) tests_dir = os.path.dirname(os.path.dirname(__file__)) env["PYTHONPATH"] = os.pathsep.join([tests_dir, env.get("PYTHONPATH", "")]) diff --git a/tests/opentracer/core/test_dd_compatibility.py b/tests/opentracer/core/test_dd_compatibility.py index 4ba14b0618f..c68b5ca6d6c 100644 --- a/tests/opentracer/core/test_dd_compatibility.py +++ b/tests/opentracer/core/test_dd_compatibility.py @@ -15,14 +15,6 @@ def test_ottracer_uses_global_ddtracer(self): tracer = ddtrace.opentracer.Tracer() assert tracer._dd_tracer is ddtrace.tracer - def test_custom_ddtracer(self): - """A user should be able to specify their own Datadog tracer instance if - they wish. - """ - custom_dd_tracer = ddtrace.trace.Tracer() - tracer = ddtrace.opentracer.Tracer(dd_tracer=custom_dd_tracer) - assert tracer._dd_tracer is custom_dd_tracer - def test_ot_dd_global_tracers(self, global_tracer): """Ensure our test function opentracer_init() prep""" ot_tracer = global_tracer diff --git a/tests/opentracer/core/test_tracer.py b/tests/opentracer/core/test_tracer.py index a0a18ff0dd8..f5534c8f1b0 100644 --- a/tests/opentracer/core/test_tracer.py +++ b/tests/opentracer/core/test_tracer.py @@ -15,8 +15,6 @@ from ddtrace.opentracer.span_context import SpanContext from ddtrace.propagation.http import HTTP_HEADER_TRACE_ID from ddtrace.settings import ConfigException -from ddtrace.trace import Tracer as DDTracer -from tests.utils import override_global_config class TestTracerConfig(object): @@ -69,12 +67,6 @@ def test_invalid_config_key(self): assert ["enabeld", "setttings"] in str(ce_info) # codespell:ignore assert tracer is not None - def test_ddtrace_fallback_config(self): - """Ensure datadog configuration is used by default.""" - with override_global_config(dict(_tracing_enabled=False)): - tracer = Tracer(dd_tracer=DDTracer()) - assert tracer._dd_tracer.enabled is False - def test_global_tags(self): """Global tags should be passed from the opentracer to the tracer.""" config = { diff --git a/tests/opentracer/utils.py b/tests/opentracer/utils.py index 6a34052a385..85b84865ad8 100644 --- a/tests/opentracer/utils.py +++ b/tests/opentracer/utils.py @@ -7,5 +7,5 @@ def init_tracer(service_name, dd_tracer, scope_manager=None): It accepts a Datadog tracer that should be the same one used for testing. 
""" - ot_tracer = Tracer(service_name, dd_tracer=dd_tracer, scope_manager=scope_manager) + ot_tracer = Tracer(service_name, scope_manager=scope_manager, _dd_tracer=dd_tracer) return ot_tracer diff --git a/tests/smoke_test.py b/tests/smoke_test.py index f940d1ac9d2..0281a15a58d 100644 --- a/tests/smoke_test.py +++ b/tests/smoke_test.py @@ -91,9 +91,8 @@ def emit(self, record): platform.system() == "Windows" # libdatadog x86_64-apple-darwin has not yet been integrated to dd-trace-py or (platform.system() == "Darwin" and platform.machine() == "x86_64") - # echion crashes on musl linux with Python 3.12 for both x86_64 and - # aarch64 - or (platform.system() == "Linux" and sys.version_info[:2] == (3, 12) and platform.libc_ver()[0] != "glibc") + # echion only works with 3.8+ + or sys.version_info < (3, 8, 0) ): orig_env = os.environ.copy() copied_env = copy.deepcopy(orig_env) diff --git a/tests/snapshots/tests.contrib.openai.test_openai_v0.test_integration_service_name[None-None].json b/tests/snapshots/tests.contrib.openai.test_openai_v0.test_integration_service_name[None-None].json deleted file mode 100644 index 65eec00d960..00000000000 --- a/tests/snapshots/tests.contrib.openai.test_openai_v0.test_integration_service_name[None-None].json +++ /dev/null @@ -1,77 +0,0 @@ -[[ - { - "name": "openai.request", - "service": "ddtrace_subprocess_dir", - "resource": "createCompletion", - "trace_id": 0, - "span_id": 1, - "parent_id": 0, - "type": "", - "error": 0, - "meta": { - "_dd.p.dm": "-0", - "_dd.p.tid": "65674d7100000000", - "component": "openai", - "language": "python", - "openai.api_base": "https://api.openai.com/v1", - "openai.api_type": "open_ai", - "openai.organization.name": "datadog-4", - "openai.request.client": "OpenAI", - "openai.request.endpoint": "/v1/completions", - "openai.request.max_tokens": "10", - "openai.request.method": "POST", - "openai.request.model": "ada", - "openai.request.n": "2", - "openai.request.prompt.0": "Hello world", - "openai.request.stop": ".", - "openai.request.temperature": "0.8", - "openai.response.choices.0.finish_reason": "length", - "openai.response.choices.0.text": ", relax!\u201d I said to my laptop", - "openai.response.choices.1.finish_reason": "stop", - "openai.response.choices.1.text": " (1", - "openai.response.created": "1681852797", - "openai.response.id": "cmpl-76n1xLvRKv3mfjx7hJ41UHrHy9ar6", - "openai.response.model": "ada", - "openai.user.api_key": "sk-...key>", - "runtime-id": "8c92d3e850d9413593bf481d805039d1" - }, - "metrics": { - "_dd.measured": 1, - "_dd.top_level": 1, - "_dd.tracer_kr": 1.0, - "_sampling_priority_v1": 1, - "openai.organization.ratelimit.requests.limit": 3000, - "openai.organization.ratelimit.requests.remaining": 2999, - "openai.organization.ratelimit.tokens.limit": 250000, - "openai.organization.ratelimit.tokens.remaining": 249979, - "process_id": 20673 - }, - "duration": 21745000, - "start": 1701268849462298000 - }, - { - "name": "requests.request", - "service": "openai", - "resource": "POST /v1/completions", - "trace_id": 0, - "span_id": 2, - "parent_id": 1, - "type": "http", - "error": 0, - "meta": { - "_dd.base_service": "ddtrace_subprocess_dir", - "component": "requests", - "http.method": "POST", - "http.status_code": "200", - "http.url": "https://api.openai.com/v1/completions", - "http.useragent": "OpenAI/v1 PythonBindings/0.27.2", - "out.host": "api.openai.com", - "span.kind": "client" - }, - "metrics": { - "_dd.measured": 1, - "_dd.top_level": 1 - }, - "duration": 2999000, - "start": 1701268849479960000 - }]] diff 
--git a/tests/snapshots/tests.contrib.openai.test_openai_v0.test_integration_service_name[None-v0].json b/tests/snapshots/tests.contrib.openai.test_openai_v0.test_integration_service_name[None-v0].json deleted file mode 100644 index d6417fb5667..00000000000 --- a/tests/snapshots/tests.contrib.openai.test_openai_v0.test_integration_service_name[None-v0].json +++ /dev/null @@ -1,77 +0,0 @@ -[[ - { - "name": "openai.request", - "service": "ddtrace_subprocess_dir", - "resource": "createCompletion", - "trace_id": 0, - "span_id": 1, - "parent_id": 0, - "type": "", - "error": 0, - "meta": { - "_dd.p.dm": "-0", - "_dd.p.tid": "65674d7200000000", - "component": "openai", - "language": "python", - "openai.api_base": "https://api.openai.com/v1", - "openai.api_type": "open_ai", - "openai.organization.name": "datadog-4", - "openai.request.client": "OpenAI", - "openai.request.endpoint": "/v1/completions", - "openai.request.max_tokens": "10", - "openai.request.method": "POST", - "openai.request.model": "ada", - "openai.request.n": "2", - "openai.request.prompt.0": "Hello world", - "openai.request.stop": ".", - "openai.request.temperature": "0.8", - "openai.response.choices.0.finish_reason": "length", - "openai.response.choices.0.text": ", relax!\u201d I said to my laptop", - "openai.response.choices.1.finish_reason": "stop", - "openai.response.choices.1.text": " (1", - "openai.response.created": "1681852797", - "openai.response.id": "cmpl-76n1xLvRKv3mfjx7hJ41UHrHy9ar6", - "openai.response.model": "ada", - "openai.user.api_key": "sk-...key>", - "runtime-id": "675032183b244929ba8c3a0a1c0021e5" - }, - "metrics": { - "_dd.measured": 1, - "_dd.top_level": 1, - "_dd.tracer_kr": 1.0, - "_sampling_priority_v1": 1, - "openai.organization.ratelimit.requests.limit": 3000, - "openai.organization.ratelimit.requests.remaining": 2999, - "openai.organization.ratelimit.tokens.limit": 250000, - "openai.organization.ratelimit.tokens.remaining": 249979, - "process_id": 20696 - }, - "duration": 20412000, - "start": 1701268850764763000 - }, - { - "name": "requests.request", - "service": "openai", - "resource": "POST /v1/completions", - "trace_id": 0, - "span_id": 2, - "parent_id": 1, - "type": "http", - "error": 0, - "meta": { - "_dd.base_service": "ddtrace_subprocess_dir", - "component": "requests", - "http.method": "POST", - "http.status_code": "200", - "http.url": "https://api.openai.com/v1/completions", - "http.useragent": "OpenAI/v1 PythonBindings/0.27.2", - "out.host": "api.openai.com", - "span.kind": "client" - }, - "metrics": { - "_dd.measured": 1, - "_dd.top_level": 1 - }, - "duration": 3134000, - "start": 1701268850780901000 - }]] diff --git a/tests/snapshots/tests.contrib.openai.test_openai_v0.test_integration_service_name[None-v1].json b/tests/snapshots/tests.contrib.openai.test_openai_v0.test_integration_service_name[None-v1].json deleted file mode 100644 index 979ea768ef5..00000000000 --- a/tests/snapshots/tests.contrib.openai.test_openai_v0.test_integration_service_name[None-v1].json +++ /dev/null @@ -1,77 +0,0 @@ -[[ - { - "name": "openai.request", - "service": "ddtrace_subprocess_dir", - "resource": "createCompletion", - "trace_id": 0, - "span_id": 1, - "parent_id": 0, - "type": "", - "error": 0, - "meta": { - "_dd.p.dm": "-0", - "_dd.p.tid": "65674d7400000000", - "component": "openai", - "language": "python", - "openai.api_base": "https://api.openai.com/v1", - "openai.api_type": "open_ai", - "openai.organization.name": "datadog-4", - "openai.request.client": "OpenAI", - "openai.request.endpoint": 
"/v1/completions", - "openai.request.max_tokens": "10", - "openai.request.method": "POST", - "openai.request.model": "ada", - "openai.request.n": "2", - "openai.request.prompt.0": "Hello world", - "openai.request.stop": ".", - "openai.request.temperature": "0.8", - "openai.response.choices.0.finish_reason": "length", - "openai.response.choices.0.text": ", relax!\u201d I said to my laptop", - "openai.response.choices.1.finish_reason": "stop", - "openai.response.choices.1.text": " (1", - "openai.response.created": "1681852797", - "openai.response.id": "cmpl-76n1xLvRKv3mfjx7hJ41UHrHy9ar6", - "openai.response.model": "ada", - "openai.user.api_key": "sk-...key>", - "runtime-id": "1f3499a720954236be60cf0fece4246c" - }, - "metrics": { - "_dd.measured": 1, - "_dd.top_level": 1, - "_dd.tracer_kr": 1.0, - "_sampling_priority_v1": 1, - "openai.organization.ratelimit.requests.limit": 3000, - "openai.organization.ratelimit.requests.remaining": 2999, - "openai.organization.ratelimit.tokens.limit": 250000, - "openai.organization.ratelimit.tokens.remaining": 249979, - "process_id": 20714 - }, - "duration": 19970000, - "start": 1701268852029562000 - }, - { - "name": "http.client.request", - "service": "ddtrace_subprocess_dir", - "resource": "POST /v1/completions", - "trace_id": 0, - "span_id": 2, - "parent_id": 1, - "type": "http", - "error": 0, - "meta": { - "_dd.peer.service.source": "out.host", - "component": "requests", - "http.method": "POST", - "http.status_code": "200", - "http.url": "https://api.openai.com/v1/completions", - "http.useragent": "OpenAI/v1 PythonBindings/0.27.2", - "out.host": "api.openai.com", - "peer.service": "api.openai.com", - "span.kind": "client" - }, - "metrics": { - "_dd.measured": 1 - }, - "duration": 2897000, - "start": 1701268852045569000 - }]] diff --git a/tests/snapshots/tests.contrib.openai.test_openai_v0.test_integration_service_name[mysvc-None].json b/tests/snapshots/tests.contrib.openai.test_openai_v0.test_integration_service_name[mysvc-None].json deleted file mode 100644 index a80c1218caf..00000000000 --- a/tests/snapshots/tests.contrib.openai.test_openai_v0.test_integration_service_name[mysvc-None].json +++ /dev/null @@ -1,77 +0,0 @@ -[[ - { - "name": "openai.request", - "service": "mysvc", - "resource": "createCompletion", - "trace_id": 0, - "span_id": 1, - "parent_id": 0, - "type": "", - "error": 0, - "meta": { - "_dd.p.dm": "-0", - "_dd.p.tid": "65674d7500000000", - "component": "openai", - "language": "python", - "openai.api_base": "https://api.openai.com/v1", - "openai.api_type": "open_ai", - "openai.organization.name": "datadog-4", - "openai.request.client": "OpenAI", - "openai.request.endpoint": "/v1/completions", - "openai.request.max_tokens": "10", - "openai.request.method": "POST", - "openai.request.model": "ada", - "openai.request.n": "2", - "openai.request.prompt.0": "Hello world", - "openai.request.stop": ".", - "openai.request.temperature": "0.8", - "openai.response.choices.0.finish_reason": "length", - "openai.response.choices.0.text": ", relax!\u201d I said to my laptop", - "openai.response.choices.1.finish_reason": "stop", - "openai.response.choices.1.text": " (1", - "openai.response.created": "1681852797", - "openai.response.id": "cmpl-76n1xLvRKv3mfjx7hJ41UHrHy9ar6", - "openai.response.model": "ada", - "openai.user.api_key": "sk-...key>", - "runtime-id": "1244eea37568412fb5bdedf9c37ed48a" - }, - "metrics": { - "_dd.measured": 1, - "_dd.top_level": 1, - "_dd.tracer_kr": 1.0, - "_sampling_priority_v1": 1, - 
"openai.organization.ratelimit.requests.limit": 3000, - "openai.organization.ratelimit.requests.remaining": 2999, - "openai.organization.ratelimit.tokens.limit": 250000, - "openai.organization.ratelimit.tokens.remaining": 249979, - "process_id": 20736 - }, - "duration": 19953000, - "start": 1701268853284736000 - }, - { - "name": "requests.request", - "service": "openai", - "resource": "POST /v1/completions", - "trace_id": 0, - "span_id": 2, - "parent_id": 1, - "type": "http", - "error": 0, - "meta": { - "_dd.base_service": "mysvc", - "component": "requests", - "http.method": "POST", - "http.status_code": "200", - "http.url": "https://api.openai.com/v1/completions", - "http.useragent": "OpenAI/v1 PythonBindings/0.27.2", - "out.host": "api.openai.com", - "span.kind": "client" - }, - "metrics": { - "_dd.measured": 1, - "_dd.top_level": 1 - }, - "duration": 2837000, - "start": 1701268853300833000 - }]] diff --git a/tests/snapshots/tests.contrib.openai.test_openai_v0.test_integration_service_name[mysvc-v0].json b/tests/snapshots/tests.contrib.openai.test_openai_v0.test_integration_service_name[mysvc-v0].json deleted file mode 100644 index f3f9c57f768..00000000000 --- a/tests/snapshots/tests.contrib.openai.test_openai_v0.test_integration_service_name[mysvc-v0].json +++ /dev/null @@ -1,77 +0,0 @@ -[[ - { - "name": "openai.request", - "service": "mysvc", - "resource": "createCompletion", - "trace_id": 0, - "span_id": 1, - "parent_id": 0, - "type": "", - "error": 0, - "meta": { - "_dd.p.dm": "-0", - "_dd.p.tid": "65674d7600000000", - "component": "openai", - "language": "python", - "openai.api_base": "https://api.openai.com/v1", - "openai.api_type": "open_ai", - "openai.organization.name": "datadog-4", - "openai.request.client": "OpenAI", - "openai.request.endpoint": "/v1/completions", - "openai.request.max_tokens": "10", - "openai.request.method": "POST", - "openai.request.model": "ada", - "openai.request.n": "2", - "openai.request.prompt.0": "Hello world", - "openai.request.stop": ".", - "openai.request.temperature": "0.8", - "openai.response.choices.0.finish_reason": "length", - "openai.response.choices.0.text": ", relax!\u201d I said to my laptop", - "openai.response.choices.1.finish_reason": "stop", - "openai.response.choices.1.text": " (1", - "openai.response.created": "1681852797", - "openai.response.id": "cmpl-76n1xLvRKv3mfjx7hJ41UHrHy9ar6", - "openai.response.model": "ada", - "openai.user.api_key": "sk-...key>", - "runtime-id": "12b4a711854c44f681695957b545dcf5" - }, - "metrics": { - "_dd.measured": 1, - "_dd.top_level": 1, - "_dd.tracer_kr": 1.0, - "_sampling_priority_v1": 1, - "openai.organization.ratelimit.requests.limit": 3000, - "openai.organization.ratelimit.requests.remaining": 2999, - "openai.organization.ratelimit.tokens.limit": 250000, - "openai.organization.ratelimit.tokens.remaining": 249979, - "process_id": 20750 - }, - "duration": 25352000, - "start": 1701268854568669000 - }, - { - "name": "requests.request", - "service": "openai", - "resource": "POST /v1/completions", - "trace_id": 0, - "span_id": 2, - "parent_id": 1, - "type": "http", - "error": 0, - "meta": { - "_dd.base_service": "mysvc", - "component": "requests", - "http.method": "POST", - "http.status_code": "200", - "http.url": "https://api.openai.com/v1/completions", - "http.useragent": "OpenAI/v1 PythonBindings/0.27.2", - "out.host": "api.openai.com", - "span.kind": "client" - }, - "metrics": { - "_dd.measured": 1, - "_dd.top_level": 1 - }, - "duration": 3922000, - "start": 1701268854588758000 - }]] diff --git 
a/tests/snapshots/tests.contrib.openai.test_openai_v0.test_integration_service_name[mysvc-v1].json b/tests/snapshots/tests.contrib.openai.test_openai_v0.test_integration_service_name[mysvc-v1].json deleted file mode 100644 index 0696ae54454..00000000000 --- a/tests/snapshots/tests.contrib.openai.test_openai_v0.test_integration_service_name[mysvc-v1].json +++ /dev/null @@ -1,77 +0,0 @@ -[[ - { - "name": "openai.request", - "service": "mysvc", - "resource": "createCompletion", - "trace_id": 0, - "span_id": 1, - "parent_id": 0, - "type": "", - "error": 0, - "meta": { - "_dd.p.dm": "-0", - "_dd.p.tid": "65674d7700000000", - "component": "openai", - "language": "python", - "openai.api_base": "https://api.openai.com/v1", - "openai.api_type": "open_ai", - "openai.organization.name": "datadog-4", - "openai.request.client": "OpenAI", - "openai.request.endpoint": "/v1/completions", - "openai.request.max_tokens": "10", - "openai.request.method": "POST", - "openai.request.model": "ada", - "openai.request.n": "2", - "openai.request.prompt.0": "Hello world", - "openai.request.stop": ".", - "openai.request.temperature": "0.8", - "openai.response.choices.0.finish_reason": "length", - "openai.response.choices.0.text": ", relax!\u201d I said to my laptop", - "openai.response.choices.1.finish_reason": "stop", - "openai.response.choices.1.text": " (1", - "openai.response.created": "1681852797", - "openai.response.id": "cmpl-76n1xLvRKv3mfjx7hJ41UHrHy9ar6", - "openai.response.model": "ada", - "openai.user.api_key": "sk-...key>", - "runtime-id": "03e7664126ea4fe99e0aefec4efd003c" - }, - "metrics": { - "_dd.measured": 1, - "_dd.top_level": 1, - "_dd.tracer_kr": 1.0, - "_sampling_priority_v1": 1, - "openai.organization.ratelimit.requests.limit": 3000, - "openai.organization.ratelimit.requests.remaining": 2999, - "openai.organization.ratelimit.tokens.limit": 250000, - "openai.organization.ratelimit.tokens.remaining": 249979, - "process_id": 20772 - }, - "duration": 19966000, - "start": 1701268855885252000 - }, - { - "name": "http.client.request", - "service": "mysvc", - "resource": "POST /v1/completions", - "trace_id": 0, - "span_id": 2, - "parent_id": 1, - "type": "http", - "error": 0, - "meta": { - "_dd.peer.service.source": "out.host", - "component": "requests", - "http.method": "POST", - "http.status_code": "200", - "http.url": "https://api.openai.com/v1/completions", - "http.useragent": "OpenAI/v1 PythonBindings/0.27.2", - "out.host": "api.openai.com", - "peer.service": "api.openai.com", - "span.kind": "client" - }, - "metrics": { - "_dd.measured": 1 - }, - "duration": 2849000, - "start": 1701268855901267000 - }]] diff --git a/tests/tracer/test_sampler.py b/tests/tracer/test_sampler.py index 813dc1be439..22620d8184b 100644 --- a/tests/tracer/test_sampler.py +++ b/tests/tracer/test_sampler.py @@ -1,6 +1,5 @@ from __future__ import division -import re import unittest import mock @@ -250,7 +249,7 @@ def test_sampling_rule_init_defaults(): def test_sampling_rule_init(): - a_regex = re.compile(r"\.request$") + a_regex = "*request" a_string = "my-service" rule = SamplingRule( @@ -261,7 +260,7 @@ def test_sampling_rule_init(): assert rule.sample_rate == 0.0, "SamplingRule should store the rate it's initialized with" assert rule.service.pattern == a_string, "SamplingRule should store the service it's initialized with" - assert rule.name == a_regex, "SamplingRule should store the name regex it's initialized with" + assert rule.name.pattern == a_regex, "SamplingRule should store the name regex it's initialized with" 
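For reference, the glob-only matching these sampler tests now exercise can be sketched as follows. This is a minimal illustration, not code from the test suite; the service and name values are made up:

    from ddtrace._trace.sampling_rule import SamplingRule

    # Plain strings replace re.compile(...) patterns and callables; they are
    # compiled into GlobMatcher instances internally.
    rule = SamplingRule(sample_rate=0.5, service="my-*", name="test.span")
    assert rule.service.pattern == "my-*"  # GlobMatcher keeps the original pattern string
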
@pytest.mark.parametrize( @@ -272,38 +271,13 @@ def test_sampling_rule_init(): (SamplingRule(sample_rate=0.0), SamplingRule(sample_rate=0.0), True), (SamplingRule(sample_rate=0.5), SamplingRule(sample_rate=1.0), False), (SamplingRule(sample_rate=1.0, service="my-svc"), SamplingRule(sample_rate=1.0, service="my-svc"), True), - ( - SamplingRule(sample_rate=1.0, service=re.compile("my-svc")), - SamplingRule(sample_rate=1.0, service=re.compile("my-svc")), - True, - ), (SamplingRule(sample_rate=1.0, service="my-svc"), SamplingRule(sample_rate=1.0, service="other-svc"), False), (SamplingRule(sample_rate=1.0, service="my-svc"), SamplingRule(sample_rate=0.5, service="my-svc"), False), - ( - SamplingRule(sample_rate=1.0, service=re.compile("my-svc")), - SamplingRule(sample_rate=0.5, service=re.compile("my-svc")), - False, - ), - ( - SamplingRule(sample_rate=1.0, service=re.compile("my-svc")), - SamplingRule(sample_rate=1.0, service=re.compile("other")), - False, - ), ( SamplingRule(sample_rate=1.0, name="span.name"), SamplingRule(sample_rate=1.0, name="span.name"), True, ), - ( - SamplingRule(sample_rate=1.0, name=re.compile("span.name")), - SamplingRule(sample_rate=1.0, name=re.compile("span.name")), - True, - ), - ( - SamplingRule(sample_rate=1.0, name=re.compile("span.name")), - SamplingRule(sample_rate=1.0, name=re.compile("span.other")), - False, - ), ( SamplingRule(sample_rate=1.0, name="span.name"), SamplingRule(sample_rate=0.5, name="span.name"), @@ -316,16 +290,6 @@ def test_sampling_rule_init(): SamplingRule(sample_rate=1.0, service="my-svc", name="span.name"), True, ), - ( - SamplingRule(sample_rate=1.0, service="my-svc", name=re.compile("span.name")), - SamplingRule(sample_rate=1.0, service="my-svc", name=re.compile("span.name")), - True, - ), - ( - SamplingRule(sample_rate=1.0, service=re.compile("my-svc"), name=re.compile("span.name")), - SamplingRule(sample_rate=1.0, service=re.compile("my-svc"), name=re.compile("span.name")), - True, - ), ( SamplingRule(sample_rate=1.0, service="my-svc", name="span.name"), SamplingRule(sample_rate=0.5, service="my-svc", name="span.name"), @@ -491,15 +455,6 @@ def test_sampling_rule_init_via_env(): ("test.span", None, False), ("test.span", "test.span", True), ("test.span", "test_span", False), - ("test.span", re.compile(r"^test\.span$"), True), - ("test_span", re.compile(r"^test.span$"), True), - ("test.span", re.compile(r"^test_span$"), False), - ("test.span", re.compile(r"test"), True), - ("test.span", re.compile(r"test\.span|another\.span"), True), - ("another.span", re.compile(r"test\.span|another\.span"), True), - ("test.span", lambda name: "span" in name, True), - ("test.span", lambda name: "span" not in name, False), - ("test.span", lambda name: 1 / 0, False), ] ], ) @@ -518,20 +473,8 @@ def test_sampling_rule_matches_name(span, rule, span_expected_to_match_rule): ("my-service", None, False), (None, "tests.tracer", True), ("tests.tracer", "my-service", False), - ("tests.tracer", re.compile(r"my-service"), False), - ("tests.tracer", lambda service: "service" in service, False), ("my-service", "my-service", True), ("my-service", "my_service", False), - ("my-service", re.compile(r"^my-"), True), - ("my_service", re.compile(r"^my[_-]"), True), - ("my-service", re.compile(r"^my_"), False), - ("my-service", re.compile(r"my-service"), True), - ("my-service", re.compile(r"my"), True), - ("my-service", re.compile(r"my-service|another-service"), True), - ("another-service", re.compile(r"my-service|another-service"), True), - ("my-service", lambda 
service: "service" in service, True), - ("my-service", lambda service: "service" not in service, False), - ("my-service", lambda service: 1 / 0, False), ] ], ) @@ -553,7 +496,7 @@ def test_sampling_rule_matches_service(span, rule, span_expected_to_match_rule): SamplingRule( sample_rate=1, name="test.span", - service=re.compile(r"^my-"), + service="my-*", ), True, ), @@ -567,7 +510,7 @@ def test_sampling_rule_matches_service(span, rule, span_expected_to_match_rule): SamplingRule( sample_rate=0, name="test.span", - service=re.compile(r"^my-"), + service="my-*", ), True, ), @@ -580,7 +523,7 @@ def test_sampling_rule_matches_service(span, rule, span_expected_to_match_rule): SamplingRule( sample_rate=1, name="test_span", - service=re.compile(r"^my-"), + service="my-*", ), False, ), @@ -593,7 +536,7 @@ def test_sampling_rule_matches_service(span, rule, span_expected_to_match_rule): SamplingRule( sample_rate=1, name="test.span", - service=re.compile(r"^service-"), + service="service-", ), False, ), @@ -605,26 +548,6 @@ def test_sampling_rule_matches(span, rule, span_expected_to_match_rule): ) -def test_sampling_rule_matches_exception(): - def pattern(prop): - raise Exception("an error occurred") - - rule = SamplingRule(sample_rate=1.0, name=pattern) - span = create_span(name="test.span") - - with mock.patch("ddtrace._trace.sampling_rule.log") as mock_log: - assert ( - rule.matches(span) is False - ), "SamplingRule should not match when its name pattern function throws an exception" - mock_log.warning.assert_called_once_with( - "%r pattern %r failed with %r", - rule, - pattern, - "test.span", - exc_info=True, - ) - - @pytest.mark.subprocess( parametrize={"DD_TRACE_128_BIT_TRACEID_GENERATION_ENABLED": ["true", "false"]}, )