diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index ffc0a741fc..67b4fd3546 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -25,7 +25,7 @@ jobs: steps: - uses: actions/checkout@v5.0.0 - - uses: actions/setup-python@v5 + - uses: actions/setup-python@v6 with: python-version: 3.12 @@ -40,7 +40,7 @@ jobs: steps: - uses: actions/checkout@v5.0.0 - - uses: actions/setup-python@v5 + - uses: actions/setup-python@v6 with: python-version: 3.12 @@ -59,7 +59,7 @@ jobs: steps: - uses: actions/checkout@v5.0.0 - - uses: actions/setup-python@v5 + - uses: actions/setup-python@v6 with: python-version: 3.12 - name: Setup build cache @@ -90,7 +90,7 @@ jobs: steps: - uses: actions/checkout@v5.0.0 - - uses: actions/setup-python@v5 + - uses: actions/setup-python@v6 with: python-version: 3.12 diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index f5e952d0de..68aeebf2b7 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -20,7 +20,7 @@ jobs: steps: - name: Get auth token id: token - uses: actions/create-github-app-token@a8d616148505b5069dccd32f177bb87d7f39123b # v2.1.1 + uses: actions/create-github-app-token@67018539274d69449ef7c02e8e71183d1719ab42 # v2.1.4 with: app-id: ${{ vars.SENTRY_RELEASE_BOT_CLIENT_ID }} private-key: ${{ secrets.SENTRY_RELEASE_BOT_PRIVATE_KEY }} diff --git a/.github/workflows/test-integrations-ai.yml b/.github/workflows/test-integrations-ai.yml index 26a8bdb8bb..972df704e0 100644 --- a/.github/workflows/test-integrations-ai.yml +++ b/.github/workflows/test-integrations-ai.yml @@ -39,7 +39,7 @@ jobs: container: ${{ matrix.python-version == '3.6' && 'python:3.6' || null }} steps: - uses: actions/checkout@v5.0.0 - - uses: actions/setup-python@v5 + - uses: actions/setup-python@v6 if: ${{ matrix.python-version != '3.6' }} with: python-version: ${{ matrix.python-version }} @@ -99,7 +99,7 @@ jobs: coverage xml - name: Upload coverage to Codecov if: ${{ !cancelled() }} - uses: codecov/codecov-action@v5.5.0 + uses: codecov/codecov-action@v5.5.1 with: token: ${{ secrets.CODECOV_TOKEN }} files: coverage.xml diff --git a/.github/workflows/test-integrations-cloud.yml b/.github/workflows/test-integrations-cloud.yml index 62e70d759d..6aeaea8c3a 100644 --- a/.github/workflows/test-integrations-cloud.yml +++ b/.github/workflows/test-integrations-cloud.yml @@ -43,7 +43,7 @@ jobs: container: ${{ matrix.python-version == '3.6' && 'python:3.6' || null }} steps: - uses: actions/checkout@v5.0.0 - - uses: actions/setup-python@v5 + - uses: actions/setup-python@v6 if: ${{ matrix.python-version != '3.6' }} with: python-version: ${{ matrix.python-version }} @@ -87,7 +87,7 @@ jobs: coverage xml - name: Upload coverage to Codecov if: ${{ !cancelled() }} - uses: codecov/codecov-action@v5.5.0 + uses: codecov/codecov-action@v5.5.1 with: token: ${{ secrets.CODECOV_TOKEN }} files: coverage.xml diff --git a/.github/workflows/test-integrations-common.yml b/.github/workflows/test-integrations-common.yml index 1c0c9b80d2..b682428dd1 100644 --- a/.github/workflows/test-integrations-common.yml +++ b/.github/workflows/test-integrations-common.yml @@ -39,7 +39,7 @@ jobs: container: ${{ matrix.python-version == '3.6' && 'python:3.6' || null }} steps: - uses: actions/checkout@v5.0.0 - - uses: actions/setup-python@v5 + - uses: actions/setup-python@v6 if: ${{ matrix.python-version != '3.6' }} with: python-version: ${{ matrix.python-version }} @@ -67,7 +67,7 @@ jobs: coverage xml - name: Upload coverage to Codecov if: ${{ !cancelled() }} - 
uses: codecov/codecov-action@v5.5.0 + uses: codecov/codecov-action@v5.5.1 with: token: ${{ secrets.CODECOV_TOKEN }} files: coverage.xml diff --git a/.github/workflows/test-integrations-dbs.yml b/.github/workflows/test-integrations-dbs.yml index 2d6af43bc3..efa9f8db39 100644 --- a/.github/workflows/test-integrations-dbs.yml +++ b/.github/workflows/test-integrations-dbs.yml @@ -57,7 +57,7 @@ jobs: container: ${{ matrix.python-version == '3.6' && 'python:3.6' || null }} steps: - uses: actions/checkout@v5.0.0 - - uses: actions/setup-python@v5 + - uses: actions/setup-python@v6 if: ${{ matrix.python-version != '3.6' }} with: python-version: ${{ matrix.python-version }} @@ -107,7 +107,7 @@ jobs: coverage xml - name: Upload coverage to Codecov if: ${{ !cancelled() }} - uses: codecov/codecov-action@v5.5.0 + uses: codecov/codecov-action@v5.5.1 with: token: ${{ secrets.CODECOV_TOKEN }} files: coverage.xml @@ -156,7 +156,7 @@ jobs: container: ${{ matrix.python-version == '3.6' && 'python:3.6' || null }} steps: - uses: actions/checkout@v5.0.0 - - uses: actions/setup-python@v5 + - uses: actions/setup-python@v6 if: ${{ matrix.python-version != '3.6' }} with: python-version: ${{ matrix.python-version }} @@ -206,7 +206,7 @@ jobs: coverage xml - name: Upload coverage to Codecov if: ${{ !cancelled() }} - uses: codecov/codecov-action@v5.5.0 + uses: codecov/codecov-action@v5.5.1 with: token: ${{ secrets.CODECOV_TOKEN }} files: coverage.xml diff --git a/.github/workflows/test-integrations-flags.yml b/.github/workflows/test-integrations-flags.yml index f744f514ee..d7baeeb870 100644 --- a/.github/workflows/test-integrations-flags.yml +++ b/.github/workflows/test-integrations-flags.yml @@ -39,7 +39,7 @@ jobs: container: ${{ matrix.python-version == '3.6' && 'python:3.6' || null }} steps: - uses: actions/checkout@v5.0.0 - - uses: actions/setup-python@v5 + - uses: actions/setup-python@v6 if: ${{ matrix.python-version != '3.6' }} with: python-version: ${{ matrix.python-version }} @@ -79,7 +79,7 @@ jobs: coverage xml - name: Upload coverage to Codecov if: ${{ !cancelled() }} - uses: codecov/codecov-action@v5.5.0 + uses: codecov/codecov-action@v5.5.1 with: token: ${{ secrets.CODECOV_TOKEN }} files: coverage.xml diff --git a/.github/workflows/test-integrations-gevent.yml b/.github/workflows/test-integrations-gevent.yml index 382e6a5f15..9af6b4d7af 100644 --- a/.github/workflows/test-integrations-gevent.yml +++ b/.github/workflows/test-integrations-gevent.yml @@ -39,7 +39,7 @@ jobs: container: ${{ matrix.python-version == '3.6' && 'python:3.6' || null }} steps: - uses: actions/checkout@v5.0.0 - - uses: actions/setup-python@v5 + - uses: actions/setup-python@v6 if: ${{ matrix.python-version != '3.6' }} with: python-version: ${{ matrix.python-version }} @@ -67,7 +67,7 @@ jobs: coverage xml - name: Upload coverage to Codecov if: ${{ !cancelled() }} - uses: codecov/codecov-action@v5.5.0 + uses: codecov/codecov-action@v5.5.1 with: token: ${{ secrets.CODECOV_TOKEN }} files: coverage.xml diff --git a/.github/workflows/test-integrations-graphql.yml b/.github/workflows/test-integrations-graphql.yml index 93675fb4fe..5c306dff3f 100644 --- a/.github/workflows/test-integrations-graphql.yml +++ b/.github/workflows/test-integrations-graphql.yml @@ -39,7 +39,7 @@ jobs: container: ${{ matrix.python-version == '3.6' && 'python:3.6' || null }} steps: - uses: actions/checkout@v5.0.0 - - uses: actions/setup-python@v5 + - uses: actions/setup-python@v6 if: ${{ matrix.python-version != '3.6' }} with: python-version: ${{ matrix.python-version }} 
@@ -79,7 +79,7 @@ jobs: coverage xml - name: Upload coverage to Codecov if: ${{ !cancelled() }} - uses: codecov/codecov-action@v5.5.0 + uses: codecov/codecov-action@v5.5.1 with: token: ${{ secrets.CODECOV_TOKEN }} files: coverage.xml diff --git a/.github/workflows/test-integrations-misc.yml b/.github/workflows/test-integrations-misc.yml index e8937708bc..005e8395a2 100644 --- a/.github/workflows/test-integrations-misc.yml +++ b/.github/workflows/test-integrations-misc.yml @@ -39,7 +39,7 @@ jobs: container: ${{ matrix.python-version == '3.6' && 'python:3.6' || null }} steps: - uses: actions/checkout@v5.0.0 - - uses: actions/setup-python@v5 + - uses: actions/setup-python@v6 if: ${{ matrix.python-version != '3.6' }} with: python-version: ${{ matrix.python-version }} @@ -87,7 +87,7 @@ jobs: coverage xml - name: Upload coverage to Codecov if: ${{ !cancelled() }} - uses: codecov/codecov-action@v5.5.0 + uses: codecov/codecov-action@v5.5.1 with: token: ${{ secrets.CODECOV_TOKEN }} files: coverage.xml diff --git a/.github/workflows/test-integrations-network.yml b/.github/workflows/test-integrations-network.yml index 867681d3a3..e34706ff09 100644 --- a/.github/workflows/test-integrations-network.yml +++ b/.github/workflows/test-integrations-network.yml @@ -39,7 +39,7 @@ jobs: container: ${{ matrix.python-version == '3.6' && 'python:3.6' || null }} steps: - uses: actions/checkout@v5.0.0 - - uses: actions/setup-python@v5 + - uses: actions/setup-python@v6 if: ${{ matrix.python-version != '3.6' }} with: python-version: ${{ matrix.python-version }} @@ -75,7 +75,7 @@ jobs: coverage xml - name: Upload coverage to Codecov if: ${{ !cancelled() }} - uses: codecov/codecov-action@v5.5.0 + uses: codecov/codecov-action@v5.5.1 with: token: ${{ secrets.CODECOV_TOKEN }} files: coverage.xml @@ -106,7 +106,7 @@ jobs: container: ${{ matrix.python-version == '3.6' && 'python:3.6' || null }} steps: - uses: actions/checkout@v5.0.0 - - uses: actions/setup-python@v5 + - uses: actions/setup-python@v6 if: ${{ matrix.python-version != '3.6' }} with: python-version: ${{ matrix.python-version }} @@ -142,7 +142,7 @@ jobs: coverage xml - name: Upload coverage to Codecov if: ${{ !cancelled() }} - uses: codecov/codecov-action@v5.5.0 + uses: codecov/codecov-action@v5.5.1 with: token: ${{ secrets.CODECOV_TOKEN }} files: coverage.xml diff --git a/.github/workflows/test-integrations-tasks.yml b/.github/workflows/test-integrations-tasks.yml index f842683285..0038f1d050 100644 --- a/.github/workflows/test-integrations-tasks.yml +++ b/.github/workflows/test-integrations-tasks.yml @@ -39,7 +39,7 @@ jobs: container: ${{ matrix.python-version == '3.6' && 'python:3.6' || null }} steps: - uses: actions/checkout@v5.0.0 - - uses: actions/setup-python@v5 + - uses: actions/setup-python@v6 if: ${{ matrix.python-version != '3.6' }} with: python-version: ${{ matrix.python-version }} @@ -102,7 +102,7 @@ jobs: coverage xml - name: Upload coverage to Codecov if: ${{ !cancelled() }} - uses: codecov/codecov-action@v5.5.0 + uses: codecov/codecov-action@v5.5.1 with: token: ${{ secrets.CODECOV_TOKEN }} files: coverage.xml @@ -133,7 +133,7 @@ jobs: container: ${{ matrix.python-version == '3.6' && 'python:3.6' || null }} steps: - uses: actions/checkout@v5.0.0 - - uses: actions/setup-python@v5 + - uses: actions/setup-python@v6 if: ${{ matrix.python-version != '3.6' }} with: python-version: ${{ matrix.python-version }} @@ -196,7 +196,7 @@ jobs: coverage xml - name: Upload coverage to Codecov if: ${{ !cancelled() }} - uses: codecov/codecov-action@v5.5.0 + uses: 
codecov/codecov-action@v5.5.1 with: token: ${{ secrets.CODECOV_TOKEN }} files: coverage.xml diff --git a/.github/workflows/test-integrations-web-1.yml b/.github/workflows/test-integrations-web-1.yml index ba802faa01..4b22db6155 100644 --- a/.github/workflows/test-integrations-web-1.yml +++ b/.github/workflows/test-integrations-web-1.yml @@ -57,7 +57,7 @@ jobs: container: ${{ matrix.python-version == '3.6' && 'python:3.6' || null }} steps: - uses: actions/checkout@v5.0.0 - - uses: actions/setup-python@v5 + - uses: actions/setup-python@v6 if: ${{ matrix.python-version != '3.6' }} with: python-version: ${{ matrix.python-version }} @@ -97,7 +97,7 @@ jobs: coverage xml - name: Upload coverage to Codecov if: ${{ !cancelled() }} - uses: codecov/codecov-action@v5.5.0 + uses: codecov/codecov-action@v5.5.1 with: token: ${{ secrets.CODECOV_TOKEN }} files: coverage.xml diff --git a/.github/workflows/test-integrations-web-2.yml b/.github/workflows/test-integrations-web-2.yml index 22200f8ae1..6b7fe58815 100644 --- a/.github/workflows/test-integrations-web-2.yml +++ b/.github/workflows/test-integrations-web-2.yml @@ -39,7 +39,7 @@ jobs: container: ${{ matrix.python-version == '3.6' && 'python:3.6' || null }} steps: - uses: actions/checkout@v5.0.0 - - uses: actions/setup-python@v5 + - uses: actions/setup-python@v6 if: ${{ matrix.python-version != '3.6' }} with: python-version: ${{ matrix.python-version }} @@ -103,7 +103,7 @@ jobs: coverage xml - name: Upload coverage to Codecov if: ${{ !cancelled() }} - uses: codecov/codecov-action@v5.5.0 + uses: codecov/codecov-action@v5.5.1 with: token: ${{ secrets.CODECOV_TOKEN }} files: coverage.xml @@ -134,7 +134,7 @@ jobs: container: ${{ matrix.python-version == '3.6' && 'python:3.6' || null }} steps: - uses: actions/checkout@v5.0.0 - - uses: actions/setup-python@v5 + - uses: actions/setup-python@v6 if: ${{ matrix.python-version != '3.6' }} with: python-version: ${{ matrix.python-version }} @@ -198,7 +198,7 @@ jobs: coverage xml - name: Upload coverage to Codecov if: ${{ !cancelled() }} - uses: codecov/codecov-action@v5.5.0 + uses: codecov/codecov-action@v5.5.1 with: token: ${{ secrets.CODECOV_TOKEN }} files: coverage.xml diff --git a/CHANGELOG.md b/CHANGELOG.md index 28c4882414..7abbed7218 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,27 @@ # Changelog +## 2.38.0 + +### Various fixes & improvements + +- Feat(huggingface_hub): Update HuggingFace Hub integration (#4746) by @antonpirker +- Feat(Anthropic): Add proper tool calling data to Anthropic integration (#4769) by @antonpirker +- Feat(openai-agents): Add input and output to `invoke_agent` span. (#4785) by @antonpirker +- Feat(AI): Create transaction in AI agent frameworks when no transaction is running. (#4758) by @constantinius +- Feat(GraphQL): Support gql 4.0-style execute (#4779) by @sentrivana +- Fix(logs): Expect `log_item` as rate limit category (#4798) by @sentrivana +- Fix: CI for mypy, gevent (#4790) by @sentrivana +- Fix: Correctly check for a running transaction (#4791) by @antonpirker +- Fix: Use float for sample rand (#4677) by @sentrivana +- Fix: Avoid reporting false-positive StopAsyncIteration in the asyncio integration (#4741) by @vmarkovtsev +- Fix: Add log message when `DedupeIntegration` is dropping an error.
(#4788) by @antonpirker +- Fix(profiling): Re-init continuous profiler (#4772) by @Zylphrex +- Chore: Reexport module `profiler` (#4535) by @zen-xu +- Tests: Update tox.ini (#4799) by @sentrivana +- Build(deps): bump actions/create-github-app-token from 2.1.1 to 2.1.4 (#4795) by @dependabot +- Build(deps): bump actions/setup-python from 5 to 6 (#4774) by @dependabot +- Build(deps): bump codecov/codecov-action from 5.5.0 to 5.5.1 (#4773) by @dependabot + ## 2.37.1 ### Various fixes & improvements diff --git a/docs/conf.py b/docs/conf.py index 28a49b7fa7..061b2bdfc8 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -31,7 +31,7 @@ copyright = "2019-{}, Sentry Team and Contributors".format(datetime.now().year) author = "Sentry Team and Contributors" -release = "2.37.1" +release = "2.38.0" version = ".".join(release.split(".")[:2]) # The short X.Y version. diff --git a/scripts/populate_tox/config.py b/scripts/populate_tox/config.py index 679ffddf2c..bc20d531b3 100644 --- a/scripts/populate_tox/config.py +++ b/scripts/populate_tox/config.py @@ -155,6 +155,9 @@ }, "huggingface_hub": { "package": "huggingface_hub", + "deps": { + "*": ["responses"], + }, }, "langchain-base": { "package": "langchain", diff --git a/scripts/populate_tox/tox.jinja b/scripts/populate_tox/tox.jinja index ef2e89c88c..4a4bd96c52 100755 --- a/scripts/populate_tox/tox.jinja +++ b/scripts/populate_tox/tox.jinja @@ -134,6 +134,7 @@ deps = {py3.8,py3.9,py3.10,py3.11,py3.12}-gevent: pytest gevent: pytest-asyncio {py3.10,py3.11}-gevent: zope.event<5.0.0 + {py3.10,py3.11}-gevent: zope.interface<8.0 # === Integrations === diff --git a/scripts/split_tox_gh_actions/templates/test_group.jinja b/scripts/split_tox_gh_actions/templates/test_group.jinja index 4ac0d03eb2..f020a44b84 100644 --- a/scripts/split_tox_gh_actions/templates/test_group.jinja +++ b/scripts/split_tox_gh_actions/templates/test_group.jinja @@ -43,7 +43,7 @@ {% raw %}container: ${{ matrix.python-version == '3.6' && 'python:3.6' || null }}{% endraw %} steps: - uses: actions/checkout@v5.0.0 - - uses: actions/setup-python@v5 + - uses: actions/setup-python@v6 {% raw %}if: ${{ matrix.python-version != '3.6' }}{% endraw %} with: python-version: {% raw %}${{ matrix.python-version }}{% endraw %} @@ -100,7 +100,7 @@ - name: Upload coverage to Codecov if: {% raw %}${{ !cancelled() }}{% endraw %} - uses: codecov/codecov-action@v5.5.0 + uses: codecov/codecov-action@v5.5.1 with: token: {% raw %}${{ secrets.CODECOV_TOKEN }}{% endraw %} files: coverage.xml diff --git a/sentry_sdk/__init__.py b/sentry_sdk/__init__.py index a37b52ff4e..1939be0510 100644 --- a/sentry_sdk/__init__.py +++ b/sentry_sdk/__init__.py @@ -1,10 +1,10 @@ +from sentry_sdk import profiler from sentry_sdk.scope import Scope from sentry_sdk.transport import Transport, HttpTransport from sentry_sdk.client import Client from sentry_sdk.api import * # noqa - -from sentry_sdk.consts import VERSION # noqa +from sentry_sdk.consts import VERSION __all__ = [ # noqa "Hub", @@ -12,6 +12,7 @@ "Client", "Transport", "HttpTransport", + "VERSION", "integrations", # From sentry_sdk.api "init", @@ -47,6 +48,7 @@ "trace", "monitor", "logger", + "profiler", "start_session", "end_session", "set_transaction_name", diff --git a/sentry_sdk/_types.py b/sentry_sdk/_types.py index 8336617a8d..b28c7260ce 100644 --- a/sentry_sdk/_types.py +++ b/sentry_sdk/_types.py @@ -269,7 +269,7 @@ class SDKInfo(TypedDict): "metric_bucket", "monitor", "span", - "log", + "log_item", ] SessionStatus = Literal["ok", "exited", "crashed", "abnormal"] diff --git 
a/sentry_sdk/ai/utils.py b/sentry_sdk/ai/utils.py index 2dc0de4ef3..d0ccf1bed3 100644 --- a/sentry_sdk/ai/utils.py +++ b/sentry_sdk/ai/utils.py @@ -3,9 +3,10 @@ from typing import TYPE_CHECKING if TYPE_CHECKING: - from typing import Any + from typing import Any, Callable from sentry_sdk.tracing import Span +import sentry_sdk from sentry_sdk.utils import logger @@ -37,3 +38,12 @@ def set_data_normalized(span, key, value, unpack=True): span.set_data(key, normalized) else: span.set_data(key, json.dumps(normalized)) + + +def get_start_span_function(): + # type: () -> Callable[..., Any] + current_span = sentry_sdk.get_current_span() + transaction_exists = ( + current_span is not None and current_span.containing_transaction is not None + ) + return sentry_sdk.start_span if transaction_exists else sentry_sdk.start_transaction diff --git a/sentry_sdk/consts.py b/sentry_sdk/consts.py index 4f015643d4..91a1740526 100644 --- a/sentry_sdk/consts.py +++ b/sentry_sdk/consts.py @@ -795,6 +795,7 @@ class OP: GEN_AI_CREATE_AGENT = "gen_ai.create_agent" GEN_AI_EMBEDDINGS = "gen_ai.embeddings" GEN_AI_EXECUTE_TOOL = "gen_ai.execute_tool" + GEN_AI_GENERATE_TEXT = "gen_ai.generate_text" GEN_AI_HANDOFF = "gen_ai.handoff" GEN_AI_PIPELINE = "gen_ai.pipeline" GEN_AI_INVOKE_AGENT = "gen_ai.invoke_agent" @@ -1330,4 +1331,4 @@ def _get_default_options(): del _get_default_options -VERSION = "2.37.1" +VERSION = "2.38.0" diff --git a/sentry_sdk/envelope.py b/sentry_sdk/envelope.py index 5f7220bf21..7dbbdec5c8 100644 --- a/sentry_sdk/envelope.py +++ b/sentry_sdk/envelope.py @@ -273,7 +273,7 @@ def data_category(self): elif ty == "event": return "error" elif ty == "log": - return "log" + return "log_item" elif ty == "client_report": return "internal" elif ty == "profile": diff --git a/sentry_sdk/integrations/__init__.py b/sentry_sdk/integrations/__init__.py index 7f202221a7..2f5a1f397e 100644 --- a/sentry_sdk/integrations/__init__.py +++ b/sentry_sdk/integrations/__init__.py @@ -141,7 +141,7 @@ def iter_default_integrations(with_auto_enabling_integrations): "gql": (3, 4, 1), "graphene": (3, 3), "grpc": (1, 32, 0), # grpcio - "huggingface_hub": (0, 22), + "huggingface_hub": (0, 24, 7), "langchain": (0, 1, 0), "langgraph": (0, 6, 6), "launchdarkly": (9, 8, 0), diff --git a/sentry_sdk/integrations/anthropic.py b/sentry_sdk/integrations/anthropic.py index 05d45ef62f..4f4c0b1a2a 100644 --- a/sentry_sdk/integrations/anthropic.py +++ b/sentry_sdk/integrations/anthropic.py @@ -1,10 +1,9 @@ from functools import wraps -import json from typing import TYPE_CHECKING import sentry_sdk from sentry_sdk.ai.monitoring import record_token_usage -from sentry_sdk.ai.utils import set_data_normalized +from sentry_sdk.ai.utils import set_data_normalized, get_start_span_function from sentry_sdk.consts import OP, SPANDATA from sentry_sdk.integrations import _check_minimum_version, DidNotEnable, Integration from sentry_sdk.scope import should_send_default_pii @@ -117,8 +116,29 @@ def _set_input_data(span, kwargs, integration): and should_send_default_pii() and integration.include_prompts ): + normalized_messages = [] + for message in messages: + if ( + message.get("role") == "user" + and "content" in message + and isinstance(message["content"], (list, tuple)) + ): + for item in message["content"]: + if item.get("type") == "tool_result": + normalized_messages.append( + { + "role": "tool", + "content": { + "tool_use_id": item.get("tool_use_id"), + "output": item.get("content"), + }, + } + ) + else: + normalized_messages.append(message) + 
set_data_normalized( - span, SPANDATA.GEN_AI_REQUEST_MESSAGES, safe_serialize(messages) + span, SPANDATA.GEN_AI_REQUEST_MESSAGES, normalized_messages, unpack=False ) set_data_normalized( @@ -159,12 +179,29 @@ def _set_output_data( Set output data for the span based on the AI response.""" span.set_data(SPANDATA.GEN_AI_RESPONSE_MODEL, model) if should_send_default_pii() and integration.include_prompts: - set_data_normalized( - span, - SPANDATA.GEN_AI_RESPONSE_TEXT, - json.dumps(content_blocks), - unpack=False, - ) + output_messages = { + "response": [], + "tool": [], + } # type: (dict[str, list[Any]]) + + for output in content_blocks: + if output["type"] == "text": + output_messages["response"].append(output["text"]) + elif output["type"] == "tool_use": + output_messages["tool"].append(output) + + if len(output_messages["tool"]) > 0: + set_data_normalized( + span, + SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS, + output_messages["tool"], + unpack=False, + ) + + if len(output_messages["response"]) > 0: + set_data_normalized( + span, SPANDATA.GEN_AI_RESPONSE_TEXT, output_messages["response"] + ) record_token_usage( span, @@ -172,8 +209,6 @@ def _set_output_data( output_tokens=output_tokens, ) - # TODO: GEN_AI_RESPONSE_TOOL_CALLS ? - if finish_span: span.__exit__(None, None, None) @@ -194,7 +229,7 @@ def _sentry_patched_create_common(f, *args, **kwargs): model = kwargs.get("model", "") - span = sentry_sdk.start_span( + span = get_start_span_function()( op=OP.GEN_AI_CHAT, name=f"chat {model}".strip(), origin=AnthropicIntegration.origin, diff --git a/sentry_sdk/integrations/asyncio.py b/sentry_sdk/integrations/asyncio.py index ae580ca038..66742fe6e4 100644 --- a/sentry_sdk/integrations/asyncio.py +++ b/sentry_sdk/integrations/asyncio.py @@ -51,6 +51,8 @@ async def _task_with_sentry_span_creation(): ): try: result = await coro + except StopAsyncIteration as e: + raise e from None except Exception: reraise(*_capture_exception()) diff --git a/sentry_sdk/integrations/dedupe.py b/sentry_sdk/integrations/dedupe.py index a115e35292..eab2764fcd 100644 --- a/sentry_sdk/integrations/dedupe.py +++ b/sentry_sdk/integrations/dedupe.py @@ -1,5 +1,5 @@ import sentry_sdk -from sentry_sdk.utils import ContextVar +from sentry_sdk.utils import ContextVar, logger from sentry_sdk.integrations import Integration from sentry_sdk.scope import add_global_event_processor @@ -37,7 +37,9 @@ def processor(event, hint): exc = exc_info[1] if integration._last_seen.get(None) is exc: + logger.info("DedupeIntegration dropped duplicated error event %s", exc) return None + integration._last_seen.set(exc) return event diff --git a/sentry_sdk/integrations/gql.py b/sentry_sdk/integrations/gql.py index 5f4436f5b2..8c378060b7 100644 --- a/sentry_sdk/integrations/gql.py +++ b/sentry_sdk/integrations/gql.py @@ -18,6 +18,13 @@ ) from gql.transport import Transport, AsyncTransport # type: ignore[import-not-found] from gql.transport.exceptions import TransportQueryError # type: ignore[import-not-found] + + try: + # gql 4.0+ + from gql import GraphQLRequest + except ImportError: + GraphQLRequest = None + except ImportError: raise DidNotEnable("gql is not installed") @@ -92,13 +99,13 @@ def _patch_execute(): real_execute = gql.Client.execute @ensure_integration_enabled(GQLIntegration, real_execute) - def sentry_patched_execute(self, document, *args, **kwargs): + def sentry_patched_execute(self, document_or_request, *args, **kwargs): # type: (gql.Client, DocumentNode, Any, Any) -> Any scope = sentry_sdk.get_isolation_scope() - 
scope.add_event_processor(_make_gql_event_processor(self, document)) + scope.add_event_processor(_make_gql_event_processor(self, document_or_request)) try: - return real_execute(self, document, *args, **kwargs) + return real_execute(self, document_or_request, *args, **kwargs) except TransportQueryError as e: event, hint = event_from_exception( e, @@ -112,8 +119,8 @@ def sentry_patched_execute(self, document, *args, **kwargs): gql.Client.execute = sentry_patched_execute -def _make_gql_event_processor(client, document): - # type: (gql.Client, DocumentNode) -> EventProcessor +def _make_gql_event_processor(client, document_or_request): + # type: (gql.Client, Union[DocumentNode, gql.GraphQLRequest]) -> EventProcessor def processor(event, hint): # type: (Event, dict[str, Any]) -> Event try: @@ -130,6 +137,16 @@ def processor(event, hint): ) if should_send_default_pii(): + if GraphQLRequest is not None and isinstance( + document_or_request, GraphQLRequest + ): + # In v4.0.0, gql moved to using GraphQLRequest instead of + # DocumentNode in execute + # https://github.com/graphql-python/gql/pull/556 + document = document_or_request.document + else: + document = document_or_request + request["data"] = _data_from_document(document) contexts = event.setdefault("contexts", {}) response = contexts.setdefault("response", {}) diff --git a/sentry_sdk/integrations/huggingface_hub.py b/sentry_sdk/integrations/huggingface_hub.py index 2dfcb5925a..cb76ccf507 100644 --- a/sentry_sdk/integrations/huggingface_hub.py +++ b/sentry_sdk/integrations/huggingface_hub.py @@ -1,24 +1,24 @@ +import inspect from functools import wraps -from sentry_sdk import consts +import sentry_sdk from sentry_sdk.ai.monitoring import record_token_usage from sentry_sdk.ai.utils import set_data_normalized -from sentry_sdk.consts import SPANDATA - -from typing import Any, Iterable, Callable - -import sentry_sdk -from sentry_sdk.scope import should_send_default_pii +from sentry_sdk.consts import OP, SPANDATA from sentry_sdk.integrations import DidNotEnable, Integration +from sentry_sdk.scope import should_send_default_pii from sentry_sdk.utils import ( capture_internal_exceptions, event_from_exception, ) +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from typing import Any, Callable, Iterable + try: import huggingface_hub.inference._client - - from huggingface_hub import ChatCompletionStreamOutput, TextGenerationOutput except ImportError: raise DidNotEnable("Huggingface not installed") @@ -34,9 +34,18 @@ def __init__(self, include_prompts=True): @staticmethod def setup_once(): # type: () -> None + + # Other tasks that can be called: https://huggingface.co/docs/huggingface_hub/guides/inference#supported-providers-and-tasks huggingface_hub.inference._client.InferenceClient.text_generation = ( - _wrap_text_generation( - huggingface_hub.inference._client.InferenceClient.text_generation + _wrap_huggingface_task( + huggingface_hub.inference._client.InferenceClient.text_generation, + OP.GEN_AI_GENERATE_TEXT, + ) + ) + huggingface_hub.inference._client.InferenceClient.chat_completion = ( + _wrap_huggingface_task( + huggingface_hub.inference._client.InferenceClient.chat_completion, + OP.GEN_AI_CHAT, ) ) @@ -51,131 +60,318 @@ def _capture_exception(exc): sentry_sdk.capture_event(event, hint=hint) -def _wrap_text_generation(f): - # type: (Callable[..., Any]) -> Callable[..., Any] +def _wrap_huggingface_task(f, op): + # type: (Callable[..., Any], str) -> Callable[..., Any] @wraps(f) - def new_text_generation(*args, **kwargs): + def 
new_huggingface_task(*args, **kwargs): # type: (*Any, **Any) -> Any integration = sentry_sdk.get_client().get_integration(HuggingfaceHubIntegration) if integration is None: return f(*args, **kwargs) + prompt = None if "prompt" in kwargs: prompt = kwargs["prompt"] + elif "messages" in kwargs: + prompt = kwargs["messages"] elif len(args) >= 2: - kwargs["prompt"] = args[1] - prompt = kwargs["prompt"] - args = (args[0],) + args[2:] - else: - # invalid call, let it return error + if isinstance(args[1], str) or isinstance(args[1], list): + prompt = args[1] + + if prompt is None: + # invalid call, don't instrument, let it return error + return f(*args, **kwargs) - model = kwargs.get("model") - streaming = kwargs.get("stream") + client = args[0] + model = client.model or kwargs.get("model") or "" + operation_name = op.split(".")[-1] span = sentry_sdk.start_span( - op=consts.OP.HUGGINGFACE_HUB_CHAT_COMPLETIONS_CREATE, - name="Text Generation", + op=op, + name=f"{operation_name} {model}", origin=HuggingfaceHubIntegration.origin, ) span.__enter__() + + span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, operation_name) + + if model: + span.set_data(SPANDATA.GEN_AI_REQUEST_MODEL, model) + + # Input attributes + if should_send_default_pii() and integration.include_prompts: + set_data_normalized( + span, SPANDATA.GEN_AI_REQUEST_MESSAGES, prompt, unpack=False + ) + + attribute_mapping = { + "tools": SPANDATA.GEN_AI_REQUEST_AVAILABLE_TOOLS, + "frequency_penalty": SPANDATA.GEN_AI_REQUEST_FREQUENCY_PENALTY, + "max_tokens": SPANDATA.GEN_AI_REQUEST_MAX_TOKENS, + "presence_penalty": SPANDATA.GEN_AI_REQUEST_PRESENCE_PENALTY, + "temperature": SPANDATA.GEN_AI_REQUEST_TEMPERATURE, + "top_p": SPANDATA.GEN_AI_REQUEST_TOP_P, + "top_k": SPANDATA.GEN_AI_REQUEST_TOP_K, + "stream": SPANDATA.GEN_AI_RESPONSE_STREAMING, + } + + for attribute, span_attribute in attribute_mapping.items(): + value = kwargs.get(attribute, None) + if value is not None: + if isinstance(value, (int, float, bool, str)): + span.set_data(span_attribute, value) + else: + set_data_normalized(span, span_attribute, value, unpack=False) + + # LLM Execution try: res = f(*args, **kwargs) except Exception as e: + # Error Handling + span.set_status("error") _capture_exception(e) span.__exit__(None, None, None) raise e from None + # Output attributes + finish_reason = None + response_model = None + response_text_buffer: list[str] = [] + tokens_used = 0 + tool_calls = None + usage = None + with capture_internal_exceptions(): - if should_send_default_pii() and integration.include_prompts: - set_data_normalized(span, SPANDATA.AI_INPUT_MESSAGES, prompt) + if isinstance(res, str) and res is not None: + response_text_buffer.append(res) - set_data_normalized(span, SPANDATA.AI_MODEL_ID, model) - set_data_normalized(span, SPANDATA.AI_STREAMING, streaming) + if hasattr(res, "generated_text") and res.generated_text is not None: + response_text_buffer.append(res.generated_text) - if isinstance(res, str): - if should_send_default_pii() and integration.include_prompts: - set_data_normalized( - span, - SPANDATA.AI_RESPONSES, - [res], - ) - span.__exit__(None, None, None) - return res + if hasattr(res, "model") and res.model is not None: + response_model = res.model + + if hasattr(res, "details") and hasattr(res.details, "finish_reason"): + finish_reason = res.details.finish_reason + + if ( + hasattr(res, "details") + and hasattr(res.details, "generated_tokens") + and res.details.generated_tokens is not None + ): + tokens_used = res.details.generated_tokens + + if hasattr(res,
"usage") and res.usage is not None: + usage = res.usage + + if hasattr(res, "choices") and res.choices is not None: + for choice in res.choices: + if hasattr(choice, "finish_reason"): + finish_reason = choice.finish_reason + if hasattr(choice, "message") and hasattr( + choice.message, "tool_calls" + ): + tool_calls = choice.message.tool_calls + if ( + hasattr(choice, "message") + and hasattr(choice.message, "content") + and choice.message.content is not None + ): + response_text_buffer.append(choice.message.content) - if isinstance(res, TextGenerationOutput): - if should_send_default_pii() and integration.include_prompts: + if response_model is not None: + span.set_data(SPANDATA.GEN_AI_RESPONSE_MODEL, response_model) + + if finish_reason is not None: + set_data_normalized( + span, + SPANDATA.GEN_AI_RESPONSE_FINISH_REASONS, + finish_reason, + ) + + if should_send_default_pii() and integration.include_prompts: + if tool_calls is not None and len(tool_calls) > 0: set_data_normalized( span, - SPANDATA.AI_RESPONSES, - [res.generated_text], - ) - if res.details is not None and res.details.generated_tokens > 0: - record_token_usage( - span, - total_tokens=res.details.generated_tokens, + SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS, + tool_calls, + unpack=False, ) - span.__exit__(None, None, None) - return res - if not isinstance(res, Iterable): - # we only know how to deal with strings and iterables, ignore - set_data_normalized(span, "unknown_response", True) + if len(response_text_buffer) > 0: + text_response = "".join(response_text_buffer) + if text_response: + set_data_normalized( + span, + SPANDATA.GEN_AI_RESPONSE_TEXT, + text_response, + ) + + if usage is not None: + record_token_usage( + span, + input_tokens=usage.prompt_tokens, + output_tokens=usage.completion_tokens, + total_tokens=usage.total_tokens, + ) + elif tokens_used > 0: + record_token_usage( + span, + total_tokens=tokens_used, + ) + + # If the response is not a generator (meaning a streaming response) + # we are done and can return the response + if not inspect.isgenerator(res): span.__exit__(None, None, None) return res if kwargs.get("details", False): - # res is Iterable[TextGenerationStreamOutput] + # text-generation stream output def new_details_iterator(): - # type: () -> Iterable[ChatCompletionStreamOutput] + # type: () -> Iterable[Any] + finish_reason = None + response_text_buffer: list[str] = [] + tokens_used = 0 + with capture_internal_exceptions(): - tokens_used = 0 - data_buf: list[str] = [] - for x in res: - if hasattr(x, "token") and hasattr(x.token, "text"): - data_buf.append(x.token.text) - if hasattr(x, "details") and hasattr( - x.details, "generated_tokens" + for chunk in res: + if ( + hasattr(chunk, "token") + and hasattr(chunk.token, "text") + and chunk.token.text is not None + ): + response_text_buffer.append(chunk.token.text) + + if hasattr(chunk, "details") and hasattr( + chunk.details, "finish_reason" + ): + finish_reason = chunk.details.finish_reason + + if ( + hasattr(chunk, "details") + and hasattr(chunk.details, "generated_tokens") + and chunk.details.generated_tokens is not None ): - tokens_used = x.details.generated_tokens - yield x - if ( - len(data_buf) > 0 - and should_send_default_pii() - and integration.include_prompts - ): + tokens_used = chunk.details.generated_tokens + + yield chunk + + if finish_reason is not None: set_data_normalized( - span, SPANDATA.AI_RESPONSES, "".join(data_buf) + span, + SPANDATA.GEN_AI_RESPONSE_FINISH_REASONS, + finish_reason, ) + + if should_send_default_pii() and 
integration.include_prompts: + if len(response_text_buffer) > 0: + text_response = "".join(response_text_buffer) + if text_response: + set_data_normalized( + span, + SPANDATA.GEN_AI_RESPONSE_TEXT, + text_response, + ) + if tokens_used > 0: record_token_usage( span, total_tokens=tokens_used, ) + span.__exit__(None, None, None) return new_details_iterator() - else: - # res is Iterable[str] + else: + # chat-completion stream output def new_iterator(): # type: () -> Iterable[str] - data_buf: list[str] = [] + finish_reason = None + response_model = None + response_text_buffer: list[str] = [] + tool_calls = None + usage = None + with capture_internal_exceptions(): - for s in res: - if isinstance(s, str): - data_buf.append(s) - yield s - if ( - len(data_buf) > 0 - and should_send_default_pii() - and integration.include_prompts - ): + for chunk in res: + if hasattr(chunk, "model") and chunk.model is not None: + response_model = chunk.model + + if hasattr(chunk, "usage") and chunk.usage is not None: + usage = chunk.usage + + if isinstance(chunk, str): + if chunk is not None: + response_text_buffer.append(chunk) + + if hasattr(chunk, "choices") and chunk.choices is not None: + for choice in chunk.choices: + if ( + hasattr(choice, "delta") + and hasattr(choice.delta, "content") + and choice.delta.content is not None + ): + response_text_buffer.append( + choice.delta.content + ) + + if ( + hasattr(choice, "finish_reason") + and choice.finish_reason is not None + ): + finish_reason = choice.finish_reason + + if ( + hasattr(choice, "delta") + and hasattr(choice.delta, "tool_calls") + and choice.delta.tool_calls is not None + ): + tool_calls = choice.delta.tool_calls + + yield chunk + + if response_model is not None: + span.set_data( + SPANDATA.GEN_AI_RESPONSE_MODEL, response_model + ) + + if finish_reason is not None: set_data_normalized( - span, SPANDATA.AI_RESPONSES, "".join(data_buf) + span, + SPANDATA.GEN_AI_RESPONSE_FINISH_REASONS, + finish_reason, ) + + if should_send_default_pii() and integration.include_prompts: + if tool_calls is not None and len(tool_calls) > 0: + set_data_normalized( + span, + SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS, + tool_calls, + unpack=False, + ) + + if len(response_text_buffer) > 0: + text_response = "".join(response_text_buffer) + if text_response: + set_data_normalized( + span, + SPANDATA.GEN_AI_RESPONSE_TEXT, + text_response, + ) + + if usage is not None: + record_token_usage( + span, + input_tokens=usage.prompt_tokens, + output_tokens=usage.completion_tokens, + total_tokens=usage.total_tokens, + ) + span.__exit__(None, None, None) return new_iterator() - return new_text_generation + return new_huggingface_task diff --git a/sentry_sdk/integrations/langchain.py b/sentry_sdk/integrations/langchain.py index e14dd619fe..1401be06e1 100644 --- a/sentry_sdk/integrations/langchain.py +++ b/sentry_sdk/integrations/langchain.py @@ -4,7 +4,7 @@ import sentry_sdk from sentry_sdk.ai.monitoring import set_ai_pipeline_name -from sentry_sdk.ai.utils import set_data_normalized +from sentry_sdk.ai.utils import set_data_normalized, get_start_span_function from sentry_sdk.consts import OP, SPANDATA from sentry_sdk.integrations import DidNotEnable, Integration from sentry_sdk.scope import should_send_default_pii @@ -716,8 +716,9 @@ def new_invoke(self, *args, **kwargs): return f(self, *args, **kwargs) agent_name, tools = _get_request_data(self, args, kwargs) + start_span_function = get_start_span_function() - with sentry_sdk.start_span( + with start_span_function( op=OP.GEN_AI_INVOKE_AGENT, 
name=f"invoke_agent {agent_name}" if agent_name else "invoke_agent", origin=LangchainIntegration.origin, @@ -767,8 +768,9 @@ def new_stream(self, *args, **kwargs): return f(self, *args, **kwargs) agent_name, tools = _get_request_data(self, args, kwargs) + start_span_function = get_start_span_function() - span = sentry_sdk.start_span( + span = start_span_function( op=OP.GEN_AI_INVOKE_AGENT, name=f"invoke_agent {agent_name}".strip(), origin=LangchainIntegration.origin, diff --git a/sentry_sdk/integrations/openai_agents/patches/agent_run.py b/sentry_sdk/integrations/openai_agents/patches/agent_run.py index 29002f6619..5473915b48 100644 --- a/sentry_sdk/integrations/openai_agents/patches/agent_run.py +++ b/sentry_sdk/integrations/openai_agents/patches/agent_run.py @@ -26,12 +26,12 @@ def _patch_agent_run(): original_execute_handoffs = agents._run_impl.RunImpl.execute_handoffs original_execute_final_output = agents._run_impl.RunImpl.execute_final_output - def _start_invoke_agent_span(context_wrapper, agent): - # type: (agents.RunContextWrapper, agents.Agent) -> None + def _start_invoke_agent_span(context_wrapper, agent, kwargs): + # type: (agents.RunContextWrapper, agents.Agent, dict[str, Any]) -> None """Start an agent invocation span""" # Store the agent on the context wrapper so we can access it later context_wrapper._sentry_current_agent = agent - invoke_agent_span(context_wrapper, agent) + invoke_agent_span(context_wrapper, agent, kwargs) def _end_invoke_agent_span(context_wrapper, agent, output=None): # type: (agents.RunContextWrapper, agents.Agent, Optional[Any]) -> None @@ -72,7 +72,7 @@ async def patched_run_single_turn(cls, *args, **kwargs): if current_agent and current_agent != agent: _end_invoke_agent_span(context_wrapper, current_agent) - _start_invoke_agent_span(context_wrapper, agent) + _start_invoke_agent_span(context_wrapper, agent, kwargs) # Call original method with all the correct parameters result = await original_run_single_turn(*args, **kwargs) diff --git a/sentry_sdk/integrations/openai_agents/spans/agent_workflow.py b/sentry_sdk/integrations/openai_agents/spans/agent_workflow.py index de2f28d41e..ef69b856e3 100644 --- a/sentry_sdk/integrations/openai_agents/spans/agent_workflow.py +++ b/sentry_sdk/integrations/openai_agents/spans/agent_workflow.py @@ -1,7 +1,7 @@ import sentry_sdk +from sentry_sdk.ai.utils import get_start_span_function from ..consts import SPAN_ORIGIN -from ..utils import _get_start_span_function from typing import TYPE_CHECKING @@ -13,7 +13,7 @@ def agent_workflow_span(agent): # type: (agents.Agent) -> sentry_sdk.tracing.Span # Create a transaction or a span if an transaction is already active - span = _get_start_span_function()( + span = get_start_span_function()( name=f"{agent.name} workflow", origin=SPAN_ORIGIN, ) diff --git a/sentry_sdk/integrations/openai_agents/spans/invoke_agent.py b/sentry_sdk/integrations/openai_agents/spans/invoke_agent.py index 549ade1246..cf06120625 100644 --- a/sentry_sdk/integrations/openai_agents/spans/invoke_agent.py +++ b/sentry_sdk/integrations/openai_agents/spans/invoke_agent.py @@ -1,5 +1,8 @@ import sentry_sdk +from sentry_sdk.ai.utils import get_start_span_function, set_data_normalized from sentry_sdk.consts import OP, SPANDATA +from sentry_sdk.scope import should_send_default_pii +from sentry_sdk.utils import safe_serialize from ..consts import SPAN_ORIGIN from ..utils import _set_agent_data @@ -11,9 +14,10 @@ from typing import Any -def invoke_agent_span(context, agent): - # type: (agents.RunContextWrapper, 
agents.Agent) -> sentry_sdk.tracing.Span - span = sentry_sdk.start_span( +def invoke_agent_span(context, agent, kwargs): + # type: (agents.RunContextWrapper, agents.Agent, dict[str, Any]) -> sentry_sdk.tracing.Span + start_span_function = get_start_span_function() + span = start_span_function( op=OP.GEN_AI_INVOKE_AGENT, name=f"invoke_agent {agent.name}", origin=SPAN_ORIGIN, @@ -22,6 +26,40 @@ def invoke_agent_span(context, agent): span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "invoke_agent") + if should_send_default_pii(): + messages = [] + if agent.instructions: + message = ( + agent.instructions + if isinstance(agent.instructions, str) + else safe_serialize(agent.instructions) + ) + messages.append( + { + "content": [{"text": message, "type": "text"}], + "role": "system", + } + ) + + original_input = kwargs.get("original_input") + if original_input is not None: + message = ( + original_input + if isinstance(original_input, str) + else safe_serialize(original_input) + ) + messages.append( + { + "content": [{"text": message, "type": "text"}], + "role": "user", + } + ) + + if len(messages) > 0: + set_data_normalized( + span, SPANDATA.GEN_AI_REQUEST_MESSAGES, messages, unpack=False + ) + _set_agent_data(span, agent) return span @@ -29,6 +67,12 @@ def invoke_agent_span(context, agent): def update_invoke_agent_span(context, agent, output): # type: (agents.RunContextWrapper, agents.Agent, Any) -> None - current_span = sentry_sdk.get_current_span() - if current_span: - current_span.__exit__(None, None, None) + span = sentry_sdk.get_current_span() + + if span: + if should_send_default_pii(): + set_data_normalized( + span, SPANDATA.GEN_AI_RESPONSE_TEXT, output, unpack=False + ) + + span.__exit__(None, None, None) diff --git a/sentry_sdk/integrations/openai_agents/utils.py b/sentry_sdk/integrations/openai_agents/utils.py index 44b260d4bc..a0487e0e3a 100644 --- a/sentry_sdk/integrations/openai_agents/utils.py +++ b/sentry_sdk/integrations/openai_agents/utils.py @@ -9,7 +9,6 @@ if TYPE_CHECKING: from typing import Any - from typing import Callable from agents import Usage try: @@ -29,15 +28,6 @@ def _capture_exception(exc): sentry_sdk.capture_event(event, hint=hint) -def _get_start_span_function(): - # type: () -> Callable[..., Any] - current_span = sentry_sdk.get_current_span() - transaction_exists = ( - current_span is not None and current_span.containing_transaction == current_span - ) - return sentry_sdk.start_span if transaction_exists else sentry_sdk.start_transaction - - def _set_agent_data(span, agent): # type: (sentry_sdk.tracing.Span, agents.Agent) -> None span.set_data( diff --git a/sentry_sdk/integrations/threading.py b/sentry_sdk/integrations/threading.py index fc4f539228..c031c51f50 100644 --- a/sentry_sdk/integrations/threading.py +++ b/sentry_sdk/integrations/threading.py @@ -52,7 +52,7 @@ def setup_once(): try: from django import VERSION as django_version # noqa: N811 - import channels # type: ignore[import-not-found] + import channels # type: ignore[import-untyped] channels_version = channels.__version__ except ImportError: diff --git a/sentry_sdk/profiler/continuous_profiler.py b/sentry_sdk/profiler/continuous_profiler.py index 00dd29e36c..165bd13837 100644 --- a/sentry_sdk/profiler/continuous_profiler.py +++ b/sentry_sdk/profiler/continuous_profiler.py @@ -75,9 +75,11 @@ def setup_continuous_profiler(options, sdk_info, capture_func): # type: (Dict[str, Any], SDKInfo, Callable[[Envelope], None]) -> bool global _scheduler - if _scheduler is not None: + already_initialized = 
_scheduler is not None + + if already_initialized: logger.debug("[Profiling] Continuous Profiler is already setup") - return False + teardown_continuous_profiler() if is_gevent(): # If gevent has patched the threading modules then we cannot rely on @@ -117,11 +119,19 @@ def setup_continuous_profiler(options, sdk_info, capture_func): ) ) - atexit.register(teardown_continuous_profiler) + if not already_initialized: + atexit.register(teardown_continuous_profiler) return True +def is_profile_session_sampled(): + # type: () -> bool + if _scheduler is None: + return False + return _scheduler.sampled + + def try_autostart_continuous_profiler(): # type: () -> None diff --git a/sentry_sdk/tracing.py b/sentry_sdk/tracing.py index 0d1fcc45da..fc43a33dc7 100644 --- a/sentry_sdk/tracing.py +++ b/sentry_sdk/tracing.py @@ -1,4 +1,3 @@ -from decimal import Decimal import uuid import warnings from datetime import datetime, timedelta, timezone @@ -1251,7 +1250,7 @@ def _set_initial_sampling_decision(self, sampling_context): return # Now we roll the dice. - self.sampled = self._sample_rand < Decimal.from_float(self.sample_rate) + self.sampled = self._sample_rand < self.sample_rate if self.sampled: logger.debug( diff --git a/sentry_sdk/tracing_utils.py b/sentry_sdk/tracing_utils.py index b31d3d85c5..c1cfde293b 100644 --- a/sentry_sdk/tracing_utils.py +++ b/sentry_sdk/tracing_utils.py @@ -6,7 +6,6 @@ import sys from collections.abc import Mapping from datetime import timedelta -from decimal import ROUND_DOWN, Decimal, DefaultContext, localcontext from random import Random from urllib.parse import quote, unquote import uuid @@ -502,7 +501,7 @@ def _fill_sample_rand(self): return sample_rand = try_convert( - Decimal, self.dynamic_sampling_context.get("sample_rand") + float, self.dynamic_sampling_context.get("sample_rand") ) if sample_rand is not None and 0 <= sample_rand < 1: # sample_rand is present and valid, so don't overwrite it @@ -650,7 +649,7 @@ def populate_from_transaction(cls, transaction): options = client.options or {} sentry_items["trace_id"] = transaction.trace_id - sentry_items["sample_rand"] = str(transaction._sample_rand) + sentry_items["sample_rand"] = f"{transaction._sample_rand:.6f}" # noqa: E231 if options.get("environment"): sentry_items["environment"] = options["environment"] @@ -724,15 +723,15 @@ def strip_sentry_baggage(header): ) def _sample_rand(self): - # type: () -> Optional[Decimal] + # type: () -> Optional[float] """Convenience method to get the sample_rand value from the sentry_items. - We validate the value and parse it as a Decimal before returning it. The value is considered - valid if it is a Decimal in the range [0, 1). + We validate the value and parse it as a float before returning it. The value is considered + valid if it is a float in the range [0, 1). """ - sample_rand = try_convert(Decimal, self.sentry_items.get("sample_rand")) + sample_rand = try_convert(float, self.sentry_items.get("sample_rand")) - if sample_rand is not None and Decimal(0) <= sample_rand < Decimal(1): + if sample_rand is not None and 0.0 <= sample_rand < 1.0: return sample_rand return None @@ -898,7 +897,7 @@ def _generate_sample_rand( *, interval=(0.0, 1.0), # type: tuple[float, float] ): - # type: (...) -> Decimal + # type: (...) -> float """Generate a sample_rand value from a trace ID. 
The generated value will be pseudorandomly chosen from the provided @@ -913,19 +912,16 @@ def _generate_sample_rand( raise ValueError("Invalid interval: lower must be less than upper") rng = Random(trace_id) - sample_rand = upper - while sample_rand >= upper: - sample_rand = rng.uniform(lower, upper) - - # Round down to exactly six decimal-digit precision. - # Setting the context is needed to avoid an InvalidOperation exception - # in case the user has changed the default precision or set traps. - with localcontext(DefaultContext) as ctx: - ctx.prec = 6 - return Decimal(sample_rand).quantize( - Decimal("0.000001"), - rounding=ROUND_DOWN, - ) + lower_scaled = int(lower * 1_000_000) + upper_scaled = int(upper * 1_000_000) + try: + sample_rand_scaled = rng.randrange(lower_scaled, upper_scaled) + except ValueError: + # In some corner cases it might happen that the range is too small + # In that case, just take the lower bound + sample_rand_scaled = lower_scaled + + return sample_rand_scaled / 1_000_000 def _sample_rand_range(parent_sampled, sample_rate): diff --git a/sentry_sdk/utils.py b/sentry_sdk/utils.py index b0f3fa4a4c..3fe3ac3eec 100644 --- a/sentry_sdk/utils.py +++ b/sentry_sdk/utils.py @@ -1934,6 +1934,12 @@ def try_convert(convert_func, value): given function. Return None if the conversion fails, i.e. if the function raises an exception. """ + try: + if isinstance(value, convert_func): # type: ignore + return value + except TypeError: + pass + try: return convert_func(value) except Exception: diff --git a/setup.py b/setup.py index 1b4d0063e4..58101aa65f 100644 --- a/setup.py +++ b/setup.py @@ -21,7 +21,7 @@ def get_file_text(file_name): setup( name="sentry-sdk", - version="2.37.1", + version="2.38.0", author="Sentry Team and Contributors", author_email="hello@sentry.io", url="https://github.com/getsentry/sentry-python", diff --git a/tests/integrations/aiohttp/test_aiohttp.py b/tests/integrations/aiohttp/test_aiohttp.py index dbb4286370..267ce08fdd 100644 --- a/tests/integrations/aiohttp/test_aiohttp.py +++ b/tests/integrations/aiohttp/test_aiohttp.py @@ -618,7 +618,7 @@ async def handler(request): raw_server = await aiohttp_raw_server(handler) - with mock.patch("sentry_sdk.tracing_utils.Random.uniform", return_value=0.5): + with mock.patch("sentry_sdk.tracing_utils.Random.randrange", return_value=500000): with start_transaction( name="/interactions/other-dogs/new-dog", op="greeting.sniff", diff --git a/tests/integrations/anthropic/test_anthropic.py b/tests/integrations/anthropic/test_anthropic.py index eba07a1df6..3893626026 100644 --- a/tests/integrations/anthropic/test_anthropic.py +++ b/tests/integrations/anthropic/test_anthropic.py @@ -1,6 +1,6 @@ +import pytest from unittest import mock - try: from unittest.mock import AsyncMock except ImportError: @@ -10,7 +10,6 @@ async def __call__(self, *args, **kwargs): return super(AsyncMock, self).__call__(*args, **kwargs) -import pytest from anthropic import Anthropic, AnthropicError, AsyncAnthropic, AsyncStream, Stream from anthropic.types import MessageDeltaUsage, TextDelta, Usage from anthropic.types.content_block_delta_event import ContentBlockDeltaEvent @@ -20,9 +19,6 @@ async def __call__(self, *args, **kwargs): from anthropic.types.message_delta_event import MessageDeltaEvent from anthropic.types.message_start_event import MessageStartEvent -from sentry_sdk.integrations.anthropic import _set_output_data, _collect_ai_data -from sentry_sdk.utils import package_version - try: from anthropic.types import InputJSONDelta except 
ImportError: @@ -46,9 +42,16 @@ async def __call__(self, *args, **kwargs): from sentry_sdk import start_transaction, start_span from sentry_sdk.consts import OP, SPANDATA -from sentry_sdk.integrations.anthropic import AnthropicIntegration +from sentry_sdk.integrations.anthropic import ( + AnthropicIntegration, + _set_output_data, + _collect_ai_data, +) +from sentry_sdk.utils import package_version + ANTHROPIC_VERSION = package_version("anthropic") + EXAMPLE_MESSAGE = Message( id="id", model="model", @@ -121,10 +124,7 @@ def test_nonstreaming_create_message( span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES] == '[{"role": "user", "content": "Hello, Claude"}]' ) - assert ( - span["data"][SPANDATA.GEN_AI_RESPONSE_TEXT] - == '[{"text": "Hi, I\'m Claude.", "type": "text"}]' - ) + assert span["data"][SPANDATA.GEN_AI_RESPONSE_TEXT] == "Hi, I'm Claude." else: assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in span["data"] assert SPANDATA.GEN_AI_RESPONSE_TEXT not in span["data"] @@ -193,10 +193,7 @@ async def test_nonstreaming_create_message_async( span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES] == '[{"role": "user", "content": "Hello, Claude"}]' ) - assert ( - span["data"][SPANDATA.GEN_AI_RESPONSE_TEXT] - == '[{"text": "Hi, I\'m Claude.", "type": "text"}]' - ) + assert span["data"][SPANDATA.GEN_AI_RESPONSE_TEXT] == "Hi, I'm Claude." else: assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in span["data"] assert SPANDATA.GEN_AI_RESPONSE_TEXT not in span["data"] @@ -296,10 +293,7 @@ def test_streaming_create_message( span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES] == '[{"role": "user", "content": "Hello, Claude"}]' ) - assert ( - span["data"][SPANDATA.GEN_AI_RESPONSE_TEXT] - == '[{"text": "Hi! I\'m Claude!", "type": "text"}]' - ) + assert span["data"][SPANDATA.GEN_AI_RESPONSE_TEXT] == "Hi! I'm Claude!" else: assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in span["data"] @@ -403,10 +397,7 @@ async def test_streaming_create_message_async( span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES] == '[{"role": "user", "content": "Hello, Claude"}]' ) - assert ( - span["data"][SPANDATA.GEN_AI_RESPONSE_TEXT] - == '[{"text": "Hi! I\'m Claude!", "type": "text"}]' - ) + assert span["data"][SPANDATA.GEN_AI_RESPONSE_TEXT] == "Hi! I'm Claude!" 
else: assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in span["data"] @@ -539,7 +530,7 @@ def test_streaming_create_message_with_input_json_delta( ) assert ( span["data"][SPANDATA.GEN_AI_RESPONSE_TEXT] - == '[{"text": "{\'location\': \'San Francisco, CA\'}", "type": "text"}]' + == "{'location': 'San Francisco, CA'}" ) else: assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in span["data"] @@ -679,7 +670,7 @@ async def test_streaming_create_message_with_input_json_delta_async( ) assert ( span["data"][SPANDATA.GEN_AI_RESPONSE_TEXT] - == '[{"text": "{\'location\': \'San Francisco, CA\'}", "type": "text"}]' + == "{'location': 'San Francisco, CA'}" ) else: @@ -835,7 +826,7 @@ def test_set_output_data_with_input_json_delta(sentry_init): assert ( span._data.get(SPANDATA.GEN_AI_RESPONSE_TEXT) - == "[{\"text\": \"{'test': 'data','more': 'json'}\", \"type\": \"text\"}]" + == "{'test': 'data','more': 'json'}" ) assert span._data.get(SPANDATA.GEN_AI_USAGE_INPUT_TOKENS) == 10 assert span._data.get(SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS) == 20 diff --git a/tests/integrations/celery/test_celery.py b/tests/integrations/celery/test_celery.py index ce2e693143..80b4a423cb 100644 --- a/tests/integrations/celery/test_celery.py +++ b/tests/integrations/celery/test_celery.py @@ -518,8 +518,8 @@ def test_baggage_propagation(init_celery): def dummy_task(self, x, y): return _get_headers(self) - # patch random.uniform to return a predictable sample_rand value - with mock.patch("sentry_sdk.tracing_utils.Random.uniform", return_value=0.5): + # patch random.randrange to return a predictable sample_rand value + with mock.patch("sentry_sdk.tracing_utils.Random.randrange", return_value=500000): with start_transaction() as transaction: result = dummy_task.apply_async( args=(1, 0), diff --git a/tests/integrations/httpx/test_httpx.py b/tests/integrations/httpx/test_httpx.py index 5a35b68076..ba2575ce59 100644 --- a/tests/integrations/httpx/test_httpx.py +++ b/tests/integrations/httpx/test_httpx.py @@ -170,8 +170,8 @@ def test_outgoing_trace_headers_append_to_baggage( url = "http://example.com/" - # patch random.uniform to return a predictable sample_rand value - with mock.patch("sentry_sdk.tracing_utils.Random.uniform", return_value=0.5): + # patch random.randrange to return a predictable sample_rand value + with mock.patch("sentry_sdk.tracing_utils.Random.randrange", return_value=500000): with start_transaction( name="/interactions/other-dogs/new-dog", op="greeting.sniff", diff --git a/tests/integrations/huggingface_hub/test_huggingface_hub.py b/tests/integrations/huggingface_hub/test_huggingface_hub.py index df0c6c6d76..86f9c10109 100644 --- a/tests/integrations/huggingface_hub/test_huggingface_hub.py +++ b/tests/integrations/huggingface_hub/test_huggingface_hub.py @@ -1,186 +1,815 @@ -import itertools from unittest import mock - import pytest -from huggingface_hub import ( - InferenceClient, -) -from huggingface_hub.errors import OverloadedError +import responses + +from huggingface_hub import InferenceClient -from sentry_sdk import start_transaction -from sentry_sdk.consts import SPANDATA +import sentry_sdk +from sentry_sdk.utils import package_version from sentry_sdk.integrations.huggingface_hub import HuggingfaceHubIntegration +from typing import TYPE_CHECKING -def mock_client_post(client, post_mock): - # huggingface-hub==0.28.0 deprecates the `post` method - # so patch `_inner_post` instead - if hasattr(client, "post"): - client.post = post_mock - if hasattr(client, "_inner_post"): - client._inner_post = post_mock +try: + from 
huggingface_hub.utils._errors import HfHubHTTPError +except ImportError: + from huggingface_hub.errors import HfHubHTTPError -@pytest.mark.parametrize( - "send_default_pii, include_prompts, details_arg", - itertools.product([True, False], repeat=3), -) -def test_nonstreaming_chat_completion( - sentry_init, capture_events, send_default_pii, include_prompts, details_arg -): - sentry_init( - integrations=[HuggingfaceHubIntegration(include_prompts=include_prompts)], - traces_sample_rate=1.0, - send_default_pii=send_default_pii, +if TYPE_CHECKING: + from typing import Any + + +HF_VERSION = package_version("huggingface-hub") + +if HF_VERSION and HF_VERSION < (0, 30, 0): + MODEL_ENDPOINT = "https://api-inference.huggingface.co/models/{model_name}" + INFERENCE_ENDPOINT = "https://api-inference.huggingface.co/models/{model_name}" +else: + MODEL_ENDPOINT = "https://huggingface.co/api/models/{model_name}" + INFERENCE_ENDPOINT = ( + "https://router.huggingface.co/hf-inference/models/{model_name}" ) - events = capture_events() - client = InferenceClient(model="https://") - if details_arg: - post_mock = mock.Mock( - return_value=b"""[{ - "generated_text": "the model response", +@pytest.fixture +def mock_hf_text_generation_api(): + # type: () -> Any + """Mock HuggingFace text generation API""" + with responses.RequestsMock(assert_all_requests_are_fired=False) as rsps: + model_name = "test-model" + + # Mock model info endpoint + rsps.add( + responses.GET, + MODEL_ENDPOINT.format(model_name=model_name), + json={ + "id": model_name, + "pipeline_tag": "text-generation", + "inferenceProviderMapping": { + "hf-inference": { + "status": "live", + "providerId": model_name, + "task": "text-generation", + } + }, + }, + status=200, + ) + + # Mock text generation endpoint + rsps.add( + responses.POST, + INFERENCE_ENDPOINT.format(model_name=model_name), + json={ + "generated_text": "[mocked] Hello! 
How can i help you?", "details": { "finish_reason": "length", "generated_tokens": 10, "prefill": [], - "tokens": [] - } - }]""" + "tokens": [], + }, + }, + status=200, + ) + + yield rsps + + +@pytest.fixture +def mock_hf_api_with_errors(): + # type: () -> Any + """Mock HuggingFace API that always raises errors for any request""" + with responses.RequestsMock(assert_all_requests_are_fired=False) as rsps: + model_name = "test-model" + + # Mock model info endpoint with error + rsps.add( + responses.GET, + MODEL_ENDPOINT.format(model_name=model_name), + json={"error": "Model not found"}, + status=404, + ) + + # Mock text generation endpoint with error + rsps.add( + responses.POST, + INFERENCE_ENDPOINT.format(model_name=model_name), + json={"error": "Internal server error", "message": "Something went wrong"}, + status=500, + ) + + # Mock chat completion endpoint with error + rsps.add( + responses.POST, + INFERENCE_ENDPOINT.format(model_name=model_name) + "/v1/chat/completions", + json={"error": "Internal server error", "message": "Something went wrong"}, + status=500, + ) + + # Catch-all pattern for any other model requests + rsps.add( + responses.GET, + "https://huggingface.co/api/models/test-model-error", + json={"error": "Generic model error"}, + status=500, + ) + + yield rsps + + +@pytest.fixture +def mock_hf_text_generation_api_streaming(): + # type: () -> Any + """Mock streaming HuggingFace text generation API""" + with responses.RequestsMock(assert_all_requests_are_fired=False) as rsps: + model_name = "test-model" + + # Mock model info endpoint + rsps.add( + responses.GET, + MODEL_ENDPOINT.format(model_name=model_name), + json={ + "id": model_name, + "pipeline_tag": "text-generation", + "inferenceProviderMapping": { + "hf-inference": { + "status": "live", + "providerId": model_name, + "task": "text-generation", + } + }, + }, + status=200, + ) + + # Mock text generation endpoint for streaming + streaming_response = b'data:{"token":{"id":1, "special": false, "text": "the mocked "}}\n\ndata:{"token":{"id":2, "special": false, "text": "model response"}, "details":{"finish_reason": "length", "generated_tokens": 10, "seed": 0}}\n\n' + + rsps.add( + responses.POST, + INFERENCE_ENDPOINT.format(model_name=model_name), + body=streaming_response, + status=200, + headers={ + "Content-Type": "text/event-stream", + "Cache-Control": "no-cache", + "Connection": "keep-alive", + }, + ) + + yield rsps + + +@pytest.fixture +def mock_hf_chat_completion_api(): + # type: () -> Any + """Mock HuggingFace chat completion API""" + with responses.RequestsMock(assert_all_requests_are_fired=False) as rsps: + model_name = "test-model" + + # Mock model info endpoint + rsps.add( + responses.GET, + MODEL_ENDPOINT.format(model_name=model_name), + json={ + "id": model_name, + "pipeline_tag": "conversational", + "inferenceProviderMapping": { + "hf-inference": { + "status": "live", + "providerId": model_name, + "task": "conversational", + } + }, + }, + status=200, + ) + + # Mock chat completion endpoint + rsps.add( + responses.POST, + INFERENCE_ENDPOINT.format(model_name=model_name) + "/v1/chat/completions", + json={ + "id": "xyz-123", + "created": 1234567890, + "model": f"{model_name}-123", + "system_fingerprint": "fp_123", + "choices": [ + { + "index": 0, + "finish_reason": "stop", + "message": { + "role": "assistant", + "content": "[mocked] Hello! 
How can I help you today?", + }, + } + ], + "usage": { + "completion_tokens": 8, + "prompt_tokens": 10, + "total_tokens": 18, + }, + }, + status=200, + ) + + yield rsps + + +@pytest.fixture +def mock_hf_chat_completion_api_tools(): + # type: () -> Any + """Mock HuggingFace chat completion API with tool calls.""" + with responses.RequestsMock(assert_all_requests_are_fired=False) as rsps: + model_name = "test-model" + + # Mock model info endpoint + rsps.add( + responses.GET, + MODEL_ENDPOINT.format(model_name=model_name), + json={ + "id": model_name, + "pipeline_tag": "conversational", + "inferenceProviderMapping": { + "hf-inference": { + "status": "live", + "providerId": model_name, + "task": "conversational", + } + }, + }, + status=200, + ) + + # Mock chat completion endpoint + rsps.add( + responses.POST, + INFERENCE_ENDPOINT.format(model_name=model_name) + "/v1/chat/completions", + json={ + "id": "xyz-123", + "created": 1234567890, + "model": f"{model_name}-123", + "system_fingerprint": "fp_123", + "choices": [ + { + "index": 0, + "finish_reason": "tool_calls", + "message": { + "role": "assistant", + "tool_calls": [ + { + "id": "call_123", + "type": "function", + "function": { + "name": "get_weather", + "arguments": {"location": "Paris"}, + }, + } + ], + }, + } + ], + "usage": { + "completion_tokens": 8, + "prompt_tokens": 10, + "total_tokens": 18, + }, + }, + status=200, + ) + + yield rsps + + +@pytest.fixture +def mock_hf_chat_completion_api_streaming(): + # type: () -> Any + """Mock streaming HuggingFace chat completion API""" + with responses.RequestsMock(assert_all_requests_are_fired=False) as rsps: + model_name = "test-model" + + # Mock model info endpoint + rsps.add( + responses.GET, + MODEL_ENDPOINT.format(model_name=model_name), + json={ + "id": model_name, + "pipeline_tag": "conversational", + "inferenceProviderMapping": { + "hf-inference": { + "status": "live", + "providerId": model_name, + "task": "conversational", + } + }, + }, + status=200, + ) + + # Mock chat completion streaming endpoint + streaming_chat_response = ( + b'data:{"id":"xyz-123","created":1234567890,"model":"test-model-123","system_fingerprint":"fp_123","choices":[{"delta":{"role":"assistant","content":"the mocked "},"index":0,"finish_reason":null}],"usage":null}\n\n' + b'data:{"id":"xyz-124","created":1234567890,"model":"test-model-123","system_fingerprint":"fp_123","choices":[{"delta":{"role":"assistant","content":"model response"},"index":0,"finish_reason":"stop"}],"usage":{"prompt_tokens":183,"completion_tokens":14,"total_tokens":197}}\n\n' + ) + + rsps.add( + responses.POST, + INFERENCE_ENDPOINT.format(model_name=model_name) + "/v1/chat/completions", + body=streaming_chat_response, + status=200, + headers={ + "Content-Type": "text/event-stream", + "Cache-Control": "no-cache", + "Connection": "keep-alive", + }, + ) + + yield rsps + + +@pytest.fixture +def mock_hf_chat_completion_api_streaming_tools(): + # type: () -> Any + """Mock streaming HuggingFace chat completion API with tool calls.""" + with responses.RequestsMock(assert_all_requests_are_fired=False) as rsps: + model_name = "test-model" + + # Mock model info endpoint + rsps.add( + responses.GET, + MODEL_ENDPOINT.format(model_name=model_name), + json={ + "id": model_name, + "pipeline_tag": "conversational", + "inferenceProviderMapping": { + "hf-inference": { + "status": "live", + "providerId": model_name, + "task": "conversational", + } + }, + }, + status=200, + ) + + # Mock chat completion streaming endpoint + streaming_chat_response = ( + 
b'data:{"id":"xyz-123","created":1234567890,"model":"test-model-123","system_fingerprint":"fp_123","choices":[{"delta":{"role":"assistant","content":"response with tool calls follows"},"index":0,"finish_reason":null}],"usage":null}\n\n' + b'data:{"id":"xyz-124","created":1234567890,"model":"test-model-123","system_fingerprint":"fp_123","choices":[{"delta":{"role":"assistant","tool_calls": [{"id": "call_123","type": "function","function": {"name": "get_weather", "arguments": {"location": "Paris"}}}]},"index":0,"finish_reason":"tool_calls"}],"usage":{"prompt_tokens":183,"completion_tokens":14,"total_tokens":197}}\n\n' ) - else: - post_mock = mock.Mock( - return_value=b'[{"generated_text": "the model response"}]' + + rsps.add( + responses.POST, + INFERENCE_ENDPOINT.format(model_name=model_name) + "/v1/chat/completions", + body=streaming_chat_response, + status=200, + headers={ + "Content-Type": "text/event-stream", + "Cache-Control": "no-cache", + "Connection": "keep-alive", + }, ) - mock_client_post(client, post_mock) - with start_transaction(name="huggingface_hub tx"): - response = client.text_generation( - prompt="hello", - details=details_arg, + yield rsps + + +@pytest.mark.parametrize("send_default_pii", [True, False]) +@pytest.mark.parametrize("include_prompts", [True, False]) +def test_text_generation( + sentry_init, + capture_events, + send_default_pii, + include_prompts, + mock_hf_text_generation_api, +): + # type: (Any, Any, Any, Any, Any) -> None + sentry_init( + traces_sample_rate=1.0, + send_default_pii=send_default_pii, + integrations=[HuggingfaceHubIntegration(include_prompts=include_prompts)], + ) + events = capture_events() + + client = InferenceClient(model="test-model") + + with sentry_sdk.start_transaction(name="test"): + client.text_generation( + "Hello", stream=False, + details=True, ) - if details_arg: - assert response.generated_text == "the model response" - else: - assert response == "the model response" - tx = events[0] - assert tx["type"] == "transaction" - span = tx["spans"][0] - assert span["op"] == "ai.chat_completions.create.huggingface_hub" + + (transaction,) = events + (span,) = transaction["spans"] + + assert span["op"] == "gen_ai.generate_text" + assert span["description"] == "generate_text test-model" + assert span["origin"] == "auto.ai.huggingface_hub" + + expected_data = { + "gen_ai.operation.name": "generate_text", + "gen_ai.request.model": "test-model", + "gen_ai.response.finish_reasons": "length", + "gen_ai.response.streaming": False, + "gen_ai.usage.total_tokens": 10, + "thread.id": mock.ANY, + "thread.name": mock.ANY, + } if send_default_pii and include_prompts: - assert "hello" in span["data"][SPANDATA.AI_INPUT_MESSAGES] - assert "the model response" in span["data"][SPANDATA.AI_RESPONSES] - else: - assert SPANDATA.AI_INPUT_MESSAGES not in span["data"] - assert SPANDATA.AI_RESPONSES not in span["data"] - - if details_arg: - assert span["data"]["gen_ai.usage.total_tokens"] == 10 - - -@pytest.mark.parametrize( - "send_default_pii, include_prompts, details_arg", - itertools.product([True, False], repeat=3), -) -def test_streaming_chat_completion( - sentry_init, capture_events, send_default_pii, include_prompts, details_arg + expected_data["gen_ai.request.messages"] = "Hello" + expected_data["gen_ai.response.text"] = "[mocked] Hello! How can i help you?" 
+ + if not send_default_pii or not include_prompts: + assert "gen_ai.request.messages" not in expected_data + assert "gen_ai.response.text" not in expected_data + + assert span["data"] == expected_data + + # text generation does not set the response model + assert "gen_ai.response.model" not in span["data"] + + +@pytest.mark.parametrize("send_default_pii", [True, False]) +@pytest.mark.parametrize("include_prompts", [True, False]) +def test_text_generation_streaming( + sentry_init, + capture_events, + send_default_pii, + include_prompts, + mock_hf_text_generation_api_streaming, ): + # type: (Any, Any, Any, Any, Any) -> None sentry_init( + traces_sample_rate=1.0, + send_default_pii=send_default_pii, integrations=[HuggingfaceHubIntegration(include_prompts=include_prompts)], + ) + events = capture_events() + + client = InferenceClient(model="test-model") + + with sentry_sdk.start_transaction(name="test"): + for _ in client.text_generation( + prompt="Hello", + stream=True, + details=True, + ): + pass + + (transaction,) = events + (span,) = transaction["spans"] + + assert span["op"] == "gen_ai.generate_text" + assert span["description"] == "generate_text test-model" + assert span["origin"] == "auto.ai.huggingface_hub" + + expected_data = { + "gen_ai.operation.name": "generate_text", + "gen_ai.request.model": "test-model", + "gen_ai.response.finish_reasons": "length", + "gen_ai.response.streaming": True, + "gen_ai.usage.total_tokens": 10, + "thread.id": mock.ANY, + "thread.name": mock.ANY, + } + + if send_default_pii and include_prompts: + expected_data["gen_ai.request.messages"] = "Hello" + expected_data["gen_ai.response.text"] = "the mocked model response" + + if not send_default_pii or not include_prompts: + assert "gen_ai.request.messages" not in expected_data + assert "gen_ai.response.text" not in expected_data + + assert span["data"] == expected_data + + # text generation does not set the response model + assert "gen_ai.response.model" not in span["data"] + + +@pytest.mark.parametrize("send_default_pii", [True, False]) +@pytest.mark.parametrize("include_prompts", [True, False]) +def test_chat_completion( + sentry_init, + capture_events, + send_default_pii, + include_prompts, + mock_hf_chat_completion_api, +): + # type: (Any, Any, Any, Any, Any) -> None + sentry_init( traces_sample_rate=1.0, send_default_pii=send_default_pii, + integrations=[HuggingfaceHubIntegration(include_prompts=include_prompts)], ) events = capture_events() - client = InferenceClient(model="https://") - - post_mock = mock.Mock( - return_value=[ - b"""data:{ - "token":{"id":1, "special": false, "text": "the model "} - }""", - b"""data:{ - "token":{"id":2, "special": false, "text": "response"}, - "details":{"finish_reason": "length", "generated_tokens": 10, "seed": 0} - }""", - ] + client = InferenceClient(model="test-model") + + with sentry_sdk.start_transaction(name="test"): + client.chat_completion( + messages=[{"role": "user", "content": "Hello!"}], + stream=False, + ) + + (transaction,) = events + (span,) = transaction["spans"] + + assert span["op"] == "gen_ai.chat" + assert span["description"] == "chat test-model" + assert span["origin"] == "auto.ai.huggingface_hub" + + expected_data = { + "gen_ai.operation.name": "chat", + "gen_ai.request.model": "test-model", + "gen_ai.response.finish_reasons": "stop", + "gen_ai.response.model": "test-model-123", + "gen_ai.response.streaming": False, + "gen_ai.usage.input_tokens": 10, + "gen_ai.usage.output_tokens": 8, + "gen_ai.usage.total_tokens": 18, + "thread.id": mock.ANY, + 
"thread.name": mock.ANY, + } + + if send_default_pii and include_prompts: + expected_data["gen_ai.request.messages"] = ( + '[{"role": "user", "content": "Hello!"}]' + ) + expected_data["gen_ai.response.text"] = ( + "[mocked] Hello! How can I help you today?" + ) + + if not send_default_pii or not include_prompts: + assert "gen_ai.request.messages" not in expected_data + assert "gen_ai.response.text" not in expected_data + + assert span["data"] == expected_data + + +@pytest.mark.parametrize("send_default_pii", [True, False]) +@pytest.mark.parametrize("include_prompts", [True, False]) +def test_chat_completion_streaming( + sentry_init, + capture_events, + send_default_pii, + include_prompts, + mock_hf_chat_completion_api_streaming, +): + # type: (Any, Any, Any, Any, Any) -> None + sentry_init( + traces_sample_rate=1.0, + send_default_pii=send_default_pii, + integrations=[HuggingfaceHubIntegration(include_prompts=include_prompts)], ) - mock_client_post(client, post_mock) + events = capture_events() + + client = InferenceClient(model="test-model") - with start_transaction(name="huggingface_hub tx"): - response = list( - client.text_generation( - prompt="hello", - details=details_arg, + with sentry_sdk.start_transaction(name="test"): + _ = list( + client.chat_completion( + [{"role": "user", "content": "Hello!"}], stream=True, ) ) - assert len(response) == 2 - if details_arg: - assert response[0].token.text + response[1].token.text == "the model response" - else: - assert response[0] + response[1] == "the model response" - tx = events[0] - assert tx["type"] == "transaction" - span = tx["spans"][0] - assert span["op"] == "ai.chat_completions.create.huggingface_hub" + (transaction,) = events + (span,) = transaction["spans"] + + assert span["op"] == "gen_ai.chat" + assert span["description"] == "chat test-model" + assert span["origin"] == "auto.ai.huggingface_hub" + + expected_data = { + "gen_ai.operation.name": "chat", + "gen_ai.request.model": "test-model", + "gen_ai.response.finish_reasons": "stop", + "gen_ai.response.model": "test-model-123", + "gen_ai.response.streaming": True, + "thread.id": mock.ANY, + "thread.name": mock.ANY, + } + # usage is not available in older versions of the library + if HF_VERSION and HF_VERSION >= (0, 26, 0): + expected_data["gen_ai.usage.input_tokens"] = 183 + expected_data["gen_ai.usage.output_tokens"] = 14 + expected_data["gen_ai.usage.total_tokens"] = 197 if send_default_pii and include_prompts: - assert "hello" in span["data"][SPANDATA.AI_INPUT_MESSAGES] - assert "the model response" in span["data"][SPANDATA.AI_RESPONSES] - else: - assert SPANDATA.AI_INPUT_MESSAGES not in span["data"] - assert SPANDATA.AI_RESPONSES not in span["data"] + expected_data["gen_ai.request.messages"] = ( + '[{"role": "user", "content": "Hello!"}]' + ) + expected_data["gen_ai.response.text"] = "the mocked model response" - if details_arg: - assert span["data"]["gen_ai.usage.total_tokens"] == 10 + if not send_default_pii or not include_prompts: + assert "gen_ai.request.messages" not in expected_data + assert "gen_ai.response.text" not in expected_data + assert span["data"] == expected_data -def test_bad_chat_completion(sentry_init, capture_events): - sentry_init(integrations=[HuggingfaceHubIntegration()], traces_sample_rate=1.0) + +def test_chat_completion_api_error( + sentry_init, capture_events, mock_hf_api_with_errors +): + # type: (Any, Any, Any) -> None + sentry_init(traces_sample_rate=1.0) events = capture_events() - client = InferenceClient(model="https://") - post_mock = 
mock.Mock(side_effect=OverloadedError("The server is overloaded")) - mock_client_post(client, post_mock) + client = InferenceClient(model="test-model") + + with sentry_sdk.start_transaction(name="test"): + with pytest.raises(HfHubHTTPError): + client.chat_completion( + messages=[{"role": "user", "content": "Hello!"}], + ) + + ( + error, + transaction, + ) = events - with pytest.raises(OverloadedError): - client.text_generation(prompt="hello") + assert error["exception"]["values"][0]["mechanism"]["type"] == "huggingface_hub" + assert not error["exception"]["values"][0]["mechanism"]["handled"] - (event,) = events - assert event["level"] == "error" + (span,) = transaction["spans"] + assert span["op"] == "gen_ai.chat" + assert span["description"] == "chat test-model" + assert span["origin"] == "auto.ai.huggingface_hub" + assert span.get("tags", {}).get("status") == "error" -def test_span_origin(sentry_init, capture_events): + assert ( + error["contexts"]["trace"]["trace_id"] + == transaction["contexts"]["trace"]["trace_id"] + ) + expected_data = { + "gen_ai.operation.name": "chat", + "gen_ai.request.model": "test-model", + "thread.id": mock.ANY, + "thread.name": mock.ANY, + } + assert span["data"] == expected_data + + +@pytest.mark.parametrize("send_default_pii", [True, False]) +@pytest.mark.parametrize("include_prompts", [True, False]) +def test_chat_completion_with_tools( + sentry_init, + capture_events, + send_default_pii, + include_prompts, + mock_hf_chat_completion_api_tools, +): + # type: (Any, Any, Any, Any, Any) -> None sentry_init( - integrations=[HuggingfaceHubIntegration()], traces_sample_rate=1.0, + send_default_pii=send_default_pii, + integrations=[HuggingfaceHubIntegration(include_prompts=include_prompts)], ) events = capture_events() - client = InferenceClient(model="https://") - post_mock = mock.Mock( - return_value=[ - b"""data:{ - "token":{"id":1, "special": false, "text": "the model "} - }""", - ] + client = InferenceClient(model="test-model") + + tools = [ + { + "type": "function", + "function": { + "name": "get_weather", + "description": "Get current weather", + "parameters": { + "type": "object", + "properties": {"location": {"type": "string"}}, + "required": ["location"], + }, + }, + } + ] + + with sentry_sdk.start_transaction(name="test"): + client.chat_completion( + messages=[{"role": "user", "content": "What is the weather in Paris?"}], + tools=tools, + tool_choice="auto", + ) + + (transaction,) = events + (span,) = transaction["spans"] + + assert span["op"] == "gen_ai.chat" + assert span["description"] == "chat test-model" + assert span["origin"] == "auto.ai.huggingface_hub" + + expected_data = { + "gen_ai.operation.name": "chat", + "gen_ai.request.available_tools": '[{"type": "function", "function": {"name": "get_weather", "description": "Get current weather", "parameters": {"type": "object", "properties": {"location": {"type": "string"}}, "required": ["location"]}}}]', + "gen_ai.request.model": "test-model", + "gen_ai.response.finish_reasons": "tool_calls", + "gen_ai.response.model": "test-model-123", + "gen_ai.usage.input_tokens": 10, + "gen_ai.usage.output_tokens": 8, + "gen_ai.usage.total_tokens": 18, + "thread.id": mock.ANY, + "thread.name": mock.ANY, + } + + if send_default_pii and include_prompts: + expected_data["gen_ai.request.messages"] = ( + '[{"role": "user", "content": "What is the weather in Paris?"}]' + ) + expected_data["gen_ai.response.tool_calls"] = ( + '[{"function": {"arguments": {"location": "Paris"}, "name": "get_weather", "description": 
"None"}, "id": "call_123", "type": "function"}]' + ) + + if not send_default_pii or not include_prompts: + assert "gen_ai.request.messages" not in expected_data + assert "gen_ai.response.text" not in expected_data + assert "gen_ai.response.tool_calls" not in expected_data + + assert span["data"] == expected_data + + +@pytest.mark.parametrize("send_default_pii", [True, False]) +@pytest.mark.parametrize("include_prompts", [True, False]) +def test_chat_completion_streaming_with_tools( + sentry_init, + capture_events, + send_default_pii, + include_prompts, + mock_hf_chat_completion_api_streaming_tools, +): + # type: (Any, Any, Any, Any, Any) -> None + sentry_init( + traces_sample_rate=1.0, + send_default_pii=send_default_pii, + integrations=[HuggingfaceHubIntegration(include_prompts=include_prompts)], ) - mock_client_post(client, post_mock) + events = capture_events() - with start_transaction(name="huggingface_hub tx"): - list( - client.text_generation( - prompt="hello", + client = InferenceClient(model="test-model") + + tools = [ + { + "type": "function", + "function": { + "name": "get_weather", + "description": "Get current weather", + "parameters": { + "type": "object", + "properties": {"location": {"type": "string"}}, + "required": ["location"], + }, + }, + } + ] + + with sentry_sdk.start_transaction(name="test"): + _ = list( + client.chat_completion( + messages=[{"role": "user", "content": "What is the weather in Paris?"}], stream=True, + tools=tools, + tool_choice="auto", ) ) - (event,) = events + (transaction,) = events + (span,) = transaction["spans"] + + assert span["op"] == "gen_ai.chat" + assert span["description"] == "chat test-model" + assert span["origin"] == "auto.ai.huggingface_hub" + + expected_data = { + "gen_ai.operation.name": "chat", + "gen_ai.request.available_tools": '[{"type": "function", "function": {"name": "get_weather", "description": "Get current weather", "parameters": {"type": "object", "properties": {"location": {"type": "string"}}, "required": ["location"]}}}]', + "gen_ai.request.model": "test-model", + "gen_ai.response.finish_reasons": "tool_calls", + "gen_ai.response.model": "test-model-123", + "gen_ai.response.streaming": True, + "thread.id": mock.ANY, + "thread.name": mock.ANY, + } + + if HF_VERSION and HF_VERSION >= (0, 26, 0): + expected_data["gen_ai.usage.input_tokens"] = 183 + expected_data["gen_ai.usage.output_tokens"] = 14 + expected_data["gen_ai.usage.total_tokens"] = 197 + + if send_default_pii and include_prompts: + expected_data["gen_ai.request.messages"] = ( + '[{"role": "user", "content": "What is the weather in Paris?"}]' + ) + expected_data["gen_ai.response.text"] = "response with tool calls follows" + expected_data["gen_ai.response.tool_calls"] = ( + '[{"function": {"arguments": {"location": "Paris"}, "name": "get_weather"}, "id": "call_123", "type": "function", "index": "None"}]' + ) + + if not send_default_pii or not include_prompts: + assert "gen_ai.request.messages" not in expected_data + assert "gen_ai.response.text" not in expected_data + assert "gen_ai.response.tool_calls" not in expected_data - assert event["contexts"]["trace"]["origin"] == "manual" - assert event["spans"][0]["origin"] == "auto.ai.huggingface_hub" + assert span["data"] == expected_data diff --git a/tests/integrations/openai_agents/test_openai_agents.py b/tests/integrations/openai_agents/test_openai_agents.py index fab8d9e13f..047b919213 100644 --- a/tests/integrations/openai_agents/test_openai_agents.py +++ b/tests/integrations/openai_agents/test_openai_agents.py @@ 
-115,6 +115,7 @@ async def test_agent_invocation_span( sentry_init( integrations=[OpenAIAgentsIntegration()], traces_sample_rate=1.0, + send_default_pii=True, ) events = capture_events() @@ -134,6 +135,21 @@ async def test_agent_invocation_span( assert transaction["contexts"]["trace"]["origin"] == "auto.ai.openai_agents" assert invoke_agent_span["description"] == "invoke_agent test_agent" + assert invoke_agent_span["data"]["gen_ai.request.messages"] == safe_serialize( + [ + { + "content": [ + {"text": "You are a helpful test assistant.", "type": "text"} + ], + "role": "system", + }, + {"content": [{"text": "Test input", "type": "text"}], "role": "user"}, + ] + ) + assert ( + invoke_agent_span["data"]["gen_ai.response.text"] + == "Hello, how can I help you?" + ) assert invoke_agent_span["data"]["gen_ai.operation.name"] == "invoke_agent" assert invoke_agent_span["data"]["gen_ai.system"] == "openai" assert invoke_agent_span["data"]["gen_ai.agent.name"] == "test_agent" diff --git a/tests/integrations/stdlib/test_httplib.py b/tests/integrations/stdlib/test_httplib.py index f6735d0e74..b8d46d0558 100644 --- a/tests/integrations/stdlib/test_httplib.py +++ b/tests/integrations/stdlib/test_httplib.py @@ -236,7 +236,7 @@ def test_outgoing_trace_headers_head_sdk(sentry_init, monkeypatch): monkeypatch.setattr(HTTPSConnection, "send", mock_send) sentry_init(traces_sample_rate=0.5, release="foo") - with mock.patch("sentry_sdk.tracing_utils.Random.uniform", return_value=0.25): + with mock.patch("sentry_sdk.tracing_utils.Random.randrange", return_value=250000): transaction = Transaction.continue_from_headers({}) with start_transaction(transaction=transaction, name="Head SDK tx") as transaction: diff --git a/tests/profiler/test_continuous_profiler.py b/tests/profiler/test_continuous_profiler.py index 7283ec7164..e4f5cb5e25 100644 --- a/tests/profiler/test_continuous_profiler.py +++ b/tests/profiler/test_continuous_profiler.py @@ -8,6 +8,7 @@ import sentry_sdk from sentry_sdk.consts import VERSION from sentry_sdk.profiler.continuous_profiler import ( + is_profile_session_sampled, get_profiler_id, setup_continuous_profiler, start_profiler, @@ -113,19 +114,25 @@ def test_continuous_profiler_valid_mode(mode, make_options, teardown_profiling): ], ) def test_continuous_profiler_setup_twice(mode, make_options, teardown_profiling): - options = make_options(mode=mode) + assert not is_profile_session_sampled() + # setting up the first time should return True to indicate success + options = make_options(mode=mode, profile_session_sample_rate=1.0) assert setup_continuous_profiler( options, mock_sdk_info, lambda envelope: None, ) - # setting up the second time should return False to indicate no-op - assert not setup_continuous_profiler( + assert is_profile_session_sampled() + + # setting up the second time should return True to indicate re-init + options = make_options(mode=mode, profile_session_sample_rate=0.0) + assert setup_continuous_profiler( options, mock_sdk_info, lambda envelope: None, ) + assert not is_profile_session_sampled() def assert_single_transaction_with_profile_chunks( diff --git a/tests/test_dsc.py b/tests/test_dsc.py index 8e549d0cf8..6097af7f95 100644 --- a/tests/test_dsc.py +++ b/tests/test_dsc.py @@ -175,7 +175,7 @@ def my_traces_sampler(sampling_context): } # We continue the incoming trace and start a new transaction - with mock.patch("sentry_sdk.tracing_utils.Random.uniform", return_value=0.125): + with mock.patch("sentry_sdk.tracing_utils.Random.randrange", return_value=125000): transaction = 
sentry_sdk.continue_trace(incoming_http_headers) with sentry_sdk.start_transaction(transaction, name="foo"): pass diff --git a/tests/test_envelope.py b/tests/test_envelope.py index d1bc668f05..06f8971dc3 100644 --- a/tests/test_envelope.py +++ b/tests/test_envelope.py @@ -1,4 +1,4 @@ -from sentry_sdk.envelope import Envelope +from sentry_sdk.envelope import Envelope, Item, PayloadRef from sentry_sdk.session import Session from sentry_sdk import capture_event import sentry_sdk.client @@ -239,3 +239,24 @@ def test_envelope_without_headers(): assert len(items) == 1 assert items[0].payload.get_bytes() == b'{"started": "2020-02-07T14:16:00Z"}' + + +def test_envelope_item_data_category_mapping(): + """Test that envelope items map to correct data categories for rate limiting.""" + test_cases = [ + ("event", "error"), + ("transaction", "transaction"), + ("log", "log_item"), + ("session", "session"), + ("attachment", "attachment"), + ("client_report", "internal"), + ("profile", "profile"), + ("profile_chunk", "profile_chunk"), + ("statsd", "metric_bucket"), + ("check_in", "monitor"), + ("unknown_type", "default"), + ] + + for item_type, expected_category in test_cases: + item = Item(payload=PayloadRef(json={"test": "data"}), type=item_type) + assert item.data_category == expected_category diff --git a/tests/test_monitor.py b/tests/test_monitor.py index b48d9f6282..9ffc943bed 100644 --- a/tests/test_monitor.py +++ b/tests/test_monitor.py @@ -73,7 +73,7 @@ def test_transaction_uses_downsampled_rate( assert monitor.downsample_factor == 1 # make sure we don't sample the transaction - with mock.patch("sentry_sdk.tracing_utils.Random.uniform", return_value=0.75): + with mock.patch("sentry_sdk.tracing_utils.Random.randrange", return_value=750000): with sentry_sdk.start_transaction(name="foobar") as transaction: assert transaction.sampled is False assert transaction.sample_rate == 0.5 diff --git a/tests/test_propagationcontext.py b/tests/test_propagationcontext.py index a0ce1094fa..078a69c72b 100644 --- a/tests/test_propagationcontext.py +++ b/tests/test_propagationcontext.py @@ -136,13 +136,13 @@ def test_sample_rand_filled(parent_sampled, sample_rate, expected_interval): else: sample_rate_str = "" - # for convenience, we'll just return the lower bound of the interval - mock_uniform = mock.Mock(return_value=expected_interval[0]) + # for convenience, we'll just return the lower bound of the interval as an integer + mock_randrange = mock.Mock(return_value=int(expected_interval[0] * 1000000)) def mock_random_class(seed): assert seed == "00000000000000000000000000000000", "seed should be the trace_id" rv = Mock() - rv.uniform = mock_uniform + rv.randrange = mock_randrange return rv with mock.patch("sentry_sdk.tracing_utils.Random", mock_random_class): @@ -158,17 +158,20 @@ def mock_random_class(seed): ctx.dynamic_sampling_context["sample_rand"] == f"{expected_interval[0]:.6f}" # noqa: E231 ) - assert mock_uniform.call_count == 1 - assert mock_uniform.call_args[0] == expected_interval + assert mock_randrange.call_count == 1 + assert mock_randrange.call_args[0] == ( + int(expected_interval[0] * 1000000), + int(expected_interval[1] * 1000000), + ) def test_sample_rand_rounds_down(): # Mock value that should round down to 0.999_999 - mock_uniform = mock.Mock(return_value=0.999_999_9) + mock_randrange = mock.Mock(return_value=999999) def mock_random_class(_): rv = Mock() - rv.uniform = mock_uniform + rv.randrange = mock_randrange return rv with mock.patch("sentry_sdk.tracing_utils.Random", mock_random_class): diff 
--git a/tests/test_transport.py b/tests/test_transport.py index c6a1a0a7a7..e493515e9a 100644 --- a/tests/test_transport.py +++ b/tests/test_transport.py @@ -611,7 +611,7 @@ def test_metric_bucket_limits(capturing_server, response_code, make_client): assert capturing_server.captured[0].path == "/api/132/envelope/" capturing_server.clear_captured() - assert set(client.transport._disabled_until) == set(["metric_bucket"]) + assert set(client.transport._disabled_until) == {"metric_bucket"} client.transport.capture_envelope(envelope) client.capture_event({"type": "transaction"}) @@ -629,6 +629,43 @@ def test_metric_bucket_limits(capturing_server, response_code, make_client): ] +@pytest.mark.parametrize("response_code", [200, 429]) +def test_log_item_limits(capturing_server, response_code, make_client): + client = make_client() + capturing_server.respond_with( + code=response_code, + headers={ + "X-Sentry-Rate-Limits": "4711:log_item:organization:quota_exceeded:custom" + }, + ) + + envelope = Envelope() + envelope.add_item(Item(payload=b"{}", type="log")) + client.transport.capture_envelope(envelope) + client.flush() + + assert len(capturing_server.captured) == 1 + assert capturing_server.captured[0].path == "/api/132/envelope/" + capturing_server.clear_captured() + + assert set(client.transport._disabled_until) == {"log_item"} + + client.transport.capture_envelope(envelope) + client.capture_event({"type": "transaction"}) + client.flush() + + assert len(capturing_server.captured) == 2 + + envelope = capturing_server.captured[0].envelope + assert envelope.items[0].type == "transaction" + envelope = capturing_server.captured[1].envelope + assert envelope.items[0].type == "client_report" + report = parse_json(envelope.items[0].get_bytes()) + assert report["discarded_events"] == [ + {"category": "log_item", "reason": "ratelimit_backoff", "quantity": 1}, + ] + + @pytest.mark.parametrize("response_code", [200, 429]) def test_metric_bucket_limits_with_namespace( capturing_server, response_code, make_client diff --git a/tests/tracing/test_integration_tests.py b/tests/tracing/test_integration_tests.py index 61ef14b7d0..8b5659b694 100644 --- a/tests/tracing/test_integration_tests.py +++ b/tests/tracing/test_integration_tests.py @@ -169,7 +169,7 @@ def test_dynamic_sampling_head_sdk_creates_dsc( envelopes = capture_envelopes() # make sure transaction is sampled for both cases - with mock.patch("sentry_sdk.tracing_utils.Random.uniform", return_value=0.25): + with mock.patch("sentry_sdk.tracing_utils.Random.randrange", return_value=250000): transaction = Transaction.continue_from_headers({}, name="Head SDK tx") # will create empty mutable baggage diff --git a/tests/tracing/test_sample_rand.py b/tests/tracing/test_sample_rand.py index f9c10aa04e..4a74950b30 100644 --- a/tests/tracing/test_sample_rand.py +++ b/tests/tracing/test_sample_rand.py @@ -1,5 +1,3 @@ -import decimal -from decimal import Inexact, FloatOperation from unittest import mock import pytest @@ -20,7 +18,8 @@ def test_deterministic_sampled(sentry_init, capture_events, sample_rate, sample_ events = capture_events() with mock.patch( - "sentry_sdk.tracing_utils.Random.uniform", return_value=sample_rand + "sentry_sdk.tracing_utils.Random.randrange", + return_value=int(sample_rand * 1000000), ): with sentry_sdk.start_transaction() as transaction: assert ( @@ -55,35 +54,3 @@ def test_transaction_uses_incoming_sample_rand( # Transaction event captured if sample_rand < sample_rate, indicating that # sample_rand is used to make the sampling decision. 
assert len(events) == int(sample_rand < sample_rate) - - -def test_decimal_context(sentry_init, capture_events): - """ - Ensure that having a user altered decimal context with a precision below 6 - does not cause an InvalidOperation exception. - """ - sentry_init(traces_sample_rate=1.0) - events = capture_events() - - old_prec = decimal.getcontext().prec - old_inexact = decimal.getcontext().traps[Inexact] - old_float_operation = decimal.getcontext().traps[FloatOperation] - - decimal.getcontext().prec = 2 - decimal.getcontext().traps[Inexact] = True - decimal.getcontext().traps[FloatOperation] = True - - try: - with mock.patch( - "sentry_sdk.tracing_utils.Random.uniform", return_value=0.123456789 - ): - with sentry_sdk.start_transaction() as transaction: - assert ( - transaction.get_baggage().sentry_items["sample_rand"] == "0.123456" - ) - finally: - decimal.getcontext().prec = old_prec - decimal.getcontext().traps[Inexact] = old_inexact - decimal.getcontext().traps[FloatOperation] = old_float_operation - - assert len(events) == 1 diff --git a/tests/tracing/test_sample_rand_propagation.py b/tests/tracing/test_sample_rand_propagation.py index ea3ea548ff..e6f3e99510 100644 --- a/tests/tracing/test_sample_rand_propagation.py +++ b/tests/tracing/test_sample_rand_propagation.py @@ -35,9 +35,9 @@ def test_continue_trace_missing_sample_rand(): "baggage": "sentry-placeholder=asdf", } - mock_uniform = Mock(return_value=0.5) - - with mock.patch("sentry_sdk.tracing_utils.Random.uniform", mock_uniform): + with mock.patch( + "sentry_sdk.tracing_utils.Random.randrange", Mock(return_value=500000) + ): transaction = sentry_sdk.continue_trace(headers) assert transaction.get_baggage().sentry_items["sample_rand"] == "0.500000" diff --git a/tox.ini b/tox.ini index ff2403f515..5fe52a1e2b 100644 --- a/tox.ini +++ b/tox.ini @@ -10,7 +10,7 @@ # The file (and all resulting CI YAMLs) then need to be regenerated via # "scripts/generate-test-files.sh". 
# -# Last generated: 2025-09-08T11:35:09.849536+00:00 +# Last generated: 2025-09-15T12:28:26.599446+00:00 [tox] requires = @@ -98,12 +98,12 @@ envlist = {py3.8,py3.11,py3.12}-anthropic-v0.16.0 {py3.8,py3.11,py3.12}-anthropic-v0.33.1 {py3.8,py3.11,py3.12}-anthropic-v0.50.0 - {py3.8,py3.12,py3.13}-anthropic-v0.66.0 + {py3.8,py3.12,py3.13}-anthropic-v0.67.0 {py3.9,py3.10,py3.11}-cohere-v5.4.0 {py3.9,py3.11,py3.12}-cohere-v5.9.4 {py3.9,py3.11,py3.12}-cohere-v5.13.12 - {py3.9,py3.11,py3.12}-cohere-v5.17.0 + {py3.9,py3.11,py3.12}-cohere-v5.18.0 {py3.9,py3.11,py3.12}-langchain-base-v0.1.20 {py3.9,py3.11,py3.12}-langchain-base-v0.2.17 @@ -116,12 +116,12 @@ envlist = {py3.8,py3.11,py3.12}-openai-base-v1.0.1 {py3.8,py3.11,py3.12}-openai-base-v1.36.1 {py3.8,py3.11,py3.12}-openai-base-v1.71.0 - {py3.8,py3.12,py3.13}-openai-base-v1.106.1 + {py3.8,py3.12,py3.13}-openai-base-v1.107.2 {py3.8,py3.11,py3.12}-openai-notiktoken-v1.0.1 {py3.8,py3.11,py3.12}-openai-notiktoken-v1.36.1 {py3.8,py3.11,py3.12}-openai-notiktoken-v1.71.0 - {py3.8,py3.12,py3.13}-openai-notiktoken-v1.106.1 + {py3.8,py3.12,py3.13}-openai-notiktoken-v1.107.2 {py3.9,py3.12,py3.13}-langgraph-v0.6.7 {py3.10,py3.12,py3.13}-langgraph-v1.0.0a3 @@ -129,9 +129,10 @@ envlist = {py3.10,py3.11,py3.12}-openai_agents-v0.0.19 {py3.10,py3.12,py3.13}-openai_agents-v0.1.0 {py3.10,py3.12,py3.13}-openai_agents-v0.2.11 + {py3.10,py3.12,py3.13}-openai_agents-v0.3.0 - {py3.8,py3.10,py3.11}-huggingface_hub-v0.22.2 - {py3.8,py3.11,py3.12}-huggingface_hub-v0.26.5 + {py3.8,py3.10,py3.11}-huggingface_hub-v0.24.7 + {py3.8,py3.12,py3.13}-huggingface_hub-v0.27.1 {py3.8,py3.12,py3.13}-huggingface_hub-v0.30.2 {py3.8,py3.12,py3.13}-huggingface_hub-v0.34.4 {py3.8,py3.12,py3.13}-huggingface_hub-v0.35.0rc0 @@ -141,7 +142,7 @@ envlist = {py3.6,py3.7}-boto3-v1.12.49 {py3.6,py3.9,py3.10}-boto3-v1.20.54 {py3.7,py3.11,py3.12}-boto3-v1.28.85 - {py3.9,py3.12,py3.13}-boto3-v1.40.25 + {py3.9,py3.12,py3.13}-boto3-v1.40.30 {py3.6,py3.7,py3.8}-chalice-v1.16.0 {py3.6,py3.7,py3.8}-chalice-v1.21.9 @@ -160,7 +161,7 @@ envlist = {py3.6}-pymongo-v3.5.1 {py3.6,py3.10,py3.11}-pymongo-v3.13.0 {py3.6,py3.9,py3.10}-pymongo-v4.0.2 - {py3.9,py3.12,py3.13}-pymongo-v4.14.1 + {py3.9,py3.12,py3.13}-pymongo-v4.15.0 {py3.6}-redis_py_cluster_legacy-v1.3.6 {py3.6,py3.7}-redis_py_cluster_legacy-v2.0.0 @@ -183,7 +184,7 @@ envlist = {py3.7,py3.12,py3.13}-statsig-v0.55.3 {py3.7,py3.12,py3.13}-statsig-v0.58.4 {py3.7,py3.12,py3.13}-statsig-v0.61.0 - {py3.7,py3.12,py3.13}-statsig-v0.63.0 + {py3.7,py3.12,py3.13}-statsig-v0.64.0 {py3.8,py3.12,py3.13}-unleash-v6.0.1 {py3.8,py3.12,py3.13}-unleash-v6.1.0 @@ -233,6 +234,7 @@ envlist = {py3.6,py3.7,py3.8}-celery-v4.4.7 {py3.6,py3.7,py3.8}-celery-v5.0.5 {py3.8,py3.12,py3.13}-celery-v5.5.3 + {py3.8,py3.12,py3.13}-celery-v5.6.0b1 {py3.6,py3.7}-dramatiq-v1.9.0 {py3.6,py3.8,py3.9}-dramatiq-v1.12.3 @@ -263,9 +265,9 @@ envlist = {py3.9,py3.12,py3.13}-flask-v3.1.2 {py3.6,py3.9,py3.10}-starlette-v0.16.0 - {py3.7,py3.10,py3.11}-starlette-v0.26.1 - {py3.8,py3.11,py3.12}-starlette-v0.36.3 - {py3.9,py3.12,py3.13}-starlette-v0.47.3 + {py3.7,py3.10,py3.11}-starlette-v0.27.0 + {py3.8,py3.12,py3.13}-starlette-v0.38.6 + {py3.9,py3.12,py3.13}-starlette-v0.48.0 {py3.6,py3.9,py3.10}-fastapi-v0.79.1 {py3.7,py3.10,py3.11}-fastapi-v0.91.0 @@ -358,6 +360,7 @@ deps = {py3.8,py3.9,py3.10,py3.11,py3.12}-gevent: pytest gevent: pytest-asyncio {py3.10,py3.11}-gevent: zope.event<5.0.0 + {py3.10,py3.11}-gevent: zope.interface<8.0 # === Integrations === @@ -459,7 +462,7 @@ deps = anthropic-v0.16.0: 
anthropic==0.16.0 anthropic-v0.33.1: anthropic==0.33.1 anthropic-v0.50.0: anthropic==0.50.0 - anthropic-v0.66.0: anthropic==0.66.0 + anthropic-v0.67.0: anthropic==0.67.0 anthropic: pytest-asyncio anthropic-v0.16.0: httpx<0.28.0 anthropic-v0.33.1: httpx<0.28.0 @@ -467,7 +470,7 @@ deps = cohere-v5.4.0: cohere==5.4.0 cohere-v5.9.4: cohere==5.9.4 cohere-v5.13.12: cohere==5.13.12 - cohere-v5.17.0: cohere==5.17.0 + cohere-v5.18.0: cohere==5.18.0 langchain-base-v0.1.20: langchain==0.1.20 langchain-base-v0.2.17: langchain==0.2.17 @@ -487,7 +490,7 @@ deps = openai-base-v1.0.1: openai==1.0.1 openai-base-v1.36.1: openai==1.36.1 openai-base-v1.71.0: openai==1.71.0 - openai-base-v1.106.1: openai==1.106.1 + openai-base-v1.107.2: openai==1.107.2 openai-base: pytest-asyncio openai-base: tiktoken openai-base-v1.0.1: httpx<0.28 @@ -496,7 +499,7 @@ deps = openai-notiktoken-v1.0.1: openai==1.0.1 openai-notiktoken-v1.36.1: openai==1.36.1 openai-notiktoken-v1.71.0: openai==1.71.0 - openai-notiktoken-v1.106.1: openai==1.106.1 + openai-notiktoken-v1.107.2: openai==1.107.2 openai-notiktoken: pytest-asyncio openai-notiktoken-v1.0.1: httpx<0.28 openai-notiktoken-v1.36.1: httpx<0.28 @@ -507,20 +510,22 @@ deps = openai_agents-v0.0.19: openai-agents==0.0.19 openai_agents-v0.1.0: openai-agents==0.1.0 openai_agents-v0.2.11: openai-agents==0.2.11 + openai_agents-v0.3.0: openai-agents==0.3.0 openai_agents: pytest-asyncio - huggingface_hub-v0.22.2: huggingface_hub==0.22.2 - huggingface_hub-v0.26.5: huggingface_hub==0.26.5 + huggingface_hub-v0.24.7: huggingface_hub==0.24.7 + huggingface_hub-v0.27.1: huggingface_hub==0.27.1 huggingface_hub-v0.30.2: huggingface_hub==0.30.2 huggingface_hub-v0.34.4: huggingface_hub==0.34.4 huggingface_hub-v0.35.0rc0: huggingface_hub==0.35.0rc0 + huggingface_hub: responses # ~~~ Cloud ~~~ boto3-v1.12.49: boto3==1.12.49 boto3-v1.20.54: boto3==1.20.54 boto3-v1.28.85: boto3==1.28.85 - boto3-v1.40.25: boto3==1.40.25 + boto3-v1.40.30: boto3==1.40.30 {py3.7,py3.8}-boto3: urllib3<2.0.0 chalice-v1.16.0: chalice==1.16.0 @@ -542,7 +547,7 @@ deps = pymongo-v3.5.1: pymongo==3.5.1 pymongo-v3.13.0: pymongo==3.13.0 pymongo-v4.0.2: pymongo==4.0.2 - pymongo-v4.14.1: pymongo==4.14.1 + pymongo-v4.15.0: pymongo==4.15.0 pymongo: mockupdb redis_py_cluster_legacy-v1.3.6: redis-py-cluster==1.3.6 @@ -566,7 +571,7 @@ deps = statsig-v0.55.3: statsig==0.55.3 statsig-v0.58.4: statsig==0.58.4 statsig-v0.61.0: statsig==0.61.0 - statsig-v0.63.0: statsig==0.63.0 + statsig-v0.64.0: statsig==0.64.0 statsig: typing_extensions unleash-v6.0.1: UnleashClient==6.0.1 @@ -637,6 +642,7 @@ deps = celery-v4.4.7: celery==4.4.7 celery-v5.0.5: celery==5.0.5 celery-v5.5.3: celery==5.5.3 + celery-v5.6.0b1: celery==5.6.0b1 celery: newrelic<10.17.0 celery: redis {py3.7}-celery: importlib-metadata<5.0 @@ -696,9 +702,9 @@ deps = flask-v1.1.4: markupsafe<2.1.0 starlette-v0.16.0: starlette==0.16.0 - starlette-v0.26.1: starlette==0.26.1 - starlette-v0.36.3: starlette==0.36.3 - starlette-v0.47.3: starlette==0.47.3 + starlette-v0.27.0: starlette==0.27.0 + starlette-v0.38.6: starlette==0.38.6 + starlette-v0.48.0: starlette==0.48.0 starlette: pytest-asyncio starlette: python-multipart starlette: requests @@ -706,8 +712,7 @@ deps = starlette: jinja2 starlette: httpx starlette-v0.16.0: httpx<0.28.0 - starlette-v0.26.1: httpx<0.28.0 - starlette-v0.36.3: httpx<0.28.0 + starlette-v0.27.0: httpx<0.28.0 {py3.6}-starlette: aiocontextvars fastapi-v0.79.1: fastapi==0.79.1
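
A recurring change in the hunks above is swapping mock.patch("sentry_sdk.tracing_utils.Random.uniform", ...) for mock.patch("sentry_sdk.tracing_utils.Random.randrange", ...), with the patched value scaled by 1_000_000 (0.25 becomes 250000, 0.5 becomes 500000, 0.75 becomes 750000). The sketch below shows that pattern in isolation; it is a minimal illustration rather than part of the diff, assuming the sentry_init and capture_events pytest fixtures used throughout this test suite and that sample_rand is derived as randrange(...) / 1_000_000, as the patched values above imply. The test and transaction names are placeholders.

    import sentry_sdk
    from unittest import mock


    def test_sample_rand_from_randrange_sketch(sentry_init, capture_events):
        # Illustrative only: patch the integer-based sampler that the updated
        # tests above rely on. 250000 / 1_000_000 yields sample_rand == 0.250000,
        # which is below the 0.5 sample rate, so the transaction is sampled.
        sentry_init(traces_sample_rate=0.5)
        events = capture_events()

        with mock.patch(
            "sentry_sdk.tracing_utils.Random.randrange", return_value=250000
        ):
            with sentry_sdk.start_transaction(name="sketch") as transaction:
                assert (
                    transaction.get_baggage().sentry_items["sample_rand"] == "0.250000"
                )
                assert transaction.sampled is True

        # The sampled transaction is captured once the context manager exits.
        assert len(events) == 1

The removal of test_decimal_context alongside this switch suggests the integer-based randrange path no longer depends on Decimal precision the way the old uniform-based sampling did, though that is an inference from the diff rather than something it states.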