From 1785c581817f33666b695447a4a06181c5e8f2aa Mon Sep 17 00:00:00 2001 From: heitorlessa Date: Fri, 10 Apr 2020 08:31:11 +0100 Subject: [PATCH 01/21] docs: add pypi badge --- python/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/README.md b/python/README.md index f369e359939..033bdeb787c 100644 --- a/python/README.md +++ b/python/README.md @@ -1,6 +1,6 @@ # Lambda Powertools -![PackageStatus](https://img.shields.io/static/v1?label=status&message=beta&color=blueviolet?style=flat-square) ![PythonSupport](https://img.shields.io/static/v1?label=python&message=3.6%20|%203.7|%203.8&color=blue?style=flat-square&logo=python) +![PackageStatus](https://img.shields.io/static/v1?label=status&message=beta&color=blueviolet?style=flat-square) ![PythonSupport](https://img.shields.io/static/v1?label=python&message=3.6%20|%203.7|%203.8&color=blue?style=flat-square&logo=python) [![PyPI version](https://badge.fury.io/py/aws-lambda-powertools.svg)](https://badge.fury.io/py/aws-lambda-powertools) A suite of utilities for AWS Lambda Functions that makes tracing with AWS X-Ray, structured logging and creating custom metrics asynchronously easier - Currently available for Python only and compatible with Python >=3.6. 
From f1f76ea7443b361c005ec55a94a721f560878b32 Mon Sep 17 00:00:00 2001 From: heitorlessa Date: Fri, 10 Apr 2020 08:31:40 +0100 Subject: [PATCH 02/21] fix: add missing single_metric example; test var name --- python/example/hello_world/app.py | 3 ++ python/tests/functional/test_metrics.py | 48 ++++++++++++------------- 2 files changed, 27 insertions(+), 24 deletions(-) diff --git a/python/example/hello_world/app.py b/python/example/hello_world/app.py index c8f5060bdab..033e55dcb03 100644 --- a/python/example/hello_world/app.py +++ b/python/example/hello_world/app.py @@ -52,6 +52,9 @@ def lambda_handler(event, context): logger.error(e) raise e + with single_metric(name="UniqueMetricDimension", unit="Seconds", value=1) as metric: + metric.add_dimension(name="unique_dimension", value="for_unique_metric") + logger.info("Returning message to the caller") return { "statusCode": 200, diff --git a/python/tests/functional/test_metrics.py b/python/tests/functional/test_metrics.py index 1bd20d8cec3..95b0422c9f2 100644 --- a/python/tests/functional/test_metrics.py +++ b/python/tests/functional/test_metrics.py @@ -84,9 +84,9 @@ def remove_timestamp(metrics: List): def test_single_metric(capsys, metric, dimension, namespace): - with single_metric(**metric) as my_metrics: - my_metrics.add_dimension(**dimension) - my_metrics.add_namespace(**namespace) + with single_metric(**metric) as my_metric: + my_metric.add_dimension(**dimension) + my_metric.add_namespace(**namespace) output = json.loads(capsys.readouterr().out.strip()) expected = serialize_single_metric(metric=metric, dimension=dimension, namespace=namespace) @@ -96,11 +96,11 @@ def test_single_metric(capsys, metric, dimension, namespace): def test_single_metric_one_metric_only(capsys, metric, dimension, namespace): - with single_metric(**metric) as my_metrics: - my_metrics.add_metric(name="second_metric", unit="Count", value=1) - my_metrics.add_metric(name="third_metric", unit="Seconds", value=1) - 
my_metrics.add_dimension(**dimension) - my_metrics.add_namespace(**namespace) + with single_metric(**metric) as my_metric: + my_metric.add_metric(name="second_metric", unit="Count", value=1) + my_metric.add_metric(name="third_metric", unit="Seconds", value=1) + my_metric.add_dimension(**dimension) + my_metric.add_namespace(**namespace) output = json.loads(capsys.readouterr().out.strip()) expected = serialize_single_metric(metric=metric, dimension=dimension, namespace=namespace) @@ -130,11 +130,11 @@ def test_multiple_namespaces(metric, dimension, namespace): namespace_b = {"name": "AnotherNamespace"} with pytest.raises(UniqueNamespaceError): - with single_metric(**metric) as m: - m.add_dimension(**dimension) - m.add_namespace(**namespace) - m.add_namespace(**namespace_a) - m.add_namespace(**namespace_b) + with single_metric(**metric) as my_metric: + my_metric.add_dimension(**dimension) + my_metric.add_namespace(**namespace) + my_metric.add_namespace(**namespace_a) + my_metric.add_namespace(**namespace_b) def test_log_metrics(capsys, metrics, dimensions, namespace): @@ -224,23 +224,23 @@ def test_incorrect_metric_unit(metric, dimension, namespace): metric["unit"] = "incorrect_unit" with pytest.raises(MetricUnitError): - with single_metric(**metric) as m: - m.add_dimension(**dimension) - m.add_namespace(**namespace) + with single_metric(**metric) as my_metric: + my_metric.add_dimension(**dimension) + my_metric.add_namespace(**namespace) def test_schema_no_namespace(metric, dimension): with pytest.raises(SchemaValidationError): - with single_metric(**metric) as m: - m.add_dimension(**dimension) + with single_metric(**metric) as my_metric: + my_metric.add_dimension(**dimension) def test_schema_incorrect_value(metric, dimension, namespace): metric["value"] = "some_value" with pytest.raises(MetricValueError): - with single_metric(**metric) as m: - m.add_dimension(**dimension) - m.add_namespace(**namespace) + with single_metric(**metric) as my_metric: + 
my_metric.add_dimension(**dimension) + my_metric.add_namespace(**namespace) def test_schema_no_metrics(dimensions, namespace): @@ -258,7 +258,7 @@ def test_exceed_number_of_dimensions(metric, namespace): dimensions.append({"name": f"test_{i}", "value": "test"}) with pytest.raises(SchemaValidationError): - with single_metric(**metric) as m: - m.add_namespace(**namespace) + with single_metric(**metric) as my_metric: + my_metric.add_namespace(**namespace) for dimension in dimensions: - m.add_dimension(**dimension) + my_metric.add_dimension(**dimension) From 4300166d3dc616d059e158ae3726e90173cd3cfe Mon Sep 17 00:00:00 2001 From: Heitor Lessa Date: Tue, 14 Apr 2020 08:43:23 +0100 Subject: [PATCH 03/21] chore: pypi monthly download badge --- python/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/README.md b/python/README.md index 033bdeb787c..9dd936f8091 100644 --- a/python/README.md +++ b/python/README.md @@ -1,6 +1,6 @@ # Lambda Powertools -![PackageStatus](https://img.shields.io/static/v1?label=status&message=beta&color=blueviolet?style=flat-square) ![PythonSupport](https://img.shields.io/static/v1?label=python&message=3.6%20|%203.7|%203.8&color=blue?style=flat-square&logo=python) [![PyPI version](https://badge.fury.io/py/aws-lambda-powertools.svg)](https://badge.fury.io/py/aws-lambda-powertools) +![PackageStatus](https://img.shields.io/static/v1?label=status&message=beta&color=blueviolet?style=flat-square) ![PythonSupport](https://img.shields.io/static/v1?label=python&message=3.6%20|%203.7|%203.8&color=blue?style=flat-square&logo=python) ![PyPI version](https://badge.fury.io/py/aws-lambda-powertools.svg)](https://badge.fury.io/py/aws-lambda-powertools) ![PyPi monthly downloads](https://img.shields.io/pypi/dm/aws-lambda-powertools) A suite of utilities for AWS Lambda Functions that makes tracing with AWS X-Ray, structured logging and creating custom metrics asynchronously easier - Currently available for Python only and compatible 
with Python >=3.6. From 8faaab88e6f5cf46942215abac22908c22ff6ee5 Mon Sep 17 00:00:00 2001 From: Heitor Lessa Date: Tue, 14 Apr 2020 08:46:40 +0100 Subject: [PATCH 04/21] chore: fix github badge typo --- python/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/README.md b/python/README.md index 9dd936f8091..d312db8028a 100644 --- a/python/README.md +++ b/python/README.md @@ -1,6 +1,6 @@ # Lambda Powertools -![PackageStatus](https://img.shields.io/static/v1?label=status&message=beta&color=blueviolet?style=flat-square) ![PythonSupport](https://img.shields.io/static/v1?label=python&message=3.6%20|%203.7|%203.8&color=blue?style=flat-square&logo=python) ![PyPI version](https://badge.fury.io/py/aws-lambda-powertools.svg)](https://badge.fury.io/py/aws-lambda-powertools) ![PyPi monthly downloads](https://img.shields.io/pypi/dm/aws-lambda-powertools) +![PackageStatus](https://img.shields.io/static/v1?label=status&message=beta&color=blueviolet?style=flat-square) ![PythonSupport](https://img.shields.io/static/v1?label=python&message=3.6%20|%203.7|%203.8&color=blue?style=flat-square&logo=python) ![PyPI version](https://badge.fury.io/py/aws-lambda-powertools.svg) ![PyPi monthly downloads](https://img.shields.io/pypi/dm/aws-lambda-powertools) A suite of utilities for AWS Lambda Functions that makes tracing with AWS X-Ray, structured logging and creating custom metrics asynchronously easier - Currently available for Python only and compatible with Python >=3.6. 
From 114c950b0f679b396c6f1f5fc25d84206e35a95f Mon Sep 17 00:00:00 2001 From: heitorlessa Date: Wed, 15 Apr 2020 18:23:35 +0100 Subject: [PATCH 05/21] feat: add docs to CI --- .github/workflows/python_docs.yml | 40 +++++++++++++++++++++++++++++++ 1 file changed, 40 insertions(+) create mode 100644 .github/workflows/python_docs.yml diff --git a/.github/workflows/python_docs.yml b/.github/workflows/python_docs.yml new file mode 100644 index 00000000000..aad141b7f69 --- /dev/null +++ b/.github/workflows/python_docs.yml @@ -0,0 +1,40 @@ +name: deploy + +on: + pull_request: + branches: + - develop + - master + paths: + - "python/**" + push: + branches: + - develop + - master + paths: + - "python/**" + +jobs: + deploy: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v1 + - name: Set up Python + uses: actions/setup-python@v1 + with: + python-version: 3.8 + - name: Install dependencies + run: | + python -m pip install --upgrade pip + make dev + working-directory: ./python/ + - name: build docs + run: | + make docs + working-directory: ./python/ + - name: deploy docs + uses: peaceiris/actions-gh-pages@v3 + with: + github_token: ${{ secrets.GITHUB_TOKEN }} + publish_dir: docs/aws_lambda_powertools/ + working-directory: ./python/ From fe594b030794ad5342b579eda01cdd74a2f5ecb5 Mon Sep 17 00:00:00 2001 From: heitorlessa Date: Wed, 15 Apr 2020 18:31:50 +0100 Subject: [PATCH 06/21] fix: CI attempt 2 --- .github/workflows/python_docs.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/python_docs.yml b/.github/workflows/python_docs.yml index aad141b7f69..26b5fb4a615 100644 --- a/.github/workflows/python_docs.yml +++ b/.github/workflows/python_docs.yml @@ -1,4 +1,4 @@ -name: deploy +name: Powertools Docs Python on: pull_request: @@ -22,7 +22,7 @@ jobs: - name: Set up Python uses: actions/setup-python@v1 with: - python-version: 3.8 + python-version: "3.8" - name: Install dependencies run: | python -m pip install --upgrade pip From 
88b0817b5b77c7484cebe04d3e803e6049495e74 Mon Sep 17 00:00:00 2001 From: heitorlessa Date: Wed, 15 Apr 2020 18:35:20 +0100 Subject: [PATCH 07/21] fix: CI attempt 3 --- .github/workflows/python_docs.yml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/python_docs.yml b/.github/workflows/python_docs.yml index 26b5fb4a615..753527f9b8e 100644 --- a/.github/workflows/python_docs.yml +++ b/.github/workflows/python_docs.yml @@ -17,6 +17,9 @@ on: jobs: deploy: runs-on: ubuntu-latest + defaults: + run: + working-directory: ./python/ steps: - uses: actions/checkout@v1 - name: Set up Python @@ -27,14 +30,11 @@ jobs: run: | python -m pip install --upgrade pip make dev - working-directory: ./python/ - name: build docs run: | make docs - working-directory: ./python/ - name: deploy docs uses: peaceiris/actions-gh-pages@v3 with: github_token: ${{ secrets.GITHUB_TOKEN }} - publish_dir: docs/aws_lambda_powertools/ - working-directory: ./python/ + publish_dir: python/docs/aws_lambda_powertools/ From dea272f848b02be7894d2b7b8fbc97e6c2e8701a Mon Sep 17 00:00:00 2001 From: heitorlessa Date: Wed, 15 Apr 2020 18:36:49 +0100 Subject: [PATCH 08/21] fix: CI attempt 3 --- .github/workflows/python_docs.yml | 40 ----------------------------- .github/workflows/pythonpackage.yml | 23 +++++++++++++++++ 2 files changed, 23 insertions(+), 40 deletions(-) delete mode 100644 .github/workflows/python_docs.yml diff --git a/.github/workflows/python_docs.yml b/.github/workflows/python_docs.yml deleted file mode 100644 index 753527f9b8e..00000000000 --- a/.github/workflows/python_docs.yml +++ /dev/null @@ -1,40 +0,0 @@ -name: Powertools Docs Python - -on: - pull_request: - branches: - - develop - - master - paths: - - "python/**" - push: - branches: - - develop - - master - paths: - - "python/**" - -jobs: - deploy: - runs-on: ubuntu-latest - defaults: - run: - working-directory: ./python/ - steps: - - uses: actions/checkout@v1 - - name: Set up Python - uses: 
actions/setup-python@v1 - with: - python-version: "3.8" - - name: Install dependencies - run: | - python -m pip install --upgrade pip - make dev - - name: build docs - run: | - make docs - - name: deploy docs - uses: peaceiris/actions-gh-pages@v3 - with: - github_token: ${{ secrets.GITHUB_TOKEN }} - publish_dir: python/docs/aws_lambda_powertools/ diff --git a/.github/workflows/pythonpackage.yml b/.github/workflows/pythonpackage.yml index f52ff952361..f2895cb15e6 100644 --- a/.github/workflows/pythonpackage.yml +++ b/.github/workflows/pythonpackage.yml @@ -40,3 +40,26 @@ jobs: run: | make test working-directory: ./python/ + docs: + runs-on: ubuntu-latest + defaults: + run: + working-directory: ./python/ + steps: + - uses: actions/checkout@v1 + - name: Set up Python + uses: actions/setup-python@v1 + with: + python-version: "3.8" + - name: Install dependencies + run: | + python -m pip install --upgrade pip + make dev + - name: build docs + run: | + make docs + - name: deploy docs + uses: peaceiris/actions-gh-pages@v3 + with: + github_token: ${{ secrets.GITHUB_TOKEN }} + publish_dir: docs/aws_lambda_powertools/ From 2bab8adebdd09fefd472229a82bc293995611538 Mon Sep 17 00:00:00 2001 From: heitorlessa Date: Thu, 16 Apr 2020 08:26:36 +0100 Subject: [PATCH 09/21] fix: CI attempt 4 --- .github/workflows/pythonpackage.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/pythonpackage.yml b/.github/workflows/pythonpackage.yml index f2895cb15e6..44e661717b1 100644 --- a/.github/workflows/pythonpackage.yml +++ b/.github/workflows/pythonpackage.yml @@ -62,4 +62,4 @@ jobs: uses: peaceiris/actions-gh-pages@v3 with: github_token: ${{ secrets.GITHUB_TOKEN }} - publish_dir: docs/aws_lambda_powertools/ + publish_dir: python/docs/aws_lambda_powertools/ From a51945ef5fc9ac237003085e1dbb0001ba77fddc Mon Sep 17 00:00:00 2001 From: heitorlessa Date: Thu, 16 Apr 2020 08:55:03 +0100 Subject: [PATCH 10/21] chore: clean up CI workflows --- 
.github/workflows/python.yml | 38 +++++++++++++++++ .github/workflows/python_docs.yml | 35 ++++++++++++++++ .github/workflows/pythonpackage.yml | 65 ----------------------------- python/Makefile | 2 +- 4 files changed, 74 insertions(+), 66 deletions(-) create mode 100644 .github/workflows/python.yml create mode 100644 .github/workflows/python_docs.yml delete mode 100644 .github/workflows/pythonpackage.yml diff --git a/.github/workflows/python.yml b/.github/workflows/python.yml new file mode 100644 index 00000000000..54d1e4b1a86 --- /dev/null +++ b/.github/workflows/python.yml @@ -0,0 +1,38 @@ +name: Powertools Python + +on: + pull_request: + branches: + - develop + - master + paths: + - "python/**" + push: + branches: + - develop + - master + paths: + - "python/**" + +jobs: + build: + runs-on: ubuntu-latest + defaults: + run: + working-directory: ./python/ + strategy: + max-parallel: 4 + matrix: + python-version: [3.6, 3.7, 3.8] + steps: + - uses: actions/checkout@v1 + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v1 + with: + python-version: ${{ matrix.python-version }} + - name: Install dependencies + run: make dev + - name: Formatting and Linting + run: make lint + - name: Test with pytest + run: make test diff --git a/.github/workflows/python_docs.yml b/.github/workflows/python_docs.yml new file mode 100644 index 00000000000..b57bd29a475 --- /dev/null +++ b/.github/workflows/python_docs.yml @@ -0,0 +1,35 @@ +name: Powertools Python Docs + +on: + pull_request: + branches: + - master + paths: + - "python/**" + push: + branches: + - master + paths: + - "python/**" + +jobs: + docs: + runs-on: ubuntu-latest + defaults: + run: + working-directory: ./python/ + steps: + - uses: actions/checkout@v1 + - name: Set up Python + uses: actions/setup-python@v1 + with: + python-version: "3.8" + - name: Install dependencies + run: make dev + - name: build docs + run: make docs + - name: deploy docs + uses: peaceiris/actions-gh-pages@v3 + with: + 
github_token: ${{ secrets.GITHUB_TOKEN }} + publish_dir: python/docs/aws_lambda_powertools/ diff --git a/.github/workflows/pythonpackage.yml b/.github/workflows/pythonpackage.yml deleted file mode 100644 index 44e661717b1..00000000000 --- a/.github/workflows/pythonpackage.yml +++ /dev/null @@ -1,65 +0,0 @@ -name: Powertools Python - -on: - pull_request: - branches: - - develop - - master - paths: - - "python/**" - push: - branches: - - develop - - master - paths: - - "python/**" - -jobs: - build: - runs-on: ubuntu-latest - strategy: - max-parallel: 4 - matrix: - python-version: [3.6, 3.7, 3.8] - steps: - - uses: actions/checkout@v1 - - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v1 - with: - python-version: ${{ matrix.python-version }} - - name: Install dependencies - run: | - python -m pip install --upgrade pip - make dev - working-directory: ./python/ - - name: Formatting and Linting - run: | - make lint - working-directory: ./python/ - - name: Test with pytest - run: | - make test - working-directory: ./python/ - docs: - runs-on: ubuntu-latest - defaults: - run: - working-directory: ./python/ - steps: - - uses: actions/checkout@v1 - - name: Set up Python - uses: actions/setup-python@v1 - with: - python-version: "3.8" - - name: Install dependencies - run: | - python -m pip install --upgrade pip - make dev - - name: build docs - run: | - make docs - - name: deploy docs - uses: peaceiris/actions-gh-pages@v3 - with: - github_token: ${{ secrets.GITHUB_TOKEN }} - publish_dir: python/docs/aws_lambda_powertools/ diff --git a/python/Makefile b/python/Makefile index a6ce4aefeb9..b3c1cbd476f 100644 --- a/python/Makefile +++ b/python/Makefile @@ -3,7 +3,7 @@ target: @$(MAKE) pr dev: - pip install --upgrade poetry + pip install --upgrade pip poetry poetry install format: From 6ce07e9b22d56de36c6634bae37c6d59922a839c Mon Sep 17 00:00:00 2001 From: Heitor Lessa Date: Mon, 20 Apr 2020 16:55:50 +0100 Subject: [PATCH 11/21] Decorator factory 
Feat: Create your own middleware (#17) * feat(utils): add decorator factory * improv: use partial to reduce complexity * improv: add error handling * chore: type hint * docs: include pypi downloads badge * feat: opt in to trace each middleware that runs * improv: add initial util tests * improv: test explicit and implicit trace_execution * improv: test decorator with params * chore: linting * docs: include utilities * improv: correct tests, dec_factory only for func * improv: make util name more explicit * improv: doc trace_execution, fix casting * docs: add limitations, improve syntax * docs: use new docs syntax * fix: remove middleware decorator from libs * feat: build docs in CI * chore: linting * fix: CI python-version type * chore: remove docs CI * chore: kick CI * chore: include build badge master branch * chore: refactor naming * fix: rearrange tracing tests * improv(tracer): toggle default auto patching * feat(tracer): retrieve registered class instance * fix(Makefile): make cov target more explicit * improv(Register): support multiple classes reg. 
* improv(Register): inject class methods correctly * docs: add how to reutilize Tracer * improv(tracer): test auto patch method * improv: address nicolas feedback * improv: update example to reflect middleware feat * fix: metric dimension in root blob * chore: version bump Co-authored-by: heitorlessa --- python/HISTORY.md | 7 + python/Makefile | 2 +- python/README.md | 120 +++++++++- .../aws_lambda_powertools/logging/logger.py | 96 ++++---- python/aws_lambda_powertools/metrics/base.py | 1 + .../middleware_factory/__init__.py | 4 + .../middleware_factory/factory.py | 138 ++++++++++++ .../aws_lambda_powertools/tracing/tracer.py | 205 +++++++++++------- python/example/hello_world/app.py | 33 ++- python/poetry.lock | 125 ++++++----- python/pyproject.toml | 2 +- python/tests/functional/test_logger.py | 4 +- python/tests/functional/test_metrics.py | 4 +- python/tests/functional/test_tracing.py | 167 +++++++------- python/tests/functional/test_utils.py | 122 +++++++++++ python/tests/unit/test_tracing.py | 168 +++++++++----- 16 files changed, 850 insertions(+), 348 deletions(-) create mode 100644 python/aws_lambda_powertools/middleware_factory/__init__.py create mode 100644 python/aws_lambda_powertools/middleware_factory/factory.py create mode 100644 python/tests/functional/test_utils.py diff --git a/python/HISTORY.md b/python/HISTORY.md index 41120a69554..9d9d296c1c2 100644 --- a/python/HISTORY.md +++ b/python/HISTORY.md @@ -1,5 +1,12 @@ # HISTORY +## April 20th, 2020 + +**0.7.0** + +* Introduces Middleware Factory to build your own middleware +* Fixes Metrics dimensions not being included correctly in EMF + ## April 9th, 2020 **0.6.3** diff --git a/python/Makefile b/python/Makefile index b3c1cbd476f..fac2a8af791 100644 --- a/python/Makefile +++ b/python/Makefile @@ -17,7 +17,7 @@ lint: format test: poetry run pytest -vvv -test-html: +coverage-html: poetry run pytest --cov-report html pr: lint test diff --git a/python/README.md b/python/README.md index 
d312db8028a..5a96dc80b63 100644 --- a/python/README.md +++ b/python/README.md @@ -1,6 +1,6 @@ # Lambda Powertools -![PackageStatus](https://img.shields.io/static/v1?label=status&message=beta&color=blueviolet?style=flat-square) ![PythonSupport](https://img.shields.io/static/v1?label=python&message=3.6%20|%203.7|%203.8&color=blue?style=flat-square&logo=python) ![PyPI version](https://badge.fury.io/py/aws-lambda-powertools.svg) ![PyPi monthly downloads](https://img.shields.io/pypi/dm/aws-lambda-powertools) +![PackageStatus](https://img.shields.io/static/v1?label=status&message=beta&color=blueviolet?style=flat-square) ![PythonSupport](https://img.shields.io/static/v1?label=python&message=3.6%20|%203.7|%203.8&color=blue?style=flat-square&logo=python) ![PyPI version](https://badge.fury.io/py/aws-lambda-powertools.svg) ![PyPi monthly downloads](https://img.shields.io/pypi/dm/aws-lambda-powertools) ![Build](https://github.com/awslabs/aws-lambda-powertools/workflows/Powertools%20Python/badge.svg?branch=master) A suite of utilities for AWS Lambda Functions that makes tracing with AWS X-Ray, structured logging and creating custom metrics asynchronously easier - Currently available for Python only and compatible with Python >=3.6. 
@@ -32,12 +32,20 @@ A suite of utilities for AWS Lambda Functions that makes tracing with AWS X-Ray, * Validate against common metric definitions mistakes (metric unit, values, max dimensions, max metrics, etc) * No stack, custom resource, data collection needed — Metrics are created async by CloudWatch EMF +**Bring your own middleware** + +* Utility to easily create your own middleware +* Run logic before, after, and handle exceptions +* Receive lambda handler, event, context +* Optionally create sub-segment for each custom middleware + **Environment variables** used across suite of utilities Environment variable | Description | Default | Utility ------------------------------------------------- | --------------------------------------------------------------------------------- | --------------------------------------------------------------------------------- | ------------------------------------------------- POWERTOOLS_SERVICE_NAME | Sets service name used for tracing namespace, metrics dimensions and structured logging | "service_undefined" | all POWERTOOLS_TRACE_DISABLED | Disables tracing | "false" | tracing +POWERTOOLS_TRACE_MIDDLEWARES | Creates sub-segment for each middleware created by lambda_handler_decorator | "false" | middleware_factory POWERTOOLS_LOGGER_LOG_EVENT | Logs incoming event | "false" | logging POWERTOOLS_LOGGER_SAMPLE_RATE | Debug log sampling | 0 | logging POWERTOOLS_METRICS_NAMESPACE | Metrics namespace | None | metrics @@ -85,6 +93,23 @@ def handler(event, context) ... ``` +**Fetching a pre-configured tracer anywhere** + +```python +# handler.py +from aws_lambda_powertools.tracing import Tracer +tracer = Tracer(service="payment") + +@tracer.capture_lambda_handler +def handler(event, context) + charge_id = event.get('charge_id') + payment = collect_payment(charge_id) + ... 
+ +# another_file.py +from aws_lambda_powertools.tracing import Tracer +tracer = Tracer(auto_patch=False) # new instance using existing configuration with auto patching overriden +``` ### Logging @@ -154,7 +179,7 @@ def handler(event, context) } ``` -#### Custom Metrics async +### Custom Metrics async > **NOTE** `log_metric` will be removed once it's GA. @@ -204,6 +229,97 @@ with single_metric(name="ColdStart", unit=MetricUnit.Count, value=1) as metric: metric.add_dimension(name="function_context", value="$LATEST") ``` + +### Utilities + +#### Bring your own middleware + +This feature allows you to create your own middleware as a decorator with ease by following a simple signature. + +* Accept 3 mandatory args - `handler, event, context` +* Always return the handler with event/context or response if executed + - Supports nested middleware/decorators use case + +**Middleware with no params** + +```python +from aws_lambda_powertools.middleware_factory import lambda_handler_decorator + +@lambda_handler_decorator +def middleware_name(handler, event, context): + return handler(event, context) + +@lambda_handler_decorator +def middleware_before_after(handler, event, context): + logic_before_handler_execution() + response = handler(event, context) + logic_after_handler_execution() + return response + + +# middleware_name will wrap Lambda handler +# and simply return the handler as we're not pre/post-processing anything +# then middleware_before_after will wrap middleware_name +# run some code before/after calling the handler returned by middleware_name +# This way, lambda_handler is only actually called once (top-down) +@middleware_before_after # This will run last +@middleware_name # This will run first +def lambda_handler(event, context): + return True +``` + +**Middleware with params** + +```python +@lambda_handler_decorator +def obfuscate_sensitive_data(handler, event, context, fields=None): + # Obfuscate email before calling Lambda handler + if fields: + for field in 
fields: + field = event.get(field, "") + event[field] = obfuscate_pii(field) + + return handler(event, context) + +@obfuscate_sensitive_data(fields=["email"]) +def lambda_handler(event, context): + return True +``` + +**Optionally trace middleware execution** + +This makes use of an existing Tracer instance that you may have initialized anywhere in your code, otherwise it'll initialize one using default options and provider (X-Ray). + +```python +from aws_lambda_powertools.middleware_factory import lambda_handler_decorator + +@lambda_handler_decorator(trace_execution=True) +def middleware_name(handler, event, context): + return handler(event, context) + +@middleware_name +def lambda_handler(event, context): + return True +``` + +Optionally, you can enrich the final trace with additional annotations and metadata by retrieving a copy of the Tracer used. + +```python +from aws_lambda_powertools.middleware_factory import lambda_handler_decorator +from aws_lambda_powertools.tracing import Tracer + +@lambda_handler_decorator(trace_execution=True) +def middleware_name(handler, event, context): + tracer = Tracer() # Takes a copy of an existing tracer instance + tracer.add_anotation... + tracer.metadata... + return handler(event, context) + +@middleware_name +def lambda_handler(event, context): + return True +``` + ## Beta > **[Progress towards GA](https://github.com/awslabs/aws-lambda-powertools/projects/1)** diff --git a/python/aws_lambda_powertools/logging/logger.py b/python/aws_lambda_powertools/logging/logger.py index de344f1f569..32dc1be6c1a 100644 --- a/python/aws_lambda_powertools/logging/logger.py +++ b/python/aws_lambda_powertools/logging/logger.py @@ -93,32 +93,34 @@ def logger_inject_lambda_context(lambda_handler: Callable[[Dict, Any], Any] = No Environment variables --------------------- POWERTOOLS_LOGGER_LOG_EVENT : str - instruct logger to log Lambda Event (e.g. "true", "True", "TRUE") + instruct logger to log Lambda Event (e.g. 
`"true", "True", "TRUE"`) Example ------- - Captures Lambda contextual runtime info (e.g memory, arn, req_id) - >>> from aws_lambda_powertools.logging import logger_setup, logger_inject_lambda_context - >>> import logging - >>> - >>> logger = logging.getLogger(__name__) - >>> logging.setLevel(logging.INFO) - >>> logger_setup() - >>> - >>> @logger_inject_lambda_context - >>> def handler(event, context): + **Captures Lambda contextual runtime info (e.g memory, arn, req_id)** + + from aws_lambda_powertools.logging import logger_setup, logger_inject_lambda_context + import logging + + logger = logging.getLogger(__name__) + logging.setLevel(logging.INFO) + logger_setup() + + @logger_inject_lambda_context + def handler(event, context): logger.info("Hello") - Captures Lambda contextual runtime info and logs incoming request - >>> from aws_lambda_powertools.logging import logger_setup, logger_inject_lambda_context - >>> import logging - >>> - >>> logger = logging.getLogger(__name__) - >>> logging.setLevel(logging.INFO) - >>> logger_setup() - >>> - >>> @logger_inject_lambda_context(log_event=True) - >>> def handler(event, context): + **Captures Lambda contextual runtime info and logs incoming request** + + from aws_lambda_powertools.logging import logger_setup, logger_inject_lambda_context + import logging + + logger = logging.getLogger(__name__) + logging.setLevel(logging.INFO) + logger_setup() + + @logger_inject_lambda_context(log_event=True) + def handler(event, context): logger.info("Hello") Returns @@ -128,9 +130,7 @@ def logger_inject_lambda_context(lambda_handler: Callable[[Dict, Any], Any] = No """ # If handler is None we've been called with parameters - # We then return a partial function with args filled - # Next time we're called we'll call our Lambda - # This allows us to avoid writing wrapper_wrapper type of fn + # Return a partial function with args filled if lambda_handler is None: logger.debug("Decorator called with parameters") return 
functools.partial(logger_inject_lambda_context, log_event=log_event) @@ -178,6 +178,8 @@ def log_metric( ): """Logs a custom metric in a statsD-esque format to stdout. + **This will be removed when GA - Use `aws_lambda_powertools.metrics.metrics.Metrics` instead** + Creating Custom Metrics synchronously impact on performance/execution time. Instead, log_metric prints a metric to CloudWatch Logs. That allows us to pick them up asynchronously via another Lambda function and create them as a metric. @@ -185,7 +187,7 @@ def log_metric( NOTE: It takes up to 9 dimensions by default, and Metric units are conveniently available via MetricUnit Enum. If service is not passed as arg or via env var, "service_undefined" will be used as dimension instead. - Output in CloudWatch Logs: MONITORING||||| + **Output in CloudWatch Logs**: `MONITORING|||||` Serverless Application Repository App that creates custom metric from this log output: https://serverlessrepo.aws.amazon.com/applications/arn:aws:serverlessrepo:us-east-1:374852340823:applications~async-custom-metrics @@ -195,23 +197,39 @@ def log_metric( POWERTOOLS_SERVICE_NAME: str service name + Parameters + ---------- + name : str + metric name, by default None + namespace : str + metric namespace (e.g. application name), by default None + unit : MetricUnit, by default MetricUnit.Count + metric unit enum value (e.g. MetricUnit.Seconds), by default None\n + API Info: https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_MetricDatum.html + value : float, optional + metric value, by default 0 + service : str, optional + service name used as dimension, by default "service_undefined" + dimensions: dict, optional + keyword arguments as additional dimensions (e.g. 
`customer=customerId`) + Example ------- - Log metric to count number of successful payments; define service via env var + **Log metric to count number of successful payments; define service via env var** $ export POWERTOOLS_SERVICE_NAME="payment" - >>> from aws_lambda_powertools.logging import MetricUnit, log_metric - >>> log_metric( + from aws_lambda_powertools.logging import MetricUnit, log_metric + log_metric( name="SuccessfulPayments", unit=MetricUnit.Count, value=1, namespace="DemoApp" ) - Log metric to count number of successful payments per campaign & customer + **Log metric to count number of successful payments per campaign & customer** - >>> from aws_lambda_powertools.logging import MetricUnit, log_metric - >>> log_metric( + from aws_lambda_powertools.logging import MetricUnit, log_metric + log_metric( name="SuccessfulPayments", service="payment", unit=MetricUnit.Count, @@ -220,22 +238,6 @@ def log_metric( campaign=campaign_id, customer=customer_id ) - - Parameters - ---------- - name : str - metric name, by default None - namespace : str - metric namespace (e.g. application name), by default None - unit : MetricUnit, by default MetricUnit.Count - metric unit enum value (e.g. MetricUnit.Seconds), by default None - API Info: https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_MetricDatum.html - value : float, optional - metric value, by default 0 - service : str, optional - service name used as dimension, by default "service_undefined" - dimensions: dict, optional - keyword arguments as additional dimensions (e.g. 
customer=customerId) """ warnings.warn(message="This method will be removed in GA; use Metrics instead", category=DeprecationWarning) diff --git a/python/aws_lambda_powertools/metrics/base.py b/python/aws_lambda_powertools/metrics/base.py index 3c45bc619f8..448bfc37e02 100644 --- a/python/aws_lambda_powertools/metrics/base.py +++ b/python/aws_lambda_powertools/metrics/base.py @@ -177,6 +177,7 @@ def serialize_metric_set(self, metrics: Dict = None, dimensions: Dict = None) -> } metrics_timestamp = {"Timestamp": int(datetime.datetime.now().timestamp() * 1000)} metric_set["_aws"] = {**metrics_timestamp, **metrics_definition} + metric_set.update(**dimensions) try: logger.debug("Validating serialized metrics against CloudWatch EMF schema", metric_set) diff --git a/python/aws_lambda_powertools/middleware_factory/__init__.py b/python/aws_lambda_powertools/middleware_factory/__init__.py new file mode 100644 index 00000000000..9d57d843ec2 --- /dev/null +++ b/python/aws_lambda_powertools/middleware_factory/__init__.py @@ -0,0 +1,4 @@ +""" Utilities to enhance middlewares """ +from .factory import lambda_handler_decorator + +__all__ = ["lambda_handler_decorator"] diff --git a/python/aws_lambda_powertools/middleware_factory/factory.py b/python/aws_lambda_powertools/middleware_factory/factory.py new file mode 100644 index 00000000000..4dcab2adf33 --- /dev/null +++ b/python/aws_lambda_powertools/middleware_factory/factory.py @@ -0,0 +1,138 @@ +import functools +import inspect +import logging +import os +from distutils.util import strtobool +from typing import Callable + +from ..tracing import Tracer + +logger = logging.getLogger(__name__) +logger.setLevel(os.getenv("LOG_LEVEL", "INFO")) + + +def lambda_handler_decorator(decorator: Callable = None, trace_execution=False): + """Decorator factory for decorating Lambda handlers. 
+ + You can use lambda_handler_decorator to create your own middlewares, + where your function signature follows: `fn(handler, event, context)` + + Custom keyword arguments are also supported e.g. `fn(handler, event, context, option=value)` + + Middlewares created by this factory supports tracing to help you quickly troubleshoot + any overhead that custom middlewares may cause - They will appear as custom subsegments. + + **Non-key value params are not supported** e.g. `fn(handler, event, context, option)` + + Environment variables + --------------------- + POWERTOOLS_TRACE_MIDDLEWARES : str + uses `aws_lambda_powertools.tracing.Tracer` + to create sub-segments per middleware (e.g. `"true", "True", "TRUE"`) + + Parameters + ---------- + decorator: Callable + Middleware to be wrapped by this factory + trace_execution: bool + Flag to explicitly enable trace execution for middlewares.\n + `Env POWERTOOLS_TRACE_MIDDLEWARES="true"` + + Example + ------- + **Create a middleware no params** + + from aws_lambda_powertools.middleware_factory import lambda_handler_decorator + + @lambda_handler_decorator + def log_response(handler, event, context): + any_code_to_execute_before_lambda_handler() + response = handler(event, context) + any_code_to_execute_after_lambda_handler() + print(f"Lambda handler response: {response}") + + @log_response + def lambda_handler(event, context): + return True + + **Create a middleware with params** + + from aws_lambda_powertools.middleware_factory import lambda_handler_decorator + + @lambda_handler_decorator + def obfuscate_sensitive_data(handler, event, context, fields=None): + # Obfuscate email before calling Lambda handler + if fields: + for field in fields: + field = event.get(field, "") + event[field] = obfuscate_pii(field) + + response = handler(event, context) + print(f"Lambda handler response: {response}") + + @obfuscate_sensitive_data(fields=["email"]) + def lambda_handler(event, context): + return True + + **Trace execution of custom 
middleware** + + from aws_lambda_powertools.tracing import Tracer + from aws_lambda_powertools.middleware_factory import lambda_handler_decorator + + tracer = Tracer(service="payment") # or via env var + ... + @lambda_handler_decorator(trace_execution=True) + def log_response(handler, event, context): + ... + + @tracer.capture_lambda_handler + @log_response + def lambda_handler(event, context): + return True + + Limitations + ----------- + * Async middlewares not supported + * Classes, class methods middlewares not supported + + Raises + ------ + TypeError + When middleware receives non keyword=arguments + """ + + if decorator is None: + return functools.partial(lambda_handler_decorator, trace_execution=trace_execution) + + trace_execution = trace_execution or strtobool(str(os.getenv("POWERTOOLS_TRACE_MIDDLEWARES", False))) + + @functools.wraps(decorator) + def final_decorator(func: Callable = None, **kwargs): + # If called with kwargs return new func with kwargs + if func is None: + return functools.partial(final_decorator, **kwargs) + + if not inspect.isfunction(func): + raise TypeError( + f"Only keyword arguments is supported for middlewares: {decorator.__qualname__} received {func}" + ) + + @functools.wraps(func) + def wrapper(event, context): + try: + middleware = functools.partial(decorator, func, event, context, **kwargs) + if trace_execution: + tracer = Tracer(auto_patch=False) + tracer.create_subsegment(name=f"## {decorator.__qualname__}") + response = middleware() + tracer.end_subsegment() + else: + response = middleware() + return response + except Exception as err: + logger.error(f"Caught exception in {decorator.__qualname__}") + raise err + + return wrapper + + return final_decorator diff --git a/python/aws_lambda_powertools/tracing/tracer.py b/python/aws_lambda_powertools/tracing/tracer.py index 20a5a4b096c..0f3e3cff8bb 100644 --- a/python/aws_lambda_powertools/tracing/tracer.py +++ b/python/aws_lambda_powertools/tracing/tracer.py @@ -1,3 +1,4 @@ 
+import copy import functools import logging import os @@ -14,48 +15,60 @@ class Tracer: """Tracer using AWS-XRay to provide decorators with known defaults for Lambda functions - When running locally, it honours POWERTOOLS_TRACE_DISABLED environment variable - so end user code doesn't have to be modified to run it locally - instead Tracer returns dummy segments/subsegments. + When running locally, it detects whether it's running via SAM CLI, + and if it is it returns dummy segments/subsegments instead. - Tracing is automatically disabled when running locally via via SAM CLI. - - It patches all available libraries supported by X-Ray SDK + By default, it patches all available libraries supported by X-Ray SDK. Patching is + automatically disabled when running locally via SAM CLI or by any other means. \n Ref: https://docs.aws.amazon.com/xray-sdk-for-python/latest/reference/thirdparty.html + Tracer keeps a copy of its configuration as it can be instantiated more than once. This + is useful when you are using your own middlewares and want to utilize an existing Tracer. + Make sure to set `auto_patch=False` in subsequent Tracer instances to avoid double patching. + Environment variables --------------------- POWERTOOLS_TRACE_DISABLED : str - disable tracer (e.g. "true", "True", "TRUE") + disable tracer (e.g. `"true", "True", "TRUE"`) POWERTOOLS_SERVICE_NAME : str service name + Parameters + ---------- + service: str + Service name that will be appended in all tracing metadata + auto_patch: bool + Patch existing imported modules during initialization, by default True + disabled: bool + Flag to explicitly disable tracing, useful when running/testing locally. 
+ `Env POWERTOOLS_TRACE_DISABLED="true"` + Example ------- - A Lambda function using Tracer + **A Lambda function using Tracer** - >>> from aws_lambda_powertools.tracing import Tracer - >>> tracer = Tracer(service="greeting") + from aws_lambda_powertools.tracing import Tracer + tracer = Tracer(service="greeting") - >>> @tracer.capture_method - >>> def greeting(name: str) -> Dict: - return { - "name": name - } + @tracer.capture_method + def greeting(name: str) -> Dict: + return { + "name": name + } - >>> @tracer.capture_lambda_handler - >>> def handler(event: dict, context: Any) -> Dict: - >>> print("Received event from Lambda...") - >>> response = greeting(name="Heitor") - >>> return response + @tracer.capture_lambda_handler + def handler(event: dict, context: Any) -> Dict: + print("Received event from Lambda...") + response = greeting(name="Heitor") + return response - Booking Lambda function using Tracer that adds additional annotation/metadata + **Booking Lambda function using Tracer that adds additional annotation/metadata** - >>> from aws_lambda_powertools.tracing import Tracer - >>> tracer = Tracer(service="booking") + from aws_lambda_powertools.tracing import Tracer + tracer = Tracer(service="booking") - >>> @tracer.capture_method - >>> def confirm_booking(booking_id: str) -> Dict: + @tracer.capture_method + def confirm_booking(booking_id: str) -> Dict: resp = add_confirmation(booking_id) tracer.put_annotation("BookingConfirmation", resp['requestId']) @@ -63,49 +76,67 @@ class Tracer: return resp - >>> @tracer.capture_lambda_handler - >>> def handler(event: dict, context: Any) -> Dict: - >>> print("Received event from Lambda...") - >>> response = greeting(name="Heitor") - >>> return response + @tracer.capture_lambda_handler + def handler(event: dict, context: Any) -> Dict: + print("Received event from Lambda...") + response = greeting(name="Heitor") + return response - A Lambda function using service name via POWERTOOLS_SERVICE_NAME + **A Lambda function 
using service name via POWERTOOLS_SERVICE_NAME** - >>> export POWERTOOLS_SERVICE_NAME="booking" - >>> from aws_lambda_powertools.tracing import Tracer - >>> tracer = Tracer() + export POWERTOOLS_SERVICE_NAME="booking" + from aws_lambda_powertools.tracing import Tracer + tracer = Tracer() - >>> @tracer.capture_lambda_handler - >>> def handler(event: dict, context: Any) -> Dict: - >>> print("Received event from Lambda...") - >>> response = greeting(name="Lessa") - >>> return response + @tracer.capture_lambda_handler + def handler(event: dict, context: Any) -> Dict: + print("Received event from Lambda...") + response = greeting(name="Lessa") + return response - Parameters - ---------- - service: str - Service name that will be appended in all tracing metadata - disabled: bool - Flag to explicitly disable tracing, useful when running locally. - Env: POWERTOOLS_TRACE_DISABLED="true" + **Reuse an existing instance of Tracer anywhere in the code** + + # lambda_handler.py + from aws_lambda_powertools.tracing import Tracer + tracer = Tracer() + + @tracer.capture_lambda_handler + def handler(event: dict, context: Any) -> Dict: + ... + + # utils.py + from aws_lambda_powertools.tracing import Tracer + tracer = Tracer() + ... 
Returns ------- Tracer Tracer instance with imported modules patched + + Limitations + ----------- + * Async handler and methods not supported + """ + _default_config = {"service": "service_undefined", "disabled": False, "provider": xray_recorder, "auto_patch": True} + _config = copy.copy(_default_config) + def __init__( - self, service: str = "service_undefined", disabled: bool = False, provider: xray_recorder = xray_recorder, + self, service: str = None, disabled: bool = None, provider: xray_recorder = None, auto_patch: bool = None ): - self.provider = provider - self.disabled = self.__is_trace_disabled() or disabled - self.service = os.getenv("POWERTOOLS_SERVICE_NAME") or service + self.__build_config(service=service, disabled=disabled, provider=provider, auto_patch=auto_patch) + self.provider = self._config["provider"] + self.disabled = self._config["disabled"] + self.service = self._config["service"] + self.auto_patch = self._config["auto_patch"] if self.disabled: self.__disable_tracing_provider() - self.__patch() + if self.auto_patch: + self.patch() def capture_lambda_handler(self, lambda_handler: Callable[[Dict, Any], Any] = None): """Decorator to create subsegment for lambda handlers @@ -115,11 +146,11 @@ def capture_lambda_handler(self, lambda_handler: Callable[[Dict, Any], Any] = No Example ------- - Lambda function using capture_lambda_handler decorator + **Lambda function using capture_lambda_handler decorator** - >>> tracer = Tracer(service="payment") - >>> @tracer.capture_lambda_handler - def handler(event, context) + tracer = Tracer(service="payment") + @tracer.capture_lambda_handler + def handler(event, context) Parameters ---------- @@ -134,7 +165,7 @@ def handler(event, context) @functools.wraps(lambda_handler) def decorate(event, context): - self.__create_subsegment(name=f"## {lambda_handler.__name__}") + self.create_subsegment(name=f"## {lambda_handler.__name__}") try: logger.debug("Calling lambda handler") @@ -148,7 +179,7 @@ def 
decorate(event, context): self.put_metadata(f"{self.service}_error", err) raise err finally: - self.__end_subsegment() + self.end_subsegment() return response @@ -162,12 +193,11 @@ def capture_method(self, method: Callable = None): Example ------- - Custom function using capture_method decorator - - >>> tracer = Tracer(service="payment") + **Custom function using capture_method decorator** - >>> @tracer.capture_method - def some_function() + tracer = Tracer(service="payment") + @tracer.capture_method + def some_function() Parameters ---------- @@ -183,7 +213,7 @@ def some_function() @functools.wraps(method) def decorate(*args, **kwargs): method_name = f"{method.__name__}" - self.__create_subsegment(name=f"## {method_name}") + self.create_subsegment(name=f"## {method_name}") try: logger.debug(f"Calling method: {method_name}") @@ -197,7 +227,7 @@ def decorate(*args, **kwargs): self.put_metadata(f"{method_name} error", err) raise err finally: - self.__end_subsegment() + self.end_subsegment() return response @@ -210,8 +240,8 @@ def put_annotation(self, key: str, value: Any): ------- Custom annotation for a pseudo service named payment - >>> tracer = Tracer(service="payment") - >>> tracer.put_annotation("PaymentStatus", "CONFIRMED") + tracer = Tracer(service="payment") + tracer.put_annotation("PaymentStatus", "CONFIRMED") Parameters ---------- @@ -244,9 +274,9 @@ def put_metadata(self, key: str, value: object, namespace: str = None): ------- Custom metadata for a pseudo service named payment - >>> tracer = Tracer(service="payment") - >>> response = collect_payment() - >>> tracer.put_metadata("Payment collection", response) + tracer = Tracer(service="payment") + response = collect_payment() + tracer.put_metadata("Payment collection", response) """ # Will no longer be needed once #155 is resolved # https://github.com/aws/aws-xray-sdk-python/issues/155 @@ -257,7 +287,7 @@ def put_metadata(self, key: str, value: object, namespace: str = None): logger.debug(f"Adding metadata 
on key '{key}'' with '{value}'' at namespace '{namespace}''") self.provider.put_metadata(key=key, value=value, namespace=_namespace) - def __create_subsegment(self, name: str) -> models.subsegment: + def create_subsegment(self, name: str) -> models.subsegment: """Creates subsegment or a dummy segment plus subsegment if tracing is disabled It also assumes Tracer would be instantiated statically so that cold starts are captured. @@ -271,7 +301,7 @@ def __create_subsegment(self, name: str) -> models.subsegment: ------- Creates a genuine subsegment - >>> self.__create_subsegment(name="a meaningful name") + self.create_subsegment(name="a meaningful name") Returns ------- @@ -296,7 +326,7 @@ def __create_subsegment(self, name: str) -> models.subsegment: return subsegment - def __end_subsegment(self): + def end_subsegment(self): """Ends an existing subsegment Parameters @@ -310,19 +340,18 @@ def __end_subsegment(self): self.provider.end_subsegment() - def __patch(self): - """Patch modules for instrumentation - """ + def patch(self): + """Patch modules for instrumentation""" logger.debug("Patching modules...") - is_lambda_emulator = os.getenv("AWS_SAM_LOCAL") - is_lambda_env = os.getenv("LAMBDA_TASK_ROOT") + is_lambda_emulator = os.getenv("AWS_SAM_LOCAL", False) + is_lambda_env = os.getenv("LAMBDA_TASK_ROOT", False) if self.disabled: logger.debug("Tracing has been disabled, aborting patch") return - if is_lambda_emulator or not is_lambda_env: + if is_lambda_emulator or is_lambda_env: logger.debug("Running under SAM CLI env or not in Lambda; aborting patch") return @@ -339,9 +368,9 @@ def __is_trace_disabled(self) -> bool: Tracing is automatically disabled in the following conditions: - 1. Explicitly disabled via TRACE_DISABLED environment variable + 1. Explicitly disabled via `TRACE_DISABLED` environment variable 2. Running in Lambda Emulators where X-Ray Daemon will not be listening - 3. Explicitly disabled via constructor e.g Tracer(disabled=True) + 3. 
Explicitly disabled via constructor e.g `Tracer(disabled=True)` Returns ------- @@ -361,3 +390,19 @@ def __is_trace_disabled(self) -> bool: return is_lambda_emulator return False + + def __build_config( + self, service: str = None, disabled: bool = None, provider: xray_recorder = None, auto_patch: bool = None + ): + """ Populates Tracer config for new and existing initializations """ + is_disabled = disabled if disabled is not None else self.__is_trace_disabled() + is_service = service if service is not None else os.getenv("POWERTOOLS_SERVICE_NAME") + + self._config["provider"] = provider if provider is not None else self._config["provider"] + self._config["auto_patch"] = auto_patch if auto_patch is not None else self._config["auto_patch"] + self._config["service"] = is_service if is_service else self._config["service"] + self._config["disabled"] = is_disabled if is_disabled else self._config["disabled"] + + @classmethod + def _reset_config(cls): + cls._config = copy.copy(cls._default_config) diff --git a/python/example/hello_world/app.py b/python/example/hello_world/app.py index 033e55dcb03..8836b542476 100644 --- a/python/example/hello_world/app.py +++ b/python/example/hello_world/app.py @@ -1,9 +1,11 @@ import json +import requests + from aws_lambda_powertools.logging import logger_inject_lambda_context, logger_setup -from aws_lambda_powertools.tracing import Tracer from aws_lambda_powertools.metrics import Metrics, MetricUnit, single_metric -import requests +from aws_lambda_powertools.middleware_factory import lambda_handler_decorator +from aws_lambda_powertools.tracing import Tracer tracer = Tracer() logger = logger_setup() @@ -13,8 +15,22 @@ metrics.add_dimension(name="operation", value="example") + +@lambda_handler_decorator(trace_execution=True) +def my_middleware(handler, event, context, say_hello=False): + if say_hello: + print("========= HELLO PARAM DETECTED =========") + print("========= Logging event before Handler is called =========") + print(event) 
+ ret = handler(event, context) + print("========= Logging response after Handler is called =========") + print(ret) + return ret + + @metrics.log_metrics @tracer.capture_lambda_handler +@my_middleware(say_hello=True) @logger_inject_lambda_context def lambda_handler(event, context): """Sample pure Lambda function @@ -41,7 +57,7 @@ def lambda_handler(event, context): if _cold_start: logger.debug("Recording cold start metric") metrics.add_metric(name="ColdStart", unit=MetricUnit.Count, value=1) - metrics.add_dimension(name="function_name", value=context.function_name) + metrics.add_dimension(name="function_name", value=context.function_name) _cold_start = False try: @@ -49,17 +65,14 @@ def lambda_handler(event, context): metrics.add_metric(name="SuccessfulLocations", unit="Count", value=1) except requests.RequestException as e: # Send some context about this error to Lambda Logs - logger.error(e) - raise e - + logger.exception(e, exc_info=True) + raise + with single_metric(name="UniqueMetricDimension", unit="Seconds", value=1) as metric: metric.add_dimension(name="unique_dimension", value="for_unique_metric") logger.info("Returning message to the caller") return { "statusCode": 200, - "body": json.dumps({ - "message": "hello world", - "location": ip.text.replace("\n", "") - }), + "body": json.dumps({"message": "hello world", "location": ip.text.replace("\n", "")}), } diff --git a/python/poetry.lock b/python/poetry.lock index 118fd5b1823..2ca7efeb460 100644 --- a/python/poetry.lock +++ b/python/poetry.lock @@ -35,7 +35,7 @@ description = "The AWS X-Ray SDK for Python (the SDK) enables Python developers name = "aws-xray-sdk" optional = false python-versions = "*" -version = "2.4.3" +version = "2.5.0" [package.dependencies] botocore = ">=1.11.3" @@ -69,7 +69,7 @@ description = "Low-level, data-driven core of boto 3." 
name = "botocore" optional = false python-versions = "*" -version = "1.15.37" +version = "1.15.41" [package.dependencies] docutils = ">=0.10,<0.16" @@ -111,7 +111,7 @@ description = "Code coverage measurement for Python" name = "coverage" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, <4" -version = "5.0.4" +version = "5.1" [package.dependencies] [package.dependencies.toml] @@ -322,9 +322,8 @@ version = "1.4.14" license = ["editdistance"] [[package]] -category = "dev" +category = "main" description = "Read metadata from Python packages" -marker = "python_version < \"3.8\"" name = "importlib-metadata" optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,>=2.7" @@ -385,8 +384,16 @@ category = "main" description = "Python library for serializing any arbitrary object graph into JSON" name = "jsonpickle" optional = false -python-versions = "*" -version = "1.3" +python-versions = ">=2.7" +version = "1.4" + +[package.dependencies] +importlib-metadata = "*" + +[package.extras] +docs = ["sphinx", "jaraco.packaging (>=3.2)", "rst.linker (>=1.9)"] +testing = ["coverage (<5)", "pytest (>=3.5,<3.7.3 || >3.7.3)", "pytest-checkdocs (>=1.2.3)", "pytest-flake8", "pytest-black-multipy", "pytest-cov", "ecdsa", "feedparser", "numpy", "pandas", "pymongo", "sqlalchemy", "enum34", "jsonlib"] +"testing.libs" = ["demjson", "simplejson", "ujson", "yajl"] [[package]] category = "dev" @@ -467,7 +474,7 @@ description = "Utility library for gitignore style pattern matching of file path name = "pathspec" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" -version = "0.7.0" +version = "0.8.0" [[package]] category = "dev" @@ -649,7 +656,7 @@ description = "A collection of helpers and mock objects for unit tests and doc t name = "testfixtures" optional = false python-versions = "*" -version = "6.14.0" +version = "6.14.1" [package.extras] build = ["setuptools-git", "wheel", "twine"] @@ -679,11 
+686,11 @@ marker = "python_version != \"3.4\"" name = "urllib3" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, <4" -version = "1.25.8" +version = "1.25.9" [package.extras] brotli = ["brotlipy (>=0.6.0)"] -secure = ["pyOpenSSL (>=0.14)", "cryptography (>=1.3.4)", "idna (>=2.0.0)", "certifi", "ipaddress"] +secure = ["certifi", "cryptography (>=1.3.4)", "idna (>=2.0.0)", "pyOpenSSL (>=0.14)", "ipaddress"] socks = ["PySocks (>=1.5.6,<1.5.7 || >1.5.7,<2.0)"] [[package]] @@ -692,7 +699,7 @@ description = "Virtual Python Environment builder" name = "virtualenv" optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,>=2.7" -version = "20.0.16" +version = "20.0.18" [package.dependencies] appdirs = ">=1.4.3,<2" @@ -710,7 +717,7 @@ version = ">=1.0,<2" [package.extras] docs = ["sphinx (>=2.0.0,<3)", "sphinx-argparse (>=0.2.5,<1)", "sphinx-rtd-theme (>=0.4.3,<1)", "towncrier (>=19.9.0rc1)", "proselint (>=0.10.2,<1)"] -testing = ["pytest (>=4.0.0,<6)", "coverage (>=4.5.1,<6)", "pytest-mock (>=2.0.0,<3)", "pytest-env (>=0.6.2,<1)", "pytest-timeout (>=1.3.4,<2)", "packaging (>=20.0)", "xonsh (>=0.9.13,<1)"] +testing = ["pytest (>=4.0.0,<6)", "coverage (>=4.5.1,<6)", "pytest-mock (>=2.0.0,<3)", "pytest-env (>=0.6.2,<1)", "pytest-timeout (>=1.3.4,<2)", "packaging (>=20.0)", "xonsh (>=0.9.16,<1)"] [[package]] category = "dev" @@ -729,7 +736,7 @@ python-versions = "*" version = "1.12.1" [[package]] -category = "dev" +category = "main" description = "Backport of pathlib-compatible object wrapper for zip files" marker = "python_version < \"3.8\"" name = "zipp" @@ -759,16 +766,16 @@ attrs = [ {file = "attrs-19.3.0.tar.gz", hash = "sha256:f7b7ce16570fe9965acd6d30101a28f62fb4a7f9e926b3bbc9b61f8b04247e72"}, ] aws-xray-sdk = [ - {file = "aws-xray-sdk-2.4.3.tar.gz", hash = "sha256:263a38f3920d9dc625e3acb92e6f6d300f4250b70f538bd009ce6e485676ab74"}, - {file = "aws_xray_sdk-2.4.3-py2.py3-none-any.whl", hash = 
"sha256:612dba6efc3704ef224ac0747b05488b8aad94e71be3ece4edbc051189d50482"}, + {file = "aws-xray-sdk-2.5.0.tar.gz", hash = "sha256:8dfa785305fc8dc720d8d4c2ec6a58e85e467ddc3a53b1506a2ed8b5801c8fc7"}, + {file = "aws_xray_sdk-2.5.0-py2.py3-none-any.whl", hash = "sha256:ae57baeb175993bdbf31f83843e2c0958dd5aa8cb691ab5628aafb6ccc78a0fc"}, ] black = [ {file = "black-19.10b0-py36-none-any.whl", hash = "sha256:1b30e59be925fafc1ee4565e5e08abef6b03fe455102883820fe5ee2e4734e0b"}, {file = "black-19.10b0.tar.gz", hash = "sha256:c2edb73a08e9e0e6f65a0e6af18b059b8b1cdd5bef997d7a0b181df93dc81539"}, ] botocore = [ - {file = "botocore-1.15.37-py2.py3-none-any.whl", hash = "sha256:30055e9a3e313400d92ca4ad599e6506d71fb1addc75f075ab7179973ac52de6"}, - {file = "botocore-1.15.37.tar.gz", hash = "sha256:51422695a5a39ca9320acd3edaf7b337bed75bbc7d260deb76c1d801adc0daa2"}, + {file = "botocore-1.15.41-py2.py3-none-any.whl", hash = "sha256:b12a5b642aa210a72d84204da18618276eeae052fbff58958f57d28ef3193034"}, + {file = "botocore-1.15.41.tar.gz", hash = "sha256:a45a65ba036bc980decfc3ce6c2688a2d5fffd76e4b02ea4d59e63ff0f6896d4"}, ] cfgv = [ {file = "cfgv-3.0.0-py2.py3-none-any.whl", hash = "sha256:f22b426ed59cd2ab2b54ff96608d846c33dfb8766a67f0b4a6ce130ce244414f"}, @@ -783,37 +790,37 @@ colorama = [ {file = "colorama-0.4.3.tar.gz", hash = "sha256:e96da0d330793e2cb9485e9ddfd918d456036c7149416295932478192f4436a1"}, ] coverage = [ - {file = "coverage-5.0.4-cp27-cp27m-macosx_10_12_x86_64.whl", hash = "sha256:8a620767b8209f3446197c0e29ba895d75a1e272a36af0786ec70fe7834e4307"}, - {file = "coverage-5.0.4-cp27-cp27m-macosx_10_13_intel.whl", hash = "sha256:73aa6e86034dad9f00f4bbf5a666a889d17d79db73bc5af04abd6c20a014d9c8"}, - {file = "coverage-5.0.4-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:408ce64078398b2ee2ec08199ea3fcf382828d2f8a19c5a5ba2946fe5ddc6c31"}, - {file = "coverage-5.0.4-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:cda33311cb9fb9323958a69499a667bd728a39a7aa4718d7622597a44c4f1441"}, - {file = 
"coverage-5.0.4-cp27-cp27m-win32.whl", hash = "sha256:5f587dfd83cb669933186661a351ad6fc7166273bc3e3a1531ec5c783d997aac"}, - {file = "coverage-5.0.4-cp27-cp27m-win_amd64.whl", hash = "sha256:9fad78c13e71546a76c2f8789623eec8e499f8d2d799f4b4547162ce0a4df435"}, - {file = "coverage-5.0.4-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:2e08c32cbede4a29e2a701822291ae2bc9b5220a971bba9d1e7615312efd3037"}, - {file = "coverage-5.0.4-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:922fb9ef2c67c3ab20e22948dcfd783397e4c043a5c5fa5ff5e9df5529074b0a"}, - {file = "coverage-5.0.4-cp35-cp35m-macosx_10_12_x86_64.whl", hash = "sha256:c3fc325ce4cbf902d05a80daa47b645d07e796a80682c1c5800d6ac5045193e5"}, - {file = "coverage-5.0.4-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:046a1a742e66d065d16fb564a26c2a15867f17695e7f3d358d7b1ad8a61bca30"}, - {file = "coverage-5.0.4-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:6ad6ca45e9e92c05295f638e78cd42bfaaf8ee07878c9ed73e93190b26c125f7"}, - {file = "coverage-5.0.4-cp35-cp35m-win32.whl", hash = "sha256:eda55e6e9ea258f5e4add23bcf33dc53b2c319e70806e180aecbff8d90ea24de"}, - {file = "coverage-5.0.4-cp35-cp35m-win_amd64.whl", hash = "sha256:4a8a259bf990044351baf69d3b23e575699dd60b18460c71e81dc565f5819ac1"}, - {file = "coverage-5.0.4-cp36-cp36m-macosx_10_13_x86_64.whl", hash = "sha256:f372cdbb240e09ee855735b9d85e7f50730dcfb6296b74b95a3e5dea0615c4c1"}, - {file = "coverage-5.0.4-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:a37c6233b28e5bc340054cf6170e7090a4e85069513320275a4dc929144dccf0"}, - {file = "coverage-5.0.4-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:443be7602c790960b9514567917af538cac7807a7c0c0727c4d2bbd4014920fd"}, - {file = "coverage-5.0.4-cp36-cp36m-win32.whl", hash = "sha256:165a48268bfb5a77e2d9dbb80de7ea917332a79c7adb747bd005b3a07ff8caf0"}, - {file = "coverage-5.0.4-cp36-cp36m-win_amd64.whl", hash = "sha256:0a907199566269e1cfa304325cc3b45c72ae341fbb3253ddde19fa820ded7a8b"}, - {file = 
"coverage-5.0.4-cp37-cp37m-macosx_10_13_x86_64.whl", hash = "sha256:513e6526e0082c59a984448f4104c9bf346c2da9961779ede1fc458e8e8a1f78"}, - {file = "coverage-5.0.4-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:3844c3dab800ca8536f75ae89f3cf566848a3eb2af4d9f7b1103b4f4f7a5dad6"}, - {file = "coverage-5.0.4-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:641e329e7f2c01531c45c687efcec8aeca2a78a4ff26d49184dce3d53fc35014"}, - {file = "coverage-5.0.4-cp37-cp37m-win32.whl", hash = "sha256:db1d4e38c9b15be1521722e946ee24f6db95b189d1447fa9ff18dd16ba89f732"}, - {file = "coverage-5.0.4-cp37-cp37m-win_amd64.whl", hash = "sha256:62061e87071497951155cbccee487980524d7abea647a1b2a6eb6b9647df9006"}, - {file = "coverage-5.0.4-cp38-cp38-macosx_10_13_x86_64.whl", hash = "sha256:65a7e00c00472cd0f59ae09d2fb8a8aaae7f4a0cf54b2b74f3138d9f9ceb9cb2"}, - {file = "coverage-5.0.4-cp38-cp38-manylinux1_i686.whl", hash = "sha256:1f66cf263ec77af5b8fe14ef14c5e46e2eb4a795ac495ad7c03adc72ae43fafe"}, - {file = "coverage-5.0.4-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:85596aa5d9aac1bf39fe39d9fa1051b0f00823982a1de5766e35d495b4a36ca9"}, - {file = "coverage-5.0.4-cp38-cp38-win32.whl", hash = "sha256:86a0ea78fd851b313b2e712266f663e13b6bc78c2fb260b079e8b67d970474b1"}, - {file = "coverage-5.0.4-cp38-cp38-win_amd64.whl", hash = "sha256:03f630aba2b9b0d69871c2e8d23a69b7fe94a1e2f5f10df5049c0df99db639a0"}, - {file = "coverage-5.0.4-cp39-cp39-win32.whl", hash = "sha256:7c9762f80a25d8d0e4ab3cb1af5d9dffbddb3ee5d21c43e3474c84bf5ff941f7"}, - {file = "coverage-5.0.4-cp39-cp39-win_amd64.whl", hash = "sha256:4482f69e0701139d0f2c44f3c395d1d1d37abd81bfafbf9b6efbe2542679d892"}, - {file = "coverage-5.0.4.tar.gz", hash = "sha256:1b60a95fc995649464e0cd48cecc8288bac5f4198f21d04b8229dc4097d76823"}, + {file = "coverage-5.1-cp27-cp27m-macosx_10_12_x86_64.whl", hash = "sha256:0cb4be7e784dcdc050fc58ef05b71aa8e89b7e6636b99967fadbdba694cf2b65"}, + {file = "coverage-5.1-cp27-cp27m-macosx_10_13_intel.whl", hash = 
"sha256:c317eaf5ff46a34305b202e73404f55f7389ef834b8dbf4da09b9b9b37f76dd2"}, + {file = "coverage-5.1-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:b83835506dfc185a319031cf853fa4bb1b3974b1f913f5bb1a0f3d98bdcded04"}, + {file = "coverage-5.1-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:5f2294dbf7875b991c381e3d5af2bcc3494d836affa52b809c91697449d0eda6"}, + {file = "coverage-5.1-cp27-cp27m-win32.whl", hash = "sha256:de807ae933cfb7f0c7d9d981a053772452217df2bf38e7e6267c9cbf9545a796"}, + {file = "coverage-5.1-cp27-cp27m-win_amd64.whl", hash = "sha256:bf9cb9a9fd8891e7efd2d44deb24b86d647394b9705b744ff6f8261e6f29a730"}, + {file = "coverage-5.1-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:acf3763ed01af8410fc36afea23707d4ea58ba7e86a8ee915dfb9ceff9ef69d0"}, + {file = "coverage-5.1-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:dec5202bfe6f672d4511086e125db035a52b00f1648d6407cc8e526912c0353a"}, + {file = "coverage-5.1-cp35-cp35m-macosx_10_12_x86_64.whl", hash = "sha256:7a5bdad4edec57b5fb8dae7d3ee58622d626fd3a0be0dfceda162a7035885ecf"}, + {file = "coverage-5.1-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:1601e480b9b99697a570cea7ef749e88123c04b92d84cedaa01e117436b4a0a9"}, + {file = "coverage-5.1-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:dbe8c6ae7534b5b024296464f387d57c13caa942f6d8e6e0346f27e509f0f768"}, + {file = "coverage-5.1-cp35-cp35m-win32.whl", hash = "sha256:a027ef0492ede1e03a8054e3c37b8def89a1e3c471482e9f046906ba4f2aafd2"}, + {file = "coverage-5.1-cp35-cp35m-win_amd64.whl", hash = "sha256:0e61d9803d5851849c24f78227939c701ced6704f337cad0a91e0972c51c1ee7"}, + {file = "coverage-5.1-cp36-cp36m-macosx_10_13_x86_64.whl", hash = "sha256:2d27a3f742c98e5c6b461ee6ef7287400a1956c11421eb574d843d9ec1f772f0"}, + {file = "coverage-5.1-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:66460ab1599d3cf894bb6baee8c684788819b71a5dc1e8fa2ecc152e5d752019"}, + {file = "coverage-5.1-cp36-cp36m-manylinux1_x86_64.whl", hash = 
"sha256:5c542d1e62eece33c306d66fe0a5c4f7f7b3c08fecc46ead86d7916684b36d6c"}, + {file = "coverage-5.1-cp36-cp36m-win32.whl", hash = "sha256:2742c7515b9eb368718cd091bad1a1b44135cc72468c731302b3d641895b83d1"}, + {file = "coverage-5.1-cp36-cp36m-win_amd64.whl", hash = "sha256:dead2ddede4c7ba6cb3a721870f5141c97dc7d85a079edb4bd8d88c3ad5b20c7"}, + {file = "coverage-5.1-cp37-cp37m-macosx_10_13_x86_64.whl", hash = "sha256:01333e1bd22c59713ba8a79f088b3955946e293114479bbfc2e37d522be03355"}, + {file = "coverage-5.1-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:e1ea316102ea1e1770724db01998d1603ed921c54a86a2efcb03428d5417e489"}, + {file = "coverage-5.1-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:adeb4c5b608574a3d647011af36f7586811a2c1197c861aedb548dd2453b41cd"}, + {file = "coverage-5.1-cp37-cp37m-win32.whl", hash = "sha256:782caea581a6e9ff75eccda79287daefd1d2631cc09d642b6ee2d6da21fc0a4e"}, + {file = "coverage-5.1-cp37-cp37m-win_amd64.whl", hash = "sha256:00f1d23f4336efc3b311ed0d807feb45098fc86dee1ca13b3d6768cdab187c8a"}, + {file = "coverage-5.1-cp38-cp38-macosx_10_13_x86_64.whl", hash = "sha256:402e1744733df483b93abbf209283898e9f0d67470707e3c7516d84f48524f55"}, + {file = "coverage-5.1-cp38-cp38-manylinux1_i686.whl", hash = "sha256:a3f3654d5734a3ece152636aad89f58afc9213c6520062db3978239db122f03c"}, + {file = "coverage-5.1-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:6402bd2fdedabbdb63a316308142597534ea8e1895f4e7d8bf7476c5e8751fef"}, + {file = "coverage-5.1-cp38-cp38-win32.whl", hash = "sha256:8fa0cbc7ecad630e5b0f4f35b0f6ad419246b02bc750de7ac66db92667996d24"}, + {file = "coverage-5.1-cp38-cp38-win_amd64.whl", hash = "sha256:79a3cfd6346ce6c13145731d39db47b7a7b859c0272f02cdb89a3bdcbae233a0"}, + {file = "coverage-5.1-cp39-cp39-win32.whl", hash = "sha256:a82b92b04a23d3c8a581fc049228bafde988abacba397d57ce95fe95e0338ab4"}, + {file = "coverage-5.1-cp39-cp39-win_amd64.whl", hash = "sha256:bb28a7245de68bf29f6fb199545d072d1036a1917dca17a1e75bbb919e14ee8e"}, + {file = 
"coverage-5.1.tar.gz", hash = "sha256:f90bfc4ad18450c80b024036eaf91e4a246ae287701aaa88eaebebf150868052"}, ] distlib = [ {file = "distlib-0.3.0.zip", hash = "sha256:2e166e231a26b36d6dfe35a48c4464346620f8645ed0ace01ee31822b288de21"}, @@ -899,8 +906,8 @@ jmespath = [ {file = "jmespath-0.9.5.tar.gz", hash = "sha256:cca55c8d153173e21baa59983015ad0daf603f9cb799904ff057bfb8ff8dc2d9"}, ] jsonpickle = [ - {file = "jsonpickle-1.3-py2.py3-none-any.whl", hash = "sha256:efc6839cb341985f0c24f98650a4c1063a2877c236ffd3d7e1662f0c482bac93"}, - {file = "jsonpickle-1.3.tar.gz", hash = "sha256:71bca2b80ae28af4e3f86629ef247100af7f97032b5ca8d791c1f8725b411d95"}, + {file = "jsonpickle-1.4-py2.py3-none-any.whl", hash = "sha256:3d71018794242f6b1640f779a94a192500f73ceed9ef579b4f01799171ec3fb3"}, + {file = "jsonpickle-1.4.tar.gz", hash = "sha256:e8ca6ec3f379f5eee6e11380d48db220aacc282b480dea46b11cc6f6009d1cdb"}, ] mako = [ {file = "Mako-1.1.2-py2.py3-none-any.whl", hash = "sha256:8e8b53c71c7e59f3de716b6832c4e401d903af574f6962edbbbf6ecc2a5fe6c9"}, @@ -961,8 +968,8 @@ packaging = [ {file = "packaging-20.3.tar.gz", hash = "sha256:3c292b474fda1671ec57d46d739d072bfd495a4f51ad01a055121d81e952b7a3"}, ] pathspec = [ - {file = "pathspec-0.7.0-py2.py3-none-any.whl", hash = "sha256:163b0632d4e31cef212976cf57b43d9fd6b0bac6e67c26015d611a647d5e7424"}, - {file = "pathspec-0.7.0.tar.gz", hash = "sha256:562aa70af2e0d434367d9790ad37aed893de47f1693e4201fd1d3dca15d19b96"}, + {file = "pathspec-0.8.0-py2.py3-none-any.whl", hash = "sha256:7d91249d21749788d07a2d0f94147accd8f845507400749ea19c1ec9054a12b0"}, + {file = "pathspec-0.8.0.tar.gz", hash = "sha256:da45173eb3a6f2a5a487efba21f050af2b41948be6ab52b6a1e3ff22bb8b7061"}, ] pdoc3 = [ {file = "pdoc3-0.7.5.tar.gz", hash = "sha256:ebca75b7fcf23f3b4320abe23339834d3f08c28517718e9d29e555fc38eeb33c"}, @@ -1048,8 +1055,8 @@ six = [ {file = "six-1.14.0.tar.gz", hash = "sha256:236bdbdce46e6e6a3d61a337c0f8b763ca1e8717c03b369e87a7ec7ce1319c0a"}, ] testfixtures = [ - {file = 
"testfixtures-6.14.0-py2.py3-none-any.whl", hash = "sha256:799144b3cbef7b072452d9c36cbd024fef415ab42924b96aad49dfd9c763de66"}, - {file = "testfixtures-6.14.0.tar.gz", hash = "sha256:cdfc3d73cb6d3d4dc3c67af84d912e86bf117d30ae25f02fe823382ef99383d2"}, + {file = "testfixtures-6.14.1-py2.py3-none-any.whl", hash = "sha256:30566e24a1b34e4d3f8c13abf62557d01eeb4480bcb8f1745467bfb0d415a7d9"}, + {file = "testfixtures-6.14.1.tar.gz", hash = "sha256:58d2b3146d93bc5ddb0cd24e0ccacb13e29bdb61e5c81235c58f7b8ee4470366"}, ] toml = [ {file = "toml-0.10.0-py2.7.egg", hash = "sha256:f1db651f9657708513243e61e6cc67d101a39bad662eaa9b5546f789338e07a3"}, @@ -1080,12 +1087,12 @@ typed-ast = [ {file = "typed_ast-1.4.1.tar.gz", hash = "sha256:8c8aaad94455178e3187ab22c8b01a3837f8ee50e09cf31f1ba129eb293ec30b"}, ] urllib3 = [ - {file = "urllib3-1.25.8-py2.py3-none-any.whl", hash = "sha256:2f3db8b19923a873b3e5256dc9c2dedfa883e33d87c690d9c7913e1f40673cdc"}, - {file = "urllib3-1.25.8.tar.gz", hash = "sha256:87716c2d2a7121198ebcb7ce7cccf6ce5e9ba539041cfbaeecfb641dc0bf6acc"}, + {file = "urllib3-1.25.9-py2.py3-none-any.whl", hash = "sha256:88206b0eb87e6d677d424843ac5209e3fb9d0190d0ee169599165ec25e9d9115"}, + {file = "urllib3-1.25.9.tar.gz", hash = "sha256:3018294ebefce6572a474f0604c2021e33b3fd8006ecd11d62107a5d2a963527"}, ] virtualenv = [ - {file = "virtualenv-20.0.16-py2.py3-none-any.whl", hash = "sha256:94f647e12d1e6ced2541b93215e51752aecbd1bbb18eb1816e2867f7532b1fe1"}, - {file = "virtualenv-20.0.16.tar.gz", hash = "sha256:6ea131d41c477f6c4b7863948a9a54f7fa196854dbef73efbdff32b509f4d8bf"}, + {file = "virtualenv-20.0.18-py2.py3-none-any.whl", hash = "sha256:5021396e8f03d0d002a770da90e31e61159684db2859d0ba4850fbea752aa675"}, + {file = "virtualenv-20.0.18.tar.gz", hash = "sha256:ac53ade75ca189bc97b6c1d9ec0f1a50efe33cbf178ae09452dcd9fd309013c1"}, ] wcwidth = [ {file = "wcwidth-0.1.9-py2.py3-none-any.whl", hash = "sha256:cafe2186b3c009a04067022ce1dcd79cb38d8d65ee4f4791b8888d6599d1bbe1"}, diff --git 
a/python/pyproject.toml b/python/pyproject.toml index 851feac4cbb..3360e7fd7ed 100644 --- a/python/pyproject.toml +++ b/python/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "aws_lambda_powertools" -version = "0.6.3" +version = "0.7.0" description = "Python utilities for AWS Lambda functions including but not limited to tracing, logging and custom metric" authors = ["Amazon Web Services"] classifiers=[ diff --git a/python/tests/functional/test_logger.py b/python/tests/functional/test_logger.py index cd2327231a9..db8a09e8d57 100644 --- a/python/tests/functional/test_logger.py +++ b/python/tests/functional/test_logger.py @@ -150,7 +150,7 @@ def test_inject_lambda_context_log_event_request_env_var(monkeypatch, root_logge logger = logger_setup() - @logger_inject_lambda_context() + @logger_inject_lambda_context def handler(event, context): logger.info("Hello") @@ -177,7 +177,7 @@ def test_inject_lambda_context_log_no_request_by_default(monkeypatch, root_logge logger = logger_setup() - @logger_inject_lambda_context() + @logger_inject_lambda_context def handler(event, context): logger.info("Hello") diff --git a/python/tests/functional/test_metrics.py b/python/tests/functional/test_metrics.py index 95b0422c9f2..0feaf3303ff 100644 --- a/python/tests/functional/test_metrics.py +++ b/python/tests/functional/test_metrics.py @@ -155,6 +155,8 @@ def lambda_handler(evt, handler): remove_timestamp(metrics=[output, expected]) # Timestamp will always be different assert expected["_aws"] == output["_aws"] + for dimension in dimensions: + assert dimension["name"] in output def test_namespace_env_var(monkeypatch, capsys, metric, dimension, namespace): @@ -208,7 +210,7 @@ def test_log_metrics_schema_error(metrics, dimensions, namespace): my_metrics = Metrics() @my_metrics.log_metrics - def lambda_handler(evt, handler): + def lambda_handler(evt, context): my_metrics.add_namespace(namespace) for metric in metrics: my_metrics.add_metric(**metric) diff --git 
a/python/tests/functional/test_tracing.py b/python/tests/functional/test_tracing.py index b77710090e8..8ceb479190a 100644 --- a/python/tests/functional/test_tracing.py +++ b/python/tests/functional/test_tracing.py @@ -8,70 +8,31 @@ def dummy_response(): return {"test": "succeeds"} -@pytest.fixture -def xray_stub(mocker): - class XRayStub: - def __init__( - self, - put_metadata_mock: mocker.MagicMock = None, - put_annotation_mock: mocker.MagicMock = None, - begin_subsegment_mock: mocker.MagicMock = None, - end_subsegment_mock: mocker.MagicMock = None, - ): - self.put_metadata_mock = put_metadata_mock or mocker.MagicMock() - self.put_annotation_mock = put_annotation_mock or mocker.MagicMock() - self.begin_subsegment_mock = begin_subsegment_mock or mocker.MagicMock() - self.end_subsegment_mock = end_subsegment_mock or mocker.MagicMock() - - def put_metadata(self, *args, **kwargs): - return self.put_metadata_mock(*args, **kwargs) - - def put_annotation(self, *args, **kwargs): - return self.put_annotation_mock(*args, **kwargs) - - def begin_subsegment(self, *args, **kwargs): - return self.begin_subsegment_mock(*args, **kwargs) - - def end_subsegment(self, *args, **kwargs): - return self.end_subsegment_mock(*args, **kwargs) - - return XRayStub - - -def test_tracer_lambda_handler(mocker, dummy_response, xray_stub): - put_metadata_mock = mocker.MagicMock() - begin_subsegment_mock = mocker.MagicMock() - end_subsegment_mock = mocker.MagicMock() - - xray_provider = xray_stub( - put_metadata_mock=put_metadata_mock, - begin_subsegment_mock=begin_subsegment_mock, - end_subsegment_mock=end_subsegment_mock, - ) - tracer = Tracer(provider=xray_provider, service="booking") +@pytest.fixture(scope="function", autouse=True) +def reset_tracing_config(): + Tracer._reset_config() + yield + + +def test_capture_lambda_handler(dummy_response): + # GIVEN tracer is disabled, and decorator is used + # WHEN a lambda handler is run + # THEN tracer should not raise an Exception + tracer = 
Tracer(disabled=True) @tracer.capture_lambda_handler def handler(event, context): return dummy_response - handler({}, mocker.MagicMock()) + handler({}, {}) - assert begin_subsegment_mock.call_count == 1 - assert begin_subsegment_mock.call_args == mocker.call(name="## handler") - assert end_subsegment_mock.call_count == 1 - assert put_metadata_mock.call_args == mocker.call( - key="lambda handler response", value=dummy_response, namespace="booking" - ) +def test_capture_method(dummy_response): + # GIVEN tracer is disabled, and method decorator is used + # WHEN a function is run + # THEN tracer should not raise an Exception -def test_tracer_method(mocker, dummy_response, xray_stub): - put_metadata_mock = mocker.MagicMock() - put_annotation_mock = mocker.MagicMock() - begin_subsegment_mock = mocker.MagicMock() - end_subsegment_mock = mocker.MagicMock() - - xray_provider = xray_stub(put_metadata_mock, put_annotation_mock, begin_subsegment_mock, end_subsegment_mock) - tracer = Tracer(provider=xray_provider, service="booking") + tracer = Tracer(disabled=True) @tracer.capture_method def greeting(name, message): @@ -79,51 +40,87 @@ def greeting(name, message): greeting(name="Foo", message="Bar") - assert begin_subsegment_mock.call_count == 1 - assert begin_subsegment_mock.call_args == mocker.call(name="## greeting") - assert end_subsegment_mock.call_count == 1 - assert put_metadata_mock.call_args == mocker.call( - key="greeting response", value=dummy_response, namespace="booking" - ) +def test_tracer_lambda_emulator(monkeypatch, dummy_response): + # GIVEN tracer is run locally + # WHEN a lambda function is run through SAM CLI + # THEN tracer should not raise an Exception + monkeypatch.setenv("AWS_SAM_LOCAL", "true") + tracer = Tracer() + + @tracer.capture_lambda_handler + def handler(event, context): + return dummy_response -def test_tracer_custom_annotation(mocker, dummy_response, xray_stub): - put_annotation_mock = mocker.MagicMock() + handler({}, {}) + 
monkeypatch.delenv("AWS_SAM_LOCAL") - xray_provider = xray_stub(put_annotation_mock=put_annotation_mock) - tracer = Tracer(provider=xray_provider, service="booking") - annotation_key = "BookingId" - annotation_value = "123456" +def test_tracer_metadata_disabled(dummy_response): + # GIVEN tracer is disabled, and annotations/metadata are used + # WHEN a lambda handler is run + # THEN tracer should not raise an Exception and simply ignore + tracer = Tracer(disabled=True) @tracer.capture_lambda_handler def handler(event, context): - tracer.put_annotation(annotation_key, annotation_value) + tracer.put_annotation("PaymentStatus", "SUCCESS") + tracer.put_metadata("PaymentMetadata", "Metadata") return dummy_response - handler({}, mocker.MagicMock()) + handler({}, {}) + + +def test_tracer_env_vars(monkeypatch): + # GIVEN tracer disabled, is run without parameters + # WHEN service is explicitly defined + # THEN tracer should have use that service name + service_name = "booking" + monkeypatch.setenv("POWERTOOLS_SERVICE_NAME", service_name) + tracer_env_var = Tracer(disabled=True) - assert put_annotation_mock.call_count == 1 - assert put_annotation_mock.call_args == mocker.call(key=annotation_key, value=annotation_value) + assert tracer_env_var.service == service_name + tracer_explicit = Tracer(disabled=True, service=service_name) + assert tracer_explicit.service == service_name -def test_tracer_custom_metadata(mocker, dummy_response, xray_stub): - put_metadata_mock = mocker.MagicMock() + monkeypatch.setenv("POWERTOOLS_TRACE_DISABLED", "true") + tracer = Tracer() - xray_provider = xray_stub(put_metadata_mock=put_metadata_mock) + assert bool(tracer.disabled) is True - tracer = Tracer(provider=xray_provider, service="booking") - annotation_key = "Booking response" - annotation_value = {"bookingStatus": "CONFIRMED"} + +def test_tracer_with_exception(mocker): + # GIVEN tracer is disabled, decorator is used + # WHEN a lambda handler or method returns an Exception + # THEN tracer 
should reraise the same Exception + class CustomException(Exception): + pass + + tracer = Tracer(disabled=True) @tracer.capture_lambda_handler def handler(event, context): - tracer.put_metadata(annotation_key, annotation_value) - return dummy_response + raise CustomException("test") + + @tracer.capture_method + def greeting(name, message): + raise CustomException("test") + + with pytest.raises(CustomException): + handler({}, {}) + + with pytest.raises(CustomException): + greeting(name="Foo", message="Bar") + - handler({}, mocker.MagicMock()) +def test_tracer_reuse(): + # GIVEN tracer A, B were initialized + # WHEN tracer B explicitly reuses A config + # THEN tracer B attributes should be equal to tracer A + service_name = "booking" + tracer_a = Tracer(disabled=True, service=service_name) + tracer_b = Tracer() - assert put_metadata_mock.call_count == 2 - assert put_metadata_mock.call_args_list[0] == mocker.call( - key=annotation_key, value=annotation_value, namespace="booking" - ) + assert id(tracer_a) != id(tracer_b) + assert tracer_a.__dict__.items() == tracer_b.__dict__.items() diff --git a/python/tests/functional/test_utils.py b/python/tests/functional/test_utils.py new file mode 100644 index 00000000000..141acf9d96f --- /dev/null +++ b/python/tests/functional/test_utils.py @@ -0,0 +1,122 @@ +import json +from typing import Callable + +import pytest + +from aws_lambda_powertools.middleware_factory import lambda_handler_decorator + + +@pytest.fixture +def say_hi_middleware() -> Callable: + @lambda_handler_decorator + def say_hi(handler, event, context): + print("hi before lambda handler is executed") + return handler(event, context) + + return say_hi + + +@pytest.fixture +def say_bye_middleware() -> Callable: + @lambda_handler_decorator + def say_bye(handler, event, context): + ret = handler(event, context) + print("goodbye after lambda handler is executed") + return ret + + return say_bye + + +def test_factory_single_decorator(capsys, say_hi_middleware): + 
@say_hi_middleware + def lambda_handler(evt, ctx): + return True + + lambda_handler({}, {}) + output = capsys.readouterr().out.strip() + assert "hi before lambda handler is executed" in output + + +def test_factory_nested_decorator(capsys, say_hi_middleware, say_bye_middleware): + @say_bye_middleware + @say_hi_middleware + def lambda_handler(evt, ctx): + return True + + lambda_handler({}, {}) + output = capsys.readouterr().out.strip() + assert "hi before lambda handler is executed" in output + assert "goodbye after lambda handler is executed" in output + + +def test_factory_exception_propagation(say_bye_middleware, say_hi_middleware): + @say_bye_middleware + @say_hi_middleware + def lambda_handler(evt, ctx): + raise ValueError("Something happened") + + with pytest.raises(ValueError): + lambda_handler({}, {}) + + +def test_factory_explicit_tracing(monkeypatch): + monkeypatch.setenv("POWERTOOLS_TRACE_DISABLED", "true") + + @lambda_handler_decorator(trace_execution=True) + def no_op(handler, event, context): + ret = handler(event, context) + return ret + + @no_op + def lambda_handler(evt, ctx): + return True + + lambda_handler({}, {}) + + +def test_factory_explicit_tracing_env_var(monkeypatch): + monkeypatch.setenv("POWERTOOLS_TRACE_MIDDLEWARES", "true") + monkeypatch.setenv("POWERTOOLS_TRACE_DISABLED", "true") + + @lambda_handler_decorator + def no_op(handler, event, context): + ret = handler(event, context) + return ret + + @no_op + def lambda_handler(evt, ctx): + return True + + lambda_handler({}, {}) + + +def test_factory_decorator_with_kwarg_params(capsys): + @lambda_handler_decorator + def log_event(handler, event, context, log_event=False): + if log_event: + print(json.dumps(event)) + return handler(event, context) + + @log_event(log_event=True) + def lambda_handler(evt, ctx): + return True + + event = {"message": "hello"} + lambda_handler(event, {}) + output = json.loads(capsys.readouterr().out.strip()) + + assert event == output + + +def 
test_factory_decorator_with_non_kwarg_params(): + @lambda_handler_decorator + def log_event(handler, event, context, log_event=False): + if log_event: + print(json.dumps(event)) + return handler(event, context) + + with pytest.raises(TypeError): + + @log_event(True) + def lambda_handler(evt, ctx): + return True diff --git a/python/tests/unit/test_tracing.py b/python/tests/unit/test_tracing.py index 91144d64f9d..a7b98389e33 100644 --- a/python/tests/unit/test_tracing.py +++ b/python/tests/unit/test_tracing.py @@ -1,3 +1,5 @@ +from unittest import mock + import pytest from aws_lambda_powertools.tracing import Tracer @@ -8,100 +10,146 @@ def dummy_response(): return {"test": "succeeds"} -def test_capture_lambda_handler(mocker, dummy_response): - # GIVEN tracer is disabled, and decorator is used - # WHEN a lambda handler is run - # THEN tracer should not raise an Exception - tracer = Tracer(disabled=True) +@pytest.fixture +def xray_stub(mocker): + class XRayStub: + def __init__( + self, + put_metadata_mock: mocker.MagicMock = None, + put_annotation_mock: mocker.MagicMock = None, + begin_subsegment_mock: mocker.MagicMock = None, + end_subsegment_mock: mocker.MagicMock = None, + ): + self.put_metadata_mock = put_metadata_mock or mocker.MagicMock() + self.put_annotation_mock = put_annotation_mock or mocker.MagicMock() + self.begin_subsegment_mock = begin_subsegment_mock or mocker.MagicMock() + self.end_subsegment_mock = end_subsegment_mock or mocker.MagicMock() - @tracer.capture_lambda_handler - def handler(event, context): - return dummy_response + def put_metadata(self, *args, **kwargs): + return self.put_metadata_mock(*args, **kwargs) - handler({}, mocker.MagicMock()) + def put_annotation(self, *args, **kwargs): + return self.put_annotation_mock(*args, **kwargs) + def begin_subsegment(self, *args, **kwargs): + return self.begin_subsegment_mock(*args, **kwargs) -def test_capture_method(mocker, dummy_response): - # GIVEN tracer is disabled, and method decorator is used - 
# WHEN a function is run - # THEN tracer should not raise an Exception + def end_subsegment(self, *args, **kwargs): + return self.end_subsegment_mock(*args, **kwargs) - tracer = Tracer(disabled=True) + return XRayStub - @tracer.capture_method - def greeting(name, message): - return dummy_response - greeting(name="Foo", message="Bar") +@pytest.fixture(scope="function", autouse=True) +def reset_tracing_config(): + Tracer._reset_config() + yield -def test_tracer_with_exception(mocker): - # GIVEN tracer is disabled, decorator is used - # WHEN a lambda handler or method returns an Exception - # THEN tracer should reraise the same Exception - class CustomException(Exception): - pass +def test_tracer_lambda_handler(mocker, dummy_response, xray_stub): + put_metadata_mock = mocker.MagicMock() + begin_subsegment_mock = mocker.MagicMock() + end_subsegment_mock = mocker.MagicMock() - tracer = Tracer(disabled=True) + xray_provider = xray_stub( + put_metadata_mock=put_metadata_mock, + begin_subsegment_mock=begin_subsegment_mock, + end_subsegment_mock=end_subsegment_mock, + ) + tracer = Tracer(provider=xray_provider, service="booking") @tracer.capture_lambda_handler def handler(event, context): - raise CustomException("test") + return dummy_response + + handler({}, mocker.MagicMock()) + + assert begin_subsegment_mock.call_count == 1 + assert begin_subsegment_mock.call_args == mocker.call(name="## handler") + assert end_subsegment_mock.call_count == 1 + assert put_metadata_mock.call_args == mocker.call( + key="lambda handler response", value=dummy_response, namespace="booking" + ) + + +def test_tracer_method(mocker, dummy_response, xray_stub): + put_metadata_mock = mocker.MagicMock() + put_annotation_mock = mocker.MagicMock() + begin_subsegment_mock = mocker.MagicMock() + end_subsegment_mock = mocker.MagicMock() + + xray_provider = xray_stub(put_metadata_mock, put_annotation_mock, begin_subsegment_mock, end_subsegment_mock) + tracer = Tracer(provider=xray_provider, 
service="booking") @tracer.capture_method def greeting(name, message): - raise CustomException("test") + return dummy_response - with pytest.raises(CustomException): - handler({}, mocker.MagicMock()) + greeting(name="Foo", message="Bar") - with pytest.raises(CustomException): - greeting(name="Foo", message="Bar") + assert begin_subsegment_mock.call_count == 1 + assert begin_subsegment_mock.call_args == mocker.call(name="## greeting") + assert end_subsegment_mock.call_count == 1 + assert put_metadata_mock.call_args == mocker.call( + key="greeting response", value=dummy_response, namespace="booking" + ) -def test_tracer_lambda_emulator(monkeypatch, mocker, dummy_response): - # GIVEN tracer is run locally - # WHEN a lambda function is run through SAM CLI - # THEN tracer should not raise an Exception - monkeypatch.setenv("AWS_SAM_LOCAL", "true") - tracer = Tracer() +def test_tracer_custom_metadata(mocker, dummy_response, xray_stub): + put_metadata_mock = mocker.MagicMock() + + xray_provider = xray_stub(put_metadata_mock=put_metadata_mock) + + tracer = Tracer(provider=xray_provider, service="booking") + annotation_key = "Booking response" + annotation_value = {"bookingStatus": "CONFIRMED"} @tracer.capture_lambda_handler def handler(event, context): + tracer.put_metadata(annotation_key, annotation_value) return dummy_response handler({}, mocker.MagicMock()) + assert put_metadata_mock.call_count == 2 + assert put_metadata_mock.call_args_list[0] == mocker.call( + key=annotation_key, value=annotation_value, namespace="booking" + ) -def test_tracer_env_vars(monkeypatch): - # GIVEN tracer disabled, is run without parameters - # WHEN service is explicitly defined - # THEN tracer should have use that service name - service_name = "booking" - monkeypatch.setenv("POWERTOOLS_SERVICE_NAME", service_name) - tracer_env_var = Tracer(disabled=True) - - assert tracer_env_var.service == service_name - tracer_explicit = Tracer(disabled=True, service=service_name) - assert 
tracer_explicit.service == service_name +def test_tracer_custom_annotation(mocker, dummy_response, xray_stub): + put_annotation_mock = mocker.MagicMock() - monkeypatch.setenv("POWERTOOLS_TRACE_DISABLED", "true") - tracer = Tracer() + xray_provider = xray_stub(put_annotation_mock=put_annotation_mock) - assert bool(tracer.disabled) is True - - -def test_tracer_metadata_disabled(mocker, dummy_response): - # GIVEN tracer is disabled, and annotations/metadata are used - # WHEN a lambda handler is run - # THEN tracer should not raise an Exception and simply ignore - tracer = Tracer(disabled=True) + tracer = Tracer(provider=xray_provider, service="booking") + annotation_key = "BookingId" + annotation_value = "123456" @tracer.capture_lambda_handler def handler(event, context): - tracer.put_annotation("PaymentStatus", "SUCCESS") - tracer.put_metadata("PaymentMetadata", "Metadata") + tracer.put_annotation(annotation_key, annotation_value) return dummy_response handler({}, mocker.MagicMock()) + + assert put_annotation_mock.call_count == 1 + assert put_annotation_mock.call_args == mocker.call(key=annotation_key, value=annotation_value) + + +@mock.patch("aws_lambda_powertools.tracing.Tracer.patch") +def test_tracer_autopatch(patch_mock): + # GIVEN tracer is instantiated + # WHEN default options were used, or patch() was called + # THEN tracer should patch all modules + Tracer(disabled=True) + assert patch_mock.call_count == 1 + + +@mock.patch("aws_lambda_powertools.tracing.Tracer.patch") +def test_tracer_no_autopatch(patch_mock): + # GIVEN tracer is instantiated + # WHEN auto_patch is disabled + # THEN tracer should not patch any module + Tracer(disabled=True, auto_patch=False) + assert patch_mock.call_count == 0 From a99e6472c2ca342cb3d43f8767a58fa218d3b67d Mon Sep 17 00:00:00 2001 From: Heitor Lessa Date: Fri, 24 Apr 2020 17:34:41 +0100 Subject: [PATCH 12/21] Adopt logging best practices (#23) * fix: set NullHandler for package logger * improv: remove root logger logic, lib * 
fix: update exception type * improv: propagate log level if set, null otherwise * fix: explicit wins over env var * chore: fix test naming * fix: exception logging * improv: shorten log location * feat: add Logger class wrapper * improv: add deprecation warning * BREAKING CHANGE: logger_setup, inject_lambda_ctx * improv: update tests * improv: cover duplicated keys edge case * improv: cover debug package logging * improv: more coverage, linting * improv: complete coverage, fix dead code * docs: update readme to reflect changes * fix: address jacob's code review feedback * chore: linting * fix: metric spillover at 100, not 101 * fix: trace auto-disable, doc edge cases * chore: linting * chore: 0.8.0 version bump Co-authored-by: heitorlessa --- README.md | 2 +- python/HISTORY.md | 6 + python/README.md | 70 ++- python/aws_lambda_powertools/__init__.py | 5 + .../aws_lambda_powertools/logging/__init__.py | 4 +- .../logging/aws_lambda_logging.py | 98 ---- .../logging/exceptions.py | 2 + .../aws_lambda_powertools/logging/logger.py | 433 +++++++++++++----- python/aws_lambda_powertools/metrics/base.py | 26 +- .../aws_lambda_powertools/metrics/metric.py | 4 - .../aws_lambda_powertools/metrics/metrics.py | 4 - .../middleware_factory/exceptions.py | 2 + .../middleware_factory/factory.py | 13 +- .../aws_lambda_powertools/tracing/tracer.py | 26 +- python/pyproject.toml | 2 +- .../functional/test_aws_lambda_logging.py | 63 +-- python/tests/functional/test_logger.py | 108 +++-- python/tests/functional/test_metrics.py | 59 ++- ...st_utils.py => test_middleware_factory.py} | 21 +- python/tests/unit/test_tracing.py | 28 ++ 20 files changed, 641 insertions(+), 335 deletions(-) delete mode 100644 python/aws_lambda_powertools/logging/aws_lambda_logging.py create mode 100644 python/aws_lambda_powertools/logging/exceptions.py create mode 100644 python/aws_lambda_powertools/middleware_factory/exceptions.py rename python/tests/functional/{test_utils.py => test_middleware_factory.py} (82%) 
diff --git a/README.md b/README.md index 175e8b98470..9a1245bfa80 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ # Lambda Powertools -![PackageStatus](https://img.shields.io/static/v1?label=status&message=beta&color=blueviolet?style=flat-square) ![PythonSupport](https://img.shields.io/static/v1?label=python&message=3.6%20|%203.7|%203.8&color=blue?style=flat-square&logo=python) +![Python Build](https://github.com/awslabs/aws-lambda-powertools/workflows/Powertools%20Python/badge.svg?branch=master) A suite of utilities for AWS Lambda Functions that makes tracing with AWS X-Ray, structured logging and creating custom metrics asynchronously easier. diff --git a/python/HISTORY.md b/python/HISTORY.md index 9d9d296c1c2..8001c9bba2c 100644 --- a/python/HISTORY.md +++ b/python/HISTORY.md @@ -1,5 +1,11 @@ # HISTORY +## April 24th + +* Introduces `Logger` for stuctured logging as a replacement for `logger_setup` +* Introduces `Logger.inject_lambda_context` decorator as a replacement for `logger_inject_lambda_context` +* Raise `DeprecationWarning` exception for both `logger_setup`, `logger_inject_lambda_context` + ## April 20th, 2020 **0.7.0** diff --git a/python/README.md b/python/README.md index 5a96dc80b63..ff22f799077 100644 --- a/python/README.md +++ b/python/README.md @@ -13,7 +13,7 @@ A suite of utilities for AWS Lambda Functions that makes tracing with AWS X-Ray, > It currently uses AWS X-Ray * Decorators that capture cold start as annotation, and response and exceptions as metadata -* Run functions locally without code change to disable tracing +* Run functions locally with SAM CLI without code change to disable tracing * Explicitly disable tracing via env var `POWERTOOLS_TRACE_DISABLED="true"` **Logging** @@ -24,6 +24,7 @@ A suite of utilities for AWS Lambda Functions that makes tracing with AWS X-Ray, * Logs canonical custom metric line to logs that can be consumed asynchronously * Log sampling enables DEBUG log level for a percentage of requests (disabled by 
default) - Enable via `POWERTOOLS_LOGGER_SAMPLE_RATE=0.1`, ranges from 0 to 1, where 0.1 is 10% and 1 is 100% +* Append additional keys to structured log at any point in time so they're available across log statements **Metrics** @@ -113,6 +114,8 @@ tracer = Tracer(auto_patch=False) # new instance using existing configuration wi ### Logging +> **NOTE** `logger_setup` and `logger_inject_lambda_context` are deprecated and will be completely removed once it's GA. + **Example SAM template using supported environment variables** ```yaml @@ -128,13 +131,12 @@ Globals: **Pseudo Python Lambda code** ```python -from aws_lambda_powertools.logging import logger_setup, logger_inject_lambda_context +from aws_lambda_powertools.logging import Logger -logger = logger_setup() -# logger_setup(service="payment") # also accept explicit service name -# logger_setup(level="INFO") # also accept explicit log level +logger = Logger() +# Logger(service="payment", level="INFO") # also accepts explicit service name, log level -@logger_inject_lambda_context +@logger.inject_lambda_context def handler(event, context) logger.info("Collecting payment") ... 
@@ -159,6 +161,7 @@ def handler(event, context) "lambda_function_arn":"arn:aws:lambda:eu-west-1:12345678910:function:test", "lambda_request_id":"52fdfc07-2182-154f-163f-5f0f9a621d72", "cold_start": "true", + "sampling_rate": 0.1, "message": "Collecting payment" } @@ -172,6 +175,7 @@ def handler(event, context) "lambda_function_arn":"arn:aws:lambda:eu-west-1:12345678910:function:test", "lambda_request_id":"52fdfc07-2182-154f-163f-5f0f9a621d72", "cold_start": "true", + "sampling_rate": 0.1, "message":{ "operation":"collect_payment", "charge_id": "ch_AZFlk2345C0" @@ -179,6 +183,40 @@ def handler(event, context) } ``` +**Append additional keys to structured log** + +```python +from aws_lambda_powertools.logging import Logger + +logger = Logger() + +@logger.inject_lambda_context +def handler(event, context) + if "order_id" in event: + logger.structure_logs(append=True, order_id=event["order_id"]) + logger.info("Collecting payment") + ... +``` + +**Excerpt output in CloudWatch Logs** + +```json +{ + "timestamp":"2019-08-22 18:17:33,774", + "level":"INFO", + "location":"collect.handler:1", + "service":"payment", + "lambda_function_name":"test", + "lambda_function_memory_size":"128", + "lambda_function_arn":"arn:aws:lambda:eu-west-1:12345678910:function:test", + "lambda_request_id":"52fdfc07-2182-154f-163f-5f0f9a621d72", + "cold_start": "true", + "sampling_rate": 0.1, + "order_id": "order_id_value", + "message": "Collecting payment" +} +``` + ### Custom Metrics async > **NOTE** `log_metric` will be removed once it's GA. @@ -229,6 +267,7 @@ with single_metric(name="ColdStart", unit=MetricUnit.Count, value=1) as metric: metric.add_dimension(name="function_context", value="$LATEST") ``` +> **NOTE**: If you want to instantiate Metrics() in multiple places in your code, make sure to use `POWERTOOLS_METRICS_NAMESPACE` env var as we don't keep a copy of that across instances.
### Utilities @@ -320,12 +359,19 @@ def lambda_handler(event, context): return True ``` -## Beta -> **[Progress towards GA](https://github.com/awslabs/aws-lambda-powertools/projects/1)** +### Debug mode + +By default, all debug log statements from AWS Lambda Powertools package are suppressed. If you'd like to enable them, use `set_package_logger` utility: + +```python +import aws_lambda_powertools +aws_lambda_powertools.logging.logger.set_package_logger() +... +``` + +## Beta -This library may change its API/methods or environment variables as it receives feedback from customers. Currently looking for ideas in the following areas before making it stable: +This library may change its API/methods or environment variables as it receives feedback from customers -* **Should Tracer patch all possible imported libraries by default or only AWS SDKs?** - - Patching all libraries may have a small performance penalty (~50ms) at cold start - - Alternatively, we could patch only AWS SDK if available and to provide a param to patch multiple `Tracer(modules=("boto3", "requests"))` +**[Progress towards GA](https://github.com/awslabs/aws-lambda-powertools/projects/1)** diff --git a/python/aws_lambda_powertools/__init__.py b/python/aws_lambda_powertools/__init__.py index b048ddfe742..2be705fb386 100644 --- a/python/aws_lambda_powertools/__init__.py +++ b/python/aws_lambda_powertools/__init__.py @@ -1,5 +1,10 @@ # -*- coding: utf-8 -*- """Top-level package for Lambda Python Powertools.""" +import logging __author__ = """Amazon Web Services""" + +logger = logging.getLogger("aws_lambda_powertools") +logger.addHandler(logging.NullHandler()) +logger.propagate = False diff --git a/python/aws_lambda_powertools/logging/__init__.py b/python/aws_lambda_powertools/logging/__init__.py index 855abba4635..4c1bb2ec5c6 100644 --- a/python/aws_lambda_powertools/logging/__init__.py +++ b/python/aws_lambda_powertools/logging/__init__.py @@ -1,6 +1,6 @@ """Logging utility """ from ..helper.models 
import MetricUnit -from .logger import log_metric, logger_inject_lambda_context, logger_setup +from .logger import Logger, log_metric, logger_inject_lambda_context, logger_setup -__all__ = ["logger_setup", "logger_inject_lambda_context", "log_metric", "MetricUnit"] +__all__ = ["logger_setup", "logger_inject_lambda_context", "log_metric", "MetricUnit", "Logger"] diff --git a/python/aws_lambda_powertools/logging/aws_lambda_logging.py b/python/aws_lambda_powertools/logging/aws_lambda_logging.py deleted file mode 100644 index 1b42ec3c707..00000000000 --- a/python/aws_lambda_powertools/logging/aws_lambda_logging.py +++ /dev/null @@ -1,98 +0,0 @@ -"""Microlibrary to simplify logging in AWS Lambda. -Originally taken from https://gitlab.com/hadrien/aws_lambda_logging/ -""" -import json -import logging - - -def json_formatter(obj): - """Formatter for unserialisable values.""" - return str(obj) - - -class JsonFormatter(logging.Formatter): - """AWS Lambda Logging formatter. - - Formats the log message as a JSON encoded string. If the message is a - dict it will be used directly. If the message can be parsed as JSON, then - the parse d value is used in the output record. - """ - - def __init__(self, **kwargs): - """Return a JsonFormatter instance. - - The `json_default` kwarg is used to specify a formatter for otherwise - unserialisable values. It must not throw. Defaults to a function that - coerces the value to a string. - - Other kwargs are used to specify log field format strings. 
- """ - datefmt = kwargs.pop("datefmt", None) - - super(JsonFormatter, self).__init__(datefmt=datefmt) - self.format_dict = { - "timestamp": "%(asctime)s", - "level": "%(levelname)s", - "location": "%(name)s.%(funcName)s:%(lineno)d", - } - self.format_dict.update(kwargs) - self.default_json_formatter = kwargs.pop("json_default", json_formatter) - - def format(self, record): # noqa: A003 - record_dict = record.__dict__.copy() - record_dict["asctime"] = self.formatTime(record, self.datefmt) - - log_dict = {k: v % record_dict for k, v in self.format_dict.items() if v} - - if isinstance(record_dict["msg"], dict): - log_dict["message"] = record_dict["msg"] - else: - log_dict["message"] = record.getMessage() - - # Attempt to decode the message as JSON, if so, merge it with the - # overall message for clarity. - try: - log_dict["message"] = json.loads(log_dict["message"]) - except (TypeError, ValueError): - pass - - if record.exc_info: - # Cache the traceback text to avoid converting it multiple times - # (it's constant anyway) - # from logging.Formatter:format - if not record.exc_text: - record.exc_text = self.formatException(record.exc_info) - - if record.exc_text: - log_dict["exception"] = record.exc_text - - json_record = json.dumps(log_dict, default=self.default_json_formatter) - - if hasattr(json_record, "decode"): # pragma: no cover - json_record = json_record.decode("utf-8") - - return json_record - - -def setup(level="DEBUG", formatter_cls=JsonFormatter, boto_level=None, **kwargs): - """Overall Metadata Formatting.""" - if formatter_cls: # pragma: no cover - for handler in logging.root.handlers: - handler.setFormatter(formatter_cls(**kwargs)) - - try: - logging.root.setLevel(level) - except ValueError: - logging.root.error("Invalid log level: %s", level) - level = "INFO" - logging.root.setLevel(level) - - if not boto_level: - boto_level = level - - try: # pragma: no cover - logging.getLogger("boto").setLevel(boto_level) - 
logging.getLogger("boto3").setLevel(boto_level) - logging.getLogger("botocore").setLevel(boto_level) - except ValueError: # pragma: no cover - logging.root.error("Invalid log level: %s", boto_level) diff --git a/python/aws_lambda_powertools/logging/exceptions.py b/python/aws_lambda_powertools/logging/exceptions.py new file mode 100644 index 00000000000..65b30906edf --- /dev/null +++ b/python/aws_lambda_powertools/logging/exceptions.py @@ -0,0 +1,2 @@ +class InvalidLoggerSamplingRateError(Exception): + pass diff --git a/python/aws_lambda_powertools/logging/logger.py b/python/aws_lambda_powertools/logging/logger.py index 32dc1be6c1a..02c0e912b12 100644 --- a/python/aws_lambda_powertools/logging/logger.py +++ b/python/aws_lambda_powertools/logging/logger.py @@ -1,160 +1,155 @@ +import copy import functools import itertools +import json import logging import os import random +import sys import warnings from distutils.util import strtobool -from typing import Any, Callable, Dict +from typing import Any, Callable, Dict, Union from ..helper.models import MetricUnit, build_lambda_context_model, build_metric_unit_from_str -from . import aws_lambda_logging +from .exceptions import InvalidLoggerSamplingRateError logger = logging.getLogger(__name__) -logger.setLevel(os.getenv("LOG_LEVEL", "INFO")) is_cold_start = True -def logger_setup(service: str = "service_undefined", level: str = "INFO", sampling_rate: float = 0.0, **kwargs): - """Setups root logger to format statements in JSON. +def json_formatter(unserialized_value: Any): + """JSON custom serializer to cast unserialisable values to strings. - Includes service name and any additional key=value into logs - It also accepts both service name or level explicitly via env vars + Example + ------- - Environment variables - --------------------- - POWERTOOLS_SERVICE_NAME : str - service name - LOG_LEVEL: str - logging level (e.g. 
INFO, DEBUG) - POWERTOOLS_LOGGER_SAMPLE_RATE: float - samping rate ranging from 0 to 1, 1 being 100% sampling + **Serialize unserialisable value to string** + + class X: pass + value = {"x": X()} + + json.dumps(value, default=json_formatter) Parameters ---------- - service : str, optional - service name to be appended in logs, by default "service_undefined" - level : str, optional - logging.level, by default "INFO" - sample_rate: float, optional - sample rate for debug calls within execution context defaults to 0 + unserialized_value: Any + Python object unserializable by JSON + """ + return str(unserialized_value) - Example - ------- - Setups structured logging in JSON for Lambda functions with explicit service name - >>> from aws_lambda_powertools.logging import logger_setup - >>> logger = logger_setup(service="payment") - >>> - >>> def handler(event, context): - logger.info("Hello") +class JsonFormatter(logging.Formatter): + """AWS Lambda Logging formatter. - Setups structured logging in JSON for Lambda functions using env vars + Formats the log message as a JSON encoded string. If the message is a + dict it will be used directly. If the message can be parsed as JSON, then + the parsed value is used in the output record.
- $ export POWERTOOLS_SERVICE_NAME="payment" - $ export POWERTOOLS_LOGGER_SAMPLE_RATE=0.01 # 1% debug sampling - >>> from aws_lambda_powertools.logging import logger_setup - >>> logger = logger_setup() - >>> - >>> def handler(event, context): - logger.info("Hello") + Originally taken from https://gitlab.com/hadrien/aws_lambda_logging/ """ - service = os.getenv("POWERTOOLS_SERVICE_NAME") or service - sampling_rate = os.getenv("POWERTOOLS_LOGGER_SAMPLE_RATE") or sampling_rate - log_level = os.getenv("LOG_LEVEL") or level - logger = logging.getLogger(name=service) - - try: - if sampling_rate and random.random() <= float(sampling_rate): - log_level = logging.DEBUG - except ValueError: - raise ValueError( - "fExpected a float value ranging 0 to 1, but received {sampling_rate} instead. Please review POWERTOOLS_LOGGER_SAMPLE_RATE environment variable." # noqa E501 - ) - logger.setLevel(log_level) + def __init__(self, **kwargs): + """Return a JsonFormatter instance. - # Patch logger by structuring its outputs as JSON - aws_lambda_logging.setup(level=log_level, service=service, sampling_rate=sampling_rate, **kwargs) + The `json_default` kwarg is used to specify a formatter for otherwise + unserialisable values. It must not throw. Defaults to a function that + coerces the value to a string. - return logger + Other kwargs are used to specify log field format strings. 
+ """ + datefmt = kwargs.pop("datefmt", None) + super(JsonFormatter, self).__init__(datefmt=datefmt) + self.format_dict = { + "timestamp": "%(asctime)s", + "level": "%(levelname)s", + "location": "%(funcName)s:%(lineno)d", + } + self.format_dict.update(kwargs) + self.default_json_formatter = kwargs.pop("json_default", json_formatter) -def logger_inject_lambda_context(lambda_handler: Callable[[Dict, Any], Any] = None, log_event: bool = False): - """Decorator to capture Lambda contextual info and inject into struct logging + def format(self, record): # noqa: A003 + record_dict = record.__dict__.copy() + record_dict["asctime"] = self.formatTime(record, self.datefmt) - Parameters - ---------- - log_event : bool, optional - Instructs logger to log Lambda Event, by default False + log_dict = {} + for key, value in self.format_dict.items(): + if value: + # converts default logging expr to its record value + # e.g. '%(asctime)s' to '2020-04-24 09:35:40,698' + log_dict[key] = value % record_dict - Environment variables - --------------------- - POWERTOOLS_LOGGER_LOG_EVENT : str - instruct logger to log Lambda Event (e.g. `"true", "True", "TRUE"`) + if isinstance(record_dict["msg"], dict): + log_dict["message"] = record_dict["msg"] + else: + log_dict["message"] = record.getMessage() - Example - ------- - **Captures Lambda contextual runtime info (e.g memory, arn, req_id)** + # Attempt to decode the message as JSON, if so, merge it with the + # overall message for clarity. 
+ try: + log_dict["message"] = json.loads(log_dict["message"]) + except (json.decoder.JSONDecodeError, TypeError, ValueError): + pass - from aws_lambda_powertools.logging import logger_setup, logger_inject_lambda_context - import logging + if record.exc_info: + # Cache the traceback text to avoid converting it multiple times + # (it's constant anyway) + # from logging.Formatter:format + if not record.exc_text: + record.exc_text = self.formatException(record.exc_info) - logger = logging.getLogger(__name__) - logging.setLevel(logging.INFO) - logger_setup() + if record.exc_text: + log_dict["exception"] = record.exc_text - @logger_inject_lambda_context - def handler(event, context): - logger.info("Hello") + json_record = json.dumps(log_dict, default=self.default_json_formatter) - **Captures Lambda contextual runtime info and logs incoming request** + if hasattr(json_record, "decode"): # pragma: no cover + json_record = json_record.decode("utf-8") - from aws_lambda_powertools.logging import logger_setup, logger_inject_lambda_context - import logging + return json_record - logger = logging.getLogger(__name__) - logging.setLevel(logging.INFO) - logger_setup() - @logger_inject_lambda_context(log_event=True) - def handler(event, context): - logger.info("Hello") +def logger_setup( + service: str = None, level: str = None, sampling_rate: float = 0.0, legacy: bool = False, **kwargs +) -> DeprecationWarning: + """DEPRECATED - Returns + This will be removed when GA - Use `aws_lambda_powertools.logging.logger.Logger` instead + + Example ------- - decorate : Callable - Decorated lambda handler - """ + **Logger class - Same UX** - # If handler is None we've been called with parameters - # Return a partial function with args filled - if lambda_handler is None: - logger.debug("Decorator called with parameters") - return functools.partial(logger_inject_lambda_context, log_event=log_event) + from aws_lambda_powertools.logging import Logger + logger = Logger(service="payment") # same 
env var still applies - log_event_env_option = str(os.getenv("POWERTOOLS_LOGGER_LOG_EVENT", "false")) - log_event = strtobool(log_event_env_option) or log_event + """ + raise DeprecationWarning("Use Logger instead - This method will be removed when GA") - @functools.wraps(lambda_handler) - def decorate(event, context): - if log_event: - logger.debug("Event received") - logger.info(event) - lambda_context = build_lambda_context_model(context) - cold_start = __is_cold_start() +def logger_inject_lambda_context( + lambda_handler: Callable[[Dict, Any], Any] = None, log_event: bool = False +) -> DeprecationWarning: + """DEPRECATED - logger_setup(cold_start=cold_start, **lambda_context.__dict__) + This will be removed when GA - Use `aws_lambda_powertools.logging.logger.Logger` instead - return lambda_handler(event, context) + Example + ------- + **Logger class - Same UX** - return decorate + from aws_lambda_powertools.logging import Logger + logger = Logger(service="payment") # same env var still applies + @logger.inject_lambda_context + def handler(evt, ctx): + pass + """ + raise DeprecationWarning("Use Logger instead - This method will be removed when GA") -def __is_cold_start() -> str: +def _is_cold_start() -> str: """Verifies whether is cold start and return a string used for struct logging Returns @@ -278,3 +273,231 @@ def __build_dimensions(**dimensions) -> str: dimension = ",".join(dimensions_list) return dimension + + +class Logger(logging.Logger): + """Creates and setups a logger to format statements in JSON. + + Includes service name and any additional key=value into logs + It also accepts both service name or level explicitly via env vars + + Environment variables + --------------------- + POWERTOOLS_SERVICE_NAME : str + service name + LOG_LEVEL: str, int + logging level (e.g. 
INFO, DEBUG) + POWERTOOLS_LOGGER_SAMPLE_RATE: float + sampling rate ranging from 0 to 1, 1 being 100% sampling + + Parameters + ---------- + service : str, optional + service name to be appended in logs, by default "service_undefined" + level : str, optional + logging.level, by default "INFO" + sample_rate: float, optional + sample rate for debug calls within execution context defaults to 0.0 + stream: sys.stdout, optional + valid output for a logging stream, by default sys.stdout + + Example + ------- + **Setups structured logging in JSON for Lambda functions with explicit service name** + + >>> from aws_lambda_powertools.logging import Logger + >>> logger = Logger(service="payment") + >>> + >>> def handler(event, context): + logger.info("Hello") + + **Setups structured logging in JSON for Lambda functions using env vars** + + $ export POWERTOOLS_SERVICE_NAME="payment" + $ export POWERTOOLS_LOGGER_SAMPLE_RATE=0.01 # 1% debug sampling + >>> from aws_lambda_powertools.logging import Logger + >>> logger = Logger() + >>> + >>> def handler(event, context): + logger.info("Hello") + + **Append payment_id to previously setup structured log logger** + + >>> from aws_lambda_powertools.logging import Logger + >>> logger = Logger(service="payment") + >>> + >>> def handler(event, context): + logger.structure_logs(append=True, payment_id=event["payment_id"]) + logger.info("Hello") + + Parameters + ---------- + logging : logging.Logger + Inherits Logger + service: str + name of the service to create the logger for, "service_undefined" by default + level: str, int + log level, INFO by default + sampling_rate: float + debug log sampling rate, 0.0 by default + stream: sys.stdout + log stream, stdout by default + + Raises + ------ + InvalidLoggerSamplingRateError + When sampling rate provided is not a float + """ + + def __init__( + self, + service: str = None, + level: Union[str, int] = None, + sampling_rate: float = None, + stream: sys.stdout = None, + **kwargs, + ): + self.service
= service or os.getenv("POWERTOOLS_SERVICE_NAME") or "service_undefined" + self.sampling_rate = sampling_rate or os.getenv("POWERTOOLS_LOGGER_SAMPLE_RATE") or 0.0 + self.log_level = level or os.getenv("LOG_LEVEL") or logging.INFO + self.handler = logging.StreamHandler(stream) if stream is not None else logging.StreamHandler(sys.stdout) + self._default_log_keys = {"service": self.service, "sampling_rate": self.sampling_rate} + self.log_keys = copy.copy(self._default_log_keys) + + super().__init__(name=self.service, level=self.log_level) + + try: + if self.sampling_rate and random.random() <= float(self.sampling_rate): + logger.debug("Setting log level to Debug due to sampling rate") + self.log_level = logging.DEBUG + except ValueError: + raise InvalidLoggerSamplingRateError( + f"Expected a float value ranging 0 to 1, but received {self.sampling_rate} instead. Please review POWERTOOLS_LOGGER_SAMPLE_RATE environment variable." # noqa E501 + ) + + self.setLevel(self.log_level) + self.structure_logs(**kwargs) + self.addHandler(self.handler) + + def inject_lambda_context(self, lambda_handler: Callable[[Dict, Any], Any] = None, log_event: bool = False): + """Decorator to capture Lambda contextual info and inject into struct logging + + Parameters + ---------- + log_event : bool, optional + Instructs logger to log Lambda Event, by default False + + Environment variables + --------------------- + POWERTOOLS_LOGGER_LOG_EVENT : str + instruct logger to log Lambda Event (e.g. 
`"true", "True", "TRUE"`) + + Example + ------- + **Captures Lambda contextual runtime info (e.g memory, arn, req_id)** + + from aws_lambda_powertools.logging import Logger + + logger = Logger(service="payment") + + @logger.inject_lambda_context + def handler(event, context): + logger.info("Hello") + + **Captures Lambda contextual runtime info and logs incoming request** + + from aws_lambda_powertools.logging import Logger + + logger = Logger(service="payment") + + @logger.inject_lambda_context(log_event=True) + def handler(event, context): + logger.info("Hello") + + Returns + ------- + decorate : Callable + Decorated lambda handler + """ + + # If handler is None we've been called with parameters + # Return a partial function with args filled + if lambda_handler is None: + logger.debug("Decorator called with parameters") + return functools.partial(self.inject_lambda_context, log_event=log_event) + + log_event_env_option = str(os.getenv("POWERTOOLS_LOGGER_LOG_EVENT", "false")) + log_event = strtobool(log_event_env_option) or log_event + + @functools.wraps(lambda_handler) + def decorate(event, context): + if log_event: + logger.debug("Event received") + self.info(event) + + lambda_context = build_lambda_context_model(context) + cold_start = _is_cold_start() + + self.structure_logs(cold_start=cold_start, **lambda_context.__dict__) + return lambda_handler(event, context) + + return decorate + + def structure_logs(self, append: bool = False, **kwargs): + """Sets logging formatting to JSON. + + Optionally, it can append keyword arguments + to an existing logger so it is available + across future log statements. + + Last keyword argument and value wins if duplicated. 
+ + Parameters + ---------- + append : bool, optional + if True, merge new keys with previously appended ones, by default False + """ + self.handler.setFormatter(JsonFormatter(**self._default_log_keys, **kwargs)) + + if append: + new_keys = {**self.log_keys, **kwargs} + self.handler.setFormatter(JsonFormatter(**new_keys)) + + self.log_keys.update(**kwargs) + + +def set_package_logger( + level: Union[str, int] = logging.DEBUG, stream: sys.stdout = None, formatter: logging.Formatter = None +): + """Set an additional stream handler, formatter, and log level for aws_lambda_powertools package logger. + + **Package log by default is suppressed (NullHandler), this should only be used for debugging. + This is separate from application Logger class utility** + + Example + ------- + **Enables debug logging for AWS Lambda Powertools package** + + >>> from aws_lambda_powertools.logging.logger import set_package_logger + >>> set_package_logger() + + Parameters + ---------- + level: str, int + log level, DEBUG by default + stream: sys.stdout + log stream, stdout by default + formatter: logging.Formatter + log formatter, "%(asctime)s %(name)s [%(levelname)s] %(message)s" by default + """ + if formatter is None: + formatter = logging.Formatter("%(asctime)s %(name)s [%(levelname)s] %(message)s") + + if stream is None: + stream = sys.stdout + + logger = logging.getLogger("aws_lambda_powertools") + logger.setLevel(level) + handler = logging.StreamHandler(stream) + handler.setFormatter(formatter) + logger.addHandler(handler) diff --git a/python/aws_lambda_powertools/metrics/base.py b/python/aws_lambda_powertools/metrics/base.py index 448bfc37e02..8d561abf71d 100644 --- a/python/aws_lambda_powertools/metrics/base.py +++ b/python/aws_lambda_powertools/metrics/base.py @@ -13,12 +13,13 @@ from .exceptions import MetricUnitError, MetricValueError, SchemaValidationError, UniqueNamespaceError logger = logging.getLogger(__name__) -logger.setLevel(os.getenv("LOG_LEVEL", "INFO")) _schema_path = pathlib.Path(__file__).parent /
"./schema.json" with _schema_path.open() as f: CLOUDWATCH_EMF_SCHEMA = json.load(f) +MAX_METRICS = 100 + class MetricManager: """Base class for metric functionality (namespace, metric, dimension, serialization) @@ -101,12 +102,6 @@ def add_metric(self, name: str, unit: MetricUnit, value: Union[float, int]): MetricUnitError When metric unit is not supported by CloudWatch """ - if len(self.metric_set) == 100: - logger.debug("Exceeded maximum of 100 metrics - Publishing existing metric set") - metrics = self.serialize_metric_set() - print(json.dumps(metrics)) - self.metric_set = {} - if not isinstance(value, numbers.Number): raise MetricValueError(f"{value} is not a valid number") @@ -121,6 +116,12 @@ def add_metric(self, name: str, unit: MetricUnit, value: Union[float, int]): logger.debug(f"Adding metric: {name} with {metric}") self.metric_set[name] = metric + if len(self.metric_set) == MAX_METRICS: + logger.debug(f"Exceeded maximum of {MAX_METRICS} metrics - Publishing existing metric set") + metrics = self.serialize_metric_set() + print(json.dumps(metrics)) + self.metric_set = {} + def serialize_metric_set(self, metrics: Dict = None, dimensions: Dict = None) -> Dict: """Serializes metric and dimensions set @@ -149,10 +150,10 @@ def serialize_metric_set(self, metrics: Dict = None, dimensions: Dict = None) -> SchemaValidationError Raised when serialization fail schema validation """ - if metrics is None: + if metrics is None: # pragma: no cover metrics = self.metric_set - if dimensions is None: + if dimensions is None: # pragma: no cover dimensions = self.dimension_set logger.debug("Serializing...", {"metrics": metrics, "dimensions": dimensions}) @@ -164,11 +165,10 @@ def serialize_metric_set(self, metrics: Dict = None, dimensions: Dict = None) -> for metric_name in metrics: metric: str = metrics[metric_name] metric_value: int = metric.get("Value", 0) - metric_unit: str = metric.get("Unit") + metric_unit: str = metric.get("Unit", "") - if metric_value > 0 and 
metric_unit is not None: - metric_names_unit.append({"Name": metric_name, "Unit": metric["Unit"]}) - metric_set.update({metric_name: metric["Value"]}) + metric_names_unit.append({"Name": metric_name, "Unit": metric_unit}) + metric_set.update({metric_name: metric_value}) metrics_definition = { "CloudWatchMetrics": [ diff --git a/python/aws_lambda_powertools/metrics/metric.py b/python/aws_lambda_powertools/metrics/metric.py index 85ce527e576..05a8d4ce76b 100644 --- a/python/aws_lambda_powertools/metrics/metric.py +++ b/python/aws_lambda_powertools/metrics/metric.py @@ -1,6 +1,5 @@ import json import logging -import os from contextlib import contextmanager from typing import Dict @@ -8,7 +7,6 @@ from aws_lambda_powertools.metrics.base import MetricManager logger = logging.getLogger(__name__) -logger.setLevel(os.getenv("LOG_LEVEL", "INFO")) class SingleMetric(MetricManager): @@ -113,8 +111,6 @@ def single_metric(name: str, unit: MetricUnit, value: float): yield metric logger.debug("Serializing single metric") metric_set: Dict = metric.serialize_metric_set() - except Exception as e: - raise e finally: logger.debug("Publishing single metric", {"metric": metric}) print(json.dumps(metric_set)) diff --git a/python/aws_lambda_powertools/metrics/metrics.py b/python/aws_lambda_powertools/metrics/metrics.py index 24d8f2b93a1..390356b1461 100644 --- a/python/aws_lambda_powertools/metrics/metrics.py +++ b/python/aws_lambda_powertools/metrics/metrics.py @@ -1,13 +1,11 @@ import functools import json import logging -import os from typing import Any, Callable from aws_lambda_powertools.metrics.base import MetricManager logger = logging.getLogger(__name__) -logger.setLevel(os.getenv("LOG_LEVEL", "INFO")) class Metrics(MetricManager): @@ -101,8 +99,6 @@ def handler(event, context) def decorate(*args, **kwargs): try: response = lambda_handler(*args, **kwargs) - except Exception as e: - raise e finally: metrics = self.serialize_metric_set() logger.debug("Publishing metrics", {"metrics": 
metrics}) diff --git a/python/aws_lambda_powertools/middleware_factory/exceptions.py b/python/aws_lambda_powertools/middleware_factory/exceptions.py new file mode 100644 index 00000000000..55d5b2342bb --- /dev/null +++ b/python/aws_lambda_powertools/middleware_factory/exceptions.py @@ -0,0 +1,2 @@ +class MiddlewareInvalidArgumentError(Exception): + pass diff --git a/python/aws_lambda_powertools/middleware_factory/factory.py b/python/aws_lambda_powertools/middleware_factory/factory.py index 4dcab2adf33..43c8e5ad9fa 100644 --- a/python/aws_lambda_powertools/middleware_factory/factory.py +++ b/python/aws_lambda_powertools/middleware_factory/factory.py @@ -6,9 +6,9 @@ from typing import Callable from ..tracing import Tracer +from .exceptions import MiddlewareInvalidArgumentError logger = logging.getLogger(__name__) -logger.setLevel(os.getenv("LOG_LEVEL", "INFO")) def lambda_handler_decorator(decorator: Callable = None, trace_execution=False): @@ -97,7 +97,7 @@ def lambda_handler(event, context): Raises ------ - TypeError + MiddlewareInvalidArgumentError When middleware receives non keyword=arguments """ @@ -113,7 +113,8 @@ def final_decorator(func: Callable = None, **kwargs): return functools.partial(final_decorator, **kwargs) if not inspect.isfunction(func): - raise TypeError( + # @custom_middleware(True) vs @custom_middleware(log_event=True) + raise MiddlewareInvalidArgumentError( f"Only keyword arguments is supported for middlewares: {decorator.__qualname__} received {func}" ) @@ -129,9 +130,9 @@ def wrapper(event, context): else: response = middleware() return response - except Exception as err: - logger.error(f"Caught exception in {decorator.__qualname__}") - raise err + except Exception: + logger.exception(f"Caught exception in {decorator.__qualname__}") + raise return wrapper diff --git a/python/aws_lambda_powertools/tracing/tracer.py b/python/aws_lambda_powertools/tracing/tracer.py index 0f3e3cff8bb..19cc319492f 100644 --- 
a/python/aws_lambda_powertools/tracing/tracer.py +++ b/python/aws_lambda_powertools/tracing/tracer.py @@ -9,7 +9,6 @@ is_cold_start = True logger = logging.getLogger(__name__) -logger.setLevel(os.getenv("LOG_LEVEL", "INFO")) class Tracer: @@ -175,9 +174,9 @@ def decorate(event, context): if response: self.put_metadata("lambda handler response", response) except Exception as err: - logger.debug("Exception received from lambda handler") + logger.exception("Exception received from lambda handler", exc_info=True) self.put_metadata(f"{self.service}_error", err) - raise err + raise finally: self.end_subsegment() @@ -223,9 +222,9 @@ def decorate(*args, **kwargs): if response is not None: self.put_metadata(f"{method_name} response", response) except Exception as err: - logger.debug(f"Exception received from '{method_name}'' method") + logger.exception(f"Exception received from '{method_name}'' method", exc_info=True) self.put_metadata(f"{method_name} error", err) - raise err + raise finally: self.end_subsegment() @@ -344,17 +343,10 @@ def patch(self): """Patch modules for instrumentation""" logger.debug("Patching modules...") - is_lambda_emulator = os.getenv("AWS_SAM_LOCAL", False) - is_lambda_env = os.getenv("LAMBDA_TASK_ROOT", False) - if self.disabled: logger.debug("Tracing has been disabled, aborting patch") return - if is_lambda_emulator or is_lambda_env: - logger.debug("Running under SAM CLI env or not in Lambda; aborting patch") - return - patch_all() # pragma: no cover def __disable_tracing_provider(self): @@ -369,7 +361,7 @@ def __is_trace_disabled(self) -> bool: Tracing is automatically disabled in the following conditions: 1. Explicitly disabled via `TRACE_DISABLED` environment variable - 2. Running in Lambda Emulators where X-Ray Daemon will not be listening + 2. Running in Lambda Emulators, or locally where X-Ray Daemon will not be listening 3. 
Explicitly disabled via constructor e.g `Tracer(disabled=True)` Returns @@ -377,7 +369,7 @@ def __is_trace_disabled(self) -> bool: bool """ logger.debug("Verifying whether Tracing has been disabled") - is_lambda_emulator = os.getenv("AWS_SAM_LOCAL") + is_lambda_sam_cli = os.getenv("AWS_SAM_LOCAL") env_option = str(os.getenv("POWERTOOLS_TRACE_DISABLED", "false")) disabled_env = strtobool(env_option) @@ -385,9 +377,9 @@ def __is_trace_disabled(self) -> bool: logger.debug("Tracing has been disabled via env var POWERTOOLS_TRACE_DISABLED") return disabled_env - if is_lambda_emulator: - logger.debug("Running under SAM CLI env; Tracing has been disabled") - return is_lambda_emulator + if is_lambda_sam_cli: + logger.debug("Running under SAM CLI env or not in Lambda env; disabling Tracing") + return True return False diff --git a/python/pyproject.toml b/python/pyproject.toml index 3360e7fd7ed..8b21c9dce86 100644 --- a/python/pyproject.toml +++ b/python/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "aws_lambda_powertools" -version = "0.7.0" +version = "0.8.0" description = "Python utilities for AWS Lambda functions including but not limited to tracing, logging and custom metric" authors = ["Amazon Web Services"] classifiers=[ diff --git a/python/tests/functional/test_aws_lambda_logging.py b/python/tests/functional/test_aws_lambda_logging.py index 6d5efd492a7..4a4513134dc 100644 --- a/python/tests/functional/test_aws_lambda_logging.py +++ b/python/tests/functional/test_aws_lambda_logging.py @@ -3,51 +3,60 @@ import json import logging -from pytest import fixture, mark, yield_fixture +import pytest -from aws_lambda_powertools.logging.aws_lambda_logging import setup +from aws_lambda_powertools.logging.logger import Logger -@fixture +@pytest.fixture def stdout(): return io.StringIO() -@fixture +@pytest.fixture def handler(stdout): return logging.StreamHandler(stdout) -@fixture +@pytest.fixture def logger(): return logging.getLogger(__name__) -@yield_fixture 
+@pytest.fixture def root_logger(handler): logging.root.addHandler(handler) yield logging.root logging.root.removeHandler(handler) -@mark.parametrize("level", ["DEBUG", "WARNING", "ERROR", "INFO", "CRITICAL"]) -def test_setup_with_valid_log_levels(root_logger, logger, stdout, level): - setup(level, request_id="request id!", another="value") +@pytest.mark.parametrize("level", ["DEBUG", "WARNING", "ERROR", "INFO", "CRITICAL"]) +def test_setup_with_valid_log_levels(root_logger, stdout, level): + logger = Logger(level=level, stream=stdout, request_id="request id!", another="value") + msg = "This is a test" + log_command = { + "INFO": logger.info, + "ERROR": logger.error, + "WARNING": logger.warning, + "DEBUG": logger.debug, + "CRITICAL": logger.critical, + } - logger.critical("This is a test") + log_message = log_command[level] + log_message(msg) - log_dict = json.loads(stdout.getvalue()) + log_dict = json.loads(stdout.getvalue().strip()) check_log_dict(log_dict) - assert "CRITICAL" == log_dict["level"] + assert level == log_dict["level"] assert "This is a test" == log_dict["message"] assert "request id!" 
== log_dict["request_id"] assert "exception" not in log_dict -def test_logging_exception_traceback(root_logger, logger, stdout): - setup("DEBUG", request_id="request id!", another="value") +def test_logging_exception_traceback(root_logger, stdout): + logger = Logger(level="DEBUG", stream=stdout, request_id="request id!", another="value") try: raise Exception("Boom") @@ -61,11 +70,9 @@ def test_logging_exception_traceback(root_logger, logger, stdout): def test_setup_with_invalid_log_level(root_logger, logger, stdout): - setup("not a valid log level") # writes a log event - - log_dict = json.loads(stdout.getvalue()) - - check_log_dict(log_dict) + with pytest.raises(ValueError) as e: + Logger(level="not a valid log level") + assert "Unknown level" in e.value.args[0] def check_log_dict(log_dict): @@ -76,11 +83,11 @@ def check_log_dict(log_dict): def test_setup_with_bad_level_does_not_fail(): - setup("DBGG", request_id="request id!", another="value") + Logger("DBGG", request_id="request id!", another="value") -def test_with_dict_message(root_logger, logger, stdout): - setup("DEBUG", another="value") +def test_with_dict_message(root_logger, stdout): + logger = Logger(level="DEBUG", stream=stdout) msg = {"x": "isx"} logger.critical(msg) @@ -90,25 +97,25 @@ def test_with_dict_message(root_logger, logger, stdout): assert msg == log_dict["message"] -def test_with_json_message(root_logger, logger, stdout): - setup("DEBUG", another="value") +def test_with_json_message(root_logger, stdout): + logger = Logger(stream=stdout) msg = {"x": "isx"} - logger.critical(json.dumps(msg)) + logger.info(json.dumps(msg)) log_dict = json.loads(stdout.getvalue()) assert msg == log_dict["message"] -def test_with_unserialisable_value_in_message(root_logger, logger, stdout): - setup("DEBUG", another="value") +def test_with_unserialisable_value_in_message(root_logger, stdout): + logger = Logger(level="DEBUG", stream=stdout) class X: pass msg = {"x": X()} - logger.critical(msg) + logger.debug(msg) 
log_dict = json.loads(stdout.getvalue()) diff --git a/python/tests/functional/test_logger.py b/python/tests/functional/test_logger.py index db8a09e8d57..c1a882f3320 100644 --- a/python/tests/functional/test_logger.py +++ b/python/tests/functional/test_logger.py @@ -5,7 +5,10 @@ import pytest -from aws_lambda_powertools.logging import MetricUnit, log_metric, logger_inject_lambda_context, logger_setup +from aws_lambda_powertools.logging import Logger, MetricUnit, log_metric, logger_inject_lambda_context, logger_setup +from aws_lambda_powertools.logging.exceptions import InvalidLoggerSamplingRateError +from aws_lambda_powertools.logging.logger import JsonFormatter, set_package_logger +from aws_lambda_powertools.tracing import Tracer @pytest.fixture @@ -42,40 +45,40 @@ def test_setup_service_name(root_logger, stdout): # WHEN logger is setup # THEN service field should be equals service given service_name = "payment" - logger = logger_setup(service=service_name) + logger = Logger(service=service_name, stream=stdout) + logger.info("Hello") log = json.loads(stdout.getvalue()) assert service_name == log["service"] -def test_setup_no_service_name(root_logger, stdout): +def test_setup_no_service_name(stdout): # GIVEN no service is explicitly defined # WHEN logger is setup # THEN service field should be "service_undefined" - logger_setup() - logger = logger_setup() + logger = Logger(stream=stdout) logger.info("Hello") log = json.loads(stdout.getvalue()) assert "service_undefined" == log["service"] -def test_setup_service_env_var(monkeypatch, root_logger, stdout): +def test_setup_service_env_var(monkeypatch, stdout): # GIVEN service is explicitly defined via POWERTOOLS_SERVICE_NAME env # WHEN logger is setup # THEN service field should be equals POWERTOOLS_SERVICE_NAME value service_name = "payment" monkeypatch.setenv("POWERTOOLS_SERVICE_NAME", service_name) - logger = logger_setup() + logger = Logger(stream=stdout) logger.info("Hello") log = json.loads(stdout.getvalue()) 
assert service_name == log["service"] -def test_setup_sampling_rate(monkeypatch, root_logger, stdout): +def test_setup_sampling_rate(monkeypatch, stdout): # GIVEN samping rate is explicitly defined via POWERTOOLS_LOGGER_SAMPLE_RATE env # WHEN logger is setup # THEN sampling rate should be equals POWERTOOLS_LOGGER_SAMPLE_RATE value and should sample debug logs @@ -84,7 +87,7 @@ def test_setup_sampling_rate(monkeypatch, root_logger, stdout): monkeypatch.setenv("POWERTOOLS_LOGGER_SAMPLE_RATE", sampling_rate) monkeypatch.setenv("LOG_LEVEL", "INFO") - logger = logger_setup() + logger = Logger(stream=stdout) logger.debug("I am being sampled") log = json.loads(stdout.getvalue()) @@ -93,7 +96,7 @@ def test_setup_sampling_rate(monkeypatch, root_logger, stdout): assert "I am being sampled" == log["message"] -def test_inject_lambda_context(root_logger, stdout, lambda_context): +def test_inject_lambda_context(lambda_context, stdout): # GIVEN a lambda function is decorated with logger # WHEN logger is setup # THEN lambda contextual info should always be in the logs @@ -104,9 +107,9 @@ def test_inject_lambda_context(root_logger, stdout, lambda_context): "function_request_id", ) - logger = logger_setup() + logger = Logger(stream=stdout) - @logger_inject_lambda_context + @logger.inject_lambda_context def handler(event, context): logger.info("Hello") @@ -118,15 +121,16 @@ def handler(event, context): assert key in log -def test_inject_lambda_context_log_event_request(root_logger, stdout, lambda_context): +def test_inject_lambda_context_log_event_request(lambda_context, stdout): # GIVEN a lambda function is decorated with logger instructed to log event # WHEN logger is setup # THEN logger should log event received from Lambda lambda_event = {"greeting": "hello"} - logger = logger_setup() + logger = Logger(stream=stdout) - @logger_inject_lambda_context(log_event=True) + @logger.inject_lambda_context(log_event=True) + # @logger.inject_lambda_context(log_event=True) def handler(event, 
context): logger.info("Hello") @@ -134,13 +138,12 @@ def handler(event, context): # Given that our string buffer has many log statements separated by newline \n # We need to clean it before we can assert on - stdout.seek(0) - logs = [json.loads(line.strip()) for line in stdout.readlines()] + logs = [json.loads(line.strip()) for line in stdout.getvalue().split("\n") if line] logged_event, _ = logs assert "greeting" in logged_event["message"] -def test_inject_lambda_context_log_event_request_env_var(monkeypatch, root_logger, stdout, lambda_context): +def test_inject_lambda_context_log_event_request_env_var(monkeypatch, lambda_context, stdout): # GIVEN a lambda function is decorated with logger instructed to log event # via POWERTOOLS_LOGGER_LOG_EVENT env # WHEN logger is setup @@ -148,9 +151,9 @@ def test_inject_lambda_context_log_event_request_env_var(monkeypatch, root_logge lambda_event = {"greeting": "hello"} monkeypatch.setenv("POWERTOOLS_LOGGER_LOG_EVENT", "true") - logger = logger_setup() + logger = Logger(stream=stdout) - @logger_inject_lambda_context + @logger.inject_lambda_context def handler(event, context): logger.info("Hello") @@ -158,8 +161,7 @@ def handler(event, context): # Given that our string buffer has many log statements separated by newline \n # We need to clean it before we can assert on - stdout.seek(0) - logs = [json.loads(line.strip()) for line in stdout.readlines()] + logs = [json.loads(line.strip()) for line in stdout.getvalue().split("\n") if line] event = {} for log in logs: @@ -169,15 +171,15 @@ def handler(event, context): assert event == lambda_event -def test_inject_lambda_context_log_no_request_by_default(monkeypatch, root_logger, stdout, lambda_context): +def test_inject_lambda_context_log_no_request_by_default(monkeypatch, lambda_context, stdout): # GIVEN a lambda function is decorated with logger # WHEN logger is setup # THEN logger should not log event received by lambda handler lambda_event = {"greeting": "hello"} - logger = 
logger_setup() + logger = Logger(stream=stdout) - @logger_inject_lambda_context + @logger.inject_lambda_context def handler(event, context): logger.info("Hello") @@ -185,8 +187,7 @@ def handler(event, context): # Given that our string buffer has many log statements separated by newline \n # We need to clean it before we can assert on - stdout.seek(0) - logs = [json.loads(line.strip()) for line in stdout.readlines()] + logs = [json.loads(line.strip()) for line in stdout.getvalue().split("\n") if line] event = {} for log in logs: @@ -196,7 +197,7 @@ def handler(event, context): assert event != lambda_event -def test_inject_lambda_cold_start(root_logger, stdout, lambda_context): +def test_inject_lambda_cold_start(lambda_context, stdout): # GIVEN a lambda function is decorated with logger, and called twice # WHEN logger is setup # THEN cold_start key should only be true in the first call @@ -208,12 +209,12 @@ def test_inject_lambda_cold_start(root_logger, stdout, lambda_context): # # since Lambda will only import our logger lib once per concurrent execution logger.is_cold_start = True - logger = logger_setup() + logger = Logger(stream=stdout) def custom_method(): logger.info("Hello from method") - @logger_inject_lambda_context + @logger.inject_lambda_context def handler(event, context): custom_method() logger.info("Hello") @@ -223,8 +224,7 @@ def handler(event, context): # Given that our string buffer has many log statements separated by newline \n # We need to clean it before we can assert on - stdout.seek(0) - logs = [json.loads(line.strip()) for line in stdout.readlines()] + logs = [json.loads(line.strip()) for line in stdout.getvalue().split("\n") if line] first_log, second_log, third_log, fourth_log = logs # First execution @@ -298,15 +298,57 @@ def test_log_metric_partially_correct_args(capsys, invalid_input, expected): assert captured.out == expected +def test_package_logger(capsys): + + set_package_logger() + Tracer(disabled=True) + output = capsys.readouterr() 
+ + assert "Tracing has been disabled" in output.out + + +def test_package_logger_format(stdout, capsys): + set_package_logger(stream=stdout, formatter=JsonFormatter(formatter="test")) + Tracer(disabled=True) + output = json.loads(stdout.getvalue().split("\n")[0]) + + assert "test" in output["formatter"] + + @pytest.mark.parametrize( "invalid_input,expected", [({"unit": "Blah"}, ValueError), ({"unit": None}, ValueError), ({}, TypeError)], ids=["invalid metric unit as str", "unit as None", "missing required unit"], ) -def test_log_metric_invalid_unit(invalid_input, expected): +def test_log_metric_invalid_unit(capsys, invalid_input, expected): # GIVEN invalid units are provided # WHEN log_metric is called # THEN ValueError exception should be raised with pytest.raises(expected): log_metric(name="test_metric", namespace="DemoApp", **invalid_input) + + +def test_logger_setup_deprecated(): + # Should be removed when GA + with pytest.raises(DeprecationWarning): + logger_setup() + + +def test_logger_inject_lambda_context_deprecated(): + # Should be removed when GA + with pytest.raises(DeprecationWarning): + logger_inject_lambda_context() + + +def test_logger_append_duplicated(stdout): + logger = Logger(stream=stdout, request_id="value") + logger.structure_logs(append=True, request_id="new_value") + logger.info("log") + log = json.loads(stdout.getvalue()) + assert "new_value" == log["request_id"] + + +def test_logger_invalid_sampling_rate(): + with pytest.raises(InvalidLoggerSamplingRateError): + Logger(sampling_rate="TEST") diff --git a/python/tests/functional/test_metrics.py b/python/tests/functional/test_metrics.py index 0feaf3303ff..703c8788ac1 100644 --- a/python/tests/functional/test_metrics.py +++ b/python/tests/functional/test_metrics.py @@ -57,15 +57,16 @@ def a_hundred_metrics() -> List[Dict[str, str]]: def serialize_metrics(metrics: List[Dict], dimensions: List[Dict], namespace: Dict) -> Dict: """ Helper function to build EMF object from a list of metrics, 
dimensions """ - my_metrics = MetricManager() - for metric in metrics: - my_metrics.add_metric(**metric) - + my_metrics = Metrics() for dimension in dimensions: my_metrics.add_dimension(**dimension) my_metrics.add_namespace(**namespace) - return my_metrics.serialize_metric_set() + for metric in metrics: + my_metrics.add_metric(**metric) + + if len(metrics) != 100: + return my_metrics.serialize_metric_set() def serialize_single_metric(metric: Dict, dimension: Dict, namespace: Dict) -> Dict: @@ -173,10 +174,10 @@ def test_namespace_env_var(monkeypatch, capsys, metric, dimension, namespace): assert expected["_aws"] == output["_aws"] -def test_metrics_spillover(capsys, metric, dimension, namespace, a_hundred_metrics): +def test_metrics_spillover(monkeypatch, capsys, metric, dimension, namespace, a_hundred_metrics): my_metrics = Metrics() - my_metrics.add_namespace(**namespace) my_metrics.add_dimension(**dimension) + my_metrics.add_namespace(**namespace) for _metric in a_hundred_metrics: my_metrics.add_metric(**_metric) @@ -194,9 +195,9 @@ def lambda_handler(evt, handler): single_metric = json.loads(single_metric) expected_single_metric = serialize_single_metric(metric=metric, dimension=dimension, namespace=namespace) - expected_spillover_metrics = serialize_metrics( - metrics=a_hundred_metrics, dimensions=[dimension], namespace=namespace - ) + + serialize_metrics(metrics=a_hundred_metrics, dimensions=[dimension], namespace=namespace) + expected_spillover_metrics = json.loads(capsys.readouterr().out.strip()) remove_timestamp(metrics=[spillover_metrics, expected_spillover_metrics, single_metric, expected_single_metric]) @@ -264,3 +265,41 @@ def test_exceed_number_of_dimensions(metric, namespace): my_metric.add_namespace(**namespace) for dimension in dimensions: my_metric.add_dimension(**dimension) + + +def test_log_metrics_error_propagation(capsys, metric, dimension, namespace): + # GIVEN Metrics are serialized after handler execution + # WHEN If an error occurs and 
metrics have been added + # THEN we should log metrics and propagate exception up + my_metrics = Metrics() + + my_metrics.add_metric(**metric) + my_metrics.add_dimension(**dimension) + my_metrics.add_namespace(**namespace) + + @my_metrics.log_metrics + def lambda_handler(evt, context): + raise ValueError("Bubble up") + + with pytest.raises(ValueError): + lambda_handler({}, {}) + + output = json.loads(capsys.readouterr().out.strip()) + expected = serialize_single_metric(metric=metric, dimension=dimension, namespace=namespace) + + remove_timestamp(metrics=[output, expected]) # Timestamp will always be different + assert expected["_aws"] == output["_aws"] + + +def test_log_no_metrics_error_propagation(capsys, metric, dimension, namespace): + # GIVEN Metrics are serialized after handler execution + # WHEN If an error occurs and no metrics have been added + # THEN we should propagate exception up and raise SchemaValidationError + my_metrics = Metrics() + + @my_metrics.log_metrics + def lambda_handler(evt, context): + raise ValueError("Bubble up") + + with pytest.raises(SchemaValidationError): + lambda_handler({}, {}) diff --git a/python/tests/functional/test_utils.py b/python/tests/functional/test_middleware_factory.py similarity index 82% rename from python/tests/functional/test_utils.py rename to python/tests/functional/test_middleware_factory.py index 141acf9d96f..ee8078f801a 100644 --- a/python/tests/functional/test_utils.py +++ b/python/tests/functional/test_middleware_factory.py @@ -4,6 +4,7 @@ import pytest from aws_lambda_powertools.middleware_factory import lambda_handler_decorator +from aws_lambda_powertools.middleware_factory.exceptions import MiddlewareInvalidArgumentError @pytest.fixture @@ -115,8 +116,26 @@ def log_event(handler, event, context, log_event=False): print(json.dumps(event)) return handler(event, context) - with pytest.raises(TypeError): + with pytest.raises(MiddlewareInvalidArgumentError): @log_event(True) def lambda_handler(evt, ctx): return 
True + + +def test_factory_middleware_exception_propagation(say_bye_middleware, say_hi_middleware): + class CustomMiddlewareException(Exception): + pass + + @lambda_handler_decorator + def raise_middleware(handler, evt, ctx): + raise CustomMiddlewareException("Raise middleware exception") + + @say_bye_middleware + @raise_middleware + @say_hi_middleware + def lambda_handler(evt, ctx): + return "hello world" + + with pytest.raises(CustomMiddlewareException): + lambda_handler({}, {}) diff --git a/python/tests/unit/test_tracing.py b/python/tests/unit/test_tracing.py index a7b98389e33..f8f43de0bf4 100644 --- a/python/tests/unit/test_tracing.py +++ b/python/tests/unit/test_tracing.py @@ -153,3 +153,31 @@ def test_tracer_no_autopatch(patch_mock): # THEN tracer should not patch any module Tracer(disabled=True, auto_patch=False) assert patch_mock.call_count == 0 + + +def test_tracer_lambda_handler_empty_response_metadata(mocker, xray_stub): + put_metadata_mock = mocker.MagicMock() + xray_provider = xray_stub(put_metadata_mock=put_metadata_mock) + tracer = Tracer(provider=xray_provider) + + @tracer.capture_lambda_handler + def handler(event, context): + return + + handler({}, mocker.MagicMock()) + + assert put_metadata_mock.call_count == 0 + + +def test_tracer_method_empty_response_metadata(mocker, xray_stub): + put_metadata_mock = mocker.MagicMock() + xray_provider = xray_stub(put_metadata_mock=put_metadata_mock) + tracer = Tracer(provider=xray_provider) + + @tracer.capture_method + def greeting(name, message): + return + + greeting(name="Foo", message="Bar") + + assert put_metadata_mock.call_count == 0 From 5729b6da762cbcd85a67f7d4b48f29e16762c107 Mon Sep 17 00:00:00 2001 From: heitorlessa Date: Fri, 24 Apr 2020 17:48:54 +0100 Subject: [PATCH 13/21] chore: bump example to use 0.8.0 features --- python/example/hello_world/app.py | 12 +++++++++--- python/example/template.yaml | 4 ++-- 2 files changed, 11 insertions(+), 5 deletions(-) diff --git 
a/python/example/hello_world/app.py b/python/example/hello_world/app.py index 8836b542476..e503dc8362f 100644 --- a/python/example/hello_world/app.py +++ b/python/example/hello_world/app.py @@ -2,13 +2,16 @@ import requests -from aws_lambda_powertools.logging import logger_inject_lambda_context, logger_setup +from aws_lambda_powertools.logging import Logger from aws_lambda_powertools.metrics import Metrics, MetricUnit, single_metric from aws_lambda_powertools.middleware_factory import lambda_handler_decorator from aws_lambda_powertools.tracing import Tracer +from aws_lambda_powertools.logging.logger import set_package_logger + +set_package_logger() # Enable package diagnostics (DEBUG log) tracer = Tracer() -logger = logger_setup() +logger = Logger() metrics = Metrics() _cold_start = True @@ -31,7 +34,7 @@ def my_middleware(handler, event, context, say_hello=False): @metrics.log_metrics @tracer.capture_lambda_handler @my_middleware(say_hello=True) -@logger_inject_lambda_context +@logger.inject_lambda_context def lambda_handler(event, context): """Sample pure Lambda function @@ -53,6 +56,9 @@ def lambda_handler(event, context): Return doc: https://docs.aws.amazon.com/apigateway/latest/developerguide/set-up-lambda-proxy-integrations.html """ + if "charge_id" in event: + logger.structure_logs(append=True, payment_id="charge_id") + global _cold_start if _cold_start: logger.debug("Recording cold start metric") diff --git a/python/example/template.yaml b/python/example/template.yaml index c3f53b108d1..485641e5f63 100644 --- a/python/example/template.yaml +++ b/python/example/template.yaml @@ -24,8 +24,8 @@ Resources: POWERTOOLS_TRACE_DISABLED: "false" # Explicitly disables tracing POWERTOOLS_LOGGER_LOG_EVENT: "false" # Logs incoming event POWERTOOLS_LOGGER_SAMPLE_RATE: "0" # Debug log sampling percentage - POWERTOOLS_METRICS_NAMESPACE: "Example" # Debug log sampling percentage - LOG_LEVEL: INFO # Log level (INFO, DEBUG, etc.) 
+ POWERTOOLS_METRICS_NAMESPACE: "Example" # Metric Namespace + LOG_LEVEL: INFO # Log level (INFO, DEBUG, etc.) Events: HelloWorld: Type: Api # More info about API Event Source: https://github.com/awslabs/serverless-application-model/blob/master/versions/2016-10-31.md#api From 2aac986038cf9dd1b7ad3f3b7e360aac8ff0a2e4 Mon Sep 17 00:00:00 2001 From: heitorlessa Date: Mon, 27 Apr 2020 09:50:55 +0100 Subject: [PATCH 14/21] fix: #24 correct example test and docs --- python/example/README.md | 4 +++- .../example/tests/{unit => }/test_handler.py | 22 ++++++++++++++----- 2 files changed, 19 insertions(+), 7 deletions(-) rename python/example/tests/{unit => }/test_handler.py (82%) diff --git a/python/example/README.md b/python/example/README.md index 23b21ace8cd..4fec2cad50e 100644 --- a/python/example/README.md +++ b/python/example/README.md @@ -9,7 +9,9 @@ This example uses both [tracing](https://github.com/awslabs/aws-lambda-powertool * **Deploy**: `sam deploy --guided` * **Unit Tests**: We recommend proceeding with the following commands in a virtual environment - **Install deps**: `pip install -r hello_world/requirements.txt && pip install -r requirements-dev.txt` - - **Run tests with tracing disabled**: `POWERTOOLS_TRACE_DISABLED=1 python -m pytest` + - **Run tests with tracing disabled and namespace set** + - `POWERTOOLS_METRICS_NAMESPACE="Example" POWERTOOLS_TRACE_DISABLED=1 python -m pytest` + - Both are necessary because `app.py` initializes them in the global scope, since both Tracer and Metrics will be initialized and configured during import time. For unit tests, we could always patch and explicitly config but env vars do just fine for this example. 
# Example code diff --git a/python/example/tests/unit/test_handler.py b/python/example/tests/test_handler.py similarity index 82% rename from python/example/tests/unit/test_handler.py rename to python/example/tests/test_handler.py index 91a903330ac..f5447a8a81a 100644 --- a/python/example/tests/unit/test_handler.py +++ b/python/example/tests/test_handler.py @@ -5,7 +5,6 @@ from hello_world import app - @pytest.fixture() def apigw_event(): """ Generates API GW Event""" @@ -69,13 +68,24 @@ class Context: invoked_function_arn: str = "arn:aws:lambda:eu-west-1:298026489:function:test" aws_request_id: str = "5b441b59-a550-11c8-6564-f1c833cf438c" -def test_lambda_handler(apigw_event, mocker): - - +def test_lambda_handler(apigw_event, mocker, capsys): ret = app.lambda_handler(apigw_event, Context()) data = json.loads(ret["body"]) + output = capsys.readouterr() + output = output.out.split('\n') + stdout_one_string = '\t'.join(output) + assert ret["statusCode"] == 200 - assert "message" in ret["body"] assert data["message"] == "hello world" - # assert "location" in data.dict_keys() + assert "location" in data + assert "message" in ret["body"] + + # assess custom metric was flushed in stdout/logs + assert "SuccessfulLocations" in stdout_one_string + assert "ColdStart" in stdout_one_string + assert "UniqueMetricDimension" in stdout_one_string + + # assess our custom middleware ran + assert "Logging response after Handler is called" in stdout_one_string + assert "Logging event before Handler is called" in stdout_one_string From 70d927771176a221ca0d8e6e156cedd1187c6bff Mon Sep 17 00:00:00 2001 From: Heitor Lessa Date: Fri, 1 May 2020 14:37:24 +0100 Subject: [PATCH 15/21] Bugfix: "per second" metric units (#27) * improv: test all metric units * fix: correct MetricUnit model values * fix: metric unit as string branch logic * chore: refactor * chore: test str * chore: bump version with metric unit patch Co-authored-by: heitorlessa --- python/HISTORY.md | 20 +++++++++ 
python/aws_lambda_powertools/helper/models.py | 22 ++++----- python/aws_lambda_powertools/metrics/base.py | 45 +++++++++++++++---- python/pyproject.toml | 2 +- python/tests/functional/test_metrics.py | 25 +++++++++++ 5 files changed, 94 insertions(+), 20 deletions(-) diff --git a/python/HISTORY.md b/python/HISTORY.md index 8001c9bba2c..69d85ad9f25 100644 --- a/python/HISTORY.md +++ b/python/HISTORY.md @@ -1,7 +1,27 @@ # HISTORY +## May 1st + +**0.8.1** + +* Fix metric unit casting logic if one passes plain string (value or key) +* Fix `MetricUnit` enum values for + - `BytesPerSecond` + - `KilobytesPerSecond` + - `MegabytesPerSecond` + - `GigabytesPerSecond` + - `TerabytesPerSecond` + - `BitsPerSecond` + - `KilobitsPerSecond` + - `MegabitsPerSecond` + - `GigabitsPerSecond` + - `TerabitsPerSecond` + - `CountPerSecond` + ## April 24th +**0.8.0** + * Introduces `Logger` for stuctured logging as a replacement for `logger_setup` * Introduces `Logger.inject_lambda_context` decorator as a replacement for `logger_inject_lambda_context` * Raise `DeprecationWarning` exception for both `logger_setup`, `logger_inject_lambda_context` diff --git a/python/aws_lambda_powertools/helper/models.py b/python/aws_lambda_powertools/helper/models.py index 424e5bf3aef..16a5df3669e 100644 --- a/python/aws_lambda_powertools/helper/models.py +++ b/python/aws_lambda_powertools/helper/models.py @@ -89,17 +89,17 @@ class MetricUnit(Enum): Terabits = "Terabits" Percent = "Percent" Count = "Count" - BytesPerSecond = "Second" - KilobytesPerSecond = "Second" - MegabytesPerSecond = "Second" - GigabytesPerSecond = "Second" - TerabytesPerSecond = "Second" - BitsPerSecond = "Second" - KilobitsPerSecond = "Second" - MegabitsPerSecond = "Second" - GigabitsPerSecond = "Second" - TerabitsPerSecond = "Second" - CountPerSecond = "Second" + BytesPerSecond = "Bytes/Second" + KilobytesPerSecond = "Kilobytes/Second" + MegabytesPerSecond = "Megabytes/Second" + GigabytesPerSecond = "Gigabytes/Second" + 
TerabytesPerSecond = "Terabytes/Second" + BitsPerSecond = "Bits/Second" + KilobitsPerSecond = "Kilobits/Second" + MegabitsPerSecond = "Megabits/Second" + GigabitsPerSecond = "Gigabits/Second" + TerabitsPerSecond = "Terabits/Second" + CountPerSecond = "Count/Second" def build_metric_unit_from_str(unit: Union[str, MetricUnit]) -> MetricUnit: diff --git a/python/aws_lambda_powertools/metrics/base.py b/python/aws_lambda_powertools/metrics/base.py index 8d561abf71d..38e513f19a8 100644 --- a/python/aws_lambda_powertools/metrics/base.py +++ b/python/aws_lambda_powertools/metrics/base.py @@ -53,6 +53,8 @@ def __init__(self, metric_set: Dict[str, str] = None, dimension_set: Dict = None self.metric_set = metric_set or {} self.dimension_set = dimension_set or {} self.namespace = os.getenv("POWERTOOLS_METRICS_NAMESPACE") or namespace + self._metric_units = [unit.value for unit in MetricUnit] + self._metric_unit_options = list(MetricUnit.__members__) def add_namespace(self, name: str): """Adds given metric namespace @@ -105,14 +107,8 @@ def add_metric(self, name: str, unit: MetricUnit, value: Union[float, int]): if not isinstance(value, numbers.Number): raise MetricValueError(f"{value} is not a valid number") - if not isinstance(unit, MetricUnit): - try: - unit = MetricUnit[unit] - except KeyError: - unit_options = list(MetricUnit.__members__) - raise MetricUnitError(f"Invalid metric unit '{unit}', expected either option: {unit_options}") - - metric = {"Unit": unit.value, "Value": float(value)} + unit = self.__extract_metric_unit_value(unit=unit) + metric = {"Unit": unit, "Value": float(value)} logger.debug(f"Adding metric: {name} with {metric}") self.metric_set[name] = metric @@ -205,3 +201,36 @@ def add_dimension(self, name: str, value: str): """ logger.debug(f"Adding dimension: {name}:{value}") self.dimension_set[name] = value + + def __extract_metric_unit_value(self, unit: Union[str, MetricUnit]) -> str: + """Return metric value from metric unit whether that's str or 
MetricUnit enum + + Parameters + ---------- + unit : Union[str, MetricUnit] + Metric unit + + Returns + ------- + str + Metric unit value (e.g. "Seconds", "Count/Second") + + Raises + ------ + MetricUnitError + When metric unit is not supported by CloudWatch + """ + + if isinstance(unit, str): + if unit in self._metric_unit_options: + unit = MetricUnit[unit].value + + if unit not in self._metric_units: # str correta + raise MetricUnitError( + f"Invalid metric unit '{unit}', expected either option: {self._metric_unit_options}" + ) + + if isinstance(unit, MetricUnit): + unit = unit.value + + return unit diff --git a/python/pyproject.toml b/python/pyproject.toml index 8b21c9dce86..99a3564ca22 100644 --- a/python/pyproject.toml +++ b/python/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "aws_lambda_powertools" -version = "0.8.0" +version = "0.8.1" description = "Python utilities for AWS Lambda functions including but not limited to tracing, logging and custom metric" authors = ["Amazon Web Services"] classifiers=[ diff --git a/python/tests/functional/test_metrics.py b/python/tests/functional/test_metrics.py index 703c8788ac1..7c6990668bc 100644 --- a/python/tests/functional/test_metrics.py +++ b/python/tests/functional/test_metrics.py @@ -303,3 +303,28 @@ def lambda_handler(evt, context): with pytest.raises(SchemaValidationError): lambda_handler({}, {}) + + +def test_all_metric_units_string(metric, dimension, namespace): + + # metric unit as MetricUnit key e.g. "Seconds", "BytesPerSecond" + for unit in MetricUnit: + metric["unit"] = unit.name + with single_metric(**metric) as my_metric: + my_metric.add_dimension(**dimension) + my_metric.add_namespace(**namespace) + + with pytest.raises(MetricUnitError): + metric["unit"] = "seconds" + with single_metric(**metric) as my_metric: + my_metric.add_dimension(**dimension) + my_metric.add_namespace(**namespace) + + all_metric_units = [unit.value for unit in MetricUnit] + + # metric unit as MetricUnit value e.g. 
"Seconds", "Bytes/Second" + for unit in all_metric_units: + metric["unit"] = unit + with single_metric(**metric) as my_metric: + my_metric.add_dimension(**dimension) + my_metric.add_namespace(**namespace) From 5ae77410a5bb9f1d6fa2d1c9003a608ad93d0b38 Mon Sep 17 00:00:00 2001 From: Heitor Lessa Date: Tue, 12 May 2020 14:07:33 +0100 Subject: [PATCH 16/21] Improv tracer - async support, patch, test coverage and X-Ray escape hatch (#29) * feat: use new TraceProvider * improv: update tests * improv: update docs, linting * improv: docstring readability and links * improv: remove tracer provider * fix: patch modules type * improv: use client ctx_manager for race conditions * improv: make disabling provider private again * chore: linting * fix: race condition annotation/metadata * chore: linting * feat: add async support for methods * improv: document async use cases, and edge cases * improv: upgrade xray, flex pinning * chore: linting * improv: update example for async, escape hatch * fix: add example dev deps in project * improv: add patch_modules example, formatting * improv: break down concurrent async calls example * docs: main doc clean up * docs: document async, escape hatch usage * chore: lint * docs: update example SAM template comments * chore: updates poetry lock file * improv: example to use py 3.8 * fix: AsyncMockMixin not being awaitable in 3.8 * fix: 3.8 defaulting to AsyncMock * improv: include x-ray bug for concurrent async calls * fix: address nicolas's feedback * improv: add security baseline as part of PR process * improv: enforce lower code complexity * chore: whitespace * improv: add complexity baseline * chore: bump version to 0.9.0 * chore: clean up history changes Co-authored-by: heitorlessa --- python/.flake8 | 3 +- python/HISTORY.md | 31 +- python/Makefile | 11 +- python/README.md | 218 +++--- .../aws_lambda_powertools/logging/logger.py | 4 +- .../metrics/exceptions.py | 8 + .../middleware_factory/exceptions.py | 2 + 
.../middleware_factory/factory.py | 5 +- .../aws_lambda_powertools/tracing/__init__.py | 7 +- .../aws_lambda_powertools/tracing/tracer.py | 415 +++++++----- python/bandit.baseline | 226 +++++++ python/example/hello_world/app.py | 43 +- python/example/hello_world/requirements.txt | 4 +- python/example/template.yaml | 16 +- python/example/tests/test_handler.py | 1 + python/poetry.lock | 640 +++++++++++++++--- python/pyproject.toml | 16 +- python/tests/unit/test_tracing.py | 260 +++++-- 18 files changed, 1481 insertions(+), 429 deletions(-) create mode 100644 python/bandit.baseline diff --git a/python/.flake8 b/python/.flake8 index e55ad0bdf36..d5490be7893 100644 --- a/python/.flake8 +++ b/python/.flake8 @@ -2,7 +2,7 @@ exclude = docs, .eggs, setup.py, example, .aws-sam ignore = E203, E266, W503, BLK100, W291, I004 max-line-length = 120 -max-complexity = 18 +max-complexity = 15 [isort] multi_line_output = 3 @@ -10,4 +10,3 @@ include_trailing_comma = true force_grid_wrap = 0 use_parentheses = true line_length = 120 - diff --git a/python/HISTORY.md b/python/HISTORY.md index 69d85ad9f25..b90bb22ec96 100644 --- a/python/HISTORY.md +++ b/python/HISTORY.md @@ -1,11 +1,20 @@ # HISTORY +## May 12th + +**0.9.0** + +* **Tracer**: Support for async functions in `Tracer` via `capture_method` decorator +* **Tracer**: Support for `aiohttp` via `aiohttp_trace_config` trace config +* **Tracer**: Support for patching specific modules via `patch_modules` param +* **Tracer**: Document escape hatch mechanisms via `tracer.provider` + ## May 1st **0.8.1** -* Fix metric unit casting logic if one passes plain string (value or key) -* Fix `MetricUnit` enum values for +* **Metrics**: Fix metric unit casting logic if one passes plain string (value or key) +* **Metrics: **Fix `MetricUnit` enum values for - `BytesPerSecond` - `KilobytesPerSecond` - `MegabytesPerSecond` @@ -22,35 +31,35 @@ **0.8.0** -* Introduces `Logger` for stuctured logging as a replacement for `logger_setup` -* Introduces 
`Logger.inject_lambda_context` decorator as a replacement for `logger_inject_lambda_context` -* Raise `DeprecationWarning` exception for both `logger_setup`, `logger_inject_lambda_context` +* **Logger**: Introduces `Logger` class for stuctured logging as a replacement for `logger_setup` +* **Logger**: Introduces `Logger.inject_lambda_context` decorator as a replacement for `logger_inject_lambda_context` +* **Logger**: Raise `DeprecationWarning` exception for both `logger_setup`, `logger_inject_lambda_context` ## April 20th, 2020 **0.7.0** -* Introduces Middleware Factory to build your own middleware -* Fixes Metrics dimensions not being included correctly in EMF +* **Middleware factory**: Introduces Middleware Factory to build your own middleware via `lambda_handler_decorator` +* **Metrics**: Fixes metrics dimensions not being included correctly in EMF ## April 9th, 2020 **0.6.3** -* Fix `log_metrics` decorator logic not calling the decorated function, and exception handling +* **Logger**: Fix `log_metrics` decorator logic not calling the decorated function, and exception handling ## April 8th, 2020 **0.6.1** -* Introduces Metrics middleware to utilise CloudWatch Embedded Metric Format -* Adds deprecation warning for `log_metrics` +* **Metrics**: Introduces Metrics middleware to utilise CloudWatch Embedded Metric Format +* **Metrics**: Adds deprecation warning for `log_metrics` ## February 20th, 2020 **0.5.0** -* Introduces log sampling for debug - Thanks to [Danilo's contribution](https://github.com/awslabs/aws-lambda-powertools/pull/7) +* **Logger**: Introduces log sampling for debug - Thanks to [Danilo's contribution](https://github.com/awslabs/aws-lambda-powertools/pull/7) ## November 15th, 2019 diff --git a/python/Makefile b/python/Makefile index fac2a8af791..91dca5e6583 100644 --- a/python/Makefile +++ b/python/Makefile @@ -20,7 +20,7 @@ test: coverage-html: poetry run pytest --cov-report html -pr: lint test +pr: lint test security-baseline 
complexity-baseline build: pr poetry run build @@ -31,6 +31,15 @@ docs: dev docs-dev: poetry run pdoc --http : aws_lambda_powertools +security-baseline: + poetry run bandit --baseline bandit.baseline -r aws_lambda_powertools + +complexity-baseline: + $(info Maintenability index) + poetry run radon mi aws_lambda_powertools + $(info Cyclomatic complexity index) + poetry run xenon --max-absolute C --max-modules A --max-average A aws_lambda_powertools + # # Use `poetry version /` for version bump # diff --git a/python/README.md b/python/README.md index ff22f799077..ab0efc45dbd 100644 --- a/python/README.md +++ b/python/README.md @@ -2,38 +2,38 @@ ![PackageStatus](https://img.shields.io/static/v1?label=status&message=beta&color=blueviolet?style=flat-square) ![PythonSupport](https://img.shields.io/static/v1?label=python&message=3.6%20|%203.7|%203.8&color=blue?style=flat-square&logo=python) ![PyPI version](https://badge.fury.io/py/aws-lambda-powertools.svg) ![PyPi monthly downloads](https://img.shields.io/pypi/dm/aws-lambda-powertools) ![Build](https://github.com/awslabs/aws-lambda-powertools/workflows/Powertools%20Python/badge.svg?branch=master) -A suite of utilities for AWS Lambda Functions that makes tracing with AWS X-Ray, structured logging and creating custom metrics asynchronously easier - Currently available for Python only and compatible with Python >=3.6. +A suite of utilities for AWS Lambda Functions that makes tracing with AWS X-Ray, structured logging, and creating custom metrics asynchronously easier - Compatible with Python >=3.6. -**Status**: Beta +> During beta, this library may change its API/methods, or environment variables as it receives feedback from customers. 
-## Features +* **Status**: Beta +* **How long until GA?**: [Current progress](https://github.com/awslabs/aws-lambda-powertools/projects/1) -**Tracing** +## Features -> It currently uses AWS X-Ray +**[Tracing](###Tracing)** -* Decorators that capture cold start as annotation, and response and exceptions as metadata +* Capture cold start as annotation, and response and exceptions as metadata * Run functions locally with SAM CLI without code change to disable tracing * Explicitly disable tracing via env var `POWERTOOLS_TRACE_DISABLED="true"` +* Support tracing async methods -**Logging** +**[Logging](###Logging)** -* Decorators that capture key fields from Lambda context, cold start and structures logging output as JSON -* Optionally log Lambda request when instructed (disabled by default) +* Capture key fields from Lambda context, cold start and structures logging output as JSON +* Log Lambda event when instructed (disabled by default) - Enable via `POWERTOOLS_LOGGER_LOG_EVENT="true"` or explicitly via decorator param -* Logs canonical custom metric line to logs that can be consumed asynchronously * Log sampling enables DEBUG log level for a percentage of requests (disabled by default) - Enable via `POWERTOOLS_LOGGER_SAMPLE_RATE=0.1`, ranges from 0 to 1, where 0.1 is 10% and 1 is 100% -* Append additional keys to structured log at any point in time so they're available across log statements +* Append additional keys to structured log at any point in time -**Metrics** +**[Metrics](###Metrics)** * Aggregate up to 100 metrics using a single CloudWatch Embedded Metric Format object (large JSON blob) * Context manager to create an one off metric with a different dimension than metrics already aggregated * Validate against common metric definitions mistakes (metric unit, values, max dimensions, max metrics, etc) -* No stack, custom resource, data collection needed — Metrics are created async by CloudWatch EMF -**Bring your own middleware** +**[Bring your own 
middleware](###Bring-your-own-middleware)** * Utility to easily create your own middleware * Run logic before, after, and handle exceptions @@ -45,34 +45,24 @@ A suite of utilities for AWS Lambda Functions that makes tracing with AWS X-Ray, Environment variable | Description | Default | Utility ------------------------------------------------- | --------------------------------------------------------------------------------- | --------------------------------------------------------------------------------- | ------------------------------------------------- POWERTOOLS_SERVICE_NAME | Sets service name used for tracing namespace, metrics dimensions and structured logging | "service_undefined" | all -POWERTOOLS_TRACE_DISABLED | Disables tracing | "false" | tracing -POWERTOOLS_TRACE_MIDDLEWARES | Creates sub-segment for each middleware created by lambda_handler_decorator | "false" | middleware_factory -POWERTOOLS_LOGGER_LOG_EVENT | Logs incoming event | "false" | logging -POWERTOOLS_LOGGER_SAMPLE_RATE | Debug log sampling | 0 | logging -POWERTOOLS_METRICS_NAMESPACE | Metrics namespace | None | metrics -LOG_LEVEL | Sets logging level | "INFO" | logging +POWERTOOLS_TRACE_DISABLED | Disables tracing | "false" | [Tracing](###Tracing) +POWERTOOLS_TRACE_MIDDLEWARES | Creates sub-segment for each middleware created by lambda_handler_decorator | "false" | [middleware_factory](###Bring-your-own-middleware) +POWERTOOLS_LOGGER_LOG_EVENT | Logs incoming event | "false" | [Logging](###Logging) +POWERTOOLS_LOGGER_SAMPLE_RATE | Debug log sampling | 0 | [Logging](###Logging) +POWERTOOLS_METRICS_NAMESPACE | Metrics namespace | None | [Metrics](###Metrics) +LOG_LEVEL | Sets logging level | "INFO" | [Logging](###Logging) ## Usage +See **[example](./example/README.md)** of all features, testing, and a SAM template with all Powertools env vars. All features also provide full docs, and code completion for VSCode and PyCharm. 
+ ### Installation With [pip](https://pip.pypa.io/en/latest/index.html) installed, run: ``pip install aws-lambda-powertools`` ### Tracing -**Example SAM template using supported environment variables** - -```yaml -Globals: - Function: - Tracing: Active # can also be enabled per function - Environment: - Variables: - POWERTOOLS_SERVICE_NAME: "payment" - POWERTOOLS_TRACE_DISABLED: "false" -``` - -**Pseudo Python Lambda code** +#### Tracing Lambda handler and a function ```python from aws_lambda_powertools.tracing import Tracer @@ -81,10 +71,8 @@ tracer = Tracer() @tracer.capture_method def collect_payment(charge_id): - # logic - ret = requests.post(PAYMENT_ENDPOINT) - # custom annotation - tracer.put_annotation("PAYMENT_STATUS", "SUCCESS") + ret = requests.post(PAYMENT_ENDPOINT) # logic + tracer.put_annotation("PAYMENT_STATUS", "SUCCESS") # custom annotation return ret @tracer.capture_lambda_handler @@ -94,7 +82,100 @@ def handler(event, context) ... ``` -**Fetching a pre-configured tracer anywhere** +#### Tracing asynchronous functions + +```python +import asyncio + +from aws_lambda_powertools.tracing import Tracer +tracer = Tracer() +# tracer = Tracer(service="payment") # can also be explicitly defined + +@tracer.capture_method +async def collect_payment(charge_id): + ... + +@tracer.capture_lambda_handler +def handler(event, context) + charge_id = event.get('charge_id') + payment = asyncio.run(collect_payment(charge_id)) # python 3.7+ + ... +``` + +#### Tracing concurrent asynchronous with gather + +:warning: This will no longer be necessary after [this X-Ray recorder issue is resolved](https://github.com/aws/aws-xray-sdk-python/issues/164) as it's an edge case. :warning: + +To safely workaround this issue, use `@tracer.capture_method` on functions not being run with `async.gather`, and instead use `in_subsegment_async` context manager escape hatch to have the same tracing effect. 
+ + +```python +import asyncio + +from aws_lambda_powertools.tracing import Tracer +tracer = Tracer() +# tracer = Tracer(service="payment") # can also be explicitly defined + +async def another_async_task(): + async with tracer.provider.in_subsegment_async("## another_async_task"): + ... + +async def another_async_task_2(): + async with tracer.provider.in_subsegment_async("## another_async_task_2"): + ... + +@tracer.capture_method +async def collect_payment(charge_id): + asyncio.gather(another_async_task(), another_async_task_2()) + ... + +@tracer.capture_lambda_handler +def handler(event, context) + charge_id = event.get('charge_id') + payment = asyncio.run(collect_payment(charge_id)) # python 3.7+ + ... +``` + +#### Using escape hatch mechanisms + +You can use `tracer.provider` attribute to access all methods provided by `xray_recorder`. This is useful when you need a feature available in X-Ray that is not available in the Tracer middleware, for example [thread-safe](https://github.com/aws/aws-xray-sdk-python/#user-content-trace-threadpoolexecutor), or [context managers](https://github.com/aws/aws-xray-sdk-python/#user-content-start-a-custom-segmentsubsegment). + +**Example using aiohttp with an async context manager** + +```python +import asyncio +import aiohttp + +from aws_lambda_powertools.tracing import Tracer, aiohttp_trace_config +tracer = Tracer() + +# aiohttp_trace_config is x-ray extension for aiohttp trace config known as aws_xray_trace_config + +async def aiohttp_task(): + # Async context manager as opposed to `@tracer.capture_method` + async with tracer.provider.in_subsegment_async("## aiohttp escape hatch"): + async with aiohttp.ClientSession(trace_configs=[aiohttp_trace_config()]) as session: + async with session.get("https://httpbin.org/json") as resp: + resp = await resp.json() + return resp + +@tracer.capture_method +async def async_tasks(): + ret = await aiohttp_task() + ... 
+ + return { + "task": "done", + **ret + } + +@tracer.capture_lambda_handler +def handler(event, context) + ret = asyncio.run(async_tasks()) # python 3.7+ + ... +``` + +#### Using a pre-configured tracer anywhere ```python # handler.py @@ -114,21 +195,7 @@ tracer = Tracer(auto_patch=False) # new instance using existing configuration wi ### Logging -> **NOTE** `logger_setup` and `logger_inject_lambda_context` are deprecated and will be completely removed once it's GA. - -**Example SAM template using supported environment variables** - -```yaml -Globals: - Function: - Environment: - Variables: - POWERTOOLS_SERVICE_NAME: "payment" - POWERTOOLS_LOGGER_SAMPLE_RATE: 0.1 # enable debug logging for 1% of requests, 0% by default - LOG_LEVEL: "INFO" -``` - -**Pseudo Python Lambda code** +#### Structuring logs with Lambda context info ```python from aws_lambda_powertools.logging import Logger @@ -148,7 +215,8 @@ def handler(event, context) ... ``` -**Exerpt output in CloudWatch Logs** +
+Exerpt output in CloudWatch Logs ```json { @@ -182,8 +250,9 @@ def handler(event, context) } } ``` +
-**Append additional keys to structured log** +#### Appending additional keys to current logger ```python from aws_lambda_powertools.logging import Logger @@ -198,7 +267,8 @@ def handler(event, context) ... ``` -**Exerpt output in CloudWatch Logs** +
+Exerpt output in CloudWatch Logs ```json { @@ -216,14 +286,11 @@ def handler(event, context) "message": "Collecting payment" } ``` +
-### Custom Metrics async - -> **NOTE** `log_metric` will be removed once it's GA. +### Metrics -This feature makes use of CloudWatch Embedded Metric Format (EMF) and metrics are created asynchronously by CloudWatch service - -> Contrary to `log_metric`, you don't need any custom resource or additional CloudFormation stack anymore. +This feature makes use of CloudWatch Embedded Metric Format (EMF), and metrics are created asynchronously by CloudWatch service. Metrics middleware validates against the minimum necessary for a metric to be published: @@ -232,9 +299,9 @@ Metrics middleware validates against the minimum necessary for a metric to be pu * Only one Namespace * [Any Metric unit supported by CloudWatch](https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_MetricDatum.html) -**Creating multiple metrics** +#### Creating multiple metrics -`log_metrics` decorator calls the decorated function, so leave that for last decorator or will fail with `SchemaValidationError` if no metrics are recorded. +If using multiple middlewares, use `log_metrics` as the last decorator, or else it will fail with `SchemaValidationError` if no metrics are recorded. ```python from aws_lambda_powertools.metrics import Metrics, MetricUnit @@ -267,11 +334,9 @@ with single_metric(name="ColdStart", unit=MetricUnit.Count, value=1) as metric: metric.add_dimension(name="function_context", value="$LATEST") ``` -> **NOTE**: If you want to instantiate Metrics() in multiple places in your code, make sure to use `POWERTOOLS_METRICS_NAMESPACE` env var as we don't keep a copy of that across instances. - -### Utilities +> **NOTE**: When using Metrics() in multiple places in your code, make sure to use `POWERTOOLS_METRICS_NAMESPACE` env var, or setting namespace param. -#### Bring your own middleware +### Bring your own middleware This feature allows you to create your own middleware as a decorator with ease by following a simple signature. 
@@ -279,7 +344,7 @@ This feature allows you to create your own middleware as a decorator with ease b * Always return the handler with event/context or response if executed - Supports nested middleware/decorators use case -**Middleware with no params** +#### Middleware with no params ```python from aws_lambda_powertools.middleware_factory import lambda_handler_decorator @@ -307,7 +372,7 @@ def lambda_handler(event, context): return True ``` -**Middleware with params** +#### Middleware with params ```python @lambda_handler_decorator @@ -325,9 +390,9 @@ def lambda_handler(event, context): return True ``` -**Optionally trace middleware execution** +#### Tracing middleware execution -This makes use of an existing Tracer instance that you may have initialized anywhere in your code, otherwise it'll initialize one using default options and provider (X-Ray). +This makes use of an existing Tracer instance that you may have initialized anywhere in your code. If no Tracer instance is found, it'll initialize one using default options. ```python from aws_lambda_powertools.middleware_factory import lambda_handler_decorator @@ -359,19 +424,12 @@ def lambda_handler(event, context): return True ``` - ### Debug mode -By default, all debug log statements from AWS Lambda Powertools package are suppressed. If you'd like to enable them, use `set_package_logger` utility: +By default, all log statements from AWS Lambda Powertools package are suppressed. If you'd like to enable them, use `set_package_logger` utility: ```python import aws_lambda_powertools aws_lambda_powertools.logging.logger.set_package_logger() ... 
``` - -## Beta - -This library may change its API/methods or environment variables as it receives feedback from customers - -**[Progress towards GA](https://github.com/awslabs/aws-lambda-powertools/projects/1)** diff --git a/python/aws_lambda_powertools/logging/logger.py b/python/aws_lambda_powertools/logging/logger.py index 02c0e912b12..bbcf6589060 100644 --- a/python/aws_lambda_powertools/logging/logger.py +++ b/python/aws_lambda_powertools/logging/logger.py @@ -402,7 +402,7 @@ def inject_lambda_context(self, lambda_handler: Callable[[Dict, Any], Any] = Non @logger.inject_lambda_context def handler(event, context): - logger.info("Hello") + logger.info("Hello") **Captures Lambda contextual runtime info and logs incoming request** @@ -412,7 +412,7 @@ def handler(event, context): @logger.inject_lambda_context(log_event=True) def handler(event, context): - logger.info("Hello") + logger.info("Hello") Returns ------- diff --git a/python/aws_lambda_powertools/metrics/exceptions.py b/python/aws_lambda_powertools/metrics/exceptions.py index b9b1107e747..88a38c24229 100644 --- a/python/aws_lambda_powertools/metrics/exceptions.py +++ b/python/aws_lambda_powertools/metrics/exceptions.py @@ -1,14 +1,22 @@ class MetricUnitError(Exception): + """When metric unit is not supported by CloudWatch""" + pass class SchemaValidationError(Exception): + """When serialization fail schema validation""" + pass class MetricValueError(Exception): + """When metric value isn't a valid number""" + pass class UniqueNamespaceError(Exception): + """When an additional namespace is set""" + pass diff --git a/python/aws_lambda_powertools/middleware_factory/exceptions.py b/python/aws_lambda_powertools/middleware_factory/exceptions.py index 55d5b2342bb..4d807b85384 100644 --- a/python/aws_lambda_powertools/middleware_factory/exceptions.py +++ b/python/aws_lambda_powertools/middleware_factory/exceptions.py @@ -1,2 +1,4 @@ class MiddlewareInvalidArgumentError(Exception): + """When middleware receives non 
keyword=arguments""" + pass diff --git a/python/aws_lambda_powertools/middleware_factory/factory.py b/python/aws_lambda_powertools/middleware_factory/factory.py index 43c8e5ad9fa..de781dc22c3 100644 --- a/python/aws_lambda_powertools/middleware_factory/factory.py +++ b/python/aws_lambda_powertools/middleware_factory/factory.py @@ -124,9 +124,8 @@ def wrapper(event, context): middleware = functools.partial(decorator, func, event, context, **kwargs) if trace_execution: tracer = Tracer(auto_patch=False) - tracer.create_subsegment(name=f"## {decorator.__qualname__}") - response = middleware() - tracer.end_subsegment() + with tracer.provider.in_subsegment(name=f"## {decorator.__qualname__}"): + response = middleware() else: response = middleware() return response diff --git a/python/aws_lambda_powertools/tracing/__init__.py b/python/aws_lambda_powertools/tracing/__init__.py index 136fccce9f8..ece90f7d1bc 100644 --- a/python/aws_lambda_powertools/tracing/__init__.py +++ b/python/aws_lambda_powertools/tracing/__init__.py @@ -1,5 +1,10 @@ """Tracing utility """ +from aws_xray_sdk.ext.aiohttp.client import aws_xray_trace_config as aiohttp_trace_config + from .tracer import Tracer -__all__ = ["Tracer"] +aiohttp_trace_config.__doc__ = "aiohttp extension for X-Ray (aws_xray_trace_config)" + + +__all__ = ["Tracer", "aiohttp_trace_config"] diff --git a/python/aws_lambda_powertools/tracing/tracer.py b/python/aws_lambda_powertools/tracing/tracer.py index 19cc319492f..022d8ef89a2 100644 --- a/python/aws_lambda_powertools/tracing/tracer.py +++ b/python/aws_lambda_powertools/tracing/tracer.py @@ -1,11 +1,14 @@ +import asyncio import copy import functools +import inspect import logging import os from distutils.util import strtobool -from typing import Any, Callable, Dict +from typing import Any, Callable, Dict, List, Tuple -from aws_xray_sdk.core import models, patch_all, xray_recorder +import aws_xray_sdk +import aws_xray_sdk.core is_cold_start = True logger = 
logging.getLogger(__name__) @@ -39,8 +42,10 @@ class Tracer: auto_patch: bool Patch existing imported modules during initialization, by default True disabled: bool - Flag to explicitly disable tracing, useful when running/testing locally. + Flag to explicitly disable tracing, useful when running/testing locally `Env POWERTOOLS_TRACE_DISABLED="true"` + patch_modules: Tuple[str] + Tuple of modules supported by tracing provider to patch, by default all modules are patched Example ------- @@ -70,7 +75,7 @@ def handler(event: dict, context: Any) -> Dict: def confirm_booking(booking_id: str) -> Dict: resp = add_confirmation(booking_id) - tracer.put_annotation("BookingConfirmation", resp['requestId']) + tracer.put_annotation("BookingConfirmation", resp["requestId"]) tracer.put_metadata("Booking confirmation", resp) return resp @@ -78,7 +83,8 @@ def confirm_booking(booking_id: str) -> Dict: @tracer.capture_lambda_handler def handler(event: dict, context: Any) -> Dict: print("Received event from Lambda...") - response = greeting(name="Heitor") + booking_id = event.get("booking_id") + response = confirm_booking(booking_id=booking_id) return response **A Lambda function using service name via POWERTOOLS_SERVICE_NAME** @@ -115,17 +121,29 @@ def handler(event: dict, context: Any) -> Dict: Limitations ----------- - * Async handler and methods not supported - + * Async handler not supported """ - _default_config = {"service": "service_undefined", "disabled": False, "provider": xray_recorder, "auto_patch": True} + _default_config = { + "service": "service_undefined", + "disabled": False, + "auto_patch": True, + "patch_modules": None, + "provider": aws_xray_sdk.core.xray_recorder, + } _config = copy.copy(_default_config) def __init__( - self, service: str = None, disabled: bool = None, provider: xray_recorder = None, auto_patch: bool = None + self, + service: str = None, + disabled: bool = None, + auto_patch: bool = None, + patch_modules: List = None, + provider: 
aws_xray_sdk.core.xray_recorder = None, ): - self.__build_config(service=service, disabled=disabled, provider=provider, auto_patch=auto_patch) + self.__build_config( + service=service, disabled=disabled, auto_patch=auto_patch, patch_modules=patch_modules, provider=provider + ) self.provider = self._config["provider"] self.disabled = self._config["disabled"] self.service = self._config["service"] @@ -135,7 +153,78 @@ def __init__( self.__disable_tracing_provider() if self.auto_patch: - self.patch() + self.patch(modules=patch_modules) + + def put_annotation(self, key: str, value: Any): + """Adds annotation to existing segment or subsegment + + Example + ------- + Custom annotation for a pseudo service named payment + + tracer = Tracer(service="payment") + tracer.put_annotation("PaymentStatus", "CONFIRMED") + + Parameters + ---------- + key : str + Annotation key (e.g. PaymentStatus) + value : any + Value for annotation (e.g. "CONFIRMED") + """ + if self.disabled: + logger.debug("Tracing has been disabled, aborting put_annotation") + return + + logger.debug(f"Annotating on key '{key}' with '{value}'") + self.provider.put_annotation(key=key, value=value) + + def put_metadata(self, key: str, value: Any, namespace: str = None): + """Adds metadata to existing segment or subsegment + + Parameters + ---------- + key : str + Metadata key + value : any + Value for metadata + namespace : str, optional + Namespace that metadata will lie under, by default None + + Example + ------- + Custom metadata for a pseudo service named payment + + tracer = Tracer(service="payment") + response = collect_payment() + tracer.put_metadata("Payment collection", response) + """ + if self.disabled: + logger.debug("Tracing has been disabled, aborting put_metadata") + return + + namespace = namespace or self.service + logger.debug(f"Adding metadata on key '{key}' with '{value}' at namespace '{namespace}'") + self.provider.put_metadata(key=key, value=value, namespace=namespace) + + def patch(self, 
modules: Tuple[str] = None): + """Patch modules for instrumentation. + + Patches all supported modules by default if none are given. + + Parameters + ---------- + modules : Tuple[str] + List of modules to be patched, optional by default + """ + if self.disabled: + logger.debug("Tracing has been disabled, aborting patch") + return + + if modules is None: + aws_xray_sdk.core.patch_all() + else: + aws_xray_sdk.core.patch(modules) def capture_lambda_handler(self, lambda_handler: Callable[[Dict, Any], Any] = None): """Decorator to create subsegment for lambda handlers @@ -164,23 +253,28 @@ def handler(event, context) @functools.wraps(lambda_handler) def decorate(event, context): - self.create_subsegment(name=f"## {lambda_handler.__name__}") - - try: - logger.debug("Calling lambda handler") - response = lambda_handler(event, context) - logger.debug("Received lambda handler response successfully") - logger.debug(response) - if response: - self.put_metadata("lambda handler response", response) - except Exception as err: - logger.exception("Exception received from lambda handler", exc_info=True) - self.put_metadata(f"{self.service}_error", err) - raise - finally: - self.end_subsegment() - - return response + with self.provider.in_subsegment(name=f"## {lambda_handler.__name__}") as subsegment: + global is_cold_start + if is_cold_start: + logger.debug("Annotating cold start") + subsegment.put_annotation(key="ColdStart", value=True) + is_cold_start = False + + try: + logger.debug("Calling lambda handler") + response = lambda_handler(event, context) + logger.debug("Received lambda handler response successfully") + logger.debug(response) + if response: + subsegment.put_metadata( + key="lambda handler response", value=response, namespace=self._config["service"] + ) + except Exception as err: + logger.exception("Exception received from lambda handler", exc_info=True) + subsegment.put_metadata(key=f"{self.service} error", value=err, namespace=self._config["service"]) + raise + + 
return response return decorate @@ -190,6 +284,14 @@ def capture_method(self, method: Callable = None): It also captures both response and exceptions as metadata and creates a subsegment named `## ` + When running [async functions concurrently](https://docs.python.org/3/library/asyncio-task.html#id6), + methods may impact each others subsegment, and can trigger + and AlreadyEndedException from X-Ray due to async nature. + + For this use case, either use `capture_method` only where + `async.gather` is called, or use `in_subsegment_async` + context manager via our escape hatch mechanism - See examples. + Example ------- **Custom function using capture_method decorator** @@ -198,162 +300,167 @@ def capture_method(self, method: Callable = None): @tracer.capture_method def some_function() - Parameters - ---------- - method : Callable - Method to annotate on + **Custom async method using capture_method decorator** - Raises - ------ - err - Exception raised by method - """ + from aws_lambda_powertools.tracing import Tracer + tracer = Tracer(service="booking") - @functools.wraps(method) - def decorate(*args, **kwargs): - method_name = f"{method.__name__}" - self.create_subsegment(name=f"## {method_name}") + @tracer.capture_method + async def confirm_booking(booking_id: str) -> Dict: + resp = call_to_booking_service() - try: - logger.debug(f"Calling method: {method_name}") - response = method(*args, **kwargs) - logger.debug(f"Received {method_name} response successfully") - logger.debug(response) - if response is not None: - self.put_metadata(f"{method_name} response", response) - except Exception as err: - logger.exception(f"Exception received from '{method_name}'' method", exc_info=True) - self.put_metadata(f"{method_name} error", err) - raise - finally: - self.end_subsegment() + tracer.put_annotation("BookingConfirmation", resp["requestId"]) + tracer.put_metadata("Booking confirmation", resp) - return response + return resp - return decorate + def lambda_handler(event: 
dict, context: Any) -> Dict: + booking_id = event.get("booking_id") + asyncio.run(confirm_booking(booking_id=booking_id)) - def put_annotation(self, key: str, value: Any): - """Adds annotation to existing segment or subsegment + **Tracing nested async calls** - Example - ------- - Custom annotation for a pseudo service named payment + from aws_lambda_powertools.tracing import Tracer + tracer = Tracer(service="booking") - tracer = Tracer(service="payment") - tracer.put_annotation("PaymentStatus", "CONFIRMED") + @tracer.capture_method + async def get_identity(): + ... - Parameters - ---------- - key : str - Annotation key (e.g. PaymentStatus) - value : Any - Value for annotation (e.g. "CONFIRMED") - """ - # Will no longer be needed once #155 is resolved - # https://github.com/aws/aws-xray-sdk-python/issues/155 - if self.disabled: - return + @tracer.capture_method + async def long_async_call(): + ... - logger.debug(f"Annotating on key '{key}'' with '{value}''") - self.provider.put_annotation(key=key, value=value) + @tracer.capture_method + async def async_tasks(): + await get_identity() + ret = await long_async_call() - def put_metadata(self, key: str, value: object, namespace: str = None): - """Adds metadata to existing segment or subsegment + return { "task": "done", **ret } - Parameters - ---------- - key : str - Metadata key - value : object - Value for metadata - namespace : str, optional - Namespace that metadata will lie under, by default None + **Safely tracing concurrent async calls with decorator** - Example - ------- - Custom metadata for a pseudo service named payment + This may not needed once [this bug is closed](https://github.com/aws/aws-xray-sdk-python/issues/164) - tracer = Tracer(service="payment") - response = collect_payment() - tracer.put_metadata("Payment collection", response) - """ - # Will no longer be needed once #155 is resolved - # https://github.com/aws/aws-xray-sdk-python/issues/155 - if self.disabled: - return + from 
aws_lambda_powertools.tracing import Tracer
+            tracer = Tracer(service="booking")

-        _namespace = namespace or self.service
-        logger.debug(f"Adding metadata on key '{key}'' with '{value}'' at namespace '{namespace}''")
-        self.provider.put_metadata(key=key, value=value, namespace=_namespace)
+            async def get_identity():
+                async with aioboto3.client("sts") as sts:
+                    account = await sts.get_caller_identity()
+                return account

-    def create_subsegment(self, name: str) -> models.subsegment:
-        """Creates subsegment or a dummy segment plus subsegment if tracing is disabled
+            async def long_async_call():
+                ...

-        It also assumes Tracer would be instantiated statically so that cold starts are captured.
+            @tracer.capture_method
+            async def async_tasks():
+                _, ret = await asyncio.gather(get_identity(), long_async_call(), return_exceptions=True)

-        Parameters
-        ----------
-        name : str
-            Subsegment name
+                return { "task": "done", **ret }

-        Example
-        -------
-        Creates a genuine subsegment
+        **Safely tracing each concurrent async call with escape hatch**

-            self.create_subsegment(name="a meaningful name")
+        This may not be needed once [this bug is closed](https://github.com/aws/aws-xray-sdk-python/issues/164)

-        Returns
-        -------
-        models.subsegment
-            AWS X-Ray Subsegment
-        """
-        # Will no longer be needed once #155 is resolved
-        # https://github.com/aws/aws-xray-sdk-python/issues/155
-        subsegment = None
+            from aws_lambda_powertools.tracing import Tracer
+            tracer = Tracer(service="booking")

-        if self.disabled:
-            logger.debug("Tracing has been disabled, return dummy subsegment instead")
-            segment = models.dummy_entities.DummySegment()
-            subsegment = models.dummy_entities.DummySubsegment(segment)
-        else:
-            subsegment = self.provider.begin_subsegment(name=name)
-            global is_cold_start
-            if is_cold_start:
-                logger.debug("Annotating cold start")
-                subsegment.put_annotation("ColdStart", True)
-                is_cold_start = False
+            async def get_identity():
+                async with tracer.provider.in_subsegment_async("## 
get_identity"):
+                    ...

-        return subsegment
+            async def long_async_call():
+                async with tracer.provider.in_subsegment_async("## long_async_call"):
+                    ...
+
+            @tracer.capture_method
+            async def async_tasks():
+                _, ret = await asyncio.gather(get_identity(), long_async_call(), return_exceptions=True)

-    def end_subsegment(self):
-        """Ends an existing subsegment
+                return { "task": "done", **ret }

         Parameters
         ----------
-        subsegment : models.subsegment
-            Subsegment previously created
+        method : Callable
+            Method to annotate on
+
+        Raises
+        ------
+        err
+            Exception raised by method
         """
-        if self.disabled:
-            logger.debug("Tracing has been disabled, return instead")
-            return
+        method_name = f"{method.__name__}"
+
+        async def decorate_logic(
+            decorated_method_with_args: functools.partial = None,
+            subsegment: aws_xray_sdk.core.models.subsegment = None,
+            coroutine: bool = False,
+        ) -> Any:
+            """Decorate logic runs both sync and async decorated methods
+
+            Parameters
+            ----------
+            decorated_method_with_args : functools.partial
+                Partial decorated method with arguments/keyword arguments
+            subsegment : aws_xray_sdk.core.models.subsegment
+                X-Ray subsegment to reuse
+            coroutine : bool, optional
+                Instruct whether partial decorated method is a wrapped coroutine, by default False
+
+            Returns
+            -------
+            Any
+                Returns method's response
+            """
+            response = None
+            try:
+                logger.debug(f"Calling method: {method_name}")
+                if coroutine:
+                    response = await decorated_method_with_args()
+                else:
+                    response = decorated_method_with_args()
+                logger.debug(f"Received {method_name} response successfully")
+                logger.debug(response)
+            except Exception as err:
+                logger.exception(f"Exception received from '{method_name}' method", exc_info=True)
+                subsegment.put_metadata(key=f"{method_name} error", value=err, namespace=self._config["service"])
+                raise
+            finally:
+                if response is not None:
+                    subsegment.put_metadata(  # pragma: no cover
+                        key=f"{method_name} response", value=response, 
namespace=self._config["service"] + ) - self.provider.end_subsegment() + return response - def patch(self): - """Patch modules for instrumentation""" - logger.debug("Patching modules...") + if inspect.iscoroutinefunction(method): - if self.disabled: - logger.debug("Tracing has been disabled, aborting patch") - return + @functools.wraps(method) + async def decorate(*args, **kwargs): + decorated_method_with_args = functools.partial(method, *args, **kwargs) + async with self.provider.in_subsegment_async(name=f"## {method_name}") as subsegment: + return await decorate_logic( + decorated_method_with_args=decorated_method_with_args, subsegment=subsegment, coroutine=True + ) - patch_all() # pragma: no cover + else: - def __disable_tracing_provider(self): - """Forcefully disables tracing and patching""" - from aws_xray_sdk import global_sdk_config + @functools.wraps(method) + def decorate(*args, **kwargs): + loop = asyncio.get_event_loop() + decorated_method_with_args = functools.partial(method, *args, **kwargs) + with self.provider.in_subsegment(name=f"## {method_name}") as subsegment: + return loop.run_until_complete( + decorate_logic(decorated_method_with_args=decorated_method_with_args, subsegment=subsegment) + ) + + return decorate - global_sdk_config.set_sdk_enabled(False) + def __disable_tracing_provider(self): + """Forcefully disables tracing""" + logger.debug("Disabling tracer provider...") + aws_xray_sdk.global_sdk_config.set_sdk_enabled(False) def __is_trace_disabled(self) -> bool: """Detects whether trace has been disabled @@ -384,7 +491,12 @@ def __is_trace_disabled(self) -> bool: return False def __build_config( - self, service: str = None, disabled: bool = None, provider: xray_recorder = None, auto_patch: bool = None + self, + service: str = None, + disabled: bool = None, + auto_patch: bool = None, + patch_modules: List = None, + provider: aws_xray_sdk.core.xray_recorder = None, ): """ Populates Tracer config for new and existing initializations """ 
is_disabled = disabled if disabled is not None else self.__is_trace_disabled() @@ -394,6 +506,7 @@ def __build_config( self._config["auto_patch"] = auto_patch if auto_patch is not None else self._config["auto_patch"] self._config["service"] = is_service if is_service else self._config["service"] self._config["disabled"] = is_disabled if is_disabled else self._config["disabled"] + self._config["patch_modules"] = patch_modules if patch_modules else self._config["patch_modules"] @classmethod def _reset_config(cls): diff --git a/python/bandit.baseline b/python/bandit.baseline new file mode 100644 index 00000000000..a989733b930 --- /dev/null +++ b/python/bandit.baseline @@ -0,0 +1,226 @@ +{ + "errors": [], + "generated_at": "2020-05-12T08:59:59Z", + "metrics": { + "_totals": { + "CONFIDENCE.HIGH": 1.0, + "CONFIDENCE.LOW": 0.0, + "CONFIDENCE.MEDIUM": 0.0, + "CONFIDENCE.UNDEFINED": 0.0, + "SEVERITY.HIGH": 0.0, + "SEVERITY.LOW": 1.0, + "SEVERITY.MEDIUM": 0.0, + "SEVERITY.UNDEFINED": 0.0, + "loc": 1375, + "nosec": 0 + }, + "aws_lambda_powertools/__init__.py": { + "CONFIDENCE.HIGH": 0.0, + "CONFIDENCE.LOW": 0.0, + "CONFIDENCE.MEDIUM": 0.0, + "CONFIDENCE.UNDEFINED": 0.0, + "SEVERITY.HIGH": 0.0, + "SEVERITY.LOW": 0.0, + "SEVERITY.MEDIUM": 0.0, + "SEVERITY.UNDEFINED": 0.0, + "loc": 6, + "nosec": 0 + }, + "aws_lambda_powertools/helper/__init__.py": { + "CONFIDENCE.HIGH": 0.0, + "CONFIDENCE.LOW": 0.0, + "CONFIDENCE.MEDIUM": 0.0, + "CONFIDENCE.UNDEFINED": 0.0, + "SEVERITY.HIGH": 0.0, + "SEVERITY.LOW": 0.0, + "SEVERITY.MEDIUM": 0.0, + "SEVERITY.UNDEFINED": 0.0, + "loc": 2, + "nosec": 0 + }, + "aws_lambda_powertools/helper/models.py": { + "CONFIDENCE.HIGH": 0.0, + "CONFIDENCE.LOW": 0.0, + "CONFIDENCE.MEDIUM": 0.0, + "CONFIDENCE.UNDEFINED": 0.0, + "SEVERITY.HIGH": 0.0, + "SEVERITY.LOW": 0.0, + "SEVERITY.MEDIUM": 0.0, + "SEVERITY.UNDEFINED": 0.0, + "loc": 108, + "nosec": 0 + }, + "aws_lambda_powertools/logging/__init__.py": { + "CONFIDENCE.HIGH": 0.0, + "CONFIDENCE.LOW": 0.0, + 
"CONFIDENCE.MEDIUM": 0.0, + "CONFIDENCE.UNDEFINED": 0.0, + "SEVERITY.HIGH": 0.0, + "SEVERITY.LOW": 0.0, + "SEVERITY.MEDIUM": 0.0, + "SEVERITY.UNDEFINED": 0.0, + "loc": 5, + "nosec": 0 + }, + "aws_lambda_powertools/logging/exceptions.py": { + "CONFIDENCE.HIGH": 0.0, + "CONFIDENCE.LOW": 0.0, + "CONFIDENCE.MEDIUM": 0.0, + "CONFIDENCE.UNDEFINED": 0.0, + "SEVERITY.HIGH": 0.0, + "SEVERITY.LOW": 0.0, + "SEVERITY.MEDIUM": 0.0, + "SEVERITY.UNDEFINED": 0.0, + "loc": 2, + "nosec": 0 + }, + "aws_lambda_powertools/logging/logger.py": { + "CONFIDENCE.HIGH": 1.0, + "CONFIDENCE.LOW": 0.0, + "CONFIDENCE.MEDIUM": 0.0, + "CONFIDENCE.UNDEFINED": 0.0, + "SEVERITY.HIGH": 0.0, + "SEVERITY.LOW": 1.0, + "SEVERITY.MEDIUM": 0.0, + "SEVERITY.UNDEFINED": 0.0, + "loc": 375, + "nosec": 0 + }, + "aws_lambda_powertools/metrics/__init__.py": { + "CONFIDENCE.HIGH": 0.0, + "CONFIDENCE.LOW": 0.0, + "CONFIDENCE.MEDIUM": 0.0, + "CONFIDENCE.UNDEFINED": 0.0, + "SEVERITY.HIGH": 0.0, + "SEVERITY.LOW": 0.0, + "SEVERITY.MEDIUM": 0.0, + "SEVERITY.UNDEFINED": 0.0, + "loc": 15, + "nosec": 0 + }, + "aws_lambda_powertools/metrics/base.py": { + "CONFIDENCE.HIGH": 0.0, + "CONFIDENCE.LOW": 0.0, + "CONFIDENCE.MEDIUM": 0.0, + "CONFIDENCE.UNDEFINED": 0.0, + "SEVERITY.HIGH": 0.0, + "SEVERITY.LOW": 0.0, + "SEVERITY.MEDIUM": 0.0, + "SEVERITY.UNDEFINED": 0.0, + "loc": 162, + "nosec": 0 + }, + "aws_lambda_powertools/metrics/exceptions.py": { + "CONFIDENCE.HIGH": 0.0, + "CONFIDENCE.LOW": 0.0, + "CONFIDENCE.MEDIUM": 0.0, + "CONFIDENCE.UNDEFINED": 0.0, + "SEVERITY.HIGH": 0.0, + "SEVERITY.LOW": 0.0, + "SEVERITY.MEDIUM": 0.0, + "SEVERITY.UNDEFINED": 0.0, + "loc": 12, + "nosec": 0 + }, + "aws_lambda_powertools/metrics/metric.py": { + "CONFIDENCE.HIGH": 0.0, + "CONFIDENCE.LOW": 0.0, + "CONFIDENCE.MEDIUM": 0.0, + "CONFIDENCE.UNDEFINED": 0.0, + "SEVERITY.HIGH": 0.0, + "SEVERITY.LOW": 0.0, + "SEVERITY.MEDIUM": 0.0, + "SEVERITY.UNDEFINED": 0.0, + "loc": 90, + "nosec": 0 + }, + "aws_lambda_powertools/metrics/metrics.py": { + 
"CONFIDENCE.HIGH": 0.0, + "CONFIDENCE.LOW": 0.0, + "CONFIDENCE.MEDIUM": 0.0, + "CONFIDENCE.UNDEFINED": 0.0, + "SEVERITY.HIGH": 0.0, + "SEVERITY.LOW": 0.0, + "SEVERITY.MEDIUM": 0.0, + "SEVERITY.UNDEFINED": 0.0, + "loc": 82, + "nosec": 0 + }, + "aws_lambda_powertools/middleware_factory/__init__.py": { + "CONFIDENCE.HIGH": 0.0, + "CONFIDENCE.LOW": 0.0, + "CONFIDENCE.MEDIUM": 0.0, + "CONFIDENCE.UNDEFINED": 0.0, + "SEVERITY.HIGH": 0.0, + "SEVERITY.LOW": 0.0, + "SEVERITY.MEDIUM": 0.0, + "SEVERITY.UNDEFINED": 0.0, + "loc": 3, + "nosec": 0 + }, + "aws_lambda_powertools/middleware_factory/exceptions.py": { + "CONFIDENCE.HIGH": 0.0, + "CONFIDENCE.LOW": 0.0, + "CONFIDENCE.MEDIUM": 0.0, + "CONFIDENCE.UNDEFINED": 0.0, + "SEVERITY.HIGH": 0.0, + "SEVERITY.LOW": 0.0, + "SEVERITY.MEDIUM": 0.0, + "SEVERITY.UNDEFINED": 0.0, + "loc": 3, + "nosec": 0 + }, + "aws_lambda_powertools/middleware_factory/factory.py": { + "CONFIDENCE.HIGH": 0.0, + "CONFIDENCE.LOW": 0.0, + "CONFIDENCE.MEDIUM": 0.0, + "CONFIDENCE.UNDEFINED": 0.0, + "SEVERITY.HIGH": 0.0, + "SEVERITY.LOW": 0.0, + "SEVERITY.MEDIUM": 0.0, + "SEVERITY.UNDEFINED": 0.0, + "loc": 103, + "nosec": 0 + }, + "aws_lambda_powertools/tracing/__init__.py": { + "CONFIDENCE.HIGH": 0.0, + "CONFIDENCE.LOW": 0.0, + "CONFIDENCE.MEDIUM": 0.0, + "CONFIDENCE.UNDEFINED": 0.0, + "SEVERITY.HIGH": 0.0, + "SEVERITY.LOW": 0.0, + "SEVERITY.MEDIUM": 0.0, + "SEVERITY.UNDEFINED": 0.0, + "loc": 6, + "nosec": 0 + }, + "aws_lambda_powertools/tracing/tracer.py": { + "CONFIDENCE.HIGH": 0.0, + "CONFIDENCE.LOW": 0.0, + "CONFIDENCE.MEDIUM": 0.0, + "CONFIDENCE.UNDEFINED": 0.0, + "SEVERITY.HIGH": 0.0, + "SEVERITY.LOW": 0.0, + "SEVERITY.MEDIUM": 0.0, + "SEVERITY.UNDEFINED": 0.0, + "loc": 401, + "nosec": 0 + } + }, + "results": [ + { + "code": "369 try:\n370 if self.sampling_rate and random.random() <= float(self.sampling_rate):\n371 logger.debug(\"Setting log level to Debug due to sampling rate\")\n", + "filename": "aws_lambda_powertools/logging/logger.py", + 
"issue_confidence": "HIGH", + "issue_severity": "LOW", + "issue_text": "Standard pseudo-random generators are not suitable for security/cryptographic purposes.", + "line_number": 370, + "line_range": [ + 370 + ], + "more_info": "https://bandit.readthedocs.io/en/latest/blacklists/blacklist_calls.html#b311-random", + "test_id": "B311", + "test_name": "blacklist" + } + ] +} \ No newline at end of file diff --git a/python/example/hello_world/app.py b/python/example/hello_world/app.py index e503dc8362f..35f44de67d2 100644 --- a/python/example/hello_world/app.py +++ b/python/example/hello_world/app.py @@ -1,16 +1,20 @@ +import asyncio import json +import aioboto3 +import aiohttp import requests from aws_lambda_powertools.logging import Logger +from aws_lambda_powertools.logging.logger import set_package_logger from aws_lambda_powertools.metrics import Metrics, MetricUnit, single_metric from aws_lambda_powertools.middleware_factory import lambda_handler_decorator -from aws_lambda_powertools.tracing import Tracer -from aws_lambda_powertools.logging.logger import set_package_logger +from aws_lambda_powertools.tracing import Tracer, aiohttp_trace_config -set_package_logger() # Enable package diagnostics (DEBUG log) +set_package_logger() # Enable package diagnostics (DEBUG log) -tracer = Tracer() +# tracer = Tracer() # patches all available modules +tracer = Tracer(patch_modules=("aioboto3", "boto3", "requests")) # ~90-100ms faster in perf depending on set of libs logger = Logger() metrics = Metrics() @@ -19,6 +23,30 @@ metrics.add_dimension(name="operation", value="example") +async def aioboto_task(): + async with aioboto3.client("sts") as sts: + account = await sts.get_caller_identity() + return account + + +async def aiohttp_task(): + # You have full access to all xray_recorder methods via `tracer.provider` + # these include thread-safe methods, all context managers, x-ray configuration etc. 
+ # see https://github.com/aws/aws-xray-sdk-python/issues/164 + async with tracer.provider.in_subsegment_async("## aiohttp escape hatch"): + async with aiohttp.ClientSession(trace_configs=[aiohttp_trace_config()]) as session: + async with session.get("https://httpbin.org/json") as resp: + resp = await resp.json() + return resp + + +@tracer.capture_method +async def async_tasks(): + _, ret = await asyncio.gather(aioboto_task(), aiohttp_task(), return_exceptions=True) + + return {"task": "done", **ret} + + @lambda_handler_decorator(trace_execution=True) def my_middleware(handler, event, context, say_hello=False): if say_hello: @@ -56,6 +84,9 @@ def lambda_handler(event, context): Return doc: https://docs.aws.amazon.com/apigateway/latest/developerguide/set-up-lambda-proxy-integrations.html """ + + async_http_ret = asyncio.run(async_tasks()) + if "charge_id" in event: logger.structure_logs(append=True, payment_id="charge_id") @@ -77,8 +108,10 @@ def lambda_handler(event, context): with single_metric(name="UniqueMetricDimension", unit="Seconds", value=1) as metric: metric.add_dimension(name="unique_dimension", value="for_unique_metric") + resp = {"message": "hello world", "location": ip.text.replace("\n", ""), "async_http": async_http_ret} logger.info("Returning message to the caller") + return { "statusCode": 200, - "body": json.dumps({"message": "hello world", "location": ip.text.replace("\n", "")}), + "body": json.dumps(resp), } diff --git a/python/example/hello_world/requirements.txt b/python/example/hello_world/requirements.txt index 0241ab2efa6..f89f0010c01 100644 --- a/python/example/hello_world/requirements.txt +++ b/python/example/hello_world/requirements.txt @@ -1,2 +1,4 @@ aws-lambda-powertools -requests \ No newline at end of file +requests +aioboto3 +aiohttp diff --git a/python/example/template.yaml b/python/example/template.yaml index 485641e5f63..47267d729f5 100644 --- a/python/example/template.yaml +++ b/python/example/template.yaml @@ -8,7 +8,7 @@ 
Description: > # More info about Globals: https://github.com/awslabs/serverless-application-model/blob/master/docs/globals.rst Globals: Function: - Timeout: 3 + Timeout: 7 Resources: HelloWorldFunction: @@ -16,16 +16,16 @@ Resources: Properties: CodeUri: hello_world/ Handler: app.lambda_handler - Runtime: python3.7 - Tracing: Active + Runtime: python3.8 + Tracing: Active # enables X-Ray tracing Environment: Variables: - POWERTOOLS_SERVICE_NAME: example # Sets service name used for tracing namespace, metrics dimensions and structured logging - POWERTOOLS_TRACE_DISABLED: "false" # Explicitly disables tracing - POWERTOOLS_LOGGER_LOG_EVENT: "false" # Logs incoming event - POWERTOOLS_LOGGER_SAMPLE_RATE: "0" # Debug log sampling percentage + POWERTOOLS_SERVICE_NAME: example # Sets service name used for all middlewares, "service_undefined" by default + POWERTOOLS_TRACE_DISABLED: "false" # Explicitly disables tracing, default + POWERTOOLS_LOGGER_LOG_EVENT: "false" # Logs incoming event, default + POWERTOOLS_LOGGER_SAMPLE_RATE: "0" # Debug log sampling percentage, default POWERTOOLS_METRICS_NAMESPACE: "Example" # Metric Namespace - LOG_LEVEL: INFO # Log level (INFO, DEBUG, etc.) 
+ LOG_LEVEL: INFO # Log level for Logger (INFO, DEBUG, etc.), default Events: HelloWorld: Type: Api # More info about API Event Source: https://github.com/awslabs/serverless-application-model/blob/master/versions/2016-10-31.md#api diff --git a/python/example/tests/test_handler.py b/python/example/tests/test_handler.py index f5447a8a81a..909bb224c9a 100644 --- a/python/example/tests/test_handler.py +++ b/python/example/tests/test_handler.py @@ -80,6 +80,7 @@ def test_lambda_handler(apigw_event, mocker, capsys): assert data["message"] == "hello world" assert "location" in data assert "message" in ret["body"] + assert "async_http" in data # assess custom metric was flushed in stdout/logs assert "SuccessfulLocations" in stdout_one_string diff --git a/python/poetry.lock b/python/poetry.lock index 2ca7efeb460..07143b50fa7 100644 --- a/python/poetry.lock +++ b/python/poetry.lock @@ -1,10 +1,93 @@ +[[package]] +category = "dev" +description = "Async boto3 wrapper" +name = "aioboto3" +optional = false +python-versions = ">=3.6" +version = "8.0.3" + +[package.dependencies] +[package.dependencies.aiobotocore] +extras = ["boto3"] +version = "1.0.4" + +[package.extras] +s3cse = ["cryptography (>=2.3.1)"] + +[[package]] +category = "dev" +description = "Async client for aws services using botocore and aiohttp" +name = "aiobotocore" +optional = false +python-versions = ">=3.6" +version = "1.0.4" + +[package.dependencies] +aiohttp = ">=3.3.1" +aioitertools = ">=0.5.1" +botocore = ">=1.15.32,<1.15.33" +wrapt = ">=1.10.10" + +[package.dependencies.boto3] +optional = true +version = "1.12.32" + +[package.extras] +awscli = ["awscli (1.18.32)"] +boto3 = ["boto3 (1.12.32)"] + +[[package]] +category = "dev" +description = "Async http client/server framework (asyncio)" +name = "aiohttp" +optional = false +python-versions = ">=3.5.3" +version = "3.6.2" + +[package.dependencies] +async-timeout = ">=3.0,<4.0" +attrs = ">=17.3.0" +chardet = ">=2.0,<4.0" +multidict = ">=4.5,<5.0" +yarl = 
">=1.0,<2.0" + +[package.dependencies.idna-ssl] +python = "<3.7" +version = ">=1.0" + +[package.dependencies.typing-extensions] +python = "<3.7" +version = ">=3.6.5" + +[package.extras] +speedups = ["aiodns", "brotlipy", "cchardet"] + +[[package]] +category = "dev" +description = "itertools and builtins for AsyncIO and mixed iterables" +name = "aioitertools" +optional = false +python-versions = ">=3.6" +version = "0.7.0" + +[package.dependencies] +typing_extensions = ">=3.7" + [[package]] category = "dev" description = "A small Python module for determining appropriate platform-specific dirs, e.g. a \"user data dir\"." name = "appdirs" optional = false python-versions = "*" -version = "1.4.3" +version = "1.4.4" + +[[package]] +category = "dev" +description = "Timeout context manager for asyncio programs" +name = "async-timeout" +optional = false +python-versions = ">=3.5.3" +version = "3.0.1" [[package]] category = "dev" @@ -13,7 +96,7 @@ marker = "sys_platform == \"win32\"" name = "atomicwrites" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" -version = "1.3.0" +version = "1.4.0" [[package]] category = "dev" @@ -43,6 +126,21 @@ future = "*" jsonpickle = "*" wrapt = "*" +[[package]] +category = "dev" +description = "Security oriented static analyser for python code." +name = "bandit" +optional = false +python-versions = "*" +version = "1.6.2" + +[package.dependencies] +GitPython = ">=1.0.1" +PyYAML = ">=3.13" +colorama = ">=0.3.9" +six = ">=1.10.0" +stevedore = ">=1.20.0" + [[package]] category = "dev" description = "The uncompromising code formatter." 
@@ -63,13 +161,26 @@ typed-ast = ">=1.4.0" [package.extras] d = ["aiohttp (>=3.3.2)", "aiohttp-cors"] +[[package]] +category = "dev" +description = "The AWS SDK for Python" +name = "boto3" +optional = false +python-versions = "*" +version = "1.12.32" + +[package.dependencies] +botocore = ">=1.15.32,<1.16.0" +jmespath = ">=0.7.1,<1.0.0" +s3transfer = ">=0.3.0,<0.4.0" + [[package]] category = "main" description = "Low-level, data-driven core of boto 3." name = "botocore" optional = false python-versions = "*" -version = "1.15.41" +version = "1.15.32" [package.dependencies] docutils = ">=0.10,<0.16" @@ -80,6 +191,14 @@ python-dateutil = ">=2.1,<3.0.0" python = "<3.4.0 || >=3.5.0" version = ">=1.20,<1.26" +[[package]] +category = "dev" +description = "Python package for providing Mozilla's CA Bundle." +name = "certifi" +optional = false +python-versions = "*" +version = "2020.4.5.1" + [[package]] category = "dev" description = "Validate configuration and produce human readable error messages." @@ -88,22 +207,29 @@ optional = false python-versions = ">=3.6" version = "3.0.0" +[[package]] +category = "dev" +description = "Universal encoding detector for Python 2 and 3" +name = "chardet" +optional = false +python-versions = "*" +version = "3.0.4" + [[package]] category = "dev" description = "Composable command line interface toolkit" name = "click" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" -version = "7.1.1" +version = "7.1.2" [[package]] category = "dev" description = "Cross-platform colored terminal text." 
-marker = "sys_platform == \"win32\"" name = "colorama" optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" -version = "0.4.3" +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +version = "0.4.1" [[package]] category = "dev" @@ -137,14 +263,6 @@ optional = false python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*" version = "0.15.2" -[[package]] -category = "dev" -description = "Discover and load entry points from installed packages." -name = "entrypoints" -optional = false -python-versions = ">=2.7" -version = "0.3" - [[package]] category = "dev" description = "Removes commented-out code." @@ -174,17 +292,20 @@ version = "3.0.12" [[package]] category = "dev" -description = "the modular source code checker: pep8, pyflakes and co" +description = "the modular source code checker: pep8 pyflakes and co" name = "flake8" optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" -version = "3.7.9" +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,>=2.7" +version = "3.8.1" [package.dependencies] -entrypoints = ">=0.3.0,<0.4.0" mccabe = ">=0.6.0,<0.7.0" -pycodestyle = ">=2.5.0,<2.6.0" -pyflakes = ">=2.1.0,<2.2.0" +pycodestyle = ">=2.6.0a1,<2.7.0" +pyflakes = ">=2.2.0,<2.3.0" + +[package.dependencies.importlib-metadata] +python = "<3.8" +version = "*" [[package]] category = "dev" @@ -257,11 +378,11 @@ description = "Flake8 plugin to find commented out code" name = "flake8-eradicate" optional = false python-versions = ">=3.6,<4.0" -version = "0.2.4" +version = "0.3.0" [package.dependencies] -attrs = ">=18.2,<20.0" -eradicate = ">=0.2.1,<1.1.0" +attrs = "*" +eradicate = ">=1.0,<2.0" flake8 = ">=3.5,<4.0" [[package]] @@ -291,6 +412,17 @@ version = ">=4.3.5" [package.extras] test = ["pytest"] +[[package]] +category = "dev" +description = "Polyfill package for Flake8 plugins" +name = "flake8-polyfill" +optional = false +python-versions = "*" +version = "1.0.2" + +[package.dependencies] +flake8 = "*" + 
[[package]] category = "dev" description = "A flake8 extension that helps to make more readable variables names" @@ -310,17 +442,59 @@ optional = false python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*" version = "0.18.2" +[[package]] +category = "dev" +description = "Git Object Database" +name = "gitdb" +optional = false +python-versions = ">=3.4" +version = "4.0.5" + +[package.dependencies] +smmap = ">=3.0.1,<4" + +[[package]] +category = "dev" +description = "Python Git Library" +name = "gitpython" +optional = false +python-versions = ">=3.4" +version = "3.1.2" + +[package.dependencies] +gitdb = ">=4.0.1,<5" + [[package]] category = "dev" description = "File identification library for Python" name = "identify" optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,>=2.7" -version = "1.4.14" +version = "1.4.15" [package.extras] license = ["editdistance"] +[[package]] +category = "dev" +description = "Internationalized Domain Names in Applications (IDNA)" +name = "idna" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +version = "2.9" + +[[package]] +category = "dev" +description = "Patch ssl.match_hostname for Unicode(idna) domains support" +marker = "python_version < \"3.7\"" +name = "idna-ssl" +optional = false +python-versions = "*" +version = "1.1.0" + +[package.dependencies] +idna = ">=2.0" + [[package]] category = "main" description = "Read metadata from Python packages" @@ -343,7 +517,7 @@ marker = "python_version < \"3.7\"" name = "importlib-resources" optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,>=2.7" -version = "1.4.0" +version = "1.5.0" [package.dependencies] [package.dependencies.importlib-metadata] @@ -385,7 +559,7 @@ description = "Python library for serializing any arbitrary object graph into JS name = "jsonpickle" optional = false python-versions = ">=2.7" -version = "1.4" +version = "1.4.1" [package.dependencies] importlib-metadata = "*" @@ -410,16 +584,32 @@ 
MarkupSafe = ">=0.9.2" babel = ["babel"] lingua = ["lingua"] +[[package]] +category = "dev" +description = "Create Python CLI apps with little to no effort at all!" +name = "mando" +optional = false +python-versions = "*" +version = "0.6.4" + +[package.dependencies] +six = "*" + +[package.extras] +restructuredText = ["rst2ansi"] + [[package]] category = "dev" description = "Python implementation of Markdown." name = "markdown" optional = false python-versions = ">=3.5" -version = "3.2.1" +version = "3.2.2" [package.dependencies] -setuptools = ">=36" +[package.dependencies.importlib-metadata] +python = "<3.8" +version = "*" [package.extras] testing = ["coverage", "pyyaml"] @@ -448,6 +638,14 @@ optional = false python-versions = ">=3.5" version = "8.2.0" +[[package]] +category = "dev" +description = "multidict implementation" +name = "multidict" +optional = false +python-versions = ">=3.5" +version = "4.7.5" + [[package]] category = "dev" description = "Node.js virtual environment builder" @@ -476,6 +674,14 @@ optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" version = "0.8.0" +[[package]] +category = "dev" +description = "Python Build Reasonableness" +name = "pbr" +optional = false +python-versions = "*" +version = "5.4.5" + [[package]] category = "dev" description = "Auto-generate API documentation for Python projects." 
@@ -542,7 +748,7 @@ description = "Python style guide checker" name = "pycodestyle" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" -version = "2.5.0" +version = "2.6.0" [[package]] category = "dev" @@ -550,7 +756,7 @@ description = "passive checker of Python programs" name = "pyflakes" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" -version = "2.1.1" +version = "2.2.0" [[package]] category = "dev" @@ -566,7 +772,7 @@ description = "pytest: simple powerful testing with Python" name = "pytest" optional = false python-versions = ">=3.5" -version = "5.4.1" +version = "5.4.2" [package.dependencies] atomicwrites = ">=1.0" @@ -586,6 +792,20 @@ version = ">=0.12" checkqa-mypy = ["mypy (v0.761)"] testing = ["argcomplete", "hypothesis (>=3.56)", "mock", "nose", "requests", "xmlschema"] +[[package]] +category = "dev" +description = "Pytest support for asyncio." +name = "pytest-asyncio" +optional = false +python-versions = ">= 3.5" +version = "0.12.0" + +[package.dependencies] +pytest = ">=5.4.0" + +[package.extras] +testing = ["async_generator (>=1.3)", "coverage", "hypothesis (>=5.7.1)"] + [[package]] category = "dev" description = "Pytest plugin for measuring coverage." @@ -634,13 +854,56 @@ optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" version = "5.3.1" +[[package]] +category = "dev" +description = "Code Metrics in Python" +name = "radon" +optional = false +python-versions = "*" +version = "4.1.0" + +[package.dependencies] +colorama = "0.4.1" +flake8-polyfill = "*" +future = "*" +mando = ">=0.6,<0.7" + [[package]] category = "dev" description = "Alternative regular expression module, to replace re." name = "regex" optional = false python-versions = "*" -version = "2020.4.4" +version = "2020.5.7" + +[[package]] +category = "dev" +description = "Python HTTP for Humans." 
+name = "requests" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" +version = "2.23.0" + +[package.dependencies] +certifi = ">=2017.4.17" +chardet = ">=3.0.2,<4" +idna = ">=2.5,<3" +urllib3 = ">=1.21.1,<1.25.0 || >1.25.0,<1.25.1 || >1.25.1,<1.26" + +[package.extras] +security = ["pyOpenSSL (>=0.14)", "cryptography (>=1.3.4)"] +socks = ["PySocks (>=1.5.6,<1.5.7 || >1.5.7)", "win-inet-pton"] + +[[package]] +category = "dev" +description = "An Amazon S3 Transfer Manager" +name = "s3transfer" +optional = false +python-versions = "*" +version = "0.3.3" + +[package.dependencies] +botocore = ">=1.12.36,<2.0a.0" [[package]] category = "main" @@ -650,6 +913,26 @@ optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*" version = "1.14.0" +[[package]] +category = "dev" +description = "A pure Python implementation of a sliding window memory map manager" +name = "smmap" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +version = "3.0.4" + +[[package]] +category = "dev" +description = "Manage dynamic plugins for Python applications" +name = "stevedore" +optional = false +python-versions = "*" +version = "1.32.0" + +[package.dependencies] +pbr = ">=2.0.0,<2.1.0 || >2.1.0" +six = ">=1.10.0" + [[package]] category = "dev" description = "A collection of helpers and mock objects for unit tests and doc tests." @@ -679,10 +962,17 @@ optional = false python-versions = "*" version = "1.4.1" +[[package]] +category = "dev" +description = "Backported and Experimental Type Hints for Python 3.5+" +name = "typing-extensions" +optional = false +python-versions = "*" +version = "3.7.4.2" + [[package]] category = "main" description = "HTTP library with thread-safe connection pooling, file post, and more." 
-marker = "python_version != \"3.4\"" name = "urllib3" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, <4" @@ -699,7 +989,7 @@ description = "Virtual Python Environment builder" name = "virtualenv" optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,>=2.7" -version = "20.0.18" +version = "20.0.20" [package.dependencies] appdirs = ">=1.4.3,<2" @@ -716,8 +1006,8 @@ python = "<3.7" version = ">=1.0,<2" [package.extras] -docs = ["sphinx (>=2.0.0,<3)", "sphinx-argparse (>=0.2.5,<1)", "sphinx-rtd-theme (>=0.4.3,<1)", "towncrier (>=19.9.0rc1)", "proselint (>=0.10.2,<1)"] -testing = ["pytest (>=4.0.0,<6)", "coverage (>=4.5.1,<6)", "pytest-mock (>=2.0.0,<3)", "pytest-env (>=0.6.2,<1)", "pytest-timeout (>=1.3.4,<2)", "packaging (>=20.0)", "xonsh (>=0.9.16,<1)"] +docs = ["sphinx (>=3)", "sphinx-argparse (>=0.2.5)", "sphinx-rtd-theme (>=0.4.3)", "towncrier (>=19.9.0rc1)", "proselint (>=0.10.2)"] +testing = ["pytest (>=4)", "coverage (>=5)", "coverage-enable-subprocess (>=1)", "pytest-xdist (>=1.31.0)", "pytest-mock (>=2)", "pytest-env (>=0.6.2)", "pytest-randomly (>=1)", "pytest-timeout", "packaging (>=20.0)", "xonsh (>=0.9.16)"] [[package]] category = "dev" @@ -735,6 +1025,31 @@ optional = false python-versions = "*" version = "1.12.1" +[[package]] +category = "dev" +description = "Monitor code metrics for Python on your CI server" +name = "xenon" +optional = false +python-versions = "*" +version = "0.7.0" + +[package.dependencies] +PyYAML = ">=4.2b1,<6.0" +radon = ">=4,<5" +requests = ">=2.0,<3.0" + +[[package]] +category = "dev" +description = "Yet another URL library" +name = "yarl" +optional = false +python-versions = ">=3.5" +version = "1.4.2" + +[package.dependencies] +idna = ">=2.0" +multidict = ">=4.0" + [[package]] category = "main" description = "Backport of pathlib-compatible object wrapper for zip files" @@ -749,17 +1064,47 @@ docs = ["sphinx", "jaraco.packaging (>=3.2)", "rst.linker (>=1.9)"] testing = 
["jaraco.itertools", "func-timeout"] [metadata] -content-hash = "227b1d21877d1391dc50a8733d507226afd95471e77554328f9b2a3c2403b7fe" +content-hash = "525f4150dc764e0fa82b790ada43514e328c26e0e3e90e26103b038ce0bd896e" python-versions = "^3.6" [metadata.files] +aioboto3 = [ + {file = "aioboto3-8.0.3-py2.py3-none-any.whl", hash = "sha256:b3fd112406dac77cbc4ec6457bd53bff6fb9ef13d58e440a66bd60e405d229ef"}, + {file = "aioboto3-8.0.3.tar.gz", hash = "sha256:1650a9c478d2d11cf7d48a2b72754b27713154675084d0c837c8d99ff8b070fc"}, +] +aiobotocore = [ + {file = "aiobotocore-1.0.4-py3-none-any.whl", hash = "sha256:1e89ef97c52eb77d89c7c4a9130cab162ae3b89d2709c6e45da30824163ed8d3"}, + {file = "aiobotocore-1.0.4.tar.gz", hash = "sha256:4103d90b9e162176203dc5295124b15f56c37eee0ddbcddc6929760443714ff8"}, +] +aiohttp = [ + {file = "aiohttp-3.6.2-cp35-cp35m-macosx_10_13_x86_64.whl", hash = "sha256:1e984191d1ec186881ffaed4581092ba04f7c61582a177b187d3a2f07ed9719e"}, + {file = "aiohttp-3.6.2-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:50aaad128e6ac62e7bf7bd1f0c0a24bc968a0c0590a726d5a955af193544bcec"}, + {file = "aiohttp-3.6.2-cp36-cp36m-macosx_10_13_x86_64.whl", hash = "sha256:65f31b622af739a802ca6fd1a3076fd0ae523f8485c52924a89561ba10c49b48"}, + {file = "aiohttp-3.6.2-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:ae55bac364c405caa23a4f2d6cfecc6a0daada500274ffca4a9230e7129eac59"}, + {file = "aiohttp-3.6.2-cp36-cp36m-win32.whl", hash = "sha256:344c780466b73095a72c616fac5ea9c4665add7fc129f285fbdbca3cccf4612a"}, + {file = "aiohttp-3.6.2-cp36-cp36m-win_amd64.whl", hash = "sha256:4c6efd824d44ae697814a2a85604d8e992b875462c6655da161ff18fd4f29f17"}, + {file = "aiohttp-3.6.2-cp37-cp37m-macosx_10_13_x86_64.whl", hash = "sha256:2f4d1a4fdce595c947162333353d4a44952a724fba9ca3205a3df99a33d1307a"}, + {file = "aiohttp-3.6.2-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:6206a135d072f88da3e71cc501c59d5abffa9d0bb43269a6dcd28d66bfafdbdd"}, + {file = "aiohttp-3.6.2-cp37-cp37m-win32.whl", hash = 
"sha256:b778ce0c909a2653741cb4b1ac7015b5c130ab9c897611df43ae6a58523cb965"}, + {file = "aiohttp-3.6.2-cp37-cp37m-win_amd64.whl", hash = "sha256:32e5f3b7e511aa850829fbe5aa32eb455e5534eaa4b1ce93231d00e2f76e5654"}, + {file = "aiohttp-3.6.2-py3-none-any.whl", hash = "sha256:460bd4237d2dbecc3b5ed57e122992f60188afe46e7319116da5eb8a9dfedba4"}, + {file = "aiohttp-3.6.2.tar.gz", hash = "sha256:259ab809ff0727d0e834ac5e8a283dc5e3e0ecc30c4d80b3cd17a4139ce1f326"}, +] +aioitertools = [ + {file = "aioitertools-0.7.0-py3-none-any.whl", hash = "sha256:e931a2f0dcabd4a8446b5cc2fc71b8bb14716e6adf37728a70869213f1f741cd"}, + {file = "aioitertools-0.7.0.tar.gz", hash = "sha256:341cb05a0903177ef1b73d4cc12c92aee18e81c364e0138f4efc7ec3c47b8177"}, +] appdirs = [ - {file = "appdirs-1.4.3-py2.py3-none-any.whl", hash = "sha256:d8b24664561d0d34ddfaec54636d502d7cea6e29c3eaf68f3df6180863e2166e"}, - {file = "appdirs-1.4.3.tar.gz", hash = "sha256:9e5896d1372858f8dd3344faf4e5014d21849c756c8d5701f78f8a103b372d92"}, + {file = "appdirs-1.4.4-py2.py3-none-any.whl", hash = "sha256:a841dacd6b99318a741b166adb07e19ee71a274450e68237b4650ca1055ab128"}, + {file = "appdirs-1.4.4.tar.gz", hash = "sha256:7d5d0167b2b1ba821647616af46a749d1c653740dd0d2415100fe26e27afdf41"}, +] +async-timeout = [ + {file = "async-timeout-3.0.1.tar.gz", hash = "sha256:0c3c816a028d47f659d6ff5c745cb2acf1f966da1fe5c19c77a70282b25f4c5f"}, + {file = "async_timeout-3.0.1-py3-none-any.whl", hash = "sha256:4291ca197d287d274d0b6cb5d6f8f8f82d434ed288f962539ff18cc9012f9ea3"}, ] atomicwrites = [ - {file = "atomicwrites-1.3.0-py2.py3-none-any.whl", hash = "sha256:03472c30eb2c5d1ba9227e4c2ca66ab8287fbfbbda3888aa93dc2e28fc6811b4"}, - {file = "atomicwrites-1.3.0.tar.gz", hash = "sha256:75a9445bac02d8d058d5e1fe689654ba5a6556a1dfd8ce6ec55a0ed79866cfa6"}, + {file = "atomicwrites-1.4.0-py2.py3-none-any.whl", hash = "sha256:6d1784dea7c0c8d4a5172b6c620f40b6e4cbfdf96d783691f2e1302a7b88e197"}, + {file = "atomicwrites-1.4.0.tar.gz", hash = 
"sha256:ae70396ad1a434f9c7046fd2dd196fc04b12f9e91ffb859164193be8b6168a7a"}, ] attrs = [ {file = "attrs-19.3.0-py2.py3-none-any.whl", hash = "sha256:08a96c641c3a74e44eb59afb61a24f2cb9f4d7188748e76ba4bb5edfa3cb7d1c"}, @@ -769,25 +1114,41 @@ aws-xray-sdk = [ {file = "aws-xray-sdk-2.5.0.tar.gz", hash = "sha256:8dfa785305fc8dc720d8d4c2ec6a58e85e467ddc3a53b1506a2ed8b5801c8fc7"}, {file = "aws_xray_sdk-2.5.0-py2.py3-none-any.whl", hash = "sha256:ae57baeb175993bdbf31f83843e2c0958dd5aa8cb691ab5628aafb6ccc78a0fc"}, ] +bandit = [ + {file = "bandit-1.6.2-py2.py3-none-any.whl", hash = "sha256:336620e220cf2d3115877685e264477ff9d9abaeb0afe3dc7264f55fa17a3952"}, + {file = "bandit-1.6.2.tar.gz", hash = "sha256:41e75315853507aa145d62a78a2a6c5e3240fe14ee7c601459d0df9418196065"}, +] black = [ {file = "black-19.10b0-py36-none-any.whl", hash = "sha256:1b30e59be925fafc1ee4565e5e08abef6b03fe455102883820fe5ee2e4734e0b"}, {file = "black-19.10b0.tar.gz", hash = "sha256:c2edb73a08e9e0e6f65a0e6af18b059b8b1cdd5bef997d7a0b181df93dc81539"}, ] +boto3 = [ + {file = "boto3-1.12.32-py2.py3-none-any.whl", hash = "sha256:57398de1b5e074e715c866441e69f90c9468959d5743a021d8aeed04fbaa1078"}, + {file = "boto3-1.12.32.tar.gz", hash = "sha256:60ac1124597231ed36a7320547cd0d16a001bb92333ab30ad20514f77e585225"}, +] botocore = [ - {file = "botocore-1.15.41-py2.py3-none-any.whl", hash = "sha256:b12a5b642aa210a72d84204da18618276eeae052fbff58958f57d28ef3193034"}, - {file = "botocore-1.15.41.tar.gz", hash = "sha256:a45a65ba036bc980decfc3ce6c2688a2d5fffd76e4b02ea4d59e63ff0f6896d4"}, + {file = "botocore-1.15.32-py2.py3-none-any.whl", hash = "sha256:a963af564d94107787ff3d2c534e8b7aed7f12e014cdd609f8fcb17bf9d9b19a"}, + {file = "botocore-1.15.32.tar.gz", hash = "sha256:3ea89601ee452b65084005278bd832be854cfde5166685dcb14b6c8f19d3fc6d"}, +] +certifi = [ + {file = "certifi-2020.4.5.1-py2.py3-none-any.whl", hash = "sha256:1d987a998c75633c40847cc966fcf5904906c920a7f17ef374f5aa4282abd304"}, + {file = "certifi-2020.4.5.1.tar.gz", 
hash = "sha256:51fcb31174be6e6664c5f69e3e1691a2d72a1a12e90f872cbdb1567eb47b6519"}, ] cfgv = [ {file = "cfgv-3.0.0-py2.py3-none-any.whl", hash = "sha256:f22b426ed59cd2ab2b54ff96608d846c33dfb8766a67f0b4a6ce130ce244414f"}, {file = "cfgv-3.0.0.tar.gz", hash = "sha256:04b093b14ddf9fd4d17c53ebfd55582d27b76ed30050193c14e560770c5360eb"}, ] +chardet = [ + {file = "chardet-3.0.4-py2.py3-none-any.whl", hash = "sha256:fc323ffcaeaed0e0a02bf4d117757b98aed530d9ed4531e3e15460124c106691"}, + {file = "chardet-3.0.4.tar.gz", hash = "sha256:84ab92ed1c4d4f16916e05906b6b75a6c0fb5db821cc65e70cbd64a3e2a5eaae"}, +] click = [ - {file = "click-7.1.1-py2.py3-none-any.whl", hash = "sha256:e345d143d80bf5ee7534056164e5e112ea5e22716bbb1ce727941f4c8b471b9a"}, - {file = "click-7.1.1.tar.gz", hash = "sha256:8a18b4ea89d8820c5d0c7da8a64b2c324b4dabb695804dbfea19b9be9d88c0cc"}, + {file = "click-7.1.2-py2.py3-none-any.whl", hash = "sha256:dacca89f4bfadd5de3d7489b7c8a566eee0d3676333fbb50030263894c38c0dc"}, + {file = "click-7.1.2.tar.gz", hash = "sha256:d2b5255c7c6349bc1bd1e59e08cd12acbbd63ce649f2588755783aa94dfb6b1a"}, ] colorama = [ - {file = "colorama-0.4.3-py2.py3-none-any.whl", hash = "sha256:7d73d2a99753107a36ac6b455ee49046802e59d9d076ef8e47b61499fa29afff"}, - {file = "colorama-0.4.3.tar.gz", hash = "sha256:e96da0d330793e2cb9485e9ddfd918d456036c7149416295932478192f4436a1"}, + {file = "colorama-0.4.1-py2.py3-none-any.whl", hash = "sha256:f8ac84de7840f5b9c4e3347b3c1eaa50f7e49c2b07596221daec5edaabbd7c48"}, + {file = "colorama-0.4.1.tar.gz", hash = "sha256:05eed71e2e327246ad6b38c540c4a3117230b19679b875190486ddd2d721422d"}, ] coverage = [ {file = "coverage-5.1-cp27-cp27m-macosx_10_12_x86_64.whl", hash = "sha256:0cb4be7e784dcdc050fc58ef05b71aa8e89b7e6636b99967fadbdba694cf2b65"}, @@ -830,10 +1191,6 @@ docutils = [ {file = "docutils-0.15.2-py3-none-any.whl", hash = "sha256:6c4f696463b79f1fb8ba0c594b63840ebd41f059e92b31957c46b74a4599b6d0"}, {file = "docutils-0.15.2.tar.gz", hash = 
"sha256:a2aeea129088da402665e92e0b25b04b073c04b2dce4ab65caaa38b7ce2e1a99"}, ] -entrypoints = [ - {file = "entrypoints-0.3-py2.py3-none-any.whl", hash = "sha256:589f874b313739ad35be6e0cd7efde2a4e9b6fea91edcc34e58ecbb8dbe56d19"}, - {file = "entrypoints-0.3.tar.gz", hash = "sha256:c70dd71abe5a8c85e55e12c19bd91ccfeec11a6e99044204511f9ed547d48451"}, -] eradicate = [ {file = "eradicate-1.0.tar.gz", hash = "sha256:4ffda82aae6fd49dfffa777a857cb758d77502a1f2e0f54c9ac5155a39d2d01a"}, ] @@ -846,8 +1203,8 @@ filelock = [ {file = "filelock-3.0.12.tar.gz", hash = "sha256:18d82244ee114f543149c66a6e0c14e9c4f8a1044b5cdaadd0f82159d6a6ff59"}, ] flake8 = [ - {file = "flake8-3.7.9-py2.py3-none-any.whl", hash = "sha256:49356e766643ad15072a789a20915d3c91dc89fd313ccd71802303fd67e4deca"}, - {file = "flake8-3.7.9.tar.gz", hash = "sha256:45681a117ecc81e870cbf1262835ae4af5e7a8b08e40b944a8a6e6b895914cfb"}, + {file = "flake8-3.8.1-py2.py3-none-any.whl", hash = "sha256:6c1193b0c3f853ef763969238f6c81e9e63ace9d024518edc020d5f1d6d93195"}, + {file = "flake8-3.8.1.tar.gz", hash = "sha256:ea6623797bf9a52f4c9577d780da0bb17d65f870213f7b5bcc9fca82540c31d5"}, ] flake8-black = [ {file = "flake8-black-0.1.1.tar.gz", hash = "sha256:56f85aaa5a83f06a3f61e680e3b50f156b5e557ebdcb964d823d86f4c108b0c8"}, @@ -868,8 +1225,8 @@ flake8-debugger = [ {file = "flake8-debugger-3.2.1.tar.gz", hash = "sha256:712d7c1ff69ddf3f0130e94cc88c2519e720760bce45e8c330bfdcb61ab4090d"}, ] flake8-eradicate = [ - {file = "flake8-eradicate-0.2.4.tar.gz", hash = "sha256:b693e9dfe6da42dbc7fb75af8486495b9414d1ab0372d15efcf85a2ac85fd368"}, - {file = "flake8_eradicate-0.2.4-py3-none-any.whl", hash = "sha256:b0bcdbb70a489fb799f9ee11fefc57bd0d3251e1ea9bdc5bf454443cccfd620c"}, + {file = "flake8-eradicate-0.3.0.tar.gz", hash = "sha256:d0b3d283d85079917acbfe39b9d637385cd82cba3ae3d76c1278c07ddcf0d9b9"}, + {file = "flake8_eradicate-0.3.0-py3-none-any.whl", hash = "sha256:e8b32b32300bfb407fe7ef74667c8d2d3a6a81bdf6f09c14a7bcc82b7b870f8b"}, ] 
flake8-fixme = [ {file = "flake8-fixme-1.1.1.tar.gz", hash = "sha256:50cade07d27a4c30d4f12351478df87339e67640c83041b664724bda6d16f33a"}, @@ -879,23 +1236,42 @@ flake8-isort = [ {file = "flake8-isort-2.9.1.tar.gz", hash = "sha256:0d34b266080e1748412b203a1690792245011706b1858c203476b43460bf3652"}, {file = "flake8_isort-2.9.1-py2.py3-none-any.whl", hash = "sha256:a77df28778a1ac6ac4153339ebd9d252935f3ed4379872d4f8b84986296d8cc3"}, ] +flake8-polyfill = [ + {file = "flake8-polyfill-1.0.2.tar.gz", hash = "sha256:e44b087597f6da52ec6393a709e7108b2905317d0c0b744cdca6208e670d8eda"}, + {file = "flake8_polyfill-1.0.2-py2.py3-none-any.whl", hash = "sha256:12be6a34ee3ab795b19ca73505e7b55826d5f6ad7230d31b18e106400169b9e9"}, +] flake8-variables-names = [ {file = "flake8_variables_names-0.0.3.tar.gz", hash = "sha256:d109f5a8fe8c20d64e165287330f1b0160b442d7f96e1527124ba1b63c438347"}, ] future = [ {file = "future-0.18.2.tar.gz", hash = "sha256:b1bead90b70cf6ec3f0710ae53a525360fa360d306a86583adc6bf83a4db537d"}, ] +gitdb = [ + {file = "gitdb-4.0.5-py3-none-any.whl", hash = "sha256:91f36bfb1ab7949b3b40e23736db18231bf7593edada2ba5c3a174a7b23657ac"}, + {file = "gitdb-4.0.5.tar.gz", hash = "sha256:c9e1f2d0db7ddb9a704c2a0217be31214e91a4fe1dea1efad19ae42ba0c285c9"}, +] +gitpython = [ + {file = "GitPython-3.1.2-py3-none-any.whl", hash = "sha256:da3b2cf819974789da34f95ac218ef99f515a928685db141327c09b73dd69c09"}, + {file = "GitPython-3.1.2.tar.gz", hash = "sha256:864a47472548f3ba716ca202e034c1900f197c0fb3a08f641c20c3cafd15ed94"}, +] identify = [ - {file = "identify-1.4.14-py2.py3-none-any.whl", hash = "sha256:2bb8760d97d8df4408f4e805883dad26a2d076f04be92a10a3e43f09c6060742"}, - {file = "identify-1.4.14.tar.gz", hash = "sha256:faffea0fd8ec86bb146ac538ac350ed0c73908326426d387eded0bcc9d077522"}, + {file = "identify-1.4.15-py2.py3-none-any.whl", hash = "sha256:88ed90632023e52a6495749c6732e61e08ec9f4f04e95484a5c37b9caf40283c"}, + {file = "identify-1.4.15.tar.gz", hash = 
"sha256:23c18d97bb50e05be1a54917ee45cc61d57cb96aedc06aabb2b02331edf0dbf0"}, +] +idna = [ + {file = "idna-2.9-py2.py3-none-any.whl", hash = "sha256:a068a21ceac8a4d63dbfd964670474107f541babbd2250d61922f029858365fa"}, + {file = "idna-2.9.tar.gz", hash = "sha256:7588d1c14ae4c77d74036e8c22ff447b26d0fde8f007354fd48a7814db15b7cb"}, +] +idna-ssl = [ + {file = "idna-ssl-1.1.0.tar.gz", hash = "sha256:a933e3bb13da54383f9e8f35dc4f9cb9eb9b3b78c6b36f311254d6d0d92c6c7c"}, ] importlib-metadata = [ {file = "importlib_metadata-1.6.0-py2.py3-none-any.whl", hash = "sha256:2a688cbaa90e0cc587f1df48bdc97a6eadccdcd9c35fb3f976a09e3b5016d90f"}, {file = "importlib_metadata-1.6.0.tar.gz", hash = "sha256:34513a8a0c4962bc66d35b359558fd8a5e10cd472d37aec5f66858addef32c1e"}, ] importlib-resources = [ - {file = "importlib_resources-1.4.0-py2.py3-none-any.whl", hash = "sha256:dd98ceeef3f5ad2ef4cc287b8586da4ebad15877f351e9688987ad663a0a29b8"}, - {file = "importlib_resources-1.4.0.tar.gz", hash = "sha256:4019b6a9082d8ada9def02bece4a76b131518866790d58fdda0b5f8c603b36c2"}, + {file = "importlib_resources-1.5.0-py2.py3-none-any.whl", hash = "sha256:85dc0b9b325ff78c8bef2e4ff42616094e16b98ebd5e3b50fe7e2f0bbcdcde49"}, + {file = "importlib_resources-1.5.0.tar.gz", hash = "sha256:6f87df66833e1942667108628ec48900e02a4ab4ad850e25fbf07cb17cf734ca"}, ] isort = [ {file = "isort-4.3.21-py2.py3-none-any.whl", hash = "sha256:6e811fcb295968434526407adb8796944f1988c5b65e8139058f2014cbe100fd"}, @@ -906,16 +1282,20 @@ jmespath = [ {file = "jmespath-0.9.5.tar.gz", hash = "sha256:cca55c8d153173e21baa59983015ad0daf603f9cb799904ff057bfb8ff8dc2d9"}, ] jsonpickle = [ - {file = "jsonpickle-1.4-py2.py3-none-any.whl", hash = "sha256:3d71018794242f6b1640f779a94a192500f73ceed9ef579b4f01799171ec3fb3"}, - {file = "jsonpickle-1.4.tar.gz", hash = "sha256:e8ca6ec3f379f5eee6e11380d48db220aacc282b480dea46b11cc6f6009d1cdb"}, + {file = "jsonpickle-1.4.1-py2.py3-none-any.whl", hash = 
"sha256:8919c166bac0574e3d74425c7559434062002d9dfc0ac2afa6dc746ba4a19439"}, + {file = "jsonpickle-1.4.1.tar.gz", hash = "sha256:e8d4b7cd0bd6826001a74377df1079a76ad8bae0f909282de2554164c837c8ba"}, ] mako = [ {file = "Mako-1.1.2-py2.py3-none-any.whl", hash = "sha256:8e8b53c71c7e59f3de716b6832c4e401d903af574f6962edbbbf6ecc2a5fe6c9"}, {file = "Mako-1.1.2.tar.gz", hash = "sha256:3139c5d64aa5d175dbafb95027057128b5fbd05a40c53999f3905ceb53366d9d"}, ] +mando = [ + {file = "mando-0.6.4-py2.py3-none-any.whl", hash = "sha256:4ce09faec7e5192ffc3c57830e26acba0fd6cd11e1ee81af0d4df0657463bd1c"}, + {file = "mando-0.6.4.tar.gz", hash = "sha256:79feb19dc0f097daa64a1243db578e7674909b75f88ac2220f1c065c10a0d960"}, +] markdown = [ - {file = "Markdown-3.2.1-py2.py3-none-any.whl", hash = "sha256:e4795399163109457d4c5af2183fbe6b60326c17cfdf25ce6e7474c6624f725d"}, - {file = "Markdown-3.2.1.tar.gz", hash = "sha256:90fee683eeabe1a92e149f7ba74e5ccdc81cd397bd6c516d93a8da0ef90b6902"}, + {file = "Markdown-3.2.2-py3-none-any.whl", hash = "sha256:c467cd6233885534bf0fe96e62e3cf46cfc1605112356c4f9981512b8174de59"}, + {file = "Markdown-3.2.2.tar.gz", hash = "sha256:1fafe3f1ecabfb514a5285fca634a53c1b32a81cb0feb154264d55bf2ff22c17"}, ] markupsafe = [ {file = "MarkupSafe-1.1.1-cp27-cp27m-macosx_10_6_intel.whl", hash = "sha256:09027a7803a62ca78792ad89403b1b7a73a01c8cb65909cd876f7fcebd79b161"}, @@ -960,6 +1340,25 @@ more-itertools = [ {file = "more-itertools-8.2.0.tar.gz", hash = "sha256:b1ddb932186d8a6ac451e1d95844b382f55e12686d51ca0c68b6f61f2ab7a507"}, {file = "more_itertools-8.2.0-py3-none-any.whl", hash = "sha256:5dd8bcf33e5f9513ffa06d5ad33d78f31e1931ac9a18f33d37e77a180d393a7c"}, ] +multidict = [ + {file = "multidict-4.7.5-cp35-cp35m-macosx_10_13_x86_64.whl", hash = "sha256:fc3b4adc2ee8474cb3cd2a155305d5f8eda0a9c91320f83e55748e1fcb68f8e3"}, + {file = "multidict-4.7.5-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:42f56542166040b4474c0c608ed051732033cd821126493cf25b6c276df7dd35"}, + {file = 
"multidict-4.7.5-cp35-cp35m-win32.whl", hash = "sha256:7774e9f6c9af3f12f296131453f7b81dabb7ebdb948483362f5afcaac8a826f1"}, + {file = "multidict-4.7.5-cp35-cp35m-win_amd64.whl", hash = "sha256:c2c37185fb0af79d5c117b8d2764f4321eeb12ba8c141a95d0aa8c2c1d0a11dd"}, + {file = "multidict-4.7.5-cp36-cp36m-macosx_10_13_x86_64.whl", hash = "sha256:e439c9a10a95cb32abd708bb8be83b2134fa93790a4fb0535ca36db3dda94d20"}, + {file = "multidict-4.7.5-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:85cb26c38c96f76b7ff38b86c9d560dea10cf3459bb5f4caf72fc1bb932c7136"}, + {file = "multidict-4.7.5-cp36-cp36m-win32.whl", hash = "sha256:620b37c3fea181dab09267cd5a84b0f23fa043beb8bc50d8474dd9694de1fa6e"}, + {file = "multidict-4.7.5-cp36-cp36m-win_amd64.whl", hash = "sha256:6e6fef114741c4d7ca46da8449038ec8b1e880bbe68674c01ceeb1ac8a648e78"}, + {file = "multidict-4.7.5-cp37-cp37m-macosx_10_13_x86_64.whl", hash = "sha256:a326f4240123a2ac66bb163eeba99578e9d63a8654a59f4688a79198f9aa10f8"}, + {file = "multidict-4.7.5-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:dc561313279f9d05a3d0ffa89cd15ae477528ea37aa9795c4654588a3287a9ab"}, + {file = "multidict-4.7.5-cp37-cp37m-win32.whl", hash = "sha256:4b7df040fb5fe826d689204f9b544af469593fb3ff3a069a6ad3409f742f5928"}, + {file = "multidict-4.7.5-cp37-cp37m-win_amd64.whl", hash = "sha256:317f96bc0950d249e96d8d29ab556d01dd38888fbe68324f46fd834b430169f1"}, + {file = "multidict-4.7.5-cp38-cp38-macosx_10_13_x86_64.whl", hash = "sha256:b51249fdd2923739cd3efc95a3d6c363b67bbf779208e9f37fd5e68540d1a4d4"}, + {file = "multidict-4.7.5-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:ae402f43604e3b2bc41e8ea8b8526c7fa7139ed76b0d64fc48e28125925275b2"}, + {file = "multidict-4.7.5-cp38-cp38-win32.whl", hash = "sha256:bb519becc46275c594410c6c28a8a0adc66fe24fef154a9addea54c1adb006f5"}, + {file = "multidict-4.7.5-cp38-cp38-win_amd64.whl", hash = "sha256:544fae9261232a97102e27a926019100a9db75bec7b37feedd74b3aa82f29969"}, + {file = "multidict-4.7.5.tar.gz", hash = 
"sha256:aee283c49601fa4c13adc64c09c978838a7e812f85377ae130a24d7198c0331e"}, +] nodeenv = [ {file = "nodeenv-1.3.5-py2.py3-none-any.whl", hash = "sha256:5b2438f2e42af54ca968dd1b374d14a1194848955187b0e5e4be1f73813a5212"}, ] @@ -971,6 +1370,10 @@ pathspec = [ {file = "pathspec-0.8.0-py2.py3-none-any.whl", hash = "sha256:7d91249d21749788d07a2d0f94147accd8f845507400749ea19c1ec9054a12b0"}, {file = "pathspec-0.8.0.tar.gz", hash = "sha256:da45173eb3a6f2a5a487efba21f050af2b41948be6ab52b6a1e3ff22bb8b7061"}, ] +pbr = [ + {file = "pbr-5.4.5-py2.py3-none-any.whl", hash = "sha256:579170e23f8e0c2f24b0de612f71f648eccb79fb1322c814ae6b3c07b5ba23e8"}, + {file = "pbr-5.4.5.tar.gz", hash = "sha256:07f558fece33b05caf857474a366dfcc00562bca13dd8b47b2b3e22d9f9bf55c"}, +] pdoc3 = [ {file = "pdoc3-0.7.5.tar.gz", hash = "sha256:ebca75b7fcf23f3b4320abe23339834d3f08c28517718e9d29e555fc38eeb33c"}, ] @@ -987,20 +1390,23 @@ py = [ {file = "py-1.8.1.tar.gz", hash = "sha256:5e27081401262157467ad6e7f851b7aa402c5852dbcb3dae06768434de5752aa"}, ] pycodestyle = [ - {file = "pycodestyle-2.5.0-py2.py3-none-any.whl", hash = "sha256:95a2219d12372f05704562a14ec30bc76b05a5b297b21a5dfe3f6fac3491ae56"}, - {file = "pycodestyle-2.5.0.tar.gz", hash = "sha256:e40a936c9a450ad81df37f549d676d127b1b66000a6c500caa2b085bc0ca976c"}, + {file = "pycodestyle-2.6.0-py2.py3-none-any.whl", hash = "sha256:2295e7b2f6b5bd100585ebcb1f616591b652db8a741695b3d8f5d28bdc934367"}, + {file = "pycodestyle-2.6.0.tar.gz", hash = "sha256:c58a7d2815e0e8d7972bf1803331fb0152f867bd89adf8a01dfd55085434192e"}, ] pyflakes = [ - {file = "pyflakes-2.1.1-py2.py3-none-any.whl", hash = "sha256:17dbeb2e3f4d772725c777fabc446d5634d1038f234e77343108ce445ea69ce0"}, - {file = "pyflakes-2.1.1.tar.gz", hash = "sha256:d976835886f8c5b31d47970ed689944a0262b5f3afa00a5a7b4dc81e5449f8a2"}, + {file = "pyflakes-2.2.0-py2.py3-none-any.whl", hash = "sha256:0d94e0e05a19e57a99444b6ddcf9a6eb2e5c68d3ca1e98e90707af8152c90a92"}, + {file = "pyflakes-2.2.0.tar.gz", hash = 
"sha256:35b2d75ee967ea93b55750aa9edbbf72813e06a66ba54438df2cfac9e3c27fc8"}, ] pyparsing = [ {file = "pyparsing-2.4.7-py2.py3-none-any.whl", hash = "sha256:ef9d7589ef3c200abe66653d3f1ab1033c3c419ae9b9bdb1240a85b024efc88b"}, {file = "pyparsing-2.4.7.tar.gz", hash = "sha256:c203ec8783bf771a155b207279b9bccb8dea02d8f0c9e5f8ead507bc3246ecc1"}, ] pytest = [ - {file = "pytest-5.4.1-py3-none-any.whl", hash = "sha256:0e5b30f5cb04e887b91b1ee519fa3d89049595f428c1db76e73bd7f17b09b172"}, - {file = "pytest-5.4.1.tar.gz", hash = "sha256:84dde37075b8805f3d1f392cc47e38a0e59518fb46a431cfdaf7cf1ce805f970"}, + {file = "pytest-5.4.2-py3-none-any.whl", hash = "sha256:95c710d0a72d91c13fae35dce195633c929c3792f54125919847fdcdf7caa0d3"}, + {file = "pytest-5.4.2.tar.gz", hash = "sha256:eb2b5e935f6a019317e455b6da83dd8650ac9ffd2ee73a7b657a30873d67a698"}, +] +pytest-asyncio = [ + {file = "pytest-asyncio-0.12.0.tar.gz", hash = "sha256:475bd2f3dc0bc11d2463656b3cbaafdbec5a47b47508ea0b329ee693040eebd2"}, ] pytest-cov = [ {file = "pytest-cov-2.8.1.tar.gz", hash = "sha256:cc6742d8bac45070217169f5f72ceee1e0e55b0221f54bcf24845972d3a47f2b"}, @@ -1027,33 +1433,53 @@ pyyaml = [ {file = "PyYAML-5.3.1-cp38-cp38-win_amd64.whl", hash = "sha256:95f71d2af0ff4227885f7a6605c37fd53d3a106fcab511b8860ecca9fcf400ee"}, {file = "PyYAML-5.3.1.tar.gz", hash = "sha256:b8eac752c5e14d3eca0e6dd9199cd627518cb5ec06add0de9d32baeee6fe645d"}, ] +radon = [ + {file = "radon-4.1.0-py2.py3-none-any.whl", hash = "sha256:0c18111ec6cfe7f664bf9db6c51586714ac8c6d9741542706df8a85aca39b99a"}, + {file = "radon-4.1.0.tar.gz", hash = "sha256:56082c52206db45027d4a73612e1b21663c4cc2be3760fee769d966fd7efdd6d"}, +] regex = [ - {file = "regex-2020.4.4-cp27-cp27m-win32.whl", hash = "sha256:90742c6ff121a9c5b261b9b215cb476eea97df98ea82037ec8ac95d1be7a034f"}, - {file = "regex-2020.4.4-cp27-cp27m-win_amd64.whl", hash = "sha256:24f4f4062eb16c5bbfff6a22312e8eab92c2c99c51a02e39b4eae54ce8255cd1"}, - {file = "regex-2020.4.4-cp36-cp36m-manylinux1_i686.whl", 
hash = "sha256:08119f707f0ebf2da60d2f24c2f39ca616277bb67ef6c92b72cbf90cbe3a556b"}, - {file = "regex-2020.4.4-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:c9423a150d3a4fc0f3f2aae897a59919acd293f4cb397429b120a5fcd96ea3db"}, - {file = "regex-2020.4.4-cp36-cp36m-manylinux2010_i686.whl", hash = "sha256:c087bff162158536387c53647411db09b6ee3f9603c334c90943e97b1052a156"}, - {file = "regex-2020.4.4-cp36-cp36m-manylinux2010_x86_64.whl", hash = "sha256:1cbe0fa0b7f673400eb29e9ef41d4f53638f65f9a2143854de6b1ce2899185c3"}, - {file = "regex-2020.4.4-cp36-cp36m-win32.whl", hash = "sha256:0ce9537396d8f556bcfc317c65b6a0705320701e5ce511f05fc04421ba05b8a8"}, - {file = "regex-2020.4.4-cp36-cp36m-win_amd64.whl", hash = "sha256:7e1037073b1b7053ee74c3c6c0ada80f3501ec29d5f46e42669378eae6d4405a"}, - {file = "regex-2020.4.4-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:4385f12aa289d79419fede43f979e372f527892ac44a541b5446617e4406c468"}, - {file = "regex-2020.4.4-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:a58dd45cb865be0ce1d5ecc4cfc85cd8c6867bea66733623e54bd95131f473b6"}, - {file = "regex-2020.4.4-cp37-cp37m-manylinux2010_i686.whl", hash = "sha256:ccccdd84912875e34c5ad2d06e1989d890d43af6c2242c6fcfa51556997af6cd"}, - {file = "regex-2020.4.4-cp37-cp37m-manylinux2010_x86_64.whl", hash = "sha256:ea4adf02d23b437684cd388d557bf76e3afa72f7fed5bbc013482cc00c816948"}, - {file = "regex-2020.4.4-cp37-cp37m-win32.whl", hash = "sha256:2294f8b70e058a2553cd009df003a20802ef75b3c629506be20687df0908177e"}, - {file = "regex-2020.4.4-cp37-cp37m-win_amd64.whl", hash = "sha256:e91ba11da11cf770f389e47c3f5c30473e6d85e06d7fd9dcba0017d2867aab4a"}, - {file = "regex-2020.4.4-cp38-cp38-manylinux1_i686.whl", hash = "sha256:5635cd1ed0a12b4c42cce18a8d2fb53ff13ff537f09de5fd791e97de27b6400e"}, - {file = "regex-2020.4.4-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:23069d9c07e115537f37270d1d5faea3e0bdded8279081c4d4d607a2ad393683"}, - {file = "regex-2020.4.4-cp38-cp38-manylinux2010_i686.whl", hash = 
"sha256:c162a21e0da33eb3d31a3ac17a51db5e634fc347f650d271f0305d96601dc15b"}, - {file = "regex-2020.4.4-cp38-cp38-manylinux2010_x86_64.whl", hash = "sha256:fb95debbd1a824b2c4376932f2216cc186912e389bdb0e27147778cf6acb3f89"}, - {file = "regex-2020.4.4-cp38-cp38-win32.whl", hash = "sha256:2a3bf8b48f8e37c3a40bb3f854bf0121c194e69a650b209628d951190b862de3"}, - {file = "regex-2020.4.4-cp38-cp38-win_amd64.whl", hash = "sha256:5bfed051dbff32fd8945eccca70f5e22b55e4148d2a8a45141a3b053d6455ae3"}, - {file = "regex-2020.4.4.tar.gz", hash = "sha256:295badf61a51add2d428a46b8580309c520d8b26e769868b922750cf3ce67142"}, + {file = "regex-2020.5.7-cp27-cp27m-win32.whl", hash = "sha256:5493a02c1882d2acaaf17be81a3b65408ff541c922bfd002535c5f148aa29f74"}, + {file = "regex-2020.5.7-cp27-cp27m-win_amd64.whl", hash = "sha256:021a0ae4d2baeeb60a3014805a2096cb329bd6d9f30669b7ad0da51a9cb73349"}, + {file = "regex-2020.5.7-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:4df91094ced6f53e71f695c909d9bad1cca8761d96fd9f23db12245b5521136e"}, + {file = "regex-2020.5.7-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:7ce4a213a96d6c25eeae2f7d60d4dad89ac2b8134ec3e69db9bc522e2c0f9388"}, + {file = "regex-2020.5.7-cp36-cp36m-manylinux2010_i686.whl", hash = "sha256:3b059e2476b327b9794c792c855aa05531a3f3044737e455d283c7539bd7534d"}, + {file = "regex-2020.5.7-cp36-cp36m-manylinux2010_x86_64.whl", hash = "sha256:652ab4836cd5531d64a34403c00ada4077bb91112e8bcdae933e2eae232cf4a8"}, + {file = "regex-2020.5.7-cp36-cp36m-win32.whl", hash = "sha256:1e2255ae938a36e9bd7db3b93618796d90c07e5f64dd6a6750c55f51f8b76918"}, + {file = "regex-2020.5.7-cp36-cp36m-win_amd64.whl", hash = "sha256:8127ca2bf9539d6a64d03686fd9e789e8c194fc19af49b69b081f8c7e6ecb1bc"}, + {file = "regex-2020.5.7-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:f7f2f4226db6acd1da228adf433c5c3792858474e49d80668ea82ac87cf74a03"}, + {file = "regex-2020.5.7-cp37-cp37m-manylinux1_x86_64.whl", hash = 
"sha256:2bc6a17a7fa8afd33c02d51b6f417fc271538990297167f68a98cae1c9e5c945"}, + {file = "regex-2020.5.7-cp37-cp37m-manylinux2010_i686.whl", hash = "sha256:b7c9f65524ff06bf70c945cd8d8d1fd90853e27ccf86026af2afb4d9a63d06b1"}, + {file = "regex-2020.5.7-cp37-cp37m-manylinux2010_x86_64.whl", hash = "sha256:fa09da4af4e5b15c0e8b4986a083f3fd159302ea115a6cc0649cd163435538b8"}, + {file = "regex-2020.5.7-cp37-cp37m-win32.whl", hash = "sha256:669a8d46764a09f198f2e91fc0d5acdac8e6b620376757a04682846ae28879c4"}, + {file = "regex-2020.5.7-cp37-cp37m-win_amd64.whl", hash = "sha256:b5b5b2e95f761a88d4c93691716ce01dc55f288a153face1654f868a8034f494"}, + {file = "regex-2020.5.7-cp38-cp38-manylinux1_i686.whl", hash = "sha256:0ff50843535593ee93acab662663cb2f52af8e31c3f525f630f1dc6156247938"}, + {file = "regex-2020.5.7-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:1b17bf37c2aefc4cac8436971fe6ee52542ae4225cfc7762017f7e97a63ca998"}, + {file = "regex-2020.5.7-cp38-cp38-manylinux2010_i686.whl", hash = "sha256:04d6e948ef34d3eac133bedc0098364a9e635a7914f050edb61272d2ddae3608"}, + {file = "regex-2020.5.7-cp38-cp38-manylinux2010_x86_64.whl", hash = "sha256:5b741ecc3ad3e463d2ba32dce512b412c319993c1bb3d999be49e6092a769fb2"}, + {file = "regex-2020.5.7-cp38-cp38-win32.whl", hash = "sha256:099568b372bda492be09c4f291b398475587d49937c659824f891182df728cdf"}, + {file = "regex-2020.5.7-cp38-cp38-win_amd64.whl", hash = "sha256:3ab5e41c4ed7cd4fa426c50add2892eb0f04ae4e73162155cd668257d02259dd"}, + {file = "regex-2020.5.7.tar.gz", hash = "sha256:73a10404867b835f1b8a64253e4621908f0d71150eb4e97ab2e7e441b53e9451"}, +] +requests = [ + {file = "requests-2.23.0-py2.py3-none-any.whl", hash = "sha256:43999036bfa82904b6af1d99e4882b560e5e2c68e5c4b0aa03b655f3d7d73fee"}, + {file = "requests-2.23.0.tar.gz", hash = "sha256:b3f43d496c6daba4493e7c431722aeb7dbc6288f52a6e04e7b6023b0247817e6"}, +] +s3transfer = [ + {file = "s3transfer-0.3.3-py2.py3-none-any.whl", hash = 
"sha256:2482b4259524933a022d59da830f51bd746db62f047d6eb213f2f8855dcb8a13"}, + {file = "s3transfer-0.3.3.tar.gz", hash = "sha256:921a37e2aefc64145e7b73d50c71bb4f26f46e4c9f414dc648c6245ff92cf7db"}, ] six = [ {file = "six-1.14.0-py2.py3-none-any.whl", hash = "sha256:8f3cd2e254d8f793e7f3d6d9df77b92252b52637291d0f0da013c76ea2724b6c"}, {file = "six-1.14.0.tar.gz", hash = "sha256:236bdbdce46e6e6a3d61a337c0f8b763ca1e8717c03b369e87a7ec7ce1319c0a"}, ] +smmap = [ + {file = "smmap-3.0.4-py2.py3-none-any.whl", hash = "sha256:54c44c197c819d5ef1991799a7e30b662d1e520f2ac75c9efbeb54a742214cf4"}, + {file = "smmap-3.0.4.tar.gz", hash = "sha256:9c98bbd1f9786d22f14b3d4126894d56befb835ec90cef151af566c7e19b5d24"}, +] +stevedore = [ + {file = "stevedore-1.32.0-py2.py3-none-any.whl", hash = "sha256:a4e7dc759fb0f2e3e2f7d8ffe2358c19d45b9b8297f393ef1256858d82f69c9b"}, + {file = "stevedore-1.32.0.tar.gz", hash = "sha256:18afaf1d623af5950cc0f7e75e70f917784c73b652a34a12d90b309451b5500b"}, +] testfixtures = [ {file = "testfixtures-6.14.1-py2.py3-none-any.whl", hash = "sha256:30566e24a1b34e4d3f8c13abf62557d01eeb4480bcb8f1745467bfb0d415a7d9"}, {file = "testfixtures-6.14.1.tar.gz", hash = "sha256:58d2b3146d93bc5ddb0cd24e0ccacb13e29bdb61e5c81235c58f7b8ee4470366"}, @@ -1086,13 +1512,18 @@ typed-ast = [ {file = "typed_ast-1.4.1-cp39-cp39-macosx_10_15_x86_64.whl", hash = "sha256:d43943ef777f9a1c42bf4e552ba23ac77a6351de620aa9acf64ad54933ad4d34"}, {file = "typed_ast-1.4.1.tar.gz", hash = "sha256:8c8aaad94455178e3187ab22c8b01a3837f8ee50e09cf31f1ba129eb293ec30b"}, ] +typing-extensions = [ + {file = "typing_extensions-3.7.4.2-py2-none-any.whl", hash = "sha256:f8d2bd89d25bc39dabe7d23df520442fa1d8969b82544370e03d88b5a591c392"}, + {file = "typing_extensions-3.7.4.2-py3-none-any.whl", hash = "sha256:6e95524d8a547a91e08f404ae485bbb71962de46967e1b71a0cb89af24e761c5"}, + {file = "typing_extensions-3.7.4.2.tar.gz", hash = "sha256:79ee589a3caca649a9bfd2a8de4709837400dfa00b6cc81962a1e6a1815969ae"}, +] urllib3 = [ 
{file = "urllib3-1.25.9-py2.py3-none-any.whl", hash = "sha256:88206b0eb87e6d677d424843ac5209e3fb9d0190d0ee169599165ec25e9d9115"}, {file = "urllib3-1.25.9.tar.gz", hash = "sha256:3018294ebefce6572a474f0604c2021e33b3fd8006ecd11d62107a5d2a963527"}, ] virtualenv = [ - {file = "virtualenv-20.0.18-py2.py3-none-any.whl", hash = "sha256:5021396e8f03d0d002a770da90e31e61159684db2859d0ba4850fbea752aa675"}, - {file = "virtualenv-20.0.18.tar.gz", hash = "sha256:ac53ade75ca189bc97b6c1d9ec0f1a50efe33cbf178ae09452dcd9fd309013c1"}, + {file = "virtualenv-20.0.20-py2.py3-none-any.whl", hash = "sha256:b4c14d4d73a0c23db267095383c4276ef60e161f94fde0427f2f21a0132dde74"}, + {file = "virtualenv-20.0.20.tar.gz", hash = "sha256:fd0e54dec8ac96c1c7c87daba85f0a59a7c37fe38748e154306ca21c73244637"}, ] wcwidth = [ {file = "wcwidth-0.1.9-py2.py3-none-any.whl", hash = "sha256:cafe2186b3c009a04067022ce1dcd79cb38d8d65ee4f4791b8888d6599d1bbe1"}, @@ -1101,6 +1532,29 @@ wcwidth = [ wrapt = [ {file = "wrapt-1.12.1.tar.gz", hash = "sha256:b62ffa81fb85f4332a4f609cab4ac40709470da05643a082ec1eb88e6d9b97d7"}, ] +xenon = [ + {file = "xenon-0.7.0-py2.py3-none-any.whl", hash = "sha256:83e98f67b7077c95c25c3402aea6203dd2ed6256708b76ed9751e9dbf1aba125"}, + {file = "xenon-0.7.0.tar.gz", hash = "sha256:5e6433c9297d965bf666256a0a030b6e13660ab87680220c4eb07241f101625b"}, +] +yarl = [ + {file = "yarl-1.4.2-cp35-cp35m-macosx_10_13_x86_64.whl", hash = "sha256:3ce3d4f7c6b69c4e4f0704b32eca8123b9c58ae91af740481aa57d7857b5e41b"}, + {file = "yarl-1.4.2-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:a4844ebb2be14768f7994f2017f70aca39d658a96c786211be5ddbe1c68794c1"}, + {file = "yarl-1.4.2-cp35-cp35m-win32.whl", hash = "sha256:d8cdee92bc930d8b09d8bd2043cedd544d9c8bd7436a77678dd602467a993080"}, + {file = "yarl-1.4.2-cp35-cp35m-win_amd64.whl", hash = "sha256:c2b509ac3d4b988ae8769901c66345425e361d518aecbe4acbfc2567e416626a"}, + {file = "yarl-1.4.2-cp36-cp36m-macosx_10_13_x86_64.whl", hash = 
"sha256:308b98b0c8cd1dfef1a0311dc5e38ae8f9b58349226aa0533f15a16717ad702f"}, + {file = "yarl-1.4.2-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:944494be42fa630134bf907714d40207e646fd5a94423c90d5b514f7b0713fea"}, + {file = "yarl-1.4.2-cp36-cp36m-win32.whl", hash = "sha256:5b10eb0e7f044cf0b035112446b26a3a2946bca9d7d7edb5e54a2ad2f6652abb"}, + {file = "yarl-1.4.2-cp36-cp36m-win_amd64.whl", hash = "sha256:a161de7e50224e8e3de6e184707476b5a989037dcb24292b391a3d66ff158e70"}, + {file = "yarl-1.4.2-cp37-cp37m-macosx_10_13_x86_64.whl", hash = "sha256:26d7c90cb04dee1665282a5d1a998defc1a9e012fdca0f33396f81508f49696d"}, + {file = "yarl-1.4.2-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:0c2ab325d33f1b824734b3ef51d4d54a54e0e7a23d13b86974507602334c2cce"}, + {file = "yarl-1.4.2-cp37-cp37m-win32.whl", hash = "sha256:e15199cdb423316e15f108f51249e44eb156ae5dba232cb73be555324a1d49c2"}, + {file = "yarl-1.4.2-cp37-cp37m-win_amd64.whl", hash = "sha256:2098a4b4b9d75ee352807a95cdf5f10180db903bc5b7270715c6bbe2551f64ce"}, + {file = "yarl-1.4.2-cp38-cp38-macosx_10_13_x86_64.whl", hash = "sha256:c9959d49a77b0e07559e579f38b2f3711c2b8716b8410b320bf9713013215a1b"}, + {file = "yarl-1.4.2-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:25e66e5e2007c7a39541ca13b559cd8ebc2ad8fe00ea94a2aad28a9b1e44e5ae"}, + {file = "yarl-1.4.2-cp38-cp38-win32.whl", hash = "sha256:6faa19d3824c21bcbfdfce5171e193c8b4ddafdf0ac3f129ccf0cdfcb083e462"}, + {file = "yarl-1.4.2-cp38-cp38-win_amd64.whl", hash = "sha256:0ca2f395591bbd85ddd50a82eb1fde9c1066fafe888c5c7cc1d810cf03fd3cc6"}, + {file = "yarl-1.4.2.tar.gz", hash = "sha256:58cd9c469eced558cd81aa3f484b2924e8897049e06889e8ff2510435b7ef74b"}, +] zipp = [ {file = "zipp-3.1.0-py3-none-any.whl", hash = "sha256:aa36550ff0c0b7ef7fa639055d797116ee891440eac1a56f378e2d3179e0320b"}, {file = "zipp-3.1.0.tar.gz", hash = "sha256:c599e4d75c98f6798c509911d08a22e6c021d074469042177c8c86fb92eefd96"}, diff --git a/python/pyproject.toml b/python/pyproject.toml index 
99a3564ca22..95845cc9f9c 100644 --- a/python/pyproject.toml +++ b/python/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "aws_lambda_powertools" -version = "0.8.1" +version = "0.9.0" description = "Python utilities for AWS Lambda functions including but not limited to tracing, logging and custom metric" authors = ["Amazon Web Services"] classifiers=[ @@ -19,8 +19,8 @@ license = "MIT-0" [tool.poetry.dependencies] python = "^3.6" -aws-xray-sdk = "^2.4.3" -fastjsonschema = "^2.14.4" +aws-xray-sdk = "~=2.5.0" +fastjsonschema = "~=2.14.4" [tool.poetry.dev-dependencies] coverage = {extras = ["toml"], version = "^5.0.3"} @@ -28,11 +28,9 @@ pytest = "^5.2" black = "^19.10b0" flake8 = "^3.7.9" flake8-black = "^0.1.1" -flake8-bugbear = "^20.1.4" flake8-builtins = "^1.4.2" flake8-comprehensions = "^3.2.2" flake8-debugger = "^3.2.1" -flake8-eradicate = "^0.2.4" flake8-fixme = "^1.1.1" flake8-isort = "^2.8.0" flake8-variables-names = "^0.0.3" @@ -41,6 +39,14 @@ pre-commit = "^2.1.0" pytest-cov = "^2.8.1" pytest-mock = "^2.0.0" pdoc3 = "^0.7.5" +pytest-asyncio = "^0.12.0" +aioboto3 = "^8.0.3" +aiohttp = "^3.6.2" +bandit = "^1.6.2" +radon = "^4.1.0" +xenon = "^0.7.0" +flake8-bugbear = "^20.1.4" +flake8-eradicate = "^0.3.0" [tool.coverage.run] source = ["aws_lambda_powertools"] diff --git a/python/tests/unit/test_tracing.py b/python/tests/unit/test_tracing.py index f8f43de0bf4..f5ad586cb00 100644 --- a/python/tests/unit/test_tracing.py +++ b/python/tests/unit/test_tracing.py @@ -1,3 +1,5 @@ +import sys +from typing import NamedTuple from unittest import mock import pytest @@ -11,19 +13,23 @@ def dummy_response(): @pytest.fixture -def xray_stub(mocker): - class XRayStub: +def provider_stub(mocker): + class CustomProvider: def __init__( self, put_metadata_mock: mocker.MagicMock = None, put_annotation_mock: mocker.MagicMock = None, - begin_subsegment_mock: mocker.MagicMock = None, - end_subsegment_mock: mocker.MagicMock = None, + in_subsegment: mocker.MagicMock = None, + 
in_subsegment_async: mocker.MagicMock = None, + patch_mock: mocker.MagicMock = None, + disable_tracing_provider_mock: mocker.MagicMock = None, ): self.put_metadata_mock = put_metadata_mock or mocker.MagicMock() self.put_annotation_mock = put_annotation_mock or mocker.MagicMock() - self.begin_subsegment_mock = begin_subsegment_mock or mocker.MagicMock() - self.end_subsegment_mock = end_subsegment_mock or mocker.MagicMock() + self.in_subsegment = in_subsegment or mocker.MagicMock() + self.patch_mock = patch_mock or mocker.MagicMock() + self.disable_tracing_provider_mock = disable_tracing_provider_mock or mocker.MagicMock() + self.in_subsegment_async = in_subsegment_async or mocker.MagicMock(spec=True) def put_metadata(self, *args, **kwargs): return self.put_metadata_mock(*args, **kwargs) @@ -31,32 +37,50 @@ def put_metadata(self, *args, **kwargs): def put_annotation(self, *args, **kwargs): return self.put_annotation_mock(*args, **kwargs) - def begin_subsegment(self, *args, **kwargs): - return self.begin_subsegment_mock(*args, **kwargs) + def in_subsegment(self, *args, **kwargs): + return self.in_subsegment(*args, **kwargs) - def end_subsegment(self, *args, **kwargs): - return self.end_subsegment_mock(*args, **kwargs) + def patch(self, *args, **kwargs): + return self.patch_mock(*args, **kwargs) - return XRayStub + return CustomProvider @pytest.fixture(scope="function", autouse=True) -def reset_tracing_config(): +def reset_tracing_config(mocker): Tracer._reset_config() + # reset global cold start module + mocker.patch("aws_lambda_powertools.tracing.tracer.is_cold_start", return_value=True) yield -def test_tracer_lambda_handler(mocker, dummy_response, xray_stub): - put_metadata_mock = mocker.MagicMock() - begin_subsegment_mock = mocker.MagicMock() - end_subsegment_mock = mocker.MagicMock() +@pytest.fixture +def in_subsegment_mock(): + class Async_context_manager(mock.MagicMock): + async def __aenter__(self, *args, **kwargs): + return self.__enter__() - xray_provider = 
xray_stub( - put_metadata_mock=put_metadata_mock, - begin_subsegment_mock=begin_subsegment_mock, - end_subsegment_mock=end_subsegment_mock, - ) - tracer = Tracer(provider=xray_provider, service="booking") + async def __aexit__(self, *args, **kwargs): + return self.__exit__(*args, **kwargs) + + class In_subsegment(NamedTuple): + in_subsegment: mock.MagicMock = Async_context_manager() + put_annotation: mock.MagicMock = mock.MagicMock() + put_metadata: mock.MagicMock = mock.MagicMock() + + in_subsegment = In_subsegment() + in_subsegment.in_subsegment.return_value.__enter__.return_value.put_annotation = in_subsegment.put_annotation + in_subsegment.in_subsegment.return_value.__enter__.return_value.put_metadata = in_subsegment.put_metadata + + if sys.version_info >= (3, 8): # 3.8 introduced AsyncMock + in_subsegment.in_subsegment.return_value.__aenter__.return_value.put_metadata = in_subsegment.put_metadata + + yield in_subsegment + + +def test_tracer_lambda_handler(mocker, dummy_response, provider_stub, in_subsegment_mock): + provider = provider_stub(in_subsegment=in_subsegment_mock.in_subsegment) + tracer = Tracer(provider=provider, service="booking") @tracer.capture_lambda_handler def handler(event, context): @@ -64,22 +88,18 @@ def handler(event, context): handler({}, mocker.MagicMock()) - assert begin_subsegment_mock.call_count == 1 - assert begin_subsegment_mock.call_args == mocker.call(name="## handler") - assert end_subsegment_mock.call_count == 1 - assert put_metadata_mock.call_args == mocker.call( + assert in_subsegment_mock.in_subsegment.call_count == 1 + assert in_subsegment_mock.in_subsegment.call_args == mocker.call(name="## handler") + assert in_subsegment_mock.put_metadata.call_args == mocker.call( key="lambda handler response", value=dummy_response, namespace="booking" ) + assert in_subsegment_mock.put_annotation.call_count == 1 + assert in_subsegment_mock.put_annotation.call_args == mocker.call(key="ColdStart", value=True) -def 
test_tracer_method(mocker, dummy_response, xray_stub): - put_metadata_mock = mocker.MagicMock() - put_annotation_mock = mocker.MagicMock() - begin_subsegment_mock = mocker.MagicMock() - end_subsegment_mock = mocker.MagicMock() - - xray_provider = xray_stub(put_metadata_mock, put_annotation_mock, begin_subsegment_mock, end_subsegment_mock) - tracer = Tracer(provider=xray_provider, service="booking") +def test_tracer_method(mocker, dummy_response, provider_stub, in_subsegment_mock): + provider = provider_stub(in_subsegment=in_subsegment_mock.in_subsegment) + tracer = Tracer(provider=provider, service="booking") @tracer.capture_method def greeting(name, message): @@ -87,51 +107,37 @@ def greeting(name, message): greeting(name="Foo", message="Bar") - assert begin_subsegment_mock.call_count == 1 - assert begin_subsegment_mock.call_args == mocker.call(name="## greeting") - assert end_subsegment_mock.call_count == 1 - assert put_metadata_mock.call_args == mocker.call( + assert in_subsegment_mock.in_subsegment.call_count == 1 + assert in_subsegment_mock.in_subsegment.call_args == mocker.call(name="## greeting") + assert in_subsegment_mock.put_metadata.call_args == mocker.call( key="greeting response", value=dummy_response, namespace="booking" ) -def test_tracer_custom_metadata(mocker, dummy_response, xray_stub): +def test_tracer_custom_metadata(mocker, dummy_response, provider_stub): put_metadata_mock = mocker.MagicMock() - - xray_provider = xray_stub(put_metadata_mock=put_metadata_mock) - - tracer = Tracer(provider=xray_provider, service="booking") annotation_key = "Booking response" annotation_value = {"bookingStatus": "CONFIRMED"} - @tracer.capture_lambda_handler - def handler(event, context): - tracer.put_metadata(annotation_key, annotation_value) - return dummy_response - - handler({}, mocker.MagicMock()) + provider = provider_stub(put_metadata_mock=put_metadata_mock) + tracer = Tracer(provider=provider, service="booking") + tracer.put_metadata(annotation_key, 
annotation_value) - assert put_metadata_mock.call_count == 2 + assert put_metadata_mock.call_count == 1 assert put_metadata_mock.call_args_list[0] == mocker.call( key=annotation_key, value=annotation_value, namespace="booking" ) -def test_tracer_custom_annotation(mocker, dummy_response, xray_stub): +def test_tracer_custom_annotation(mocker, dummy_response, provider_stub): put_annotation_mock = mocker.MagicMock() - - xray_provider = xray_stub(put_annotation_mock=put_annotation_mock) - - tracer = Tracer(provider=xray_provider, service="booking") annotation_key = "BookingId" annotation_value = "123456" - @tracer.capture_lambda_handler - def handler(event, context): - tracer.put_annotation(annotation_key, annotation_value) - return dummy_response + provider = provider_stub(put_annotation_mock=put_annotation_mock) + tracer = Tracer(provider=provider, service="booking") - handler({}, mocker.MagicMock()) + tracer.put_annotation(annotation_key, annotation_value) assert put_annotation_mock.call_count == 1 assert put_annotation_mock.call_args == mocker.call(key=annotation_key, value=annotation_value) @@ -155,10 +161,10 @@ def test_tracer_no_autopatch(patch_mock): assert patch_mock.call_count == 0 -def test_tracer_lambda_handler_empty_response_metadata(mocker, xray_stub): +def test_tracer_lambda_handler_empty_response_metadata(mocker, provider_stub): put_metadata_mock = mocker.MagicMock() - xray_provider = xray_stub(put_metadata_mock=put_metadata_mock) - tracer = Tracer(provider=xray_provider) + provider = provider_stub(put_metadata_mock=put_metadata_mock) + tracer = Tracer(provider=provider) @tracer.capture_lambda_handler def handler(event, context): @@ -169,10 +175,10 @@ def handler(event, context): assert put_metadata_mock.call_count == 0 -def test_tracer_method_empty_response_metadata(mocker, xray_stub): +def test_tracer_method_empty_response_metadata(mocker, provider_stub): put_metadata_mock = mocker.MagicMock() - xray_provider = 
xray_stub(put_metadata_mock=put_metadata_mock) - tracer = Tracer(provider=xray_provider) + provider = provider_stub(put_metadata_mock=put_metadata_mock) + tracer = Tracer(provider=provider) @tracer.capture_method def greeting(name, message): @@ -181,3 +187,125 @@ def greeting(name, message): greeting(name="Foo", message="Bar") assert put_metadata_mock.call_count == 0 + + +@mock.patch("aws_lambda_powertools.tracing.tracer.aws_xray_sdk.core.patch") +@mock.patch("aws_lambda_powertools.tracing.tracer.aws_xray_sdk.core.patch_all") +def test_tracer_patch(xray_patch_all_mock, xray_patch_mock, mocker): + # GIVEN tracer is instantiated + # WHEN default X-Ray provider client is mocked + # THEN tracer should run just fine + + Tracer() + assert xray_patch_all_mock.call_count == 1 + + modules = ["boto3"] + Tracer(service="booking", patch_modules=modules) + + assert xray_patch_mock.call_count == 1 + assert xray_patch_mock.call_args == mocker.call(modules) + + +def test_tracer_method_exception_metadata(mocker, provider_stub, in_subsegment_mock): + + provider = provider_stub(in_subsegment=in_subsegment_mock.in_subsegment) + tracer = Tracer(provider=provider, service="booking") + + @tracer.capture_method + def greeting(name, message): + raise ValueError("test") + + with pytest.raises(ValueError): + greeting(name="Foo", message="Bar") + + put_metadata_mock_args = in_subsegment_mock.put_metadata.call_args[1] + assert put_metadata_mock_args["key"] == "greeting error" + assert put_metadata_mock_args["namespace"] == "booking" + + +def test_tracer_lambda_handler_exception_metadata(mocker, provider_stub, in_subsegment_mock): + + provider = provider_stub(in_subsegment=in_subsegment_mock.in_subsegment) + tracer = Tracer(provider=provider, service="booking") + + @tracer.capture_lambda_handler + def handler(event, context): + raise ValueError("test") + + with pytest.raises(ValueError): + handler({}, mocker.MagicMock()) + + put_metadata_mock_args = in_subsegment_mock.put_metadata.call_args[1] 
+ assert put_metadata_mock_args["key"] == "booking error" + assert put_metadata_mock_args["namespace"] == "booking" + + +@pytest.mark.asyncio +async def test_tracer_method_nested_async(mocker, dummy_response, provider_stub, in_subsegment_mock): + provider = provider_stub(in_subsegment_async=in_subsegment_mock.in_subsegment) + tracer = Tracer(provider=provider, service="booking") + + @tracer.capture_method + async def greeting_2(name, message): + return dummy_response + + @tracer.capture_method + async def greeting(name, message): + await greeting_2(name, message) + return dummy_response + + await greeting(name="Foo", message="Bar") + + ( + in_subsegment_greeting_call_args, + in_subsegment_greeting2_call_args, + ) = in_subsegment_mock.in_subsegment.call_args_list + put_metadata_greeting2_call_args, put_metadata_greeting_call_args = in_subsegment_mock.put_metadata.call_args_list + + assert in_subsegment_mock.in_subsegment.call_count == 2 + assert in_subsegment_greeting_call_args == mocker.call(name="## greeting") + assert in_subsegment_greeting2_call_args == mocker.call(name="## greeting_2") + + assert in_subsegment_mock.put_metadata.call_count == 2 + assert put_metadata_greeting2_call_args == mocker.call( + key="greeting_2 response", value=dummy_response, namespace="booking" + ) + assert put_metadata_greeting_call_args == mocker.call( + key="greeting response", value=dummy_response, namespace="booking" + ) + + +@pytest.mark.asyncio +async def test_tracer_method_nested_async_disabled(dummy_response): + + tracer = Tracer(service="booking", disabled=True) + + @tracer.capture_method + async def greeting_2(name, message): + return dummy_response + + @tracer.capture_method + async def greeting(name, message): + await greeting_2(name, message) + return dummy_response + + ret = await greeting(name="Foo", message="Bar") + + assert ret == dummy_response + + +@pytest.mark.asyncio +async def test_tracer_method_exception_metadata_async(mocker, provider_stub, in_subsegment_mock): 
+ provider = provider_stub(in_subsegment_async=in_subsegment_mock.in_subsegment) + tracer = Tracer(provider=provider, service="booking") + + @tracer.capture_method + async def greeting(name, message): + raise ValueError("test") + + with pytest.raises(ValueError): + await greeting(name="Foo", message="Bar") + + put_metadata_mock_args = in_subsegment_mock.put_metadata.call_args[1] + assert put_metadata_mock_args["key"] == "greeting error" + assert put_metadata_mock_args["namespace"] == "booking" From 5ad8e2028be89a79c713123957725dc3338a14ab Mon Sep 17 00:00:00 2001 From: Heitor Lessa Date: Tue, 12 May 2020 14:27:06 +0100 Subject: [PATCH 17/21] improv: add project tenets Powertools now have official tenets that will guide us as we move towards GA. Tenets will also help us remain consistent across different language implementations as we grow. --- README.md | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/README.md b/README.md index 9a1245bfa80..d827de8d7cd 100644 --- a/README.md +++ b/README.md @@ -4,6 +4,17 @@ A suite of utilities for AWS Lambda Functions that makes tracing with AWS X-Ray, structured logging and creating custom metrics asynchronously easier. +## Tenets + +* **AWS Lambda only** – We optimize for AWS Lambda functions environment only. Utilities might work with web frameworks, and non-Lambda environments though they are not officially supported. +* **Eases the adoption of best practices** – Utilities’ main priority is to facilitate best practices adoption defined in AWS Well-Architected Serverless Lens; everything else is optional. +* **Keep it lean** – Additional dependencies are carefully considered to ease maintenance, security, and to prevent negatively impacting startup time. +* **We strive for backwards compatibility** – New features and changes should keep backwards compatibility. If a breaking change cannot be avoided, the deprecation and migration process should be clearly defined. 
+* **We work backwards from the community** – We aim to strike a balance of what would +work for 80% of customers. Emerging practices are considered and discussed via request for +comments (RFCs) +* **Idiomatic** – Utilities follow language’s idioms and their best practices. + ## Powertools available * [Python - Beta](./python/README.md) From fad965715a7f7b6fff582a9ee66879087ee5d339 Mon Sep 17 00:00:00 2001 From: Heitor Lessa Date: Tue, 12 May 2020 16:25:16 +0100 Subject: [PATCH 18/21] chore: grammar issues --- README.md | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/README.md b/README.md index d827de8d7cd..048cfd8bcbc 100644 --- a/README.md +++ b/README.md @@ -6,14 +6,14 @@ A suite of utilities for AWS Lambda Functions that makes tracing with AWS X-Ray, ## Tenets -* **AWS Lambda only** – We optimize for AWS Lambda functions environment only. Utilities might work with web frameworks, and non-Lambda environments though they are not officially supported. -* **Eases the adoption of best practices** – Utilities’ main priority is to facilitate best practices adoption defined in AWS Well-Architected Serverless Lens; everything else is optional. -* **Keep it lean** – Additional dependencies are carefully considered to ease maintenance, security, and to prevent negatively impacting startup time. +* **AWS Lambda only** – We optimise for AWS Lambda function environments only. Utilities might work with web frameworks and non-Lambda environments, though they are not officially supported. +* **Eases the adoption of best practices** – The main priority of the utilities is to facilitate best practices adoption, as defined in the AWS Well-Architected Serverless Lens; all other functionality is optional. +* **Keep it lean** – Additional dependencies are carefully considered for security and ease of maintenance, and prevent negatively impacting startup time. 
* **We strive for backwards compatibility** – New features and changes should keep backwards compatibility. If a breaking change cannot be avoided, the deprecation and migration process should be clearly defined. -* **We work backwards from the community** – We aim to strike a balance of what would -work for 80% of customers. Emerging practices are considered and discussed via request for -comments (RFCs) -* **Idiomatic** – Utilities follow language’s idioms and their best practices. +* **We work backwards from the community** – We aim to strike a balance of what would work best for 80% of customers. Emerging practices are considered and discussed via Requests for Comment (RFCs) +* **Idiomatic** – Utilities follow programming language idioms and language-specific best practices. + +_`*` Core utilities are Tracer, Logger and Metrics. Optional utilities may vary across languages._ ## Powertools available From 6857f7c2ec18d545bab60647b22b1d9dda8beb76 Mon Sep 17 00:00:00 2001 From: heitorlessa Date: Thu, 14 May 2020 16:27:02 +0100 Subject: [PATCH 19/21] bugfix: resolves #31 aiohttp lazy import --- python/HISTORY.md | 6 ++++++ python/README.md | 2 ++ python/aws_lambda_powertools/tracing/__init__.py | 6 ++---- .../aws_lambda_powertools/tracing/extensions.py | 15 +++++++++++++++ python/pyproject.toml | 2 +- 5 files changed, 26 insertions(+), 5 deletions(-) create mode 100644 python/aws_lambda_powertools/tracing/extensions.py diff --git a/python/HISTORY.md b/python/HISTORY.md index b90bb22ec96..894e2b1fc05 100644 --- a/python/HISTORY.md +++ b/python/HISTORY.md @@ -1,5 +1,11 @@ # HISTORY +## May 14th + +**0.9.2** + +* **Tracer**: Bugfix - aiohttp lazy import so it's not a hard dependency + ## May 12th **0.9.0** diff --git a/python/README.md b/python/README.md index ab0efc45dbd..3208b668410 100644 --- a/python/README.md +++ b/python/README.md @@ -142,6 +142,8 @@ You can use `tracer.provider` attribute to access all methods provided by `xray_ **Example using aiohttp with an 
async context manager** +> NOTE: It expects you have `aiohttp` as a dependency. `aiohttp_trace_config` uses lazy import to create a trace_config object following `aiohttp` protocol. + ```python import asyncio import aiohttp diff --git a/python/aws_lambda_powertools/tracing/__init__.py b/python/aws_lambda_powertools/tracing/__init__.py index ece90f7d1bc..f45ac1fb73e 100644 --- a/python/aws_lambda_powertools/tracing/__init__.py +++ b/python/aws_lambda_powertools/tracing/__init__.py @@ -1,10 +1,8 @@ """Tracing utility """ -from aws_xray_sdk.ext.aiohttp.client import aws_xray_trace_config as aiohttp_trace_config -from .tracer import Tracer - -aiohttp_trace_config.__doc__ = "aiohttp extension for X-Ray (aws_xray_trace_config)" +from .extensions import aiohttp_trace_config +from .tracer import Tracer __all__ = ["Tracer", "aiohttp_trace_config"] diff --git a/python/aws_lambda_powertools/tracing/extensions.py b/python/aws_lambda_powertools/tracing/extensions.py new file mode 100644 index 00000000000..2bb0125e841 --- /dev/null +++ b/python/aws_lambda_powertools/tracing/extensions.py @@ -0,0 +1,15 @@ +def aiohttp_trace_config(): + """aiohttp extension for X-Ray (aws_xray_trace_config) + + It expects you to have aiohttp as a dependency. 
+ + Returns + ------- + TraceConfig + aiohttp trace config + """ + from aws_xray_sdk.ext.aiohttp.client import aws_xray_trace_config + + aws_xray_trace_config.__doc__ = "aiohttp extension for X-Ray (aws_xray_trace_config)" + + return aws_xray_trace_config() diff --git a/python/pyproject.toml b/python/pyproject.toml index 95845cc9f9c..ffd44e243fb 100644 --- a/python/pyproject.toml +++ b/python/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "aws_lambda_powertools" -version = "0.9.0" +version = "0.9.2" description = "Python utilities for AWS Lambda functions including but not limited to tracing, logging and custom metric" authors = ["Amazon Web Services"] classifiers=[ From fdb90a87489684ee402f6fbc13b2b18449be90e9 Mon Sep 17 00:00:00 2001 From: heitorlessa Date: Thu, 14 May 2020 17:01:49 +0100 Subject: [PATCH 20/21] chore: renamed history to changelog dependabot --- python/{HISTORY.md => CHANGELOG.md} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename python/{HISTORY.md => CHANGELOG.md} (100%) diff --git a/python/HISTORY.md b/python/CHANGELOG.md similarity index 100% rename from python/HISTORY.md rename to python/CHANGELOG.md From d18a6dd41fc56bcf6bf14f62e3487043288e308f Mon Sep 17 00:00:00 2001 From: heitorlessa Date: Sat, 16 May 2020 18:06:34 +0100 Subject: [PATCH 21/21] bugfix: #32 Runtime Error for nested sync fns --- python/CHANGELOG.md | 6 + .../aws_lambda_powertools/tracing/tracer.py | 125 +++++++++--------- python/example/hello_world/app.py | 17 ++- python/pyproject.toml | 2 +- python/tests/functional/test_tracing.py | 21 +++ python/tests/unit/test_tracing.py | 2 +- 6 files changed, 111 insertions(+), 62 deletions(-) diff --git a/python/CHANGELOG.md b/python/CHANGELOG.md index 894e2b1fc05..c3d8c76207e 100644 --- a/python/CHANGELOG.md +++ b/python/CHANGELOG.md @@ -1,5 +1,11 @@ # HISTORY +## May 16th + +**0.9.3** + +* **Tracer**: Bugfix - Runtime Error for nested sync due to incorrect loop usage + ## May 14th **0.9.2** diff --git 
a/python/aws_lambda_powertools/tracing/tracer.py b/python/aws_lambda_powertools/tracing/tracer.py index 022d8ef89a2..d3f5269ae78 100644 --- a/python/aws_lambda_powertools/tracing/tracer.py +++ b/python/aws_lambda_powertools/tracing/tracer.py @@ -1,4 +1,3 @@ -import asyncio import copy import functools import inspect @@ -250,10 +249,11 @@ def handler(event, context) err Exception raised by method """ + lambda_handler_name = lambda_handler.__name__ @functools.wraps(lambda_handler) def decorate(event, context): - with self.provider.in_subsegment(name=f"## {lambda_handler.__name__}") as subsegment: + with self.provider.in_subsegment(name=f"## {lambda_handler_name}") as subsegment: global is_cold_start if is_cold_start: logger.debug("Annotating cold start") @@ -265,13 +265,12 @@ def decorate(event, context): response = lambda_handler(event, context) logger.debug("Received lambda handler response successfully") logger.debug(response) - if response: - subsegment.put_metadata( - key="lambda handler response", value=response, namespace=self._config["service"] - ) + self._add_response_as_metadata( + function_name=lambda_handler_name, data=response, subsegment=subsegment + ) except Exception as err: - logger.exception("Exception received from lambda handler", exc_info=True) - subsegment.put_metadata(key=f"{self.service} error", value=err, namespace=self._config["service"]) + logger.exception("Exception received from lambda handler") + self._add_full_exception_as_metadata(function_name=self.service, error=err, subsegment=subsegment) raise return response @@ -392,71 +391,79 @@ async def async_tasks(): """ method_name = f"{method.__name__}" - async def decorate_logic( - decorated_method_with_args: functools.partial = None, - subsegment: aws_xray_sdk.core.models.subsegment = None, - coroutine: bool = False, - ) -> Any: - """Decorate logic runs both sync and async decorated methods - - Parameters - ---------- - decorated_method_with_args : functools.partial - Partial decorated 
method with arguments/keyword arguments - subsegment : aws_xray_sdk.core.models.subsegment - X-Ray subsegment to reuse - coroutine : bool, optional - Instruct whether partial decorated method is a wrapped coroutine, by default False - - Returns - ------- - Any - Returns method's response - """ - response = None - try: - logger.debug(f"Calling method: {method_name}") - if coroutine: - response = await decorated_method_with_args() - else: - response = decorated_method_with_args() - logger.debug(f"Received {method_name} response successfully") - logger.debug(response) - except Exception as err: - logger.exception(f"Exception received from '{method_name}' method", exc_info=True) - subsegment.put_metadata(key=f"{method_name} error", value=err, namespace=self._config["service"]) - raise - finally: - if response is not None: - subsegment.put_metadata( # pragma: no cover - key=f"{method_name} response", value=response, namespace=self._config["service"] - ) - - return response - if inspect.iscoroutinefunction(method): @functools.wraps(method) async def decorate(*args, **kwargs): - decorated_method_with_args = functools.partial(method, *args, **kwargs) async with self.provider.in_subsegment_async(name=f"## {method_name}") as subsegment: - return await decorate_logic( - decorated_method_with_args=decorated_method_with_args, subsegment=subsegment, coroutine=True - ) + try: + logger.debug(f"Calling method: {method_name}") + response = await method(*args, **kwargs) + self._add_response_as_metadata(function_name=method_name, data=response, subsegment=subsegment) + except Exception as err: + logger.exception(f"Exception received from '{method_name}' method") + self._add_full_exception_as_metadata( + function_name=method_name, error=err, subsegment=subsegment + ) + raise + + return response else: @functools.wraps(method) def decorate(*args, **kwargs): - loop = asyncio.get_event_loop() - decorated_method_with_args = functools.partial(method, *args, **kwargs) with 
self.provider.in_subsegment(name=f"## {method_name}") as subsegment: - return loop.run_until_complete( - decorate_logic(decorated_method_with_args=decorated_method_with_args, subsegment=subsegment) - ) + try: + logger.debug(f"Calling method: {method_name}") + response = method(*args, **kwargs) + self._add_response_as_metadata(function_name=method_name, data=response, subsegment=subsegment) + except Exception as err: + logger.exception(f"Exception received from '{method_name}' method") + self._add_full_exception_as_metadata( + function_name=method_name, error=err, subsegment=subsegment + ) + raise + + return response return decorate + def _add_response_as_metadata( + self, function_name: str = None, data: Any = None, subsegment: aws_xray_sdk.core.models.subsegment = None + ): + """Add response as metadata for given subsegment + + Parameters + ---------- + function_name : str, optional + function name to add as metadata key, by default None + data : Any, optional + data to add as subsegment metadata, by default None + subsegment : aws_xray_sdk.core.models.subsegment, optional + existing subsegment to add metadata on, by default None + """ + if data is None or subsegment is None: + return + + subsegment.put_metadata(key=f"{function_name} response", value=data, namespace=self._config["service"]) + + def _add_full_exception_as_metadata( + self, function_name: str = None, error: Exception = None, subsegment: aws_xray_sdk.core.models.subsegment = None + ): + """Add full exception object as metadata for given subsegment + + Parameters + ---------- + function_name : str, optional + function name to add as metadata key, by default None + error : Exception, optional + error to add as subsegment metadata, by default None + subsegment : aws_xray_sdk.core.models.subsegment, optional + existing subsegment to add metadata on, by default None + """ + subsegment.put_metadata(key=f"{function_name} error", value=error, namespace=self._config["service"]) + def 
__disable_tracing_provider(self): """Forcefully disables tracing""" logger.debug("Disabling tracer provider...") diff --git a/python/example/hello_world/app.py b/python/example/hello_world/app.py index 35f44de67d2..9a823a2861d 100644 --- a/python/example/hello_world/app.py +++ b/python/example/hello_world/app.py @@ -59,6 +59,21 @@ def my_middleware(handler, event, context, say_hello=False): return ret +@tracer.capture_method +def func_1(): + return 1 + + +@tracer.capture_method +def func_2(): + return 2 + + +@tracer.capture_method +def sums_values(): + return func_1() + func_2() # nested sync calls to reproduce issue #32 + + @metrics.log_metrics @tracer.capture_lambda_handler @my_middleware(say_hello=True) @@ -84,7 +99,7 @@ def lambda_handler(event, context): Return doc: https://docs.aws.amazon.com/apigateway/latest/developerguide/set-up-lambda-proxy-integrations.html """ - + sums_values() async_http_ret = asyncio.run(async_tasks()) if "charge_id" in event: diff --git a/python/pyproject.toml b/python/pyproject.toml index ffd44e243fb..51f92db882d 100644 --- a/python/pyproject.toml +++ b/python/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "aws_lambda_powertools" -version = "0.9.2" +version = "0.9.3" description = "Python utilities for AWS Lambda functions including but not limited to tracing, logging and custom metric" authors = ["Amazon Web Services"] classifiers=[ diff --git a/python/tests/functional/test_tracing.py b/python/tests/functional/test_tracing.py index 8ceb479190a..c74d735ac33 100644 --- a/python/tests/functional/test_tracing.py +++ b/python/tests/functional/test_tracing.py @@ -124,3 +124,24 @@ def test_tracer_reuse(): assert id(tracer_a) != id(tracer_b) assert tracer_a.__dict__.items() == tracer_b.__dict__.items() + + +def test_tracer_method_nested_sync(mocker): + # GIVEN tracer is disabled, decorator is used + # WHEN multiple sync functions are nested + # THEN tracer should not raise a Runtime Error + tracer = Tracer(disabled=True) + + 
@tracer.capture_method + def func_1(): + return 1 + + @tracer.capture_method + def func_2(): + return 2 + + @tracer.capture_method + def sums_values(): + return func_1() + func_2() + + sums_values() diff --git a/python/tests/unit/test_tracing.py b/python/tests/unit/test_tracing.py index f5ad586cb00..f79662601a5 100644 --- a/python/tests/unit/test_tracing.py +++ b/python/tests/unit/test_tracing.py @@ -91,7 +91,7 @@ def handler(event, context): assert in_subsegment_mock.in_subsegment.call_count == 1 assert in_subsegment_mock.in_subsegment.call_args == mocker.call(name="## handler") assert in_subsegment_mock.put_metadata.call_args == mocker.call( - key="lambda handler response", value=dummy_response, namespace="booking" + key="handler response", value=dummy_response, namespace="booking" ) assert in_subsegment_mock.put_annotation.call_count == 1 assert in_subsegment_mock.put_annotation.call_args == mocker.call(key="ColdStart", value=True)