diff --git a/.github/mergify.yml b/.github/mergify.yml index a98b7270199..8e010eeca29 100644 --- a/.github/mergify.yml +++ b/.github/mergify.yml @@ -1,28 +1,29 @@ +queue_rules: + - name: default + conditions: + # Conditions to get out of the queue (= merged) + - check-success=Semantic Pull Request + - "#approved-reviews-by>=1" + - -title~=(WIP|wip) + - -label~="do-not-merge" + - "#changes-requested-reviews-by=0" + pull_request_rules: - name: automatic merge for Dependabot pull requests conditions: - author~=^dependabot(|-preview)\[bot\]$ -# - check-success=build # matrix jobs aren't working in mergify - - -label~="do-not-merge" - - "#approved-reviews-by>=1" # until we exclude major versions in dependabot actions: - merge: - strict: false + queue: + name: default method: squash commit_message: title+body - name: Automatic merge ⬇️ on approval ✔ conditions: - base!=master - - "#approved-reviews-by>=1" - - "#changes-requested-reviews-by=0" - - -title~=(WIP|wip) -# - check-success=build # matrix jobs aren't working in mergify - - check-success=Semantic Pull Request - - body~=(?m)^\[X\] Meet tenets criteria + - "#approved-reviews-by>=2" actions: - merge: - strict: smart + queue: + name: default method: squash - strict_method: merge commit_message: title+body diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index d99db965466..52d7ba29fa9 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -3,10 +3,6 @@ name: "CodeQL" on: push: branches: [develop] - pull_request: - branches: [develop] - schedule: - - cron: '0 21 * * 0' jobs: analyze: diff --git a/.github/workflows/label_pr_on_title.yml b/.github/workflows/label_pr_on_title.yml new file mode 100644 index 00000000000..8d7871c899f --- /dev/null +++ b/.github/workflows/label_pr_on_title.yml @@ -0,0 +1,87 @@ +name: Label PR based on title + +on: + workflow_run: + workflows: ["Record PR number"] + types: + - completed + +jobs: + upload: + runs-on: ubuntu-latest + # Guardrails to only ever run if PR recording workflow was indeed + # run in a PR event and ran successfully + if: > + ${{ github.event.workflow_run.event == 'pull_request' && + github.event.workflow_run.conclusion == 'success' }} + steps: + - name: 'Download artifact' + uses: actions/github-script@v5 + # For security, we only download artifacts tied to the successful PR recording workflow + with: + script: | + const fs = require('fs'); + + const artifacts = await github.rest.actions.listWorkflowRunArtifacts({ + owner: context.repo.owner, + repo: context.repo.repo, + run_id: ${{github.event.workflow_run.id }}, + }); + + const matchArtifact = artifacts.data.artifacts.filter(artifact => artifact.name == "pr")[0]; + + const artifact = await github.rest.actions.downloadArtifact({ + owner: context.repo.owner, + repo: context.repo.repo, + artifact_id: matchArtifact.id, + archive_format: 'zip', + }); + + fs.writeFileSync('${{github.workspace}}/pr.zip', Buffer.from(artifact.data)); + # NodeJS standard library doesn't provide ZIP capabilities; use system `unzip` command instead + - run: unzip pr.zip + + - name: 'Label PR based on title' + uses: actions/github-script@v5 + with: + github-token: ${{ secrets.GITHUB_TOKEN }} + # This safely runs in our base repo, not on fork + # thus allowing us to provide a write access token to label based on PR title + # and label PR based on semantic title accordingly + script: | + const fs = require('fs'); + const pr_number = Number(fs.readFileSync('./number')); + const pr_title = 
fs.readFileSync('./title', 'utf-8').trim(); + + const FEAT_REGEX = /feat(\((\w+)\))?(\:.+)/ + const BUG_REGEX = /(fix|bug)(\((\w+)\))?(\:.+)/ + const DOCS_REGEX = /(docs|doc)(\((\w+)\))?(\:.+)/ + const CHORE_REGEX = /(chore)(\((\w+)\))?(\:.+)/ + const DEPRECATED_REGEX = /(deprecated)(\((\w+)\))?(\:.+)/ + const REFACTOR_REGEX = /(refactor)(\((\w+)\))?(\:.+)/ + + const labels = { + "feature": FEAT_REGEX, + "bug": BUG_REGEX, + "documentation": DOCS_REGEX, + "internal": CHORE_REGEX, + "enhancement": REFACTOR_REGEX, + "deprecated": DEPRECATED_REGEX, + } + + for (const label in labels) { + const matcher = new RegExp(labels[label]) + const isMatch = matcher.exec(pr_title) + if (isMatch != null) { + console.info(`Auto-labeling PR ${pr_number} with ${label}`) + + await github.rest.issues.addLabels({ + issue_number: pr_number, + owner: context.repo.owner, + repo: context.repo.repo, + labels: [label] + }) + + break + } + } diff --git a/.github/workflows/post_release.js b/.github/workflows/post_release.js new file mode 100644 index 00000000000..a174263db28 --- /dev/null +++ b/.github/workflows/post_release.js @@ -0,0 +1,112 @@ +const STAGED_LABEL = "status/staged-next-release"; + +/** + * Fetch issues using GitHub REST API + * + * @param {object} gh_client - Pre-authenticated REST client (Octokit) + * @param {string} org - GitHub Organization + * @param {string} repository - GitHub repository + * @param {string} state - GitHub issue state (open, closed) + * @param {string} label - Comma-separated issue labels to fetch + * @return {Object[]} issues - Array of issues matching params + * @see {@link https://octokit.github.io/rest.js/v18#usage|Octokit client} + */ +const fetchIssues = async ({ + gh_client, + org, + repository, + state = "open", + label = STAGED_LABEL, +}) => { + + try { + const { data: issues } = await gh_client.rest.issues.listForRepo({ + owner: org, + repo: repository, + state: state, + labels: label, + }); + + return issues; + + } catch (error) { + console.error(error); + throw new Error("Failed to fetch issues") + } + +}; + +/** + * Notify new release and close staged GitHub issue + * + * @param {object} gh_client - Pre-authenticated REST client (Octokit) + * @param {string} owner - GitHub Organization + * @param {string} repository - GitHub repository + * @param {string} release_version - GitHub Release version + * @see {@link https://octokit.github.io/rest.js/v18#usage|Octokit client} + */ +const notifyRelease = async ({ + gh_client, + owner, + repository, + release_version, +}) => { + const release_url = `https://github.com/${owner}/${repository}/releases/tag/v${release_version}`; + + const issues = await fetchIssues({ + gh_client: gh_client, + org: owner, + repository: repository, + }); + + issues.forEach(async (issue) => { + console.info(`Updating issue number ${issue.number}`); + + const comment = `This is now released under [${release_version}](${release_url}) version!`; + try { + await gh_client.rest.issues.createComment({ + owner: owner, + repo: repository, + body: comment, + issue_number: issue.number, + }); + } catch (error) { + console.error(error); + throw new Error(`Failed to update issue ${issue.number} about ${release_version} release`) + } + + + // Close issue and remove staged label; keep existing ones + const labels = issue.labels + .filter((label) => label.name != STAGED_LABEL) + .map((label) => label.name); + + try { + await gh_client.rest.issues.update({ + repo: repository, + owner: owner, + issue_number: issue.number, + state: "closed", + labels: labels, + }); + 
} catch (error) { + console.error(error); + throw new Error("Failed to close issue") + } + + console.info(`Issue number ${issue.number} closed and updated`); + }); +}; + +// context: https://github.com/actions/toolkit/blob/main/packages/github/src/context.ts +module.exports = async ({ github, context }) => { + const { RELEASE_TAG_VERSION } = process.env; + console.log(`Running post-release script for ${RELEASE_TAG_VERSION} version`); + + await notifyRelease({ + gh_client: github, + owner: context.repo.owner, + repository: context.repo.repo, + release_version: RELEASE_TAG_VERSION, + }); +}; diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml index 1a8f26db879..f1f589ca270 100644 --- a/.github/workflows/publish.yml +++ b/.github/workflows/publish.yml @@ -22,6 +22,7 @@ name: Publish to PyPi # 8. Builds a fresh version of docs including Changelog updates # 9. Push latest release source code to master using release title as the commit message # 10. Builds latest documentation for new release, and update latest alias pointing to the new release tag +# 11. Close and notify all issues labeled "status/staged-next-release" about the release details # # === Fallback mechanism due to external failures === @@ -107,6 +108,12 @@ jobs: publish_dir: ./api keep_files: true destination_dir: latest/api + - name: Close issues related to this release + uses: actions/github-script@v5 + with: + script: | + const post_release = require('.github/workflows/post_release.js') + await post_release({github, context, core}) sync_master: needs: release diff --git a/.github/workflows/record_pr.yml b/.github/workflows/record_pr.yml new file mode 100644 index 00000000000..7ec726443cf --- /dev/null +++ b/.github/workflows/record_pr.yml @@ -0,0 +1,21 @@ +name: Record PR number + +on: + pull_request: + types: [opened, edited] + +jobs: + build: + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v2 + - name: Save PR number + run: | + mkdir -p ./pr + echo ${{ github.event.number }} > ./pr/number + echo "${{ github.event.pull_request.title }}" > ./pr/title + - uses: actions/upload-artifact@v2 + with: + name: pr + path: pr/ diff --git a/CHANGELOG.md b/CHANGELOG.md index e90fa14021a..36e435cb6f3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,35 @@ All notable changes to this project will be documented in this file. This project follows [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) format for changes and adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). 
+## 1.24.1 - 2022-01-20
+
+### Bug Fixes
+
+* **batch:** report multiple failures ([#967](https://github.com/awslabs/aws-lambda-powertools-python/issues/967))
+* **data-classes:** docstring typos and clean up ([#937](https://github.com/awslabs/aws-lambda-powertools-python/issues/937))
+* **parameters:** appconfig internal _get docstrings ([#934](https://github.com/awslabs/aws-lambda-powertools-python/issues/934))
+
+### Documentation
+
+* rename quickstart to tutorial in readme
+* **batch:** snippet typo on batch processed messages iteration ([#951](https://github.com/awslabs/aws-lambda-powertools-python/issues/951))
+* **batch:** fix typo in context manager keyword ([#938](https://github.com/awslabs/aws-lambda-powertools-python/issues/938))
+* **homepage:** link to typescript version ([#950](https://github.com/awslabs/aws-lambda-powertools-python/issues/950))
+* **install:** new lambda layer for 1.24.0 release
+* **metrics:** keep it consistent with other sections, update metric names
+* **nav:** make REST and GraphQL event handlers more explicit ([#959](https://github.com/awslabs/aws-lambda-powertools-python/issues/959))
+* **quickstart:** expand on intro line
+* **quickstart:** tidy requirements up
+* **quickstart:** same process for Logger
+* **quickstart:** add sub-sections, fix highlight & code
+* **quickstart:** sentence fragmentation, tidy up
+* **quickstart:** make section agnostic to json lib
+* **tenets:** make core, non-core more explicit
+* **tracer:** update ServiceLens image w/ API GW, copywriting
+* **tracer:** warning to note on local traces
+* **tracer:** add initial image, requirements
+* **tracer:** add annotation, metadata, and image
+
 ## 1.24.0 - 2021-12-31
 
 ### Bug Fixes
diff --git a/README.md b/README.md
index 46c5db5ad6c..56ad5c0b70c 100644
--- a/README.md
+++ b/README.md
@@ -4,11 +4,9 @@
 [![codecov.io](https://codecov.io/github/awslabs/aws-lambda-powertools-python/branch/develop/graphs/badge.svg)](https://app.codecov.io/gh/awslabs/aws-lambda-powertools-python) ![PythonSupport](https://img.shields.io/static/v1?label=python&message=3.6%20|%203.7|%203.8|%203.9&color=blue?style=flat-square&logo=python) ![PyPI version](https://badge.fury.io/py/aws-lambda-powertools.svg) ![PyPi monthly downloads](https://img.shields.io/pypi/dm/aws-lambda-powertools)
-A suite of Python utilities for AWS Lambda functions to ease adopting best practices such as tracing, structured logging, custom metrics, and more. ([AWS Lambda Powertools Java](https://github.com/awslabs/aws-lambda-powertools-java) is also available).
+A suite of Python utilities for AWS Lambda functions to ease adopting best practices such as tracing, structured logging, custom metrics, and more. (AWS Lambda Powertools [Java](https://github.com/awslabs/aws-lambda-powertools-java) and [TypeScript](https://github.com/awslabs/aws-lambda-powertools-typescript) are also available).
- - -**[📜Documentation](https://awslabs.github.io/aws-lambda-powertools-python/)** | **[🐍PyPi](https://pypi.org/project/aws-lambda-powertools/)** | **[Roadmap](https://github.com/awslabs/aws-lambda-powertools-roadmap/projects/1)** | **[Quick hello world example](https://github.com/aws-samples/cookiecutter-aws-sam-python)** | **[Detailed blog post](https://aws.amazon.com/blogs/opensource/simplifying-serverless-best-practices-with-lambda-powertools/)** +**[📜Documentation](https://awslabs.github.io/aws-lambda-powertools-python/)** | **[🐍PyPi](https://pypi.org/project/aws-lambda-powertools/)** | **[Roadmap](https://github.com/awslabs/aws-lambda-powertools-roadmap/projects/1)** | **[Detailed blog post](https://aws.amazon.com/blogs/opensource/simplifying-serverless-best-practices-with-lambda-powertools/)** > **An AWS Developer Acceleration (DevAx) initiative by Specialist Solution Architects | aws-devax-open-source@amazon.com** @@ -29,12 +27,15 @@ A suite of Python utilities for AWS Lambda functions to ease adopting best pract * **[Idempotency](https://awslabs.github.io/aws-lambda-powertools-python/latest/utilities/idempotency/)** - Convert your Lambda functions into idempotent operations which are safe to retry * **[Feature Flags](https://awslabs.github.io/aws-lambda-powertools-python/latest/utilities/feature_flags/)** - A simple rule engine to evaluate when one or multiple features should be enabled depending on the input + ### Installation With [pip](https://pip.pypa.io/en/latest/index.html) installed, run: ``pip install aws-lambda-powertools`` -## Examples +## Tutorial and Examples + +* [Tutorial](https://awslabs.github.io/aws-lambda-powertools-python/latest/tutorial) * [Serverless Shopping cart](https://github.com/aws-samples/aws-serverless-shopping-cart) * [Serverless Airline](https://github.com/aws-samples/aws-serverless-airline-booking) * [Serverless E-commerce platform](https://github.com/aws-samples/aws-serverless-ecommerce-platform) @@ -45,10 +46,9 @@ With [pip](https://pip.pypa.io/en/latest/index.html) installed, run: ``pip insta * Structured logging initial implementation from [aws-lambda-logging](https://gitlab.com/hadrien/aws_lambda_logging) * Powertools idea [DAZN Powertools](https://github.com/getndazn/dazn-lambda-powertools/) - ## Connect -* **AWS Developers Slack**: `#lambda-powertools`** - **[Invite, if you don't have an account](https://join.slack.com/t/awsdevelopers/shared_invite/zt-yryddays-C9fkWrmguDv0h2EEDzCqvw)** +* **AWS Developers Slack**: `#lambda-powertools` - **[Invite, if you don't have an account](https://join.slack.com/t/awsdevelopers/shared_invite/zt-yryddays-C9fkWrmguDv0h2EEDzCqvw)** * **Email**: aws-lambda-powertools-feedback@amazon.com ## License diff --git a/aws_lambda_powertools/event_handler/api_gateway.py b/aws_lambda_powertools/event_handler/api_gateway.py index 30c13ada6b5..5017597c0f1 100644 --- a/aws_lambda_powertools/event_handler/api_gateway.py +++ b/aws_lambda_powertools/event_handler/api_gateway.py @@ -101,7 +101,7 @@ def __init__( only be used during development. allow_headers: Optional[List[str]] The list of additional allowed headers. This list is added to list of - built in allowed headers: `Authorization`, `Content-Type`, `X-Amz-Date`, + built-in allowed headers: `Authorization`, `Content-Type`, `X-Amz-Date`, `X-Api-Key`, `X-Amz-Security-Token`. 
expose_headers: Optional[List[str]]
            A list of values to return for the Access-Control-Expose-Headers
diff --git a/aws_lambda_powertools/logging/utils.py b/aws_lambda_powertools/logging/utils.py
new file mode 100644
index 00000000000..f0e39ddf8f0
--- /dev/null
+++ b/aws_lambda_powertools/logging/utils.py
@@ -0,0 +1,81 @@
+import logging
+from typing import Callable, List, Optional, Set, Union
+
+from .logger import Logger
+
+
+def copy_config_to_registered_loggers(
+    source_logger: Logger,
+    log_level: Optional[str] = None,
+    exclude: Optional[Set[str]] = None,
+    include: Optional[Set[str]] = None,
+) -> None:
+
+    """Copies source Logger level and handler to all registered loggers for consistent formatting.
+
+    Parameters
+    ----------
+    source_logger : Logger
+        Powertools Logger to copy configuration from
+    log_level : str, optional
+        Logging level to set to registered loggers, by default uses source_logger logging level
+    include : Optional[Set[str]], optional
+        Set of logger names to include, by default all registered loggers are included
+    exclude : Optional[Set[str]], optional
+        Set of logger names to exclude, by default None
+    """
+
+    level = log_level or source_logger.level
+
+    # Assumptions: Only take parent loggers not children (dot notation rule)
+    # Steps:
+    # 1. Default operation: Include all registered loggers
+    # 2. Only include set? Only add Loggers in the list and ignore all else
+    # 3. Include and exclude set? Add Logger if it's in include and not in exclude
+    # 4. Only exclude set? Ignore Logger in the excluding list
+
+    # Exclude source logger by default
+    if exclude:
+        exclude.add(source_logger.name)
+    else:
+        exclude = {source_logger.name}
+
+    # Prepare loggers set
+    if include:
+        loggers = include.difference(exclude)
+        filter_func = _include_registered_loggers_filter
+    else:
+        loggers = exclude
+        filter_func = _exclude_registered_loggers_filter
+
+    registered_loggers = _find_registered_loggers(source_logger, loggers, filter_func)
+    for logger in registered_loggers:
+        _configure_logger(source_logger, logger, level)
+
+
+def _include_registered_loggers_filter(loggers: Set[str]) -> List[logging.Logger]:
+    return [logging.getLogger(name) for name in logging.root.manager.loggerDict if "." not in name and name in loggers]
+
+
+def _exclude_registered_loggers_filter(loggers: Set[str]) -> List[logging.Logger]:
+    return [
+        logging.getLogger(name) for name in logging.root.manager.loggerDict if "."
not in name and name not in loggers + ] + + +def _find_registered_loggers( + source_logger: Logger, loggers: Set[str], filter_func: Callable[[Set[str]], List[logging.Logger]] +) -> List[logging.Logger]: + """Filter root loggers based on provided parameters.""" + root_loggers = filter_func(loggers) + source_logger.debug(f"Filtered root loggers: {root_loggers}") + return root_loggers + + +def _configure_logger(source_logger: Logger, logger: logging.Logger, level: Union[int, str]) -> None: + logger.handlers = [] + logger.setLevel(level) + source_logger.debug(f"Logger {logger} reconfigured to use logging level {level}") + for source_handler in source_logger.handlers: + logger.addHandler(source_handler) + source_logger.debug(f"Logger {logger} reconfigured to use {source_handler}") diff --git a/aws_lambda_powertools/utilities/batch/base.py b/aws_lambda_powertools/utilities/batch/base.py index d8fdc2d85f2..21b59328ef0 100644 --- a/aws_lambda_powertools/utilities/batch/base.py +++ b/aws_lambda_powertools/utilities/batch/base.py @@ -385,7 +385,7 @@ def _clean(self): ) messages = self._get_messages_to_report() - self.batch_response = {"batchItemFailures": [messages]} + self.batch_response = {"batchItemFailures": messages} def _has_messages_to_report(self) -> bool: if self.fail_messages: @@ -397,7 +397,7 @@ def _has_messages_to_report(self) -> bool: def _entire_batch_failed(self) -> bool: return len(self.exceptions) == len(self.records) - def _get_messages_to_report(self) -> Dict[str, str]: + def _get_messages_to_report(self) -> List[Dict[str, str]]: """ Format messages to use in batch deletion """ @@ -406,20 +406,25 @@ def _get_messages_to_report(self) -> Dict[str, str]: # Event Source Data Classes follow python idioms for fields # while Parser/Pydantic follows the event field names to the latter def _collect_sqs_failures(self): - if self.model: - return {"itemIdentifier": msg.messageId for msg in self.fail_messages} - return {"itemIdentifier": msg.message_id for msg in self.fail_messages} + failures = [] + for msg in self.fail_messages: + msg_id = msg.messageId if self.model else msg.message_id + failures.append({"itemIdentifier": msg_id}) + return failures def _collect_kinesis_failures(self): - if self.model: - # Pydantic model uses int but Lambda poller expects str - return {"itemIdentifier": msg.kinesis.sequenceNumber for msg in self.fail_messages} - return {"itemIdentifier": msg.kinesis.sequence_number for msg in self.fail_messages} + failures = [] + for msg in self.fail_messages: + msg_id = msg.kinesis.sequenceNumber if self.model else msg.kinesis.sequence_number + failures.append({"itemIdentifier": msg_id}) + return failures def _collect_dynamodb_failures(self): - if self.model: - return {"itemIdentifier": msg.dynamodb.SequenceNumber for msg in self.fail_messages} - return {"itemIdentifier": msg.dynamodb.sequence_number for msg in self.fail_messages} + failures = [] + for msg in self.fail_messages: + msg_id = msg.dynamodb.SequenceNumber if self.model else msg.dynamodb.sequence_number + failures.append({"itemIdentifier": msg_id}) + return failures @overload def _to_batch_type(self, record: dict, event_type: EventType, model: "BatchTypeModels") -> "BatchTypeModels": diff --git a/aws_lambda_powertools/utilities/data_classes/api_gateway_authorizer_event.py b/aws_lambda_powertools/utilities/data_classes/api_gateway_authorizer_event.py index 4682711af92..327d37238aa 100644 --- a/aws_lambda_powertools/utilities/data_classes/api_gateway_authorizer_event.py +++ 
b/aws_lambda_powertools/utilities/data_classes/api_gateway_authorizer_event.py @@ -28,11 +28,12 @@ def __init__( self.api_id = api_id self.stage = stage self.http_method = http_method - self.resource = resource + # Remove matching "/" from `resource`. + self.resource = resource.lstrip("/") @property def arn(self) -> str: - """Build an arn from it's parts + """Build an arn from its parts eg: arn:aws:execute-api:us-east-1:123456789012:abcdef123/test/GET/request""" return ( f"arn:{self.partition}:execute-api:{self.region}:{self.aws_account_id}:{self.api_id}/{self.stage}/" @@ -168,7 +169,7 @@ def get_header_value( default_value: str, optional Default value if no value was found by name case_sensitive: bool - Whether to use a case sensitive look up + Whether to use a case-sensitive look up Returns ------- str, optional @@ -270,7 +271,7 @@ def get_header_value( default_value: str, optional Default value if no value was found by name case_sensitive: bool - Whether to use a case sensitive look up + Whether to use a case-sensitive look up Returns ------- str, optional @@ -440,9 +441,6 @@ def _add_route(self, effect: str, http_method: str, resource: str, conditions: O if not self._resource_pattern.match(resource): raise ValueError(f"Invalid resource path: {resource}. Path should match {self.path_regex}") - if resource[:1] == "/": - resource = resource[1:] - resource_arn = APIGatewayRouteArn( self.region, self.aws_account_id, self.api_id, self.stage, http_method, resource ).arn diff --git a/aws_lambda_powertools/utilities/data_classes/appsync_resolver_event.py b/aws_lambda_powertools/utilities/data_classes/appsync_resolver_event.py index 56d37851631..89f774293e7 100644 --- a/aws_lambda_powertools/utilities/data_classes/appsync_resolver_event.py +++ b/aws_lambda_powertools/utilities/data_classes/appsync_resolver_event.py @@ -27,7 +27,7 @@ def source_ip(self) -> List[str]: @property def username(self) -> str: - """The user name of the authenticated user. IAM user principal""" + """The username of the authenticated user. IAM user principal""" return self["username"] @property @@ -72,7 +72,7 @@ def source_ip(self) -> List[str]: @property def username(self) -> str: - """The user name of the authenticated user.""" + """The username of the authenticated user.""" return self["username"] @property @@ -172,7 +172,7 @@ def arguments(self) -> Dict[str, Any]: def identity(self) -> Union[None, AppSyncIdentityIAM, AppSyncIdentityCognito]: """An object that contains information about the caller. 
-        Depending of the type of identify found:
+        Depending on the type of identity found:
 
         - API_KEY authorization - returns None
         - AWS_IAM authorization - returns AppSyncIdentityIAM
@@ -223,7 +223,7 @@ def get_header_value(
         default_value: str, optional
             Default value if no value was found by name
         case_sensitive: bool
-            Whether to use a case sensitive look up
+            Whether to use a case-sensitive look up
         Returns
         -------
         str, optional
diff --git a/aws_lambda_powertools/utilities/data_classes/cognito_user_pool_event.py b/aws_lambda_powertools/utilities/data_classes/cognito_user_pool_event.py
index df2726ee722..a97bf26a16f 100644
--- a/aws_lambda_powertools/utilities/data_classes/cognito_user_pool_event.py
+++ b/aws_lambda_powertools/utilities/data_classes/cognito_user_pool_event.py
@@ -195,7 +195,7 @@ def final_user_status(self) -> Optional[str]:
     @final_user_status.setter
     def final_user_status(self, value: str):
         """During sign-in, this attribute can be set to CONFIRMED, or not set, to auto-confirm your users and
-        allow them to sign-in with their previous passwords. This is the simplest experience for the user.
+        allow them to sign in with their previous passwords. This is the simplest experience for the user.
 
         If this attribute is set to RESET_REQUIRED, the user is required to change his or her password immediately
         after migration at the time of sign-in, and your client app needs to handle the PasswordResetRequiredException
@@ -333,7 +333,7 @@ class CustomMessageTriggerEvent(BaseTriggerEvent):
       verification code automatically to the user. Cannot be used for other attributes.
     - `CustomMessage_VerifyUserAttribute` This trigger sends a verification code to the user when they manually
       request it for a new email or phone number.
-    - `CustomMessage_Authentication` To send MFA code during authentication.
+    - `CustomMessage_Authentication` To send MFA codes during authentication.
 
     Documentation:
     --------------
@@ -590,7 +590,7 @@ def user_attributes(self) -> Dict[str, str]:
     @property
     def user_not_found(self) -> Optional[bool]:
         """A Boolean that is populated when PreventUserExistenceErrors is set to ENABLED for your user pool client.
-        A value of true means that the user id (user name, email address, etc.) did not match any existing users."""
+        A value of true means that the user id (username, email address, etc.) did not match any existing users."""
         return self["request"].get("userNotFound")
 
     @property
@@ -601,7 +601,7 @@ def session(self) -> List[ChallengeResult]:
     @property
     def client_metadata(self) -> Optional[Dict[str, str]]:
         """One or more key-value pairs that you can provide as custom input to the Lambda function that you specify
-        for the define auth challenge trigger."""
+        for the "Define Auth Challenge" trigger."""
         return self["request"].get("clientMetadata")
 
 
@@ -687,7 +687,7 @@ def session(self) -> List[ChallengeResult]:
     @property
     def client_metadata(self) -> Optional[Dict[str, str]]:
         """One or more key-value pairs that you can provide as custom input to the Lambda function that you
-        specify for the create auth challenge trigger."""
+        specify for the "Create Auth Challenge" trigger."""
         return self["request"].get("clientMetadata")
 
 
@@ -699,7 +699,7 @@ def public_challenge_parameters(self) -> Dict[str, str]:
     @public_challenge_parameters.setter
     def public_challenge_parameters(self, value: Dict[str, str]):
         """One or more key-value pairs for the client app to use in the challenge to be presented to the user.
-        This parameter should contain all of the necessary information to accurately present the challenge to
+        This parameter should contain all the necessary information to accurately present the challenge to
         the user."""
         self["response"]["publicChallengeParameters"] = value
 
@@ -709,8 +709,8 @@ def private_challenge_parameters(self) -> Dict[str, str]:
     @private_challenge_parameters.setter
     def private_challenge_parameters(self, value: Dict[str, str]):
-        """This parameter is only used by the Verify Auth Challenge Response Lambda trigger.
-        This parameter should contain all of the information that is required to validate the user's
+        """This parameter is only used by the "Verify Auth Challenge Response" Lambda trigger.
+        This parameter should contain all the information that is required to validate the user's
         response to the challenge. In other words, the publicChallengeParameters parameter contains the
         question that is presented to the user and privateChallengeParameters contains the valid answers
         for the question."""
@@ -730,7 +730,7 @@ class CreateAuthChallengeTriggerEvent(BaseTriggerEvent):
     """Create Auth Challenge Lambda Trigger
 
     Amazon Cognito invokes this trigger after Define Auth Challenge if a custom challenge has been
-    specified as part of the Define Auth Challenge trigger.
+    specified as part of the "Define Auth Challenge" trigger.
     It creates a custom authentication flow.
 
     Notes:
@@ -775,7 +775,7 @@ def challenge_answer(self) -> Any:
     @property
     def client_metadata(self) -> Optional[Dict[str, str]]:
         """One or more key-value pairs that you can provide as custom input to the Lambda function that
-        you specify for the verify auth challenge trigger."""
+        you specify for the "Verify Auth Challenge Response" trigger."""
         return self["request"].get("clientMetadata")
 
     @property
diff --git a/aws_lambda_powertools/utilities/data_classes/dynamo_db_stream_event.py b/aws_lambda_powertools/utilities/data_classes/dynamo_db_stream_event.py
index 01d892f9edc..7e209fab3e2 100644
--- a/aws_lambda_powertools/utilities/data_classes/dynamo_db_stream_event.py
+++ b/aws_lambda_powertools/utilities/data_classes/dynamo_db_stream_event.py
@@ -231,7 +231,7 @@ def aws_region(self) -> Optional[str]:
     @property
     def dynamodb(self) -> Optional[StreamRecord]:
-        """The main body of the stream record, containing all of the DynamoDB-specific fields."""
+        """The main body of the stream record, containing all the DynamoDB-specific fields."""
         stream_record = self.get("dynamodb")
         return None if stream_record is None else StreamRecord(stream_record)
 
diff --git a/aws_lambda_powertools/utilities/data_classes/s3_object_event.py b/aws_lambda_powertools/utilities/data_classes/s3_object_event.py
index b22434c68e3..d4f97b725bf 100644
--- a/aws_lambda_powertools/utilities/data_classes/s3_object_event.py
+++ b/aws_lambda_powertools/utilities/data_classes/s3_object_event.py
@@ -82,7 +82,7 @@ def get_header_value(
         default_value: str, optional
             Default value if no value was found by name
         case_sensitive: bool
-            Whether to use a case sensitive look up
+            Whether to use a case-sensitive look up
         Returns
         -------
         str, optional
@@ -128,7 +128,7 @@ def creation_date(self) -> str:
     @property
     def mfa_authenticated(self) -> str:
         """The value is true if the root user or IAM user whose credentials were used for the request also was
-        authenticated with an MFA device; otherwise, false.."""
+        authenticated with an MFA device; otherwise, false."""
         return self["mfaAuthenticated"]
 
diff --git a/aws_lambda_powertools/utilities/data_classes/sqs_event.py b/aws_lambda_powertools/utilities/data_classes/sqs_event.py
index 57caeea4cc2..1b93a775bca 100644
--- a/aws_lambda_powertools/utilities/data_classes/sqs_event.py
+++ b/aws_lambda_powertools/utilities/data_classes/sqs_event.py
@@ -76,7 +76,7 @@ def data_type(self) -> str:
 
 class SQSMessageAttributes(Dict[str, SQSMessageAttribute]):
     def __getitem__(self, key: str) -> Optional[SQSMessageAttribute]:  # type: ignore
-        item = super(SQSMessageAttributes, self).get(key)
+        item = super().get(key)
         return None if item is None else SQSMessageAttribute(item)  # type: ignore
 
diff --git a/aws_lambda_powertools/utilities/parameters/appconfig.py b/aws_lambda_powertools/utilities/parameters/appconfig.py
index d1613c14513..3455617e952 100644
--- a/aws_lambda_powertools/utilities/parameters/appconfig.py
+++ b/aws_lambda_powertools/utilities/parameters/appconfig.py
@@ -92,10 +92,8 @@ def _get(self, name: str, **sdk_options) -> str:
         ----------
         name: str
             Name of the configuration
-        environment: str
-            Environment of the configuration
         sdk_options: dict, optional
-            Dictionary of options that will be passed to the Parameter Store get_parameter API call
+            Dictionary of options that will be passed to the client's get_configuration API call
         """
 
         sdk_options["Configuration"] = name
@@ -140,7 +138,7 @@ def get_app_config(
     max_age: int
         Maximum age of the cached value
     sdk_options: dict, optional
-        Dictionary of options that will be passed to the Parameter Store get_parameter API call
+        Dictionary of options that will be passed to the boto client get_configuration API call
 
     Raises
     ------
@@ -163,7 +161,7 @@ def get_app_config(
 
     **Retrieves a configuration value and decodes it using a JSON decoder**
 
-    >>> from aws_lambda_powertools.utilities.parameters import get_parameter
+    >>> from aws_lambda_powertools.utilities.parameters import get_app_config
     >>>
     >>> value = get_app_config("my_config", environment="my_env", application="my_env", transform='json')
    >>>
diff --git a/docs/core/event_handler/api_gateway.md b/docs/core/event_handler/api_gateway.md
index 2e5c8af532a..ae0960875c2 100644
--- a/docs/core/event_handler/api_gateway.md
+++ b/docs/core/event_handler/api_gateway.md
@@ -1,5 +1,5 @@
 ---
-title: API Gateway
+title: REST API
 description: Core utility
 ---
 
diff --git a/docs/core/event_handler/appsync.md b/docs/core/event_handler/appsync.md
index 261440004cc..a4d5f635886 100644
--- a/docs/core/event_handler/appsync.md
+++ b/docs/core/event_handler/appsync.md
@@ -1,5 +1,5 @@
 ---
-title: Appsync
+title: GraphQL API
 description: Core utility
 ---
 
diff --git a/docs/core/logger.md b/docs/core/logger.md
index 6b3370be174..31f21147659 100644
--- a/docs/core/logger.md
+++ b/docs/core/logger.md
@@ -1066,6 +1066,25 @@ def handler(event: Dict, context: LambdaContext) -> List:
     return response.get("Buckets", [])
 ```
 
+**How can I enable powertools logging for imported libraries?**
+
+You can copy the Logger setup to all or sub-sets of registered external loggers. Use the `copy_config_to_registered_loggers` function to do this. By default, all registered loggers will be modified. You can change this behaviour by providing `include` and `exclude` arguments. You can also provide an optional `log_level` argument that external loggers will be configured with.
+
+```python hl_lines="10" title="Cloning Logger config to all other registered standard loggers"
+import logging
+
+from aws_lambda_powertools import Logger
+from aws_lambda_powertools.logging import utils
+
+logger = Logger()
+
+external_logger = logging.getLogger("example")
+
+utils.copy_config_to_registered_loggers(source_logger=logger)
+external_logger.info("test message")
+```
+
 **What's the difference between `append_keys` and `extra`?**
 
 Keys added with `append_keys` will persist across multiple log messages while keys added via `extra` will only be available in a given log message operation.
diff --git a/docs/index.md b/docs/index.md
index 5683fdd38be..cb970ed9792 100644
--- a/docs/index.md
+++ b/docs/index.md
@@ -11,7 +11,7 @@ A suite of utilities for AWS Lambda functions to ease adopting best practices su
 
 ## Tenets
 
-This project separates core utilities that will be available in other runtimes vs general utilities that might not be available across all runtimes.
+Core utilities such as Tracing, Logging, Metrics, and Event Handler will be available across all Lambda Powertools runtimes. Additional utilities are specific to each language ecosystem and customer demand.
 
 * **AWS Lambda only**. We optimise for AWS Lambda function environments and supported runtimes only. Utilities might work with web frameworks and non-Lambda environments, though they are not officially supported.
 * **Eases the adoption of best practices**. The main priority of the utilities is to facilitate best practices adoption, as defined in the AWS Well-Architected Serverless Lens; all other functionality is optional.
@@ -24,7 +24,7 @@ This project separates core utilities that will be available in other runtimes v
 
 Powertools is available in the following formats:
 
-* **Lambda Layer**: [**arn:aws:lambda:{region}:017000801446:layer:AWSLambdaPowertoolsPython:6 :clipboard:**](#){: .copyMe}
+* **Lambda Layer**: [**arn:aws:lambda:{region}:017000801446:layer:AWSLambdaPowertoolsPython:7 :clipboard:**](#){: .copyMe}
 * **PyPi**: **`pip install aws-lambda-powertools`**
 
 ### Lambda Layer
 
@@ -37,23 +37,23 @@ You can include Lambda Powertools Lambda Layer using [AWS Lambda Console](https:
 
     | Region | Layer ARN
     |--------------------------- | ---------------------------
-    | `us-east-1` | [arn:aws:lambda:us-east-1:017000801446:layer:AWSLambdaPowertoolsPython:6 :clipboard:](#){: .copyMe}
-    | `us-east-2` | [arn:aws:lambda:us-east-2:017000801446:layer:AWSLambdaPowertoolsPython:6 :clipboard:](#){: .copyMe}
-    | `us-west-1` | [arn:aws:lambda:us-west-1:017000801446:layer:AWSLambdaPowertoolsPython:6 :clipboard:](#){: .copyMe}
-    | `us-west-2` | [arn:aws:lambda:us-west-2:017000801446:layer:AWSLambdaPowertoolsPython:6 :clipboard:](#){: .copyMe}
-    | `ap-south-1` | [arn:aws:lambda:ap-south-1:017000801446:layer:AWSLambdaPowertoolsPython:6 :clipboard:](#){: .copyMe}
-    | `ap-northeast-1` | [arn:aws:lambda:ap-northeast-1:017000801446:layer:AWSLambdaPowertoolsPython:6 :clipboard:](#){: .copyMe}
-    | `ap-northeast-2` | [arn:aws:lambda:ap-northeast-2:017000801446:layer:AWSLambdaPowertoolsPython:6 :clipboard:](#){: .copyMe}
-    | `ap-northeast-3` | [arn:aws:lambda:ap-northeast-3:017000801446:layer:AWSLambdaPowertoolsPython:6 :clipboard:](#){: .copyMe}
-    | `ap-southeast-1` | [arn:aws:lambda:ap-southeast-1:017000801446:layer:AWSLambdaPowertoolsPython:6 :clipboard:](#){: .copyMe}
-    | `ap-southeast-2` | [arn:aws:lambda:ap-southeast-2:017000801446:layer:AWSLambdaPowertoolsPython:6 :clipboard:](#){: .copyMe}
-    | `eu-central-1` | 
[arn:aws:lambda:eu-central-1:017000801446:layer:AWSLambdaPowertoolsPython:6 :clipboard:](#){: .copyMe} - | `eu-west-1` | [arn:aws:lambda:eu-west-1:017000801446:layer:AWSLambdaPowertoolsPython:6 :clipboard:](#){: .copyMe} - | `eu-west-2` | [arn:aws:lambda:eu-west-2:017000801446:layer:AWSLambdaPowertoolsPython:6 :clipboard:](#){: .copyMe} - | `eu-west-3` | [arn:aws:lambda:eu-west-3:017000801446:layer:AWSLambdaPowertoolsPython:6 :clipboard:](#){: .copyMe} - | `eu-north-1` | [arn:aws:lambda:eu-north-1:017000801446:layer:AWSLambdaPowertoolsPython:6 :clipboard:](#){: .copyMe} - | `ca-central-1` | [arn:aws:lambda:ca-central-1:017000801446:layer:AWSLambdaPowertoolsPython:6 :clipboard:](#){: .copyMe} - | `sa-east-1` | [arn:aws:lambda:sa-east-1:017000801446:layer:AWSLambdaPowertoolsPython:6 :clipboard:](#){: .copyMe} + | `us-east-1` | [arn:aws:lambda:us-east-1:017000801446:layer:AWSLambdaPowertoolsPython:7 :clipboard:](#){: .copyMe} + | `us-east-2` | [arn:aws:lambda:us-east-2:017000801446:layer:AWSLambdaPowertoolsPython:7 :clipboard:](#){: .copyMe} + | `us-west-1` | [arn:aws:lambda:us-west-1:017000801446:layer:AWSLambdaPowertoolsPython:7 :clipboard:](#){: .copyMe} + | `us-west-2` | [arn:aws:lambda:us-west-2:017000801446:layer:AWSLambdaPowertoolsPython:7 :clipboard:](#){: .copyMe} + | `ap-south-1` | [arn:aws:lambda:ap-south-1:017000801446:layer:AWSLambdaPowertoolsPython:7 :clipboard:](#){: .copyMe} + | `ap-northeast-1` | [arn:aws:lambda:ap-northeast-1:017000801446:layer:AWSLambdaPowertoolsPython:7 :clipboard:](#){: .copyMe} + | `ap-northeast-2` | [arn:aws:lambda:ap-northeast-2:017000801446:layer:AWSLambdaPowertoolsPython:7 :clipboard:](#){: .copyMe} + | `ap-northeast-3` | [arn:aws:lambda:ap-northeast-3:017000801446:layer:AWSLambdaPowertoolsPython:7 :clipboard:](#){: .copyMe} + | `ap-southeast-1` | [arn:aws:lambda:ap-southeast-1:017000801446:layer:AWSLambdaPowertoolsPython:7 :clipboard:](#){: .copyMe} + | `ap-southeast-2` | [arn:aws:lambda:ap-southeast-2:017000801446:layer:AWSLambdaPowertoolsPython:7 :clipboard:](#){: .copyMe} + | `eu-central-1` | [arn:aws:lambda:eu-central-1:017000801446:layer:AWSLambdaPowertoolsPython:7 :clipboard:](#){: .copyMe} + | `eu-west-1` | [arn:aws:lambda:eu-west-1:017000801446:layer:AWSLambdaPowertoolsPython:7 :clipboard:](#){: .copyMe} + | `eu-west-2` | [arn:aws:lambda:eu-west-2:017000801446:layer:AWSLambdaPowertoolsPython:7 :clipboard:](#){: .copyMe} + | `eu-west-3` | [arn:aws:lambda:eu-west-3:017000801446:layer:AWSLambdaPowertoolsPython:7 :clipboard:](#){: .copyMe} + | `eu-north-1` | [arn:aws:lambda:eu-north-1:017000801446:layer:AWSLambdaPowertoolsPython:7 :clipboard:](#){: .copyMe} + | `ca-central-1` | [arn:aws:lambda:ca-central-1:017000801446:layer:AWSLambdaPowertoolsPython:7 :clipboard:](#){: .copyMe} + | `sa-east-1` | [arn:aws:lambda:sa-east-1:017000801446:layer:AWSLambdaPowertoolsPython:7 :clipboard:](#){: .copyMe} === "SAM" @@ -62,7 +62,7 @@ You can include Lambda Powertools Lambda Layer using [AWS Lambda Console](https: Type: AWS::Serverless::Function Properties: Layers: - - !Sub arn:aws:lambda:${AWS::Region}:017000801446:layer:AWSLambdaPowertoolsPython:6 + - !Sub arn:aws:lambda:${AWS::Region}:017000801446:layer:AWSLambdaPowertoolsPython:7 ``` === "Serverless framework" @@ -72,7 +72,7 @@ You can include Lambda Powertools Lambda Layer using [AWS Lambda Console](https: hello: handler: lambda_function.lambda_handler layers: - - arn:aws:lambda:${aws:region}:017000801446:layer:AWSLambdaPowertoolsPython:6 + - 
arn:aws:lambda:${aws:region}:017000801446:layer:AWSLambdaPowertoolsPython:7 ``` === "CDK" @@ -88,7 +88,7 @@ You can include Lambda Powertools Lambda Layer using [AWS Lambda Console](https: powertools_layer = aws_lambda.LayerVersion.from_layer_version_arn( self, id="lambda-powertools", - layer_version_arn=f"arn:aws:lambda:{env.region}:017000801446:layer:AWSLambdaPowertoolsPython:6" + layer_version_arn=f"arn:aws:lambda:{env.region}:017000801446:layer:AWSLambdaPowertoolsPython:7" ) aws_lambda.Function(self, 'sample-app-lambda', @@ -137,7 +137,7 @@ You can include Lambda Powertools Lambda Layer using [AWS Lambda Console](https: role = aws_iam_role.iam_for_lambda.arn handler = "index.test" runtime = "python3.9" - layers = ["arn:aws:lambda:{region}:017000801446:layer:AWSLambdaPowertoolsPython:6"] + layers = ["arn:aws:lambda:{region}:017000801446:layer:AWSLambdaPowertoolsPython:7"] source_code_hash = filebase64sha256("lambda_function_payload.zip") } @@ -156,7 +156,7 @@ You can include Lambda Powertools Lambda Layer using [AWS Lambda Console](https: ? Do you want to configure advanced settings? Yes ... ? Do you want to enable Lambda layers for this function? Yes - ? Enter up to 5 existing Lambda layer ARNs (comma-separated): arn:aws:lambda:eu-central-1:017000801446:layer:AWSLambdaPowertoolsPython:6 + ? Enter up to 5 existing Lambda layer ARNs (comma-separated): arn:aws:lambda:eu-central-1:017000801446:layer:AWSLambdaPowertoolsPython:7 ❯ amplify push -y @@ -167,7 +167,7 @@ You can include Lambda Powertools Lambda Layer using [AWS Lambda Console](https: - Name: ? Which setting do you want to update? Lambda layers configuration ? Do you want to enable Lambda layers for this function? Yes - ? Enter up to 5 existing Lambda layer ARNs (comma-separated): arn:aws:lambda:eu-central-1:017000801446:layer:AWSLambdaPowertoolsPython:6 + ? Enter up to 5 existing Lambda layer ARNs (comma-separated): arn:aws:lambda:eu-central-1:017000801446:layer:AWSLambdaPowertoolsPython:7 ? Do you want to edit the local lambda function now? No ``` @@ -175,7 +175,7 @@ You can include Lambda Powertools Lambda Layer using [AWS Lambda Console](https: Change {region} to your AWS region, e.g. `eu-west-1` ```bash title="AWS CLI" - aws lambda get-layer-version-by-arn --arn arn:aws:lambda:{region}:017000801446:layer:AWSLambdaPowertoolsPython:6 --region {region} + aws lambda get-layer-version-by-arn --arn arn:aws:lambda:{region}:017000801446:layer:AWSLambdaPowertoolsPython:7 --region {region} ``` The pre-signed URL to download this Lambda Layer will be within `Location` key. 
@@ -215,7 +215,7 @@ If using SAM, you can include this SAR App as part of your shared Layers stack,
     Properties:
         Location:
           ApplicationId: arn:aws:serverlessrepo:eu-west-1:057560766410:applications/aws-lambda-powertools-python-layer
-          SemanticVersion: 1.23.0 # change to latest semantic version available in SAR
+          SemanticVersion: 1.24.0 # change to latest semantic version available in SAR
 
     MyLambdaFunction:
       Type: AWS::Serverless::Function
@@ -243,7 +243,7 @@ If using SAM, you can include this SAR App as part of your shared Layers stack,
         Location:
           ApplicationId: arn:aws:serverlessrepo:eu-west-1:057560766410:applications/aws-lambda-powertools-python-layer
           # Find latest from github.com/awslabs/aws-lambda-powertools-python/releases
-          SemanticVersion: 1.23.0
+          SemanticVersion: 1.24.0
 ```
 
 === "CDK"
diff --git a/docs/media/cloudwatch_logs_insight_example.png b/docs/media/cloudwatch_logs_insight_example.png
new file mode 100644
index 00000000000..77569eec39a
Binary files /dev/null and b/docs/media/cloudwatch_logs_insight_example.png differ
diff --git a/docs/media/metrics_utility_showcase.png b/docs/media/metrics_utility_showcase.png
new file mode 100644
index 00000000000..d81eabed4e9
Binary files /dev/null and b/docs/media/metrics_utility_showcase.png differ
diff --git a/docs/media/tracer_utility_showcase_2.png b/docs/media/tracer_utility_showcase_2.png
new file mode 100644
index 00000000000..6af495fdc8b
Binary files /dev/null and b/docs/media/tracer_utility_showcase_2.png differ
diff --git a/docs/media/tracer_utility_showcase_3.png b/docs/media/tracer_utility_showcase_3.png
new file mode 100644
index 00000000000..d4ef99ebbdd
Binary files /dev/null and b/docs/media/tracer_utility_showcase_3.png differ
diff --git a/docs/media/tracer_xray_sdk_enriched.png b/docs/media/tracer_xray_sdk_enriched.png
new file mode 100644
index 00000000000..1809cf16de4
Binary files /dev/null and b/docs/media/tracer_xray_sdk_enriched.png differ
diff --git a/docs/media/tracer_xray_sdk_enriched_2.png b/docs/media/tracer_xray_sdk_enriched_2.png
new file mode 100644
index 00000000000..b15594f33b4
Binary files /dev/null and b/docs/media/tracer_xray_sdk_enriched_2.png differ
diff --git a/docs/media/tracer_xray_sdk_showcase.png b/docs/media/tracer_xray_sdk_showcase.png
new file mode 100644
index 00000000000..ded9e1ee909
Binary files /dev/null and b/docs/media/tracer_xray_sdk_showcase.png differ
diff --git a/docs/tutorial/idempotency.md b/docs/tutorial/idempotency.md
new file mode 100644
index 00000000000..4b03b66abd4
--- /dev/null
+++ b/docs/tutorial/idempotency.md
@@ -0,0 +1,1009 @@
+---
+title: Idempotency
+description: Utility
+---
+
+The idempotency utility provides a simple solution to convert your Lambda functions into idempotent operations which
+are safe to retry.
+
+## Terminology
+
+The property of idempotency means that an operation does not cause additional side effects if it is called more than
+once with the same input parameters.
+
+**Idempotent operations will return the same result when they are called multiple
+times with the same parameters**. This makes idempotent operations safe to retry.
+
+An **idempotency key** is a hash representation of either the entire event or a specific configured subset of the event. Invocation results are **JSON serialized** and stored in your persistence storage layer.
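+
+To make this concrete, here is a minimal sketch of how an idempotency key could be derived. The `derive_idempotency_key` helper is hypothetical and the utility's internals differ in detail; it only illustrates hashing a JSON-serialized payload with `md5`, the default hash function listed later in this page:
+
+```python title="Illustrative sketch: deriving an idempotency key from a payload"
+import hashlib
+import json
+
+
+def derive_idempotency_key(payload: dict) -> str:
+    # Sort keys so logically equal payloads serialize (and hash) identically
+    serialized = json.dumps(payload, sort_keys=True)
+    return hashlib.md5(serialized.encode("utf-8")).hexdigest()
+
+
+print(derive_idempotency_key({"user": "xyz", "product_id": "123456789"}))
+```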
+ +## Key features + +* Prevent Lambda handler from executing more than once on the same event payload during a time window +* Ensure Lambda handler returns the same result when called with the same payload +* Select a subset of the event as the idempotency key using JMESPath expressions +* Set a time window in which records with the same payload should be considered duplicates + +## Getting started + +### Required resources + +Before getting started, you need to create a persistent storage layer where the idempotency utility can store its state - your lambda functions will need read and write access to it. + +As of now, Amazon DynamoDB is the only supported persistent storage layer, so you'll need to create a table first. + +**Default table configuration** + +If you're not [changing the default configuration for the DynamoDB persistence layer](#dynamodbpersistencelayer), this is the expected default configuration: + +Configuration | Value | Notes +------------------------------------------------- | ------------------------------------------------- | ------------------------------------------------- +Partition key | `id` | +TTL attribute name | `expiration` | This can only be configured after your table is created if you're using AWS Console + +???+ tip "Tip: You can share a single state table for all functions" + You can reuse the same DynamoDB table to store idempotency state. We add your `function_name` in addition to the idempotency key as a hash key. + +```yaml hl_lines="5-13 21-23" title="AWS Serverless Application Model (SAM) example" +Resources: + IdempotencyTable: + Type: AWS::DynamoDB::Table + Properties: + AttributeDefinitions: + - AttributeName: id + AttributeType: S + KeySchema: + - AttributeName: id + KeyType: HASH + TimeToLiveSpecification: + AttributeName: expiration + Enabled: true + BillingMode: PAY_PER_REQUEST + + HelloWorldFunction: + Type: AWS::Serverless::Function + Properties: + Runtime: python3.8 + ... + Policies: + - DynamoDBCrudPolicy: + TableName: !Ref IdempotencyTable +``` + +???+ warning "Warning: Large responses with DynamoDB persistence layer" + When using this utility with DynamoDB, your function's responses must be [smaller than 400KB](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html#limits-items). + + Larger items cannot be written to DynamoDB and will cause exceptions. + +???+ info "Info: DynamoDB" + Each function invocation will generally make 2 requests to DynamoDB. If the + result returned by your Lambda is less than 1kb, you can expect 2 WCUs per invocation. For retried invocations, you will + see 1WCU and 1RCU. Review the [DynamoDB pricing documentation](https://aws.amazon.com/dynamodb/pricing/) to + estimate the cost. + +### Idempotent decorator + +You can quickly start by initializing the `DynamoDBPersistenceLayer` class and using it with the `idempotent` decorator on your lambda handler. + +=== "app.py" + + ```python hl_lines="1-3 5 7 14" + from aws_lambda_powertools.utilities.idempotency import ( + DynamoDBPersistenceLayer, idempotent + ) + + persistence_layer = DynamoDBPersistenceLayer(table_name="IdempotencyTable") + + @idempotent(persistence_store=persistence_layer) + def handler(event, context): + payment = create_subscription_payment( + user=event['user'], + product=event['product_id'] + ) + ... 
+        return {
+            "payment_id": payment.id,
+            "message": "success",
+            "statusCode": 200,
+        }
+    ```
+
+=== "Example event"
+
+    ```json
+    {
+      "user": "xyz",
+      "product_id": "123456789"
+    }
+    ```
+
+### Idempotent_function decorator
+
+Similar to the [idempotent decorator](#idempotent-decorator), you can use the `idempotent_function` decorator for any synchronous Python function.
+
+When using `idempotent_function`, you must tell us which keyword parameter in your function signature has the data we should use via **`data_keyword_argument`**.
+
+!!! info "We support JSON serializable data, [Python Dataclasses](https://docs.python.org/3.7/library/dataclasses.html){target="_blank"}, [Parser/Pydantic Models](parser.md){target="_blank"}, and our [Event Source Data Classes](./data_classes.md){target="_blank"}."
+
+???+ warning
+    Make sure to call your decorated function using keyword arguments
+
+=== "batch_sample.py"
+
+    This example also demonstrates how you can integrate with [Batch utility](batch.md), so you can process each record in an idempotent manner.
+
+    ```python hl_lines="4-5 16 21 29"
+    from aws_lambda_powertools.utilities.batch import (BatchProcessor, EventType,
+                                                       batch_processor)
+    from aws_lambda_powertools.utilities.data_classes.sqs_event import SQSRecord
+    from aws_lambda_powertools.utilities.idempotency import (
+        DynamoDBPersistenceLayer, IdempotencyConfig, idempotent_function)
+
+
+    processor = BatchProcessor(event_type=EventType.SQS)
+    dynamodb = DynamoDBPersistenceLayer(table_name="idem")
+    config = IdempotencyConfig(
+        event_key_jmespath="messageId",  # see Choosing a payload subset section
+        use_local_cache=True,
+    )
+
+
+    @idempotent_function(data_keyword_argument="record", config=config, persistence_store=dynamodb)
+    def record_handler(record: SQSRecord):
+        return {"message": record["body"]}
+
+
+    @idempotent_function(data_keyword_argument="data", config=config, persistence_store=dynamodb)
+    def dummy(arg_one, arg_two, data: dict, **kwargs):
+        return {"data": data}
+
+
+    @batch_processor(record_handler=record_handler, processor=processor)
+    def lambda_handler(event, context):
+        # `data` parameter must be called as a keyword argument to work
+        dummy("hello", "universe", data="test")
+        return processor.response()
+    ```
+
+=== "Batch event"
+
+    ```json hl_lines="4"
+    {
+        "Records": [
+            {
+                "messageId": "059f36b4-87a3-44ab-83d2-661975830a7d",
+                "receiptHandle": "AQEBwJnKyrHigUMZj6rYigCgxlaS3SLy0a...",
+                "body": "Test message.",
+                "attributes": {
+                    "ApproximateReceiveCount": "1",
+                    "SentTimestamp": "1545082649183",
+                    "SenderId": "AIDAIENQZJOLO23YVJ4VO",
+                    "ApproximateFirstReceiveTimestamp": "1545082649185"
+                },
+                "messageAttributes": {
+                    "testAttr": {
+                        "stringValue": "100",
+                        "binaryValue": "base64Str",
+                        "dataType": "Number"
+                    }
+                },
+                "md5OfBody": "e4e68fb7bd0e697a0ae8f1bb342846b3",
+                "eventSource": "aws:sqs",
+                "eventSourceARN": "arn:aws:sqs:us-east-2:123456789012:my-queue",
+                "awsRegion": "us-east-2"
+            }
+        ]
+    }
+    ```
+
+=== "dataclass_sample.py"
+
+    ```python hl_lines="3-4 23 32"
+    from dataclasses import dataclass
+
+    from aws_lambda_powertools.utilities.idempotency import (
+        DynamoDBPersistenceLayer, IdempotencyConfig, idempotent_function)
+
+    dynamodb = DynamoDBPersistenceLayer(table_name="idem")
+    config = IdempotencyConfig(
+        event_key_jmespath="order_id",  # see Choosing a payload subset section
+        use_local_cache=True,
+    )
+
+    @dataclass
+    class OrderItem:
+        sku: str
+        description: str
+
+    @dataclass
+    class Order:
+        item: OrderItem
+        order_id: int
+
+
+    @idempotent_function(data_keyword_argument="order", config=config, persistence_store=dynamodb)
+    def process_order(order: Order):
+        return f"processed order {order.order_id}"
+
+
+    order_item = OrderItem(sku="fake", description="sample")
+    order = Order(item=order_item, order_id=10234)
+
+    # `order` parameter must be called as a keyword argument to work
+    process_order(order=order)
+    ```
+
+=== "parser_pydantic_sample.py"
+
+    ```python hl_lines="1-2 22 31"
+    from aws_lambda_powertools.utilities.idempotency import (
+        DynamoDBPersistenceLayer, IdempotencyConfig, idempotent_function)
+    from aws_lambda_powertools.utilities.parser import BaseModel
+
+    dynamodb = DynamoDBPersistenceLayer(table_name="idem")
+    config = IdempotencyConfig(
+        event_key_jmespath="order_id",  # see Choosing a payload subset section
+        use_local_cache=True,
+    )
+
+
+    class OrderItem(BaseModel):
+        sku: str
+        description: str
+
+
+    class Order(BaseModel):
+        item: OrderItem
+        order_id: int
+
+
+    @idempotent_function(data_keyword_argument="order", config=config, persistence_store=dynamodb)
+    def process_order(order: Order):
+        return f"processed order {order.order_id}"
+
+
+    order_item = OrderItem(sku="fake", description="sample")
+    order = Order(item=order_item, order_id=10234)
+
+    # `order` parameter must be called as a keyword argument to work
+    process_order(order=order)
+    ```
+
+### Choosing a payload subset for idempotency
+
+???+ tip "Tip: Dealing with always changing payloads"
+    When dealing with a more elaborate payload, where parts of the payload always change, you should use the **`event_key_jmespath`** parameter.
+
+Use [`IdempotencyConfig`](#customizing-the-default-behavior) to instruct the idempotent decorator to only use a portion of your payload to verify whether a request is idempotent, and therefore whether a previously saved result can be returned instead of executing your code again.
+
+> **Payment scenario**
+
+In this example, we have a Lambda handler that creates a payment for a user subscribing to a product. We want to ensure that we don't accidentally charge our customer by subscribing them more than once.
+
+Imagine the function executes successfully, but the client never receives the response due to a connection issue. It is safe to retry in this instance, as the idempotent decorator will return a previously saved response.
+
+???+ warning "Warning: Idempotency for JSON payloads"
+    The payload extracted by the `event_key_jmespath` is treated as a string by default, so will be sensitive to differences in whitespace even when the JSON payload itself is identical.
+
+    To alter this behaviour, we can use the [JMESPath built-in function](jmespath_functions.md#powertools_json-function) `powertools_json()` to treat the payload as a JSON object (dict) rather than a string.
+
+=== "payment.py"
+
+    ```python hl_lines="2-4 10 12 15 20"
+    import json
+    from aws_lambda_powertools.utilities.idempotency import (
+        IdempotencyConfig, DynamoDBPersistenceLayer, idempotent
+    )
+
+    persistence_layer = DynamoDBPersistenceLayer(table_name="IdempotencyTable")
+
+    # Treat everything under the "body" key
+    # in the event json object as our payload
+    config = IdempotencyConfig(event_key_jmespath="powertools_json(body)")
+
+    @idempotent(config=config, persistence_store=persistence_layer)
+    def handler(event, context):
+        body = json.loads(event['body'])
+        payment = create_subscription_payment(
+            user=body['user'],
+            product=body['product_id']
+        )
+        ...
+        return {
+            "payment_id": payment.id,
+            "message": "success",
+            "statusCode": 200
+        }
+    ```
+
+=== "Example event"
+
+    ```json hl_lines="28"
+    {
+      "version":"2.0",
+      "routeKey":"ANY /createpayment",
+      "rawPath":"/createpayment",
+      "rawQueryString":"",
+      "headers": {
+        "Header1": "value1",
+        "Header2": "value2"
+      },
+      "requestContext":{
+        "accountId":"123456789012",
+        "apiId":"api-id",
+        "domainName":"id.execute-api.us-east-1.amazonaws.com",
+        "domainPrefix":"id",
+        "http":{
+          "method":"POST",
+          "path":"/createpayment",
+          "protocol":"HTTP/1.1",
+          "sourceIp":"ip",
+          "userAgent":"agent"
+        },
+        "requestId":"id",
+        "routeKey":"ANY /createpayment",
+        "stage":"$default",
+        "time":"10/Feb/2021:13:40:43 +0000",
+        "timeEpoch":1612964443723
+      },
+      "body":"{\"user\":\"xyz\",\"product_id\":\"123456789\"}",
+      "isBase64Encoded":false
+    }
+    ```
+
+
+### Idempotency request flow
+
+This sequence diagram shows an example flow of what happens in the payment scenario:
+
+![Idempotent sequence](../media/idempotent_sequence.png)
+
+The client was successful in receiving the result after the retry. Since the Lambda handler was only executed once, our customer hasn't been charged twice.
+
+???+ note
+    Bear in mind that the entire Lambda handler is treated as a single idempotent operation. If your Lambda handler can cause multiple side effects, consider splitting it into separate functions.
+
+### Handling exceptions
+
+If you are using the `idempotent` decorator on your Lambda handler, any unhandled exceptions that are raised during the code execution will cause **the record in the persistence layer to be deleted**.
+This means that new invocations will execute your code again despite having the same payload. If you don't want the record to be deleted, you need to catch exceptions within the idempotent function and return a successful response.
+
+
+![Idempotent sequence exception](../media/idempotent_sequence_exception.png)
+
+If you are using `idempotent_function`, any unhandled exceptions that are raised _inside_ the decorated function will cause the record in the persistence layer to be deleted, and allow the function to be executed again if retried.
+
+If an exception is raised _outside_ the scope of the decorated function and after your function has been called, the persistent record will not be affected. In this case, idempotency will be maintained for your decorated function. Example:
+
+```python hl_lines="2-4 8-10" title="Exception not affecting idempotency record sample"
+def lambda_handler(event, context):
+    # If an exception is raised here, no idempotent record will ever get created as the
+    # idempotent function does not get called
+    do_some_stuff()
+
+    result = call_external_service(data={"user": "user1", "id": 5})
+
+    # This exception will not cause the idempotent record to be deleted, since it
+    # happens after the decorated function has been successfully called
+    raise Exception
+
+
+@idempotent_function(data_keyword_argument="data", config=config, persistence_store=dynamodb)
+def call_external_service(data: dict, **kwargs):
+    result = requests.post('http://example.com', json={"user": data['user'], "transaction_id": data['id']})
+    return result.json()
+```
+
+???+ warning
+    **We will raise `IdempotencyPersistenceLayerError`** if any of the calls to the persistence layer fail unexpectedly.
+
+    As this happens outside the scope of your decorated function, you are not able to catch it if you're using the `idempotent` decorator on your Lambda handler.
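+
+For completeness, here is a minimal sketch of the pattern mentioned above - catching failures _inside_ the decorated function so the idempotency record is preserved. The `ChargeError` exception and `charge_customer` call are hypothetical stand-ins for your own downstream logic:
+
+```python title="Catching exceptions inside the decorated function (sketch)"
+from aws_lambda_powertools.utilities.idempotency import (
+    DynamoDBPersistenceLayer, IdempotencyConfig, idempotent_function)
+
+dynamodb = DynamoDBPersistenceLayer(table_name="idem")
+config = IdempotencyConfig(event_key_jmespath="order_id")
+
+
+class ChargeError(Exception):
+    """Hypothetical downstream failure."""
+
+
+def charge_customer(data: dict) -> dict:
+    # Hypothetical side effect that may fail
+    raise ChargeError("card declined")
+
+
+@idempotent_function(data_keyword_argument="data", config=config, persistence_store=dynamodb)
+def process_payment(data: dict, **kwargs):
+    try:
+        return {"status": "success", **charge_customer(data)}
+    except ChargeError:
+        # Returning a response instead of raising keeps the idempotency record,
+        # so retries with the same payload receive this response rather than
+        # re-running the charge
+        return {"status": "failed", "order_id": data["order_id"]}
+
+
+# process_payment(data={"order_id": "1234"})  # must be called with keyword arguments
+```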
+
+### Persistence layers
+
+#### DynamoDBPersistenceLayer
+
+This persistence layer is built-in, and you can either use an existing DynamoDB table or create a new one dedicated to idempotency state (recommended).
+
+```python hl_lines="5-9" title="Customizing DynamoDBPersistenceLayer to suit your table structure"
+from aws_lambda_powertools.utilities.idempotency import DynamoDBPersistenceLayer
+
+persistence_layer = DynamoDBPersistenceLayer(
+    table_name="IdempotencyTable",
+    key_attr="idempotency_key",
+    expiry_attr="expires_at",
+    status_attr="current_status",
+    data_attr="result_data",
+    validation_key_attr="validation_key",
+)
+```
+
+When using DynamoDB as a persistence layer, you can alter the attribute names by passing these parameters when initializing the persistence layer:
+
+Parameter | Required | Default | Description
+------------------------------------------------- | ------------------------------------------------- | ------------------------------------------------- | ---------------------------------------------------------------------------------
+**table_name** | :heavy_check_mark: | | Table name to store state
+**key_attr** | | `id` | Partition key of the table. Hashed representation of the payload (unless **sort_key_attr** is specified)
+**expiry_attr** | | `expiration` | Unix timestamp of when the record expires
+**status_attr** | | `status` | Stores the status of the Lambda execution during and after invocation
+**data_attr** | | `data` | Stores the results of successfully executed Lambda handlers
+**validation_key_attr** | | `validation` | Hashed representation of the parts of the event used for validation
+**sort_key_attr** | | | Sort key of the table (if the table is configured with a sort key)
+**static_pk_value** | | `idempotency#{LAMBDA_FUNCTION_NAME}` | Static value to use as the partition key. Only used when **sort_key_attr** is set
+
+## Advanced
+
+### Customizing the default behavior
+
+The idempotent decorator can be further configured with **`IdempotencyConfig`**, as seen in the previous example. These are the available options for further configuration:
+
+Parameter | Default | Description
+------------------------------------------------- | ------------------------------------------------- | ---------------------------------------------------------------------------------
+**event_key_jmespath** | `""` | JMESPath expression to extract the idempotency key from the event record using [built-in functions](/utilities/jmespath_functions)
+**payload_validation_jmespath** | `""` | JMESPath expression to validate whether certain parameters have changed in the event, while the idempotency key remains the same
+**raise_on_no_idempotency_key** | `False` | Raise an exception if no idempotency key was found in the request
+**expires_after_seconds** | 3600 | The number of seconds to wait before a record is expired
+**use_local_cache** | `False` | Whether to locally cache idempotency results
+**local_cache_max_items** | 256 | Max number of items to store in the local cache
+**hash_function** | `md5` | Function to use for calculating hashes, as provided by [hashlib](https://docs.python.org/3/library/hashlib.html) in the standard library
+
+### Handling concurrent executions with the same payload
+
+This utility will raise an **`IdempotencyAlreadyInProgressError`** exception if you receive **multiple invocations with the same payload while the first invocation hasn't completed yet**.
+
+???+ info
+    If you receive `IdempotencyAlreadyInProgressError`, you can safely retry the operation.
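+
+As an illustration, a client-side retry could look like this minimal sketch; the handler reference, event, and backoff policy are hypothetical, and it assumes the exception is importable from the same `exceptions` module used elsewhere in this page:
+
+```python title="Retrying on IdempotencyAlreadyInProgressError (sketch)"
+import time
+
+from aws_lambda_powertools.utilities.idempotency.exceptions import (
+    IdempotencyAlreadyInProgressError,
+)
+
+
+def invoke_with_retry(handler, event, context, attempts: int = 3):
+    """Retry briefly while the first invocation with this payload completes."""
+    for attempt in range(attempts):
+        try:
+            return handler(event, context)
+        except IdempotencyAlreadyInProgressError:
+            if attempt == attempts - 1:
+                raise
+            time.sleep(2 ** attempt)  # simple exponential backoff
+```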
+
+This is a locking mechanism for correctness. Since we don't know the result from the first invocation yet, we can't safely allow another concurrent execution.
+
+### Using in-memory cache
+
+**By default, in-memory local caching is disabled**, since we don't know how much memory you consume per invocation compared to the maximum configured in your Lambda function.
+
+???+ note "Note: This in-memory cache is local to each Lambda execution environment"
+    This means it will be effective in cases where your function's concurrency is low in comparison to the number of "retry" invocations with the same payload, because the cache might be empty.
+
+You can enable in-memory caching with the **`use_local_cache`** parameter:
+
+```python hl_lines="8 11" title="Caching idempotent transactions in-memory to prevent multiple calls to storage"
+from aws_lambda_powertools.utilities.idempotency import (
+    IdempotencyConfig, DynamoDBPersistenceLayer, idempotent
+)
+
+persistence_layer = DynamoDBPersistenceLayer(table_name="IdempotencyTable")
+config = IdempotencyConfig(
+    event_key_jmespath="body",
+    use_local_cache=True,
+)
+
+@idempotent(config=config, persistence_store=persistence_layer)
+def handler(event, context):
+    ...
+```
+
+When enabled, the default is to cache a maximum of 256 records in each Lambda execution environment; you can change it with the **`local_cache_max_items`** parameter.
+
+### Expiring idempotency records
+
+???+ note
+    By default, we expire idempotency records after **an hour** (3600 seconds).
+
+In most cases, it is not desirable to store idempotency records forever. Rather, you want to guarantee that the same payload won't be executed within a period of time.
+
+You can change this window with the **`expires_after_seconds`** parameter:
+
+```python hl_lines="8 11" title="Adjusting cache TTL"
+from aws_lambda_powertools.utilities.idempotency import (
+    IdempotencyConfig, DynamoDBPersistenceLayer, idempotent
+)
+
+persistence_layer = DynamoDBPersistenceLayer(table_name="IdempotencyTable")
+config = IdempotencyConfig(
+    event_key_jmespath="body",
+    expires_after_seconds=5*60,  # 5 minutes
+)
+
+@idempotent(config=config, persistence_store=persistence_layer)
+def handler(event, context):
+    ...
+```
+
+This will mark any records older than 5 minutes as expired, and the Lambda handler will be executed as normal if it is invoked with a matching payload.
+
+???+ note "Note: DynamoDB time-to-live field"
+    This utility uses **`expiration`** as the TTL field in DynamoDB, as [demonstrated in the SAM example earlier](#required-resources).
+
+### Payload validation
+
+???+ question "Question: What if your function is invoked with the same payload except some outer parameters have changed?"
+    Example: A payment transaction for a given productID was requested twice for the same customer, **however the amount to be paid has changed in the second transaction**.
+
+By default, we will return the same result as we returned before; however, in this instance it may be misleading. We provide fail-fast payload validation to address this edge case.
+
+With **`payload_validation_jmespath`**, you can provide an additional JMESPath expression to specify which part of the event body should be validated against previous idempotent invocations.
+
+=== "app.py"
+
+    ```python hl_lines="7 11 18 25"
+    from aws_lambda_powertools.utilities.idempotency import (
+        IdempotencyConfig, DynamoDBPersistenceLayer, idempotent
+    )
+
+    config = IdempotencyConfig(
+        event_key_jmespath="[userDetail, productId]",
+        payload_validation_jmespath="amount"
+    )
+    persistence_layer = DynamoDBPersistenceLayer(table_name="IdempotencyTable")
+
+    @idempotent(config=config, persistence_store=persistence_layer)
+    def handler(event, context):
+        # Creating a subscription payment is a side
+        # effect of calling this function!
+        payment = create_subscription_payment(
+            user=event['userDetail']['username'],
+            product=event['productId'],
+            amount=event['amount']
+        )
+        ...
+        return {
+            "message": "success",
+            "statusCode": 200,
+            "payment_id": payment.id,
+            "amount": payment.amount
+        }
+    ```
+
+=== "Example Event 1"
+
+    ```json hl_lines="8"
+    {
+        "userDetail": {
+            "username": "User1",
+            "user_email": "user@example.com"
+        },
+        "productId": 1500,
+        "charge_type": "subscription",
+        "amount": 500
+    }
+    ```
+
+=== "Example Event 2"
+
+    ```json hl_lines="8"
+    {
+        "userDetail": {
+            "username": "User1",
+            "user_email": "user@example.com"
+        },
+        "productId": 1500,
+        "charge_type": "subscription",
+        "amount": 1
+    }
+    ```
+
+In this example, the **`userDetail`** and **`productId`** keys are used as the payload to generate the idempotency key, as per the **`event_key_jmespath`** parameter.
+
+???+ note
+    If we try to send the same request but with a different amount, we will raise **`IdempotencyValidationError`**.
+
+Without payload validation, we would have returned the same result as we did for the initial request. Since we're also returning an amount in the response, this could be quite confusing for the client.
+
+By using **`payload_validation_jmespath="amount"`**, we prevent this potentially confusing behavior and instead raise an exception.
+
+### Making idempotency key required
+
+If you want to enforce that an idempotency key is required, you can set **`raise_on_no_idempotency_key`** to `True`.
+
+This means that we will raise **`IdempotencyKeyError`** if the evaluation of **`event_key_jmespath`** is `None`.
+
+=== "app.py"
+
+    ```python hl_lines="9-10 13"
+    from aws_lambda_powertools.utilities.idempotency import (
+        IdempotencyConfig, DynamoDBPersistenceLayer, idempotent
+    )
+
+    persistence_layer = DynamoDBPersistenceLayer(table_name="IdempotencyTable")
+
+    # Requires "user"."uid" and "order_id" to be present
+    config = IdempotencyConfig(
+        event_key_jmespath="[user.uid, order_id]",
+        raise_on_no_idempotency_key=True,
+    )
+
+    @idempotent(config=config, persistence_store=persistence_layer)
+    def handler(event, context):
+        pass
+    ```
+
+=== "Success Event"
+
+    ```json hl_lines="3 6"
+    {
+        "user": {
+            "uid": "BB0D045C-8878-40C8-889E-38B3CB0A61B1",
+            "name": "Foo"
+        },
+        "order_id": 10000
+    }
+    ```
+
+=== "Failure Event"
+
+    Notice that `order_id` is now accidentally within the `user` key
+
+    ```json hl_lines="3 5"
+    {
+        "user": {
+            "uid": "DE0D000E-1234-10D1-991E-EAC1DD1D52C8",
+            "name": "Joe Bloggs",
+            "order_id": 10000
+        }
+    }
+    ```
+
+### Customizing boto configuration
+
+The **`boto_config`** and **`boto3_session`** parameters enable you to pass in a custom [botocore config object](https://botocore.amazonaws.com/v1/documentation/api/latest/reference/config.html) or a custom [boto3 session](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/core/session.html) when constructing the persistence store.
+
+=== "Custom session"
+
+    ```python hl_lines="1 6 9 14"
+    import boto3
+    from aws_lambda_powertools.utilities.idempotency import (
+        IdempotencyConfig, DynamoDBPersistenceLayer, idempotent
+    )
+
+    boto3_session = boto3.session.Session()
+    persistence_layer = DynamoDBPersistenceLayer(
+        table_name="IdempotencyTable",
+        boto3_session=boto3_session
+    )
+
+    config = IdempotencyConfig(event_key_jmespath="body")
+
+    @idempotent(config=config, persistence_store=persistence_layer)
+    def handler(event, context):
+        ...
+    ```
+=== "Custom config"
+
+    ```python hl_lines="1 7 10"
+    from botocore.config import Config
+    from aws_lambda_powertools.utilities.idempotency import (
+        IdempotencyConfig, DynamoDBPersistenceLayer, idempotent
+    )
+
+    config = IdempotencyConfig(event_key_jmespath="body")
+    boto_config = Config()
+    persistence_layer = DynamoDBPersistenceLayer(
+        table_name="IdempotencyTable",
+        boto_config=boto_config
+    )
+
+    @idempotent(config=config, persistence_store=persistence_layer)
+    def handler(event, context):
+        ...
+    ```
+
+### Using a DynamoDB table with a composite primary key
+
+When using a table with a composite primary key (hash+range key), use the `sort_key_attr` parameter when initializing your persistence layer.
+
+With this setting, we will save the idempotency key in the sort key instead of the primary key. By default, the primary key will now be set to `idempotency#{LAMBDA_FUNCTION_NAME}`.
+
+You can optionally set a static value for the partition key using the `static_pk_value` parameter.
+
+```python hl_lines="5" title="Reusing a DynamoDB table that uses a composite primary key"
+from aws_lambda_powertools.utilities.idempotency import DynamoDBPersistenceLayer, idempotent
+
+persistence_layer = DynamoDBPersistenceLayer(
+    table_name="IdempotencyTable",
+    sort_key_attr='sort_key')
+
+
+@idempotent(persistence_store=persistence_layer)
+def handler(event, context):
+    return {"message": "success", "id": event['body']['id']}
+```
+
+The example function above would cause data to be stored in DynamoDB like this:
+
+| id                           | sort_key                         | expiration | status      | data                                 |
+|------------------------------|----------------------------------|------------|-------------|--------------------------------------|
+| idempotency#MyLambdaFunction | 1e956ef7da78d0cb890be999aecc0c9e | 1636549553 | COMPLETED   | {"id": 12391, "message": "success"}  |
+| idempotency#MyLambdaFunction | 2b2cdb5f86361e97b4383087c1ffdf27 | 1636549571 | COMPLETED   | {"id": 527212, "message": "success"} |
+| idempotency#MyLambdaFunction | f091d2527ad1c78f05d54cc3f363be80 | 1636549585 | IN_PROGRESS |                                      |
+
+### Bring your own persistent store
+
+This utility provides an abstract base class (ABC), so that you can implement your choice of persistent storage layer.
+
+You can inherit from the `BasePersistenceLayer` class and implement the abstract methods `_get_record`, `_put_record`,
+`_update_record` and `_delete_record`.
+
+```python hl_lines="8-13 57 65 74 96 124" title="Excerpt DynamoDB Persistence Layer implementation for reference"
+import datetime
+import logging
+from typing import Any, Dict, Optional
+
+import boto3
+from botocore.config import Config
+
+from aws_lambda_powertools.utilities.idempotency import BasePersistenceLayer
+from aws_lambda_powertools.utilities.idempotency.exceptions import (
+    IdempotencyItemAlreadyExistsError,
+    IdempotencyItemNotFoundError,
+)
+from aws_lambda_powertools.utilities.idempotency.persistence.base import DataRecord
+
+logger = logging.getLogger(__name__)
+
+
+class DynamoDBPersistenceLayer(BasePersistenceLayer):
+    def __init__(
+        self,
+        table_name: str,
+        key_attr: str = "id",
+        expiry_attr: str = "expiration",
+        status_attr: str = "status",
+        data_attr: str = "data",
+        validation_key_attr: str = "validation",
+        boto_config: Optional[Config] = None,
+        boto3_session: Optional[boto3.session.Session] = None,
+    ):
+        boto_config = boto_config or Config()
+        session = boto3_session or boto3.session.Session()
+        self._ddb_resource = session.resource("dynamodb", config=boto_config)
+        self.table_name = table_name
+        self.table = self._ddb_resource.Table(self.table_name)
+        self.key_attr = key_attr
+        self.expiry_attr = expiry_attr
+        self.status_attr = status_attr
+        self.data_attr = data_attr
+        self.validation_key_attr = validation_key_attr
+        super(DynamoDBPersistenceLayer, self).__init__()
+
+    def _item_to_data_record(self, item: Dict[str, Any]) -> DataRecord:
+        """
+        Translate raw item records from DynamoDB to DataRecord
+
+        Parameters
+        ----------
+        item: Dict[str, Union[str, int]]
+            Item format from dynamodb response
+
+        Returns
+        -------
+        DataRecord
+            representation of item
+
+        """
+        return DataRecord(
+            idempotency_key=item[self.key_attr],
+            status=item[self.status_attr],
+            expiry_timestamp=item[self.expiry_attr],
+            response_data=item.get(self.data_attr),
+            payload_hash=item.get(self.validation_key_attr),
+        )
+
+    def _get_record(self, idempotency_key) -> DataRecord:
+        response = self.table.get_item(Key={self.key_attr: idempotency_key}, ConsistentRead=True)
+
+        try:
+            item = response["Item"]
+        except KeyError:
+            raise IdempotencyItemNotFoundError
+        return self._item_to_data_record(item)
+
+    def _put_record(self, data_record: DataRecord) -> None:
+        item = {
+            self.key_attr: data_record.idempotency_key,
+            self.expiry_attr: data_record.expiry_timestamp,
+            self.status_attr: data_record.status,
+        }
+
+        if self.payload_validation_enabled:
+            item[self.validation_key_attr] = data_record.payload_hash
+
+        now = datetime.datetime.now()
+        try:
+            logger.debug(f"Putting record for idempotency key: {data_record.idempotency_key}")
+            self.table.put_item(
+                Item=item,
+                ConditionExpression=f"attribute_not_exists({self.key_attr}) OR {self.expiry_attr} < :now",
+                ExpressionAttributeValues={":now": int(now.timestamp())},
+            )
+        except self._ddb_resource.meta.client.exceptions.ConditionalCheckFailedException:
+            logger.debug(f"Failed to put record for already existing idempotency key: {data_record.idempotency_key}")
+            raise IdempotencyItemAlreadyExistsError
+
+    def _update_record(self, data_record: DataRecord):
+        logger.debug(f"Updating record for idempotency key: {data_record.idempotency_key}")
+        update_expression = "SET #response_data = :response_data, #expiry = :expiry, #status = :status"
+        expression_attr_values = {
+            ":expiry": data_record.expiry_timestamp,
+            ":response_data": data_record.response_data,
+            ":status": data_record.status,
+        }
+        expression_attr_names = {
+            "#response_data": self.data_attr,
+            "#expiry": self.expiry_attr,
+            "#status": self.status_attr,
+        }
+
+        if self.payload_validation_enabled:
+            update_expression += ", #validation_key = :validation_key"
+            expression_attr_values[":validation_key"] = data_record.payload_hash
+            expression_attr_names["#validation_key"] = self.validation_key_attr
+
+        kwargs = {
+            "Key": {self.key_attr: data_record.idempotency_key},
+            "UpdateExpression": update_expression,
+            "ExpressionAttributeValues": expression_attr_values,
+            "ExpressionAttributeNames": expression_attr_names,
+        }
+
+        self.table.update_item(**kwargs)
+
+    def _delete_record(self, data_record: DataRecord) -> None:
+        logger.debug(f"Deleting record for idempotency key: {data_record.idempotency_key}")
+        self.table.delete_item(Key={self.key_attr: data_record.idempotency_key})
+```
+
+???+ danger
+    Pay attention to the documentation for each method - you may need to perform additional checks inside these methods to ensure the idempotency guarantees remain intact.
+
+    For example, the `_put_record` method needs to raise an exception if a non-expired record already exists in the data store with a matching key.
+
+## Compatibility with other utilities
+
+### Validation utility
+
+The idempotency utility can be used with the `validator` decorator. Ensure that idempotency is the innermost decorator.
+
+???+ warning
+    If you use an envelope with the validator, the event received by the idempotency utility will be the unwrapped
+    event - not the "raw" event Lambda was invoked with.
+
+    Make sure to account for this behaviour if you set the `event_key_jmespath`.
+
+```python hl_lines="9 10" title="Using Idempotency with JSONSchema Validation utility"
+from aws_lambda_powertools.utilities.validation import validator, envelopes
+from aws_lambda_powertools.utilities.idempotency import (
+    IdempotencyConfig, DynamoDBPersistenceLayer, idempotent
+)
+
+config = IdempotencyConfig(event_key_jmespath="[message, username]")
+persistence_layer = DynamoDBPersistenceLayer(table_name="IdempotencyTable")
+
+@validator(envelope=envelopes.API_GATEWAY_HTTP)
+@idempotent(config=config, persistence_store=persistence_layer)
+def lambda_handler(event, context):
+    cause_some_side_effects(event['username'])
+    return {"message": event['message'], "statusCode": 200}
+```
+
+???+ tip "Tip: JMESPath Powertools functions are also available"
+    Built-in functions known from the validation utility, like `powertools_json`, `powertools_base64`, and `powertools_base64_gzip`, are also available to use in this utility.
+
+
+## Testing your code
+
+The idempotency utility provides several routes to test your code.
+
+### Disabling the idempotency utility
+
+When testing your code, you may wish to disable the idempotency logic altogether and focus on testing your business logic. To do this, you can set the environment variable `POWERTOOLS_IDEMPOTENCY_DISABLED`
+to a truthy value. If you prefer setting this for specific tests, and are using Pytest, you can use the [monkeypatch](https://docs.pytest.org/en/latest/monkeypatch.html) fixture:
+
+=== "tests.py"
+
+    ```python hl_lines="2 3"
+    def test_idempotent_lambda_handler(monkeypatch):
+        # Set POWERTOOLS_IDEMPOTENCY_DISABLED before calling decorated functions
+        monkeypatch.setenv("POWERTOOLS_IDEMPOTENCY_DISABLED", "1")
+
+        result = handler()
+        ...
+    ```
+=== "app.py"
+
+    ```python
+    from aws_lambda_powertools.utilities.idempotency import (
+        DynamoDBPersistenceLayer, idempotent
+    )
+
+    persistence_layer = DynamoDBPersistenceLayer(table_name="idempotency")
+
+    @idempotent(persistence_store=persistence_layer)
+    def handler(event, context):
+        print('expensive operation')
+        return {
+            "payment_id": 12345,
+            "message": "success",
+            "statusCode": 200,
+        }
+    ```
+
+### Testing with DynamoDB Local
+
+To test with [DynamoDB Local](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/DynamoDBLocal.DownloadingAndRunning.html), you can replace the `Table` resource used by the persistence layer with one you create inside your tests. This allows you to set the `endpoint_url`.
+
+=== "tests.py"
+
+    ```python hl_lines="6 7 8"
+    import boto3
+
+    import app
+
+    def test_idempotent_lambda():
+        # Create our own Table resource using the endpoint for our DynamoDB Local instance
+        resource = boto3.resource("dynamodb", endpoint_url='http://localhost:8000')
+        table = resource.Table(app.persistence_layer.table_name)
+        app.persistence_layer.table = table
+
+        result = app.handler({'testkey': 'testvalue'}, {})
+        assert result['payment_id'] == 12345
+    ```
+
+=== "app.py"
+
+    ```python
+    from aws_lambda_powertools.utilities.idempotency import (
+        DynamoDBPersistenceLayer, idempotent
+    )
+
+    persistence_layer = DynamoDBPersistenceLayer(table_name="idempotency")
+
+    @idempotent(persistence_store=persistence_layer)
+    def handler(event, context):
+        print('expensive operation')
+        return {
+            "payment_id": 12345,
+            "message": "success",
+            "statusCode": 200,
+        }
+    ```
+
+### How do I mock all DynamoDB I/O operations?
+
+The idempotency utility lazily creates the DynamoDB [Table](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/dynamodb.html#table) which it uses to access DynamoDB.
+This means it is possible to pass a mocked Table resource, or stub various methods.
+
+=== "tests.py"

+    ```python hl_lines="6 7 8 9"
+    from unittest.mock import MagicMock
+
+    import app
+
+    def test_idempotent_lambda():
+        table = MagicMock()
+        app.persistence_layer.table = table
+        result = app.handler({'testkey': 'testvalue'}, {})
+        table.put_item.assert_called()
+        ...
+    ```
+
+=== "app.py"
+
+    ```python
+    from aws_lambda_powertools.utilities.idempotency import (
+        DynamoDBPersistenceLayer, idempotent
+    )
+
+    persistence_layer = DynamoDBPersistenceLayer(table_name="idempotency")
+
+    @idempotent(persistence_store=persistence_layer)
+    def handler(event, context):
+        print('expensive operation')
+        return {
+            "payment_id": 12345,
+            "message": "success",
+            "statusCode": 200,
+        }
+    ```
+
+## Extra resources
+
+If you're interested in a deep dive on how Amazon uses idempotency when building its APIs, check out
+[this article](https://aws.amazon.com/builders-library/making-retries-safe-with-idempotent-APIs/).
diff --git a/docs/tutorial/index.md b/docs/tutorial/index.md
new file mode 100644
index 00000000000..c6b99d1938b
--- /dev/null
+++ b/docs/tutorial/index.md
@@ -0,0 +1,1041 @@
+---
+title: Tutorial
+description: Powertools introduction
+---
+
+This tutorial progressively introduces Lambda Powertools core utilities by using one feature at a time.
+
+## Requirements
+
+* [AWS CLI](https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html){target="_blank"} installed and [configured with your credentials](https://docs.aws.amazon.com/serverless-application-model/latest/developerguide/serverless-getting-started-set-up-credentials.html){target="_blank"}.
+* [AWS SAM CLI](https://docs.aws.amazon.com/serverless-application-model/latest/developerguide/serverless-sam-cli-install.html){target="_blank"} installed.
+
+## Getting started
+
+Let's bootstrap our sample project before we add one feature at a time.
+
+???+ tip "Tip: Want to skip to the final project?"
+    Bootstrap directly via SAM CLI: `sam init --location https://github.com/aws-samples/cookiecutter-aws-sam-python`
+
+```bash title="Use SAM CLI to initialize the sample project"
+sam init --runtime python3.9 --dependency-manager pip --app-template hello-world --name powertools-quickstart
+```
+
+### Project structure
+
+As we move forward, we will modify the following files within the `powertools-quickstart` folder:
+
+* **app.py** - Application code.
+* **template.yaml** - AWS infrastructure configuration using SAM.
+* **requirements.txt** - List of extra Python packages needed.
+
+### Code example
+
+Let's configure our base application to look like the following code snippet.
+
+=== "app.py"
+
+    ```python
+    import json
+
+
+    def hello():
+        return {"statusCode": 200, "body": json.dumps({"message": "hello unknown!"})}
+
+
+    def lambda_handler(event, context):
+        return hello()
+    ```
+
+=== "template.yaml"
+
+    ```yaml
+    AWSTemplateFormatVersion: '2010-09-09'
+    Transform: AWS::Serverless-2016-10-31
+    Description: Sample SAM Template for powertools-quickstart
+    Globals:
+      Function:
+        Timeout: 3
+    Resources:
+      HelloWorldFunction:
+        Type: AWS::Serverless::Function
+        Properties:
+          CodeUri: hello_world/
+          Handler: app.lambda_handler
+          Runtime: python3.9
+          Architectures:
+            - x86_64
+          Events:
+            HelloWorld:
+              Type: Api
+              Properties:
+                Path: /hello
+                Method: get
+    Outputs:
+      HelloWorldApi:
+        Description: "API Gateway endpoint URL for Prod stage for Hello World function"
+        Value: !Sub "https://${ServerlessRestApi}.execute-api.${AWS::Region}.amazonaws.com/Prod/hello/"
+    ```
+
+Our Lambda code consists of an entry point function named `lambda_handler`, and a `hello` function.
+
+When API Gateway receives an HTTP GET request on the `/hello` route, Lambda will call our `lambda_handler` function, subsequently calling the `hello` function. API Gateway will use this response to return the correct HTTP status code and payload back to the caller.
+
+???+ warning
+    For simplicity, we do not set up authentication and authorization! You can find more information on how to implement them in the [AWS SAM documentation](https://docs.aws.amazon.com/serverless-application-model/latest/developerguide/serverless-controlling-access-to-apis.html){target="_blank"}.
+
+### Run your code
+
+At each point, you have two ways to run your code: locally and within your AWS account.
+
+#### Local test
+
+AWS SAM allows you to execute a serverless application locally by running `sam build && sam local start-api` in your preferred shell.
+
+```bash title="Build and run API Gateway locally"
+> sam build && sam local start-api
+...
+2021-11-26 17:43:08  * Running on http://127.0.0.1:3000/ (Press CTRL+C to quit)
+```
+
+As a result, a local API endpoint will be exposed and you can invoke it using your browser, or your preferred HTTP API client e.g., [Postman](https://www.postman.com/downloads/){target="_blank"}, [httpie](https://httpie.io/){target="_blank"}, etc.
+
+```bash title="Invoking our function locally via curl"
+> curl http://127.0.0.1:3000/hello
+{"message": "hello unknown!"}
+```
+
+???+ info
+    To learn more about local testing, please visit the [AWS SAM CLI local testing](https://docs.aws.amazon.com/serverless-application-model/latest/developerguide/sam-cli-command-reference-sam-local-start-api.html) documentation.
+
+
+#### Live test
+
+First, you need to deploy your application into your AWS account by issuing the `sam build && sam deploy --guided` command.
+This command builds a ZIP package of your source code and deploys it to your AWS account.
+
+```bash title="Build and deploy your serverless application"
+> sam build && sam deploy --guided
+...
+CloudFormation outputs from deployed stack
+------------------------------------------------------------------------------------------------------------------------------------------
+Outputs
+------------------------------------------------------------------------------------------------------------------------------------------
+Key                 HelloWorldFunctionIamRole
+Description         Implicit IAM Role created for Hello World function
+Value               arn:aws:iam::123456789012:role/sam-app-HelloWorldFunctionRole-1T2W3H9LZHGGV
+
+Key                 HelloWorldApi
+Description         API Gateway endpoint URL for Prod stage for Hello World function
+Value               https://1234567890.execute-api.eu-central-1.amazonaws.com/Prod/hello/
+
+Key                 HelloWorldFunction
+Description         Hello World Lambda Function ARN
+Value               arn:aws:lambda:eu-central-1:123456789012:function:sam-app-HelloWorldFunction-dOcfAtYoEiGo
+------------------------------------------------------------------------------------------------------------------------------------------
+Successfully created/updated stack - sam-app in eu-central-1
+```
+
+At the end of the deployment, you will find the API endpoint URL within the `Outputs` section. You can use this URL to test your serverless application.
+
+```bash title="Invoking our application via API endpoint"
+> curl https://1234567890.execute-api.eu-central-1.amazonaws.com/Prod/hello
+{"message": "hello unknown!"}%
+```
+
+???+ info
+    For more details on the AWS SAM deployment mechanism, see the [SAM Deploy reference docs](https://docs.aws.amazon.com/serverless-application-model/latest/developerguide/sam-cli-command-reference-sam-deploy.html).
+
+## Routing
+
+### Adding a new route
+
+Let's expand our application with a new route - `/hello/{name}`. It will accept a username as a path input and return it in the response.
+
+For this to work, we could create a new Lambda function to handle incoming requests for `/hello/{name}` - it'd look like this:
+
+=== "hello_by_name.py"
+
+    ```python
+    import json
+
+
+    def hello_name(name):
+        return {"statusCode": 200, "body": json.dumps({"message": f"hello {name}!"})}
+
+
+    def lambda_handler(event, context):
+        name = event["pathParameters"]["name"]
+        return hello_name(name)
+    ```
+
+=== "template.yaml"
+
+    ```yaml hl_lines="21-32"
+    AWSTemplateFormatVersion: "2010-09-09"
+    Transform: AWS::Serverless-2016-10-31
+    Description: Sample SAM Template for powertools-quickstart
+    Globals:
+      Function:
+        Timeout: 3
+    Resources:
+      HelloWorldFunction:
+        Type: AWS::Serverless::Function
+        Properties:
+          CodeUri: hello_world/
+          Handler: app.lambda_handler
+          Runtime: python3.9
+          Events:
+            HelloWorld:
+              Type: Api
+              Properties:
+                Path: /hello
+                Method: get
+
+      HelloWorldByNameFunctionName:
+        Type: AWS::Serverless::Function
+        Properties:
+          CodeUri: hello_world/
+          Handler: hello_by_name.lambda_handler
+          Runtime: python3.9
+          Events:
+            HelloWorldName:
+              Type: Api
+              Properties:
+                Path: /hello/{name}
+                Method: get
+    Outputs:
+      HelloWorldApi:
+        Description: "API Gateway endpoint URL for Prod stage for Hello World function"
+        Value: !Sub "https://${ServerlessRestApi}.execute-api.${AWS::Region}.amazonaws.com/Prod/hello/"
+    ```
+
+???+ question
+    But what happens if your application gets bigger and we need to cover numerous URL paths and HTTP methods for them?
+
+**This would quickly become non-trivial to maintain**.
+Adding a new Lambda function for each path, or multiple if/else statements to handle several routes and HTTP methods, can be error-prone.
+
+### Creating our own router
+
+???+ question
+    What if we create a simple router to reduce boilerplate?
+
+We could group similar routes and intents, and separate read and write operations, resulting in fewer functions. It doesn't address the boilerplate routing code, but maybe it will be easier to add additional URLs.
+
+???+ info "Info: You might be already asking yourself about mono vs micro-functions"
+    If you want a more detailed explanation of these two approaches, head over to the [trade-offs on each approach](../core/event_handler/api_gateway/#considerations){target="_blank"} later.
+
+A first attempt at the routing logic might look similar to the following code snippet.
+
+=== "app.py"
+
+    ```python hl_lines="4 9 13 27-29 35-36"
+    import json
+
+
+    def hello_name(event, **kwargs):
+        username = event["pathParameters"]["name"]
+        return {"statusCode": 200, "body": json.dumps({"message": f"hello {username}!"})}
+
+
+    def hello(**kwargs):
+        return {"statusCode": 200, "body": json.dumps({"message": "hello unknown!"})}
+
+
+    class Router:
+        def __init__(self):
+            self.routes = {}
+
+        def set(self, path, method, handler):
+            self.routes[f"{path}-{method}"] = handler
+
+        def get(self, path, method):
+            try:
+                route = self.routes[f"{path}-{method}"]
+            except KeyError:
+                raise RuntimeError(f"Cannot route request to the correct method. path={path}, method={method}")
+            return route
+
+    router = Router()
+    router.set(path="/hello", method="GET", handler=hello)
+    router.set(path="/hello/{name}", method="GET", handler=hello_name)
+
+
+    def lambda_handler(event, context):
+        path = event["resource"]
+        http_method = event["httpMethod"]
+        method = router.get(path=path, method=http_method)
+        return method(event=event)
+    ```
+
+=== "template.yaml"
+
+    ```yaml hl_lines="15-24"
+    AWSTemplateFormatVersion: "2010-09-09"
+    Transform: AWS::Serverless-2016-10-31
+    Description: Sample SAM Template for powertools-quickstart
+    Globals:
+      Function:
+        Timeout: 3
+    Resources:
+      HelloWorldFunction:
+        Type: AWS::Serverless::Function
+        Properties:
+          CodeUri: hello_world/
+          Handler: app.lambda_handler
+          Runtime: python3.9
+          Events:
+            HelloWorld:
+              Type: Api
+              Properties:
+                Path: /hello
+                Method: get
+            HelloWorldName:
+              Type: Api
+              Properties:
+                Path: /hello/{name}
+                Method: get
+    Outputs:
+      HelloWorldApi:
+        Description: "API Gateway endpoint URL for Prod stage for Hello World function"
+        Value: !Sub "https://${ServerlessRestApi}.execute-api.${AWS::Region}.amazonaws.com/Prod/hello/"
+    ```
+
+Let's break this down:
+
+* **L4,9**: We defined two functions, `hello_name` and `hello`, to handle the `/hello/{name}` and `/hello` routes.
+* **L13:** We added a `Router` class to map a path, a method, and the function to call.
+* **L27-29**: We create a `Router` instance and map both `/hello` and `/hello/{name}`.
+* **L35:** We use Router's `get` method to retrieve a reference to the processing method (`hello` or `hello_name`).
+* **L36:** Finally, we run this method and send the results back to API Gateway.
+
+This approach simplifies the configuration of our infrastructure since we have added all API Gateway paths in the `HelloWorldFunction` event section.
+
+However, it forces us to understand the internal structure of the API Gateway request events and responses, and it could lead to other issues such as CORS not being handled properly, gaps in error handling, etc.
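+
+Before moving on, we can sanity-check the hand-rolled router above locally. The event below is a trimmed, hypothetical API Gateway REST payload containing only the fields our router reads:
+
+```python title="Local sanity check of the hand-rolled router (sketch)"
+if __name__ == "__main__":
+    # Only the fields our Router and lambda_handler actually read
+    event = {
+        "resource": "/hello/{name}",
+        "httpMethod": "GET",
+        "pathParameters": {"name": "Ada"},
+    }
+    print(lambda_handler(event, None))
+    # {'statusCode': 200, 'body': '{"message": "hello Ada!"}'}
+```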
+
+### Simplifying with Event Handler
+
+We can massively simplify cross-cutting concerns while keeping it lightweight by using [Event Handler](./core/event_handler/api_gateway.md){target="_blank"}.
+
+???+ tip
+    This is available for both [REST API (API Gateway, ALB)](./core/event_handler/api_gateway.md){target="_blank"} and [GraphQL API (AppSync)](./core/event_handler/appsync.md){target="_blank"}.
+
+Let's include Lambda Powertools as a dependency in `requirements.txt`, and use Event Handler to refactor our previous example.
+
+=== "app.py"
+
+    ```python hl_lines="1 3 6 11 17"
+    from aws_lambda_powertools.event_handler.api_gateway import ApiGatewayResolver
+
+    app = ApiGatewayResolver()
+
+
+    @app.get("/hello/<name>")
+    def hello_name(name):
+        return {"message": f"hello {name}!"}
+
+
+    @app.get("/hello")
+    def hello():
+        return {"message": "hello unknown!"}
+
+
+    def lambda_handler(event, context):
+        return app.resolve(event, context)
+    ```
+=== "requirements.txt"
+
+    ```bash
+    aws-lambda-powertools
+    ```
+
+Use `sam build && sam local start-api` and try running it locally again.
+
+???+ note
+    If you're coming from [Flask](https://flask.palletsprojects.com/en/2.0.x/){target="_blank"}, you will be familiar with this experience already. [Event Handler for API Gateway](./core/event_handler/api_gateway.md){target="_blank"} uses `ApiGatewayResolver` to give a Flask-like experience while staying true to our tenet `Keep it lean`.
+
+We have added the route annotation as the decorator for our methods. It enables us to use the parameters passed in the request directly, and our responses are simply dictionaries.
+
+Lastly, we used `return app.resolve(event, context)` so Event Handler can resolve routes, inject the current request, handle serialization, route validation, etc.
+
+From here, we could handle [404 routes](./core/event_handler/api_gateway.md#handling-not-found-routes){target="_blank"}, [error handling](./core/event_handler/api_gateway.md#exception-handling){target="_blank"}, [access query strings, payload](./core/event_handler/api_gateway.md#accessing-request-details){target="_blank"}, etc.
+
+
+???+ tip
+    If you'd like to learn how Python decorators work under the hood, you can follow [Real Python](https://realpython.com/primer-on-python-decorators/)'s article.
+
+## Structured Logging
+
+Over time, you realize that searching logs as text results in poor observability: it's hard to create metrics from them, enumerate common exceptions, etc.
+
+Then, you decide to propose production-quality logging capabilities for your Lambda code. You find out that by having logs as `JSON` you can [structure them](https://docs.aws.amazon.com/lambda/latest/operatorguide/parse-logs.html), so that you can use any log analytics tool out there to quickly analyze them.
+
+This helps not only in searching, but also produces consistent logs containing enough context and data to ask arbitrary questions on the status of your system. We can take advantage of CloudWatch Logs and CloudWatch Logs Insights for this purpose.
+
+### JSON as output
+
+The first option could be to use the standard Python Logger, and a specialized library like `pythonjsonlogger` to create a JSON Formatter.
+
+=== "app.py"
+
+    ```python hl_lines="4 5 7-12 19 25 30"
+    import logging
+    import os
+
+    from pythonjsonlogger import jsonlogger
+    from aws_lambda_powertools.event_handler.api_gateway import ApiGatewayResolver
+
+    logger = logging.getLogger("APP")
+    logHandler = logging.StreamHandler()
+    formatter = jsonlogger.JsonFormatter(fmt="%(asctime)s %(levelname)s %(name)s %(message)s")
+    logHandler.setFormatter(formatter)
+    logger.addHandler(logHandler)
+    logger.setLevel(os.getenv("LOG_LEVEL", "INFO"))
+
+    app = ApiGatewayResolver()
+
+
+    @app.get("/hello/<name>")
+    def hello_name(name):
+        logger.info(f"Request from {name} received")
+        return {"message": f"hello {name}!"}
+
+
+    @app.get("/hello")
+    def hello():
+        logger.info("Request from unknown received")
+        return {"message": "hello unknown!"}
+
+
+    def lambda_handler(event, context):
+        logger.debug(event)
+        return app.resolve(event, context)
+    ```
+=== "requirements.txt"
+
+    ```bash
+    aws-lambda-powertools
+    python-json-logger
+    ```
+
+With just a few lines, our logs will now be output in `JSON` format. We've taken the following steps to make that work:
+
+* **L7**: Creates an application logger named `APP`.
+* **L8-11**: Configures the handler and the formatter.
+* **L12**: Sets the logging level to the one set in the `LOG_LEVEL` environment variable, or `INFO` as a default.
+
+After that, we use this logger in our application code to record the required information. We see logs structured as follows:
+
+=== "JSON output"
+
+    ```json
+    {
+        "asctime": "2021-11-22 15:32:02,145",
+        "levelname": "INFO",
+        "name": "APP",
+        "message": "Request from unknown received"
+    }
+    ```
+
+=== "Normal output"
+
+    ```python
+    [INFO] 2021-11-22T15:32:02.145Z ba3bea3d-fe3a-45db-a2ce-72e813d55b91 Request from unknown received
+    ```
+
+So far, so good! We can take a step further now by adding additional context to the logs.
+
+We could start by creating a dictionary with Lambda context information or something from the incoming event, which should always be logged. Additional attributes could be added on every `logger.info` call using the `extra` keyword, like in any standard Python logger.
+
+
+### Simplifying with Logger
+
+???+ question "Surely this could be easier, right?"
+    Yes! Powertools Logger to the rescue :-)
+
+As we already have Lambda Powertools as a dependency, we can simply import [Logger](./core/logger.md){target="_blank"}.
+
+```python title="Refactoring with Lambda Powertools Logger" hl_lines="1 3 5 12 18 22"
+from aws_lambda_powertools import Logger
+from aws_lambda_powertools.event_handler.api_gateway import ApiGatewayResolver
+from aws_lambda_powertools.logging import correlation_paths
+
+logger = Logger(service="APP")
+
+app = ApiGatewayResolver()
+
+
+@app.get("/hello/<name>")
+def hello_name(name):
+    logger.info(f"Request from {name} received")
+    return {"message": f"hello {name}!"}
+
+
+@app.get("/hello")
+def hello():
+    logger.info("Request from unknown received")
+    return {"message": "hello unknown!"}
+
+
+@logger.inject_lambda_context(correlation_id_path=correlation_paths.API_GATEWAY_REST, log_event=True)
+def lambda_handler(event, context):
+    return app.resolve(event, context)
+```
+
+Let's break this down:
+
+* **L5**: We add Lambda Powertools Logger; the boilerplate is now done for you. By default, we set `INFO` as the logging level if the `LOG_LEVEL` env var isn't set.
+* **L22**: We use the `logger.inject_lambda_context` decorator to inject key information from the Lambda context into every log.
+* **L22**: We also instruct Logger to use the incoming API Gateway request ID as a [correlation id](./core/logger.md#set_correlation_id-method) automatically.
+* **L22**: Since we're in dev, we also use `log_event=True` to automatically log each incoming request for debugging. This can also be set via [environment variables](./index.md#environment-variables){target="_blank"}.
+
+
+This is what the logs look like now:
+
+```json title="Our logs are now structured consistently"
+{
+    "level":"INFO",
+    "location":"hello:17",
+    "message":"Request from unknown received",
+    "timestamp":"2021-10-22 16:29:58,367+0000",
+    "service":"APP",
+    "cold_start":true,
+    "function_name":"HelloWorldFunction",
+    "function_memory_size":"256",
+    "function_arn":"arn:aws:lambda:us-east-1:123456789012:function:HelloWorldFunction",
+    "function_request_id":"d50bb07a-7712-4b2d-9f5d-c837302221a2",
+    "correlation_id":"bf9b584c-e5d9-4ad5-af3d-db953f2b10dc"
+}
+```
+
+We can now search our logs by the request ID to find a specific operation. Additionally, we can also search our logs for function name, Lambda request ID, Lambda function ARN, find out whether an operation was a cold start, etc.
+
+From here, we could [set specific keys](./core/logger.md#append_keys-method){target="_blank"} to add additional contextual information about a given operation, [log exceptions](./core/logger.md#logging-exceptions){target="_blank"} to easily enumerate them later, [sample debug logs](./core/logger.md#sampling-debug-logs){target="_blank"}, etc.
+
+By having structured logs like this, we can easily search and analyse them in [CloudWatch Logs Insights](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/AnalyzingLogData.html){target="_blank"}.
+
+=== "CloudWatch Logs Insights Example"
+
+    ![CloudWatch Logs Insights Example](../media/cloudwatch_logs_insight_example.png)
+
+## Tracing
+
+???+ note
+    You won't see any traces in AWS X-Ray when executing your function locally.
+
+The next improvement is to add distributed tracing to your stack. Traces help you visualize end-to-end transactions, or parts of them, to easily debug upstream and downstream anomalies.
+
+Combined with structured logs, it is an important step towards being able to observe how your application runs in production.
+
+### Generating traces
+
+[AWS X-Ray](https://aws.amazon.com/xray/){target="_blank"} is the distributed tracing service we're going to use. But how do we generate application traces in the first place?
+
+It's a [two-step process](https://docs.aws.amazon.com/lambda/latest/dg/services-xray.html){target="_blank"}:
+
+1. Enable tracing in your Lambda function.
+2. Instrument your application code.
+
+Let's explore how we can instrument our code with the [AWS X-Ray SDK](https://docs.aws.amazon.com/xray-sdk-for-python/latest/reference/index.html){target="_blank"}, and then simplify it with the [Lambda Powertools Tracer](core/tracer.md){target="_blank"} feature.
+
+=== "app.py"
+
+    ```python hl_lines="1 13 20 27"
+    from aws_xray_sdk.core import xray_recorder
+
+    from aws_lambda_powertools import Logger
+    from aws_lambda_powertools.event_handler.api_gateway import ApiGatewayResolver
+    from aws_lambda_powertools.logging import correlation_paths
+
+    logger = Logger(service="APP")
+
+    app = ApiGatewayResolver()
+
+
+    @app.get("/hello/<name>")
+    @xray_recorder.capture('hello_name')
+    def hello_name(name):
+        logger.info(f"Request from {name} received")
+        return {"message": f"hello {name}!"}
+
+
+    @app.get("/hello")
+    @xray_recorder.capture('hello')
+    def hello():
+        logger.info("Request from unknown received")
+        return {"message": "hello unknown!"}
+
+
+    @logger.inject_lambda_context(correlation_id_path=correlation_paths.API_GATEWAY_REST, log_event=True)
+    @xray_recorder.capture('handler')
+    def lambda_handler(event, context):
+        return app.resolve(event, context)
+    ```
+
+=== "template.yaml"
+
+    ```yaml hl_lines="7-8 16"
+    AWSTemplateFormatVersion: "2010-09-09"
+    Transform: AWS::Serverless-2016-10-31
+    Description: Sample SAM Template for powertools-quickstart
+    Globals:
+      Function:
+        Timeout: 3
+      Api:
+        TracingEnabled: true
+    Resources:
+      HelloWorldFunction:
+        Type: AWS::Serverless::Function
+        Properties:
+          CodeUri: hello_world/
+          Handler: app.lambda_handler
+          Runtime: python3.9
+          Tracing: Active
+          Events:
+            HelloWorld:
+              Type: Api
+              Properties:
+                Path: /hello
+                Method: get
+            HelloWorldName:
+              Type: Api
+              Properties:
+                Path: /hello/{name}
+                Method: get
+    Outputs:
+      HelloWorldApi:
+        Description: "API Gateway endpoint URL for Prod stage for Hello World function"
+        Value: !Sub "https://${ServerlessRestApi}.execute-api.${AWS::Region}.amazonaws.com/Prod/hello/"
+    ```
+
+=== "requirements.txt"
+
+    ```bash
+    aws-lambda-powertools
+    aws-xray-sdk
+    ```
+
+Let's break it down:
+
+* **L1**: First, we import the AWS X-Ray SDK. `xray_recorder` records blocks of code being traced ([subsegment](https://docs.aws.amazon.com/xray/latest/devguide/xray-concepts.html#xray-concepts-subsegments){target="_blank"}). It also sends generated traces to the AWS X-Ray daemon running in the Lambda service, which subsequently forwards them to the AWS X-Ray service.
+* **L13,20,27**: We decorate our functions so the SDK traces the end-to-end execution, and the argument names the generated block being traced.
+
+???+ question
+    But how do I enable tracing for the Lambda function, and what permissions do I need?
+
+We've made the following changes in `template.yaml` for this to work seamlessly:
+
+* **L7-8**: Enables tracing for Amazon API Gateway.
+* **L16**: Enables tracing for our Serverless Function. This will also add a managed IAM policy named [AWSXRayDaemonWriteAccess](https://console.aws.amazon.com/iam/home#/policies/arn:aws:iam::aws:policy/AWSXRayDaemonWriteAccess){target="_blank"} to allow Lambda to send traces to AWS X-Ray.
+
+You can now build and deploy your updates with `sam build && sam deploy`. Once deployed, try invoking the application via the API endpoint, and visit the [AWS X-Ray Console](https://console.aws.amazon.com/xray/home#/traces/){target="_blank"} to see how much progress we've made so far!
+
+![AWS X-Ray Console trace view](../media/tracer_xray_sdk_showcase.png)
+
+### Enriching our generated traces
+
+What we've done helps bring initial visibility, but we can do so much more.
+
+???+ question
+    You're probably asking yourself at least the following questions:
+
+    * What if I want to search traces by customer name?
+    * What about grouping traces with cold starts?
+    * Better yet, what if we want to include the request or response of our functions as part of the trace?
+
+Within AWS X-Ray, we can answer these questions by using two features: tracing **Annotations** and **Metadata**.
+
+**Annotations** are simple key-value pairs that are indexed for use with [filter expressions](https://docs.aws.amazon.com/xray/latest/devguide/xray-console-filters.html){target="_blank"}.
+**Metadata** are key-value pairs with values of any type, including objects and lists, but they are not indexed.
+
+Let's put them into action.
+
+```python title="Enriching traces with annotations and metadata" hl_lines="10 17-18 26-27 35 37-42 45"
+from aws_xray_sdk.core import patch_all, xray_recorder
+
+from aws_lambda_powertools import Logger
+from aws_lambda_powertools.event_handler.api_gateway import ApiGatewayResolver
+from aws_lambda_powertools.logging import correlation_paths
+
+logger = Logger(service="APP")
+
+app = ApiGatewayResolver()
+cold_start = True
+patch_all()
+
+
+@app.get("/hello/<name>")
+@xray_recorder.capture('hello_name')
+def hello_name(name):
+    subsegment = xray_recorder.current_subsegment()
+    subsegment.put_annotation(key="User", value=name)
+    logger.info(f"Request from {name} received")
+    return {"message": f"hello {name}!"}
+
+
+@app.get("/hello")
+@xray_recorder.capture('hello')
+def hello():
+    subsegment = xray_recorder.current_subsegment()
+    subsegment.put_annotation(key="User", value="unknown")
+    logger.info("Request from unknown received")
+    return {"message": "hello unknown!"}
+
+
+@logger.inject_lambda_context(correlation_id_path=correlation_paths.API_GATEWAY_REST, log_event=True)
+@xray_recorder.capture('handler')
+def lambda_handler(event, context):
+    global cold_start
+
+    subsegment = xray_recorder.current_subsegment()
+    if cold_start:
+        subsegment.put_annotation(key="ColdStart", value=cold_start)
+        cold_start = False
+    else:
+        subsegment.put_annotation(key="ColdStart", value=cold_start)
+
+    result = app.resolve(event, context)
+    subsegment.put_metadata("response", result)
+
+    return result
+```
+
+Let's break it down:
+
+* **L10**: We track Lambda cold start by setting a global variable outside the handler; this is executed once per execution environment (sandbox) that Lambda creates. This information provides an overview of how often the sandbox is reused by Lambda, which directly impacts the performance of each transaction.
+* **L17-18**: We use the AWS X-Ray SDK to add the `User` annotation on the `hello_name` subsegment. This will allow us to filter traces using the `User` value.
+* **L26-27**: We repeat what we did in L17-18, except we use the value `unknown` since we don't have that information.
+* **L35**: We use `global` to modify our global variable defined in the outer scope.
+* **L37-42**: We add the `ColdStart` annotation and flip the value of the `cold_start` variable, so that subsequent requests annotate the value `false` when the sandbox is reused.
+* **L45**: We include the final response under the `response` key as part of the `handler` subsegment.
+
+???+ info
+    If you want to understand how the Lambda execution environment (sandbox) works and why cold starts can occur, see this [blog series on Lambda performance](https://aws.amazon.com/blogs/compute/operating-lambda-performance-optimization-part-1/).
+
+Repeat the process of building, deploying, and invoking your application via the API endpoint. Within the [AWS X-Ray Console](https://console.aws.amazon.com/xray/home#/traces/){target="_blank"}, you should now be able to group traces by the `User` and `ColdStart` annotations.
+
+![Filtering traces by annotations](../media/tracer_xray_sdk_enriched.png)
+
+If you choose any of the available traces, try opening the `handler` subsegment; you should see the response of your Lambda function under the `Metadata` tab.
+
+![Filtering traces by metadata](../media/tracer_xray_sdk_enriched_2.png)
+
+### Simplifying with Tracer
+
+Cross-cutting concerns like filtering traces by cold start, and including responses as well as exceptions as tracing metadata, can take a considerable amount of boilerplate.
+
+We can simplify our previous patterns by using [Lambda Powertools Tracer](core/tracer.md){target="_blank"}; a thin wrapper on top of the X-Ray SDK.
+
+???+ note
+    You can now safely remove `aws-xray-sdk` from `requirements.txt`; keep `aws-lambda-powertools` only.
+
+```python title="Refactoring with Lambda Powertools Tracer" hl_lines="1 6 11 13 19 21 27"
+from aws_lambda_powertools import Logger, Tracer
+from aws_lambda_powertools.event_handler.api_gateway import ApiGatewayResolver
+from aws_lambda_powertools.logging import correlation_paths
+
+logger = Logger(service="APP")
+tracer = Tracer(service="APP")
+app = ApiGatewayResolver()
+
+
+@app.get("/hello/<name>")
+@tracer.capture_method
+def hello_name(name):
+    tracer.put_annotation(key="User", value=name)
+    logger.info(f"Request from {name} received")
+    return {"message": f"hello {name}!"}
+
+
+@app.get("/hello")
+@tracer.capture_method
+def hello():
+    tracer.put_annotation(key="User", value="unknown")
+    logger.info("Request from unknown received")
+    return {"message": "hello unknown!"}
+
+
+@logger.inject_lambda_context(correlation_id_path=correlation_paths.API_GATEWAY_REST, log_event=True)
+@tracer.capture_lambda_handler
+def lambda_handler(event, context):
+    return app.resolve(event, context)
+```
+
+Decorators, annotations and metadata are largely the same, except we now have much cleaner code as the boilerplate is gone. Here's what's changed compared to the AWS X-Ray SDK approach:
+
+* **L6**: We initialize `Tracer` and define the name of our service (`APP`). We automatically run `patch_all` from the AWS X-Ray SDK on your behalf. Any previously patched or non-imported library is simply ignored.
+* **L11**: We use the `@tracer.capture_method` decorator instead of `xray_recorder.capture`. We automatically create a subsegment named after the function name (`## hello_name`), and add the response/exception as tracing metadata.
+* **L13**: Putting annotations remains exactly the same UX.
+* **L27**: We use `@tracer.capture_lambda_handler` so we automatically add the `ColdStart` annotation within Tracer itself. We also add a new `Service` annotation using the value of `Tracer(service="APP")`, so that you can filter traces by the service your function(s) represent.
+
+Another subtle difference is that you can now run your Lambda functions and unit test them locally without having to explicitly disable Tracer.
+
+Lambda Powertools optimizes for the Lambda compute environment. As such, we add these and other common approaches to accelerate your development, so you don't have to worry about implementing every cross-cutting concern.
+
+???+ tip
+    You can [opt out of some of these behaviours](./core/tracer/#advanced){target="_blank"}, like disabling response capturing or explicitly patching only certain modules.
+
+Repeat the process of building, deploying, and invoking your application via the API endpoint.
+
+Repeat the process of building, deploying, and invoking your application via the API endpoint. Within the [AWS X-Ray Console](https://console.aws.amazon.com/xray/home#/traces/){target="_blank"}, you should see a similar view:
+
+
+![AWS X-Ray Console trace view using Lambda Powertools Tracer](../media/tracer_utility_showcase_2.png)
+
+???+ tip
+    Consider using [Amazon CloudWatch ServiceLens view](https://console.aws.amazon.com/cloudwatch/home#servicelens:service-map/map){target="_blank"} as it aggregates AWS X-Ray traces and CloudWatch metrics and logs in one view.
+
+From here, you can browse to specific logs in CloudWatch Logs Insights, the Metrics Dashboard, or AWS X-Ray traces.
+
+![CloudWatch ServiceLens View](../media/tracer_utility_showcase_3.png)
+
+???+ info
+    For more information, please see the [Amazon CloudWatch ServiceLens documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/ServiceLens.html).
+
+## Custom Metrics
+
+### Creating metrics
+
+Let's add custom metrics to better understand our application and business behavior (e.g. number of reservations).
+
+Out of the box, AWS Lambda adds [invocation, performance, and concurrency metrics](https://docs.aws.amazon.com/lambda/latest/dg/monitoring-metrics.html#monitoring-metrics-types){target="_blank"}. Amazon API Gateway also adds [general metrics at the aggregate level](https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-metrics-and-dimensions.html#api-gateway-metrics) such as latency, number of requests received, etc.
+
+???+ tip
+    You can [optionally enable detailed metrics](https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-metrics-and-dimensions.html#api-gateway-metricdimensions){target="_blank"} for each API route, stage, and method in API Gateway.
+
+Let's expand our application with custom metrics using the AWS SDK to see how it works, then let's upgrade it with Lambda Powertools :-)
+
+=== "app.py"
+
+    ```python hl_lines="3 10 14 19-47 55 64"
+    import os
+
+    import boto3
+
+    from aws_lambda_powertools import Logger, Tracer
+    from aws_lambda_powertools.event_handler.api_gateway import ApiGatewayResolver
+    from aws_lambda_powertools.logging import correlation_paths
+
+    cold_start = True
+    metric_namespace = "MyApp"
+
+    logger = Logger(service="APP")
+    tracer = Tracer(service="APP")
+    metrics = boto3.client("cloudwatch")
+    app = ApiGatewayResolver()
+
+
+    @tracer.capture_method
+    def add_greeting_metric(service: str = "APP"):
+        function_name = os.getenv("AWS_LAMBDA_FUNCTION_NAME", "undefined")
+        service_dimension = {"Name": "service", "Value": service}
+        function_dimension = {"Name": "function_name", "Value": function_name}
+        is_cold_start = True
+
+        global cold_start
+        if cold_start:
+            cold_start = False
+        else:
+            is_cold_start = False
+
+        return metrics.put_metric_data(
+            MetricData=[
+                {
+                    "MetricName": "SuccessfulGreetings",
+                    "Dimensions": [service_dimension],
+                    "Unit": "Count",
+                    "Value": 1,
+                },
+                {
+                    "MetricName": "ColdStart",
+                    "Dimensions": [service_dimension, function_dimension],
+                    "Unit": "Count",
+                    "Value": int(is_cold_start)
+                }
+            ],
+            Namespace=metric_namespace,
+        )
+
+
+    @app.get("/hello/<name>")
+    @tracer.capture_method
+    def hello_name(name):
+        tracer.put_annotation(key="User", value=name)
+        logger.info(f"Request from {name} received")
+        add_greeting_metric()
+        return {"message": f"hello {name}!"}
+
+
+    @app.get("/hello")
+    @tracer.capture_method
+    def hello():
+        tracer.put_annotation(key="User", value="unknown")
+        logger.info("Request from unknown received")
+        add_greeting_metric()
+        return {"message": "hello unknown!"}
+
+
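+    # NOTE: both routes above call CloudWatch synchronously via add_greeting_metric();
+    # the latency and cost trade-offs of this approach are discussed below.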
+    @logger.inject_lambda_context(correlation_id_path=correlation_paths.API_GATEWAY_REST, log_event=True)
+    @tracer.capture_lambda_handler
+    def lambda_handler(event, context):
+        return app.resolve(event, context)
+    ```
+
+=== "template.yaml"
+
+    ```yaml hl_lines="26 27"
+    AWSTemplateFormatVersion: "2010-09-09"
+    Transform: AWS::Serverless-2016-10-31
+    Description: Sample SAM Template for powertools-quickstart
+    Globals:
+      Function:
+        Timeout: 3
+    Resources:
+      HelloWorldFunction:
+        Type: AWS::Serverless::Function
+        Properties:
+          CodeUri: hello_world/
+          Handler: app.lambda_handler
+          Runtime: python3.9
+          Tracing: Active
+          Events:
+            HelloWorld:
+              Type: Api
+              Properties:
+                Path: /hello
+                Method: get
+            HelloWorldName:
+              Type: Api
+              Properties:
+                Path: /hello/{name}
+                Method: get
+          Policies:
+            - CloudWatchPutMetricPolicy: {}
+    Outputs:
+      HelloWorldApi:
+        Description: "API Gateway endpoint URL for Prod stage for Hello World function"
+        Value: !Sub "https://${ServerlessRestApi}.execute-api.${AWS::Region}.amazonaws.com/Prod/hello/"
+
+    ```
+
+There's a lot going on, so let's break this down:
+
+* **L10**: We define `MyApp` as the container where all of our application metrics will live, a.k.a. the [Metrics Namespace](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/cloudwatch_concepts.html){target="_blank"}.
+* **L14**: We initialize a CloudWatch client to send metrics later.
+* **L19-47**: We create a custom function to prepare and send the `ColdStart` and `SuccessfulGreetings` metrics using the data structure CloudWatch expects. We also set the [dimensions](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/cloudwatch_concepts.html#Dimension){target="_blank"} of these metrics; think of them as metadata you can use to slice and dice them later. A unique metric is the combination of metric name + metric dimension(s).
+* **L55,64**: We call our custom function to create metrics for every greeting received.
+
+???+ question
+    But what permissions do I need to send metrics to CloudWatch?
+
+Within `template.yaml`, we add the [CloudWatchPutMetricPolicy](https://docs.aws.amazon.com/serverless-application-model/latest/developerguide/serverless-policy-template-list.html#cloudwatch-put-metric-policy){target="_blank"} policy in SAM.
+
+???+ note "Adding metrics via AWS SDK gives a lot of flexibility at a cost"
+    `put_metric_data` is a synchronous call to the CloudWatch Metrics API. This means establishing a connection to the CloudWatch endpoint, sending the metrics payload, and waiting for a response.
+
+    It will be visible in your AWS X-Ray traces as an additional external call. Depending on your architecture's scale, this approach can bring disadvantages such as increased cost of data collection and increased Lambda latency.
+
+### Simplifying with Metrics
+
+[Lambda Powertools Metrics](./core/metrics.md){target="_blank"} uses [Amazon CloudWatch Embedded Metric Format (EMF)](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch_Embedded_Metric_Format.html) to create custom metrics **asynchronously** via a native integration with Lambda.
+
+In general terms, EMF is a specification that expects metrics in a JSON payload within CloudWatch Logs. Lambda ingests all logs emitted by a given function into CloudWatch Logs. CloudWatch automatically looks for log entries that follow the EMF format and transforms them into a CloudWatch metric.
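+
+To make the specification concrete, here is a minimal sketch of such a payload, hand-rolled in Python for illustration only; Powertools Metrics generates and validates this JSON for us, as we'll see next:
+
+```python title="Hand-rolled EMF payload (illustrative sketch)"
+import json
+import time
+
+emf_payload = {
+    "_aws": {
+        "Timestamp": int(time.time() * 1000),  # milliseconds since epoch
+        "CloudWatchMetrics": [
+            {
+                "Namespace": "MyApp",
+                "Dimensions": [["service"]],
+                "Metrics": [{"Name": "SuccessfulGreetings", "Unit": "Count"}],
+            }
+        ],
+    },
+    "service": "APP",
+    "SuccessfulGreetings": 1,
+}
+
+# In Lambda, anything printed to standard output ends up in CloudWatch Logs,
+# where CloudWatch extracts the metric from entries like this one.
+print(json.dumps(emf_payload))
+```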
+
+???+ info
+    If you are interested in the details of the EMF mechanism, see this [blog post](https://aws.amazon.com/blogs/mt/enhancing-workload-observability-using-amazon-cloudwatch-embedded-metric-format/){target="_blank"}.
+
+Let's implement that using [Metrics](./core/metrics.md){target="_blank"}:
+
+```python title="Refactoring with Lambda Powertools Metrics" hl_lines="1 4 9 18 27 33"
+from aws_lambda_powertools import Logger, Tracer, Metrics
+from aws_lambda_powertools.event_handler.api_gateway import ApiGatewayResolver
+from aws_lambda_powertools.logging import correlation_paths
+from aws_lambda_powertools.metrics import MetricUnit
+
+
+logger = Logger(service="APP")
+tracer = Tracer(service="APP")
+metrics = Metrics(namespace="MyApp", service="APP")
+app = ApiGatewayResolver()
+
+
+@app.get("/hello/<name>")
+@tracer.capture_method
+def hello_name(name):
+    tracer.put_annotation(key="User", value=name)
+    logger.info(f"Request from {name} received")
+    metrics.add_metric(name="SuccessfulGreetings", unit=MetricUnit.Count, value=1)
+    return {"message": f"hello {name}!"}
+
+
+@app.get("/hello")
+@tracer.capture_method
+def hello():
+    tracer.put_annotation(key="User", value="unknown")
+    logger.info("Request from unknown received")
+    metrics.add_metric(name="SuccessfulGreetings", unit=MetricUnit.Count, value=1)
+    return {"message": "hello unknown!"}
+
+
+@tracer.capture_lambda_handler
+@logger.inject_lambda_context(correlation_id_path=correlation_paths.API_GATEWAY_REST, log_event=True)
+@metrics.log_metrics(capture_cold_start_metric=True)
+def lambda_handler(event, context):
+    try:
+        return app.resolve(event, context)
+    except Exception as e:
+        logger.exception(e)
+        raise
+```
+
+That's a lot less boilerplate code! Let's break this down:
+
+* **L9**: We initialize `Metrics` with our service name (`APP`) and metrics namespace (`MyApp`), removing the need to add the `service` dimension to every metric and to set the namespace later.
+* **L18, 27**: We use `add_metric` similarly to our custom function, except we now have the `MetricUnit` enum to help us understand which Metric Units we have at our disposal.
+* **L33**: We use the `@metrics.log_metrics` decorator to ensure that our metrics are aligned with the EMF output and validated beforehand, e.g. in case we forget to set a namespace, or accidentally use a metric unit string that doesn't exist in CloudWatch.
+* **L33**: We also use `capture_cold_start_metric=True` so we don't have to handle that logic either. Note that [Metrics](./core/metrics.md){target="_blank"} does not publish a warm invocation metric (ColdStart=0) for cost reasons. As such, treat the absence of that metric (a sparse metric) as a non-cold start invocation.
+
+Repeat the process of building, deploying, and invoking your application via the API endpoint a few times to generate metrics - [Artillery](https://www.artillery.io/){target="_blank"} and [K6.io](https://k6.io/open-source){target="_blank"} are quick ways to generate some load. Within the [CloudWatch Metrics view](https://console.aws.amazon.com/cloudwatch/home#metricsV2:graph=~()){target="_blank"}, you should see the `MyApp` custom namespace with your custom metrics, such as `SuccessfulGreetings`, available to graph.
+
+![Custom Metrics Example](../media/metrics_utility_showcase.png)
+
+If you're curious about how the EMF portion of your function logs looks, you can quickly go to the [CloudWatch ServiceLens view](https://console.aws.amazon.com/cloudwatch/home#servicelens:service-map/map){target="_blank"}, choose your function, and open its logs.
+You should see an entry similar to this:
+
+```json
+{
+    "_aws": {
+        "Timestamp": 1638115724269,
+        "CloudWatchMetrics": [
+            {
+                "Namespace": "MyApp",
+                "Dimensions": [
+                    [
+                        "service"
+                    ]
+                ],
+                "Metrics": [
+                    {
+                        "Name": "SuccessfulGreetings",
+                        "Unit": "Count"
+                    }
+                ]
+            }
+        ]
+    },
+    "service": "APP",
+    "SuccessfulGreetings": [
+        1
+    ]
+}
+```
+
+## Final considerations
+
+We covered a lot of ground here, and we only scratched the surface of the feature set available within Lambda Powertools.
+
+When it comes to the observability features ([Tracer](./core/tracer.md){target="_blank"}, [Metrics](./core/metrics.md){target="_blank"}, [Logging](./core/logger.md){target="_blank"}), don't stop there! The goal here is to ensure you can ask arbitrary questions to assess your system's health; these features are only part of the wider story!
+
+This requires a change in mindset to ensure operational excellence is part of the software development lifecycle.
+
+???+ tip
+    You can find more details on other leading practices in the [Well-Architected Serverless Lens](https://aws.amazon.com/blogs/aws/new-serverless-lens-in-aws-well-architected-tool/).
+
+    Lambda Powertools is largely designed to make some of these practices easier to adopt from day 1.
+
+???+ question "Have ideas for other tutorials?"
+    You can open up a [documentation issue](https://github.com/awslabs/aws-lambda-powertools-python/issues/new?assignees=&labels=documentation&template=documentation-improvements.md&title=Tutorial%20Suggestion){target="_blank"}, or connect with us on the [AWS Developers Slack](https://github.com/awslabs/aws-lambda-powertools-python/#connect) in the `lambda-powertools` channel, or via e-mail at [aws-lambda-powertools-feedback@amazon.com](mailto:aws-lambda-powertools-feedback@amazon.com).
diff --git a/docs/utilities/batch.md b/docs/utilities/batch.md index cdfb51549ae..14dc80bdb11 100644 --- a/docs/utilities/batch.md +++ b/docs/utilities/batch.md @@ -285,7 +285,7 @@ Processing batches from SQS works in four stages: @tracer.capture_lambda_handler def lambda_handler(event, context: LambdaContext): batch = event["Records"] - with processor(records=batch, processor=processor): + with processor(records=batch, handler=record_handler): processed_messages = processor.process() # kick off processing, return list[tuple] return processor.response() @@ -413,7 +413,7 @@ Processing batches from Kinesis works in four stages: @tracer.capture_lambda_handler def lambda_handler(event, context: LambdaContext): batch = event["Records"] - with processor(records=batch, processor=processor): + with processor(records=batch, handler=record_handler): processed_messages = processor.process() # kick off processing, return list[tuple] return processor.response() @@ -549,7 +549,7 @@ Processing batches from Kinesis works in four stages: @tracer.capture_lambda_handler def lambda_handler(event, context: LambdaContext): batch = event["Records"] - with processor(records=batch, processor=processor): + with processor(records=batch, handler=record_handler): processed_messages = processor.process() # kick off processing, return list[tuple] return processor.response() @@ -821,14 +821,13 @@ def record_handler(record: SQSRecord): @tracer.capture_lambda_handler def lambda_handler(event, context: LambdaContext): batch = event["Records"] - with processor(records=batch, processor=processor): + with processor(records=batch, handler=record_handler): processed_messages: List[Union[SuccessResponse, FailureResponse]] = processor.process() - for messages in processed_messages: - for message in messages: - status: Union[Literal["success"], Literal["fail"]] = message[0] - result: Any = message[1] - record: SQSRecord = message[2] + for message in processed_messages: + status: Union[Literal["success"], Literal["fail"]] = message[0] + result: Any = message[1] + record: SQSRecord = message[2] return processor.response() @@ -922,7 +921,7 @@ class MyPartialProcessor(BasePartialProcessor): def _clean(self): # It's called once, *after* closing processing all records (closing the context manager) # Here we're sending, at once, all successful messages to a ddb table - with ddb_table.batch_writer() as batch: + with self.ddb_table.batch_writer() as batch: for result in self.success_messages: batch.put_item(Item=result) diff --git a/mkdocs.yml b/mkdocs.yml index 218deea586b..7f8366675ea 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -7,6 +7,7 @@ edit_uri: edit/develop/docs nav: - Homepage: index.md - Changelog: changelog.md + - Tutorial: tutorial/index.md - Roadmap: https://github.com/awslabs/aws-lambda-powertools-roadmap/projects/1" target="_blank - API reference: api/" target="_blank - Core utilities: @@ -14,8 +15,8 @@ nav: - core/logger.md - core/metrics.md - Event Handler: - - core/event_handler/appsync.md - - core/event_handler/api_gateway.md + - core/event_handler/api_gateway.md + - core/event_handler/appsync.md - Utilities: - utilities/middleware_factory.md - utilities/parameters.md @@ -50,6 +51,7 @@ theme: - navigation.expand - navigation.top - navigation.instant + - navigation.indexes icon: repo: fontawesome/brands/github logo: media/aws-logo-light.svg @@ -64,7 +66,7 @@ markdown_extensions: - pymdownx.superfences - pymdownx.details - pymdownx.snippets: - base_path: '.' + base_path: "." 
check_paths: true - meta - toc: diff --git a/poetry.lock b/poetry.lock index 6b71a28bc6f..1ed434d9afa 100644 --- a/poetry.lock +++ b/poetry.lock @@ -597,7 +597,7 @@ python-versions = ">=3.6" [[package]] name = "mypy" -version = "0.930" +version = "0.931" description = "Optional static typing for Python" category = "dev" optional = false @@ -705,7 +705,7 @@ python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" [[package]] name = "pydantic" -version = "1.8.2" +version = "1.9.0" description = "Data validation and settings management using python 3.6 type hinting" category = "main" optional = true @@ -1056,7 +1056,7 @@ pydantic = ["pydantic", "email-validator"] [metadata] lock-version = "1.1" python-versions = "^3.6.2" -content-hash = "364d0964de7151fa587584ef923d44440007f8e46933c038440c79f242e3e3fa" +content-hash = "c1df73741840dc9ee5e8dbbf6bfa02e09d3c84f53318e3e36cba9b30f6f2d2e4" [metadata.files] atomicwrites = [ @@ -1361,26 +1361,26 @@ mkdocs-material-extensions = [ {file = "mkdocs_material_extensions-1.0.3-py3-none-any.whl", hash = "sha256:a82b70e533ce060b2a5d9eb2bc2e1be201cf61f901f93704b4acf6e3d5983a44"}, ] mypy = [ - {file = "mypy-0.930-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:221cc94dc6a801ccc2be7c0c9fd791c5e08d1fa2c5e1c12dec4eab15b2469871"}, - {file = "mypy-0.930-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:db3a87376a1380f396d465bed462e76ea89f838f4c5e967d68ff6ee34b785c31"}, - {file = "mypy-0.930-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:1d2296f35aae9802eeb1327058b550371ee382d71374b3e7d2804035ef0b830b"}, - {file = "mypy-0.930-cp310-cp310-win_amd64.whl", hash = "sha256:959319b9a3cafc33a8185f440a433ba520239c72e733bf91f9efd67b0a8e9b30"}, - {file = "mypy-0.930-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:45a4dc21c789cfd09b8ccafe114d6de66f0b341ad761338de717192f19397a8c"}, - {file = "mypy-0.930-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:1e689e92cdebd87607a041585f1dc7339aa2e8a9f9bad9ba7e6ece619431b20c"}, - {file = "mypy-0.930-cp36-cp36m-win_amd64.whl", hash = "sha256:ed4e0ea066bb12f56b2812a15ff223c57c0a44eca817ceb96b214bb055c7051f"}, - {file = "mypy-0.930-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:a9d8dffefba634b27d650e0de2564379a1a367e2e08d6617d8f89261a3bf63b2"}, - {file = "mypy-0.930-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:b419e9721260161e70d054a15abbd50603c16f159860cfd0daeab647d828fc29"}, - {file = "mypy-0.930-cp37-cp37m-win_amd64.whl", hash = "sha256:601f46593f627f8a9b944f74fd387c9b5f4266b39abad77471947069c2fc7651"}, - {file = "mypy-0.930-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1ea7199780c1d7940b82dbc0a4e37722b4e3851264dbba81e01abecc9052d8a7"}, - {file = "mypy-0.930-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:70b197dd8c78fc5d2daf84bd093e8466a2b2e007eedaa85e792e513a820adbf7"}, - {file = "mypy-0.930-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:5feb56f8bb280468fe5fc8e6f56f48f99aa0df9eed3c507a11505ee4657b5380"}, - {file = "mypy-0.930-cp38-cp38-win_amd64.whl", hash = "sha256:2e9c5409e9cb81049bb03fa1009b573dea87976713e3898561567a86c4eaee01"}, - {file = "mypy-0.930-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:554873e45c1ca20f31ddf873deb67fa5d2e87b76b97db50669f0468ccded8fae"}, - {file = "mypy-0.930-cp39-cp39-macosx_11_0_arm64.whl", hash = 
"sha256:0feb82e9fa849affca7edd24713dbe809dce780ced9f3feca5ed3d80e40b777f"}, - {file = "mypy-0.930-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:bc1a0607ea03c30225347334af66b0af12eefba018a89a88c209e02b7065ea95"}, - {file = "mypy-0.930-cp39-cp39-win_amd64.whl", hash = "sha256:f9f665d69034b1fcfdbcd4197480d26298bbfb5d2dfe206245b6498addb34999"}, - {file = "mypy-0.930-py3-none-any.whl", hash = "sha256:bf4a44e03040206f7c058d1f5ba02ef2d1820720c88bc4285c7d9a4269f54173"}, - {file = "mypy-0.930.tar.gz", hash = "sha256:51426262ae4714cc7dd5439814676e0992b55bcc0f6514eccb4cf8e0678962c2"}, + {file = "mypy-0.931-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:3c5b42d0815e15518b1f0990cff7a705805961613e701db60387e6fb663fe78a"}, + {file = "mypy-0.931-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c89702cac5b302f0c5d33b172d2b55b5df2bede3344a2fbed99ff96bddb2cf00"}, + {file = "mypy-0.931-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:300717a07ad09525401a508ef5d105e6b56646f7942eb92715a1c8d610149714"}, + {file = "mypy-0.931-cp310-cp310-win_amd64.whl", hash = "sha256:7b3f6f557ba4afc7f2ce6d3215d5db279bcf120b3cfd0add20a5d4f4abdae5bc"}, + {file = "mypy-0.931-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:1bf752559797c897cdd2c65f7b60c2b6969ffe458417b8d947b8340cc9cec08d"}, + {file = "mypy-0.931-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:4365c60266b95a3f216a3047f1d8e3f895da6c7402e9e1ddfab96393122cc58d"}, + {file = "mypy-0.931-cp36-cp36m-win_amd64.whl", hash = "sha256:1b65714dc296a7991000b6ee59a35b3f550e0073411ac9d3202f6516621ba66c"}, + {file = "mypy-0.931-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:e839191b8da5b4e5d805f940537efcaa13ea5dd98418f06dc585d2891d228cf0"}, + {file = "mypy-0.931-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:50c7346a46dc76a4ed88f3277d4959de8a2bd0a0fa47fa87a4cde36fe247ac05"}, + {file = "mypy-0.931-cp37-cp37m-win_amd64.whl", hash = "sha256:d8f1ff62f7a879c9fe5917b3f9eb93a79b78aad47b533911b853a757223f72e7"}, + {file = "mypy-0.931-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:f9fe20d0872b26c4bba1c1be02c5340de1019530302cf2dcc85c7f9fc3252ae0"}, + {file = "mypy-0.931-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:1b06268df7eb53a8feea99cbfff77a6e2b205e70bf31743e786678ef87ee8069"}, + {file = "mypy-0.931-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:8c11003aaeaf7cc2d0f1bc101c1cc9454ec4cc9cb825aef3cafff8a5fdf4c799"}, + {file = "mypy-0.931-cp38-cp38-win_amd64.whl", hash = "sha256:d9d2b84b2007cea426e327d2483238f040c49405a6bf4074f605f0156c91a47a"}, + {file = "mypy-0.931-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:ff3bf387c14c805ab1388185dd22d6b210824e164d4bb324b195ff34e322d166"}, + {file = "mypy-0.931-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:5b56154f8c09427bae082b32275a21f500b24d93c88d69a5e82f3978018a0266"}, + {file = "mypy-0.931-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:8ca7f8c4b1584d63c9a0f827c37ba7a47226c19a23a753d52e5b5eddb201afcd"}, + {file = "mypy-0.931-cp39-cp39-win_amd64.whl", hash = "sha256:74f7eccbfd436abe9c352ad9fb65872cc0f1f0a868e9d9c44db0893440f0c697"}, + {file = "mypy-0.931-py3-none-any.whl", hash = 
"sha256:1171f2e0859cfff2d366da2c7092b06130f232c636a3f7301e3feb8b41f6377d"}, + {file = "mypy-0.931.tar.gz", hash = "sha256:0038b21890867793581e4cb0d810829f5fd4441aa75796b53033af3aa30430ce"}, ] mypy-extensions = [ {file = "mypy_extensions-0.4.3-py2.py3-none-any.whl", hash = "sha256:090fedd75945a69ae91ce1303b5824f428daf5a028d2f6ab8a299250a846f15d"}, @@ -1419,28 +1419,41 @@ pycodestyle = [ {file = "pycodestyle-2.8.0.tar.gz", hash = "sha256:eddd5847ef438ea1c7870ca7eb78a9d47ce0cdb4851a5523949f2601d0cbbe7f"}, ] pydantic = [ - {file = "pydantic-1.8.2-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:05ddfd37c1720c392f4e0d43c484217b7521558302e7069ce8d318438d297739"}, - {file = "pydantic-1.8.2-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:a7c6002203fe2c5a1b5cbb141bb85060cbff88c2d78eccbc72d97eb7022c43e4"}, - {file = "pydantic-1.8.2-cp36-cp36m-manylinux2014_i686.whl", hash = "sha256:589eb6cd6361e8ac341db97602eb7f354551482368a37f4fd086c0733548308e"}, - {file = "pydantic-1.8.2-cp36-cp36m-manylinux2014_x86_64.whl", hash = "sha256:10e5622224245941efc193ad1d159887872776df7a8fd592ed746aa25d071840"}, - {file = "pydantic-1.8.2-cp36-cp36m-win_amd64.whl", hash = "sha256:99a9fc39470010c45c161a1dc584997f1feb13f689ecf645f59bb4ba623e586b"}, - {file = "pydantic-1.8.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:a83db7205f60c6a86f2c44a61791d993dff4b73135df1973ecd9eed5ea0bda20"}, - {file = "pydantic-1.8.2-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:41b542c0b3c42dc17da70554bc6f38cbc30d7066d2c2815a94499b5684582ecb"}, - {file = "pydantic-1.8.2-cp37-cp37m-manylinux2014_i686.whl", hash = "sha256:ea5cb40a3b23b3265f6325727ddfc45141b08ed665458be8c6285e7b85bd73a1"}, - {file = "pydantic-1.8.2-cp37-cp37m-manylinux2014_x86_64.whl", hash = "sha256:18b5ea242dd3e62dbf89b2b0ec9ba6c7b5abaf6af85b95a97b00279f65845a23"}, - {file = "pydantic-1.8.2-cp37-cp37m-win_amd64.whl", hash = "sha256:234a6c19f1c14e25e362cb05c68afb7f183eb931dd3cd4605eafff055ebbf287"}, - {file = "pydantic-1.8.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:021ea0e4133e8c824775a0cfe098677acf6fa5a3cbf9206a376eed3fc09302cd"}, - {file = "pydantic-1.8.2-cp38-cp38-manylinux1_i686.whl", hash = "sha256:e710876437bc07bd414ff453ac8ec63d219e7690128d925c6e82889d674bb505"}, - {file = "pydantic-1.8.2-cp38-cp38-manylinux2014_i686.whl", hash = "sha256:ac8eed4ca3bd3aadc58a13c2aa93cd8a884bcf21cb019f8cfecaae3b6ce3746e"}, - {file = "pydantic-1.8.2-cp38-cp38-manylinux2014_x86_64.whl", hash = "sha256:4a03cbbe743e9c7247ceae6f0d8898f7a64bb65800a45cbdc52d65e370570820"}, - {file = "pydantic-1.8.2-cp38-cp38-win_amd64.whl", hash = "sha256:8621559dcf5afacf0069ed194278f35c255dc1a1385c28b32dd6c110fd6531b3"}, - {file = "pydantic-1.8.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:8b223557f9510cf0bfd8b01316bf6dd281cf41826607eada99662f5e4963f316"}, - {file = "pydantic-1.8.2-cp39-cp39-manylinux1_i686.whl", hash = "sha256:244ad78eeb388a43b0c927e74d3af78008e944074b7d0f4f696ddd5b2af43c62"}, - {file = "pydantic-1.8.2-cp39-cp39-manylinux2014_i686.whl", hash = "sha256:05ef5246a7ffd2ce12a619cbb29f3307b7c4509307b1b49f456657b43529dc6f"}, - {file = "pydantic-1.8.2-cp39-cp39-manylinux2014_x86_64.whl", hash = "sha256:54cd5121383f4a461ff7644c7ca20c0419d58052db70d8791eacbbe31528916b"}, - {file = "pydantic-1.8.2-cp39-cp39-win_amd64.whl", hash = "sha256:4be75bebf676a5f0f87937c6ddb061fa39cbea067240d98e298508c1bda6f3f3"}, - {file = "pydantic-1.8.2-py3-none-any.whl", hash = "sha256:fec866a0b59f372b7e776f2d7308511784dace622e0992a0b59ea3ccee0ae833"}, - {file = "pydantic-1.8.2.tar.gz", hash = 
"sha256:26464e57ccaafe72b7ad156fdaa4e9b9ef051f69e175dbbb463283000c05ab7b"}, + {file = "pydantic-1.9.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:cb23bcc093697cdea2708baae4f9ba0e972960a835af22560f6ae4e7e47d33f5"}, + {file = "pydantic-1.9.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:1d5278bd9f0eee04a44c712982343103bba63507480bfd2fc2790fa70cd64cf4"}, + {file = "pydantic-1.9.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ab624700dc145aa809e6f3ec93fb8e7d0f99d9023b713f6a953637429b437d37"}, + {file = "pydantic-1.9.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c8d7da6f1c1049eefb718d43d99ad73100c958a5367d30b9321b092771e96c25"}, + {file = "pydantic-1.9.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:3c3b035103bd4e2e4a28da9da7ef2fa47b00ee4a9cf4f1a735214c1bcd05e0f6"}, + {file = "pydantic-1.9.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:3011b975c973819883842c5ab925a4e4298dffccf7782c55ec3580ed17dc464c"}, + {file = "pydantic-1.9.0-cp310-cp310-win_amd64.whl", hash = "sha256:086254884d10d3ba16da0588604ffdc5aab3f7f09557b998373e885c690dd398"}, + {file = "pydantic-1.9.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:0fe476769acaa7fcddd17cadd172b156b53546ec3614a4d880e5d29ea5fbce65"}, + {file = "pydantic-1.9.0-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c8e9dcf1ac499679aceedac7e7ca6d8641f0193c591a2d090282aaf8e9445a46"}, + {file = "pydantic-1.9.0-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d1e4c28f30e767fd07f2ddc6f74f41f034d1dd6bc526cd59e63a82fe8bb9ef4c"}, + {file = "pydantic-1.9.0-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:c86229333cabaaa8c51cf971496f10318c4734cf7b641f08af0a6fbf17ca3054"}, + {file = "pydantic-1.9.0-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:c0727bda6e38144d464daec31dff936a82917f431d9c39c39c60a26567eae3ed"}, + {file = "pydantic-1.9.0-cp36-cp36m-win_amd64.whl", hash = "sha256:dee5ef83a76ac31ab0c78c10bd7d5437bfdb6358c95b91f1ba7ff7b76f9996a1"}, + {file = "pydantic-1.9.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:d9c9bdb3af48e242838f9f6e6127de9be7063aad17b32215ccc36a09c5cf1070"}, + {file = "pydantic-1.9.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2ee7e3209db1e468341ef41fe263eb655f67f5c5a76c924044314e139a1103a2"}, + {file = "pydantic-1.9.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0b6037175234850ffd094ca77bf60fb54b08b5b22bc85865331dd3bda7a02fa1"}, + {file = "pydantic-1.9.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:b2571db88c636d862b35090ccf92bf24004393f85c8870a37f42d9f23d13e032"}, + {file = "pydantic-1.9.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:8b5ac0f1c83d31b324e57a273da59197c83d1bb18171e512908fe5dc7278a1d6"}, + {file = "pydantic-1.9.0-cp37-cp37m-win_amd64.whl", hash = "sha256:bbbc94d0c94dd80b3340fc4f04fd4d701f4b038ebad72c39693c794fd3bc2d9d"}, + {file = "pydantic-1.9.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:e0896200b6a40197405af18828da49f067c2fa1f821491bc8f5bde241ef3f7d7"}, + {file = "pydantic-1.9.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:7bdfdadb5994b44bd5579cfa7c9b0e1b0e540c952d56f627eb227851cda9db77"}, + {file = "pydantic-1.9.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:574936363cd4b9eed8acdd6b80d0143162f2eb654d96cb3a8ee91d3e64bf4cf9"}, + {file = 
"pydantic-1.9.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c556695b699f648c58373b542534308922c46a1cda06ea47bc9ca45ef5b39ae6"}, + {file = "pydantic-1.9.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:f947352c3434e8b937e3aa8f96f47bdfe6d92779e44bb3f41e4c213ba6a32145"}, + {file = "pydantic-1.9.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:5e48ef4a8b8c066c4a31409d91d7ca372a774d0212da2787c0d32f8045b1e034"}, + {file = "pydantic-1.9.0-cp38-cp38-win_amd64.whl", hash = "sha256:96f240bce182ca7fe045c76bcebfa0b0534a1bf402ed05914a6f1dadff91877f"}, + {file = "pydantic-1.9.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:815ddebb2792efd4bba5488bc8fde09c29e8ca3227d27cf1c6990fc830fd292b"}, + {file = "pydantic-1.9.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:6c5b77947b9e85a54848343928b597b4f74fc364b70926b3c4441ff52620640c"}, + {file = "pydantic-1.9.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4c68c3bc88dbda2a6805e9a142ce84782d3930f8fdd9655430d8576315ad97ce"}, + {file = "pydantic-1.9.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5a79330f8571faf71bf93667d3ee054609816f10a259a109a0738dac983b23c3"}, + {file = "pydantic-1.9.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:f5a64b64ddf4c99fe201ac2724daada8595ada0d102ab96d019c1555c2d6441d"}, + {file = "pydantic-1.9.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a733965f1a2b4090a5238d40d983dcd78f3ecea221c7af1497b845a9709c1721"}, + {file = "pydantic-1.9.0-cp39-cp39-win_amd64.whl", hash = "sha256:2cc6a4cb8a118ffec2ca5fcb47afbacb4f16d0ab8b7350ddea5e8ef7bcc53a16"}, + {file = "pydantic-1.9.0-py3-none-any.whl", hash = "sha256:085ca1de245782e9b46cefcf99deecc67d418737a1fd3f6a4f511344b613a5b3"}, + {file = "pydantic-1.9.0.tar.gz", hash = "sha256:742645059757a56ecd886faf4ed2441b9c0cd406079c2b4bee51bcc3fbcd510a"}, ] pyflakes = [ {file = "pyflakes-2.4.0-py2.py3-none-any.whl", hash = "sha256:3bb3a3f256f4b7968c9c788781e4ff07dce46bdf12339dcda61053375426ee2e"}, diff --git a/pyproject.toml b/pyproject.toml index 0de47ef67e7..32ca7a017f4 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "aws_lambda_powertools" -version = "1.24.0" +version = "1.24.1" description = "A suite of utilities for AWS Lambda functions to ease adopting best practices such as tracing, structured logging, custom metrics, batching, idempotency, feature flags, and more." 
 authors = ["Amazon Web Services"]
 include = ["aws_lambda_powertools/py.typed", "THIRD-PARTY-LICENSES"]
@@ -53,7 +53,7 @@ flake8-bugbear = "^21.11.29"
 mkdocs-material = "^7.3.6"
 mkdocs-git-revision-date-plugin = "^0.3.1"
 mike = "^0.6.0"
-mypy = "^0.930"
+mypy = "^0.931"
 
 [tool.poetry.extras]
 
diff --git a/tests/functional/test_logger_utils.py b/tests/functional/test_logger_utils.py
new file mode 100644
index 00000000000..1317fefc6ab
--- /dev/null
+++ b/tests/functional/test_logger_utils.py
@@ -0,0 +1,193 @@
+import io
+import json
+import logging
+import random
+import string
+from enum import Enum
+
+import pytest
+
+from aws_lambda_powertools import Logger
+from aws_lambda_powertools.logging import formatter, utils
+
+
+@pytest.fixture
+def stdout():
+    return io.StringIO()
+
+
+@pytest.fixture
+def log_level():
+    class LogLevel(Enum):
+        NOTSET = 0
+        INFO = 20
+        WARNING = 30
+        CRITICAL = 50
+
+    return LogLevel
+
+
+@pytest.fixture
+def logger(stdout, log_level):
+    def _logger():
+        logging.basicConfig(stream=stdout, level=log_level.NOTSET.value)
+        logger = logging.getLogger(name=service_name())
+        return logger
+
+    return _logger
+
+
+def capture_logging_output(stdout):
+    return json.loads(stdout.getvalue().strip())
+
+
+def capture_multiple_logging_statements_output(stdout):
+    return [json.loads(line.strip()) for line in stdout.getvalue().split("\n") if line]
+
+
+def service_name():
+    chars = string.ascii_letters + string.digits
+    return "".join(random.SystemRandom().choice(chars) for _ in range(15))
+
+
+def test_copy_config_to_ext_loggers(stdout, logger, log_level):
+
+    msg = "test message"
+
+    # GIVEN external loggers and a powertools logger initialized
+    logger_1 = logger()
+    logger_2 = logger()
+
+    powertools_logger = Logger(service=service_name(), level=log_level.INFO.value, stream=stdout)
+
+    # WHEN configuration copied from powertools logger to ALL external loggers AND our external loggers used
+    utils.copy_config_to_registered_loggers(source_logger=powertools_logger)
+    logger_1.info(msg)
+    logger_2.info(msg)
+    logs = capture_multiple_logging_statements_output(stdout)
+
+    # THEN
+    for index, logger in enumerate([logger_1, logger_2]):
+        assert len(logger.handlers) == 1
+        assert type(logger.handlers[0]) is logging.StreamHandler
+        assert type(logger.handlers[0].formatter) is formatter.LambdaPowertoolsFormatter
+        assert logger.level == log_level.INFO.value
+        assert logs[index]["message"] == msg
+        assert logs[index]["level"] == log_level.INFO.name
+
+
+def test_copy_config_to_ext_loggers_include(stdout, logger, log_level):
+
+    msg = "test message"
+
+    # GIVEN an external logger and a powertools logger initialized
+    logger = logger()
+    powertools_logger = Logger(service=service_name(), level=log_level.INFO.value, stream=stdout)
+
+    # WHEN configuration copied from powertools logger to the included external logger AND our external logger used
+    utils.copy_config_to_registered_loggers(source_logger=powertools_logger, include={logger.name})
+    logger.info(msg)
+    log = capture_logging_output(stdout)
+
+    # THEN
+    assert len(logger.handlers) == 1
+    assert type(logger.handlers[0]) is logging.StreamHandler
+    assert type(logger.handlers[0].formatter) is formatter.LambdaPowertoolsFormatter
+    assert logger.level == log_level.INFO.value
+    assert log["message"] == msg
+    assert log["level"] == log_level.INFO.name
+
+
+def test_copy_config_to_ext_loggers_wrong_include(stdout, logger, log_level):
+
+    # GIVEN an external logger and a powertools logger initialized
+    logger = logger()
+    powertools_logger = Logger(service=service_name(), level=log_level.INFO.value, stream=stdout)
+
+    # WHEN configuration copied from powertools logger with include pointing to a non-existing logger
+    utils.copy_config_to_registered_loggers(source_logger=powertools_logger, include={"non-existing-logger"})
+
+    # THEN
+    assert not logger.handlers
+
+
+def test_copy_config_to_ext_loggers_exclude(stdout, logger, log_level):
+
+    # GIVEN an external logger and a powertools logger initialized
+    logger = logger()
+    powertools_logger = Logger(service=service_name(), level=log_level.INFO.value, stream=stdout)
+
+    # WHEN configuration copied from powertools logger to ALL external loggers except the excluded one
+    utils.copy_config_to_registered_loggers(source_logger=powertools_logger, exclude={logger.name})
+
+    # THEN
+    assert not logger.handlers
+
+
+def test_copy_config_to_ext_loggers_include_exclude(stdout, logger, log_level):
+
+    msg = "test message"
+
+    # GIVEN external loggers and a powertools logger initialized
+    logger_1 = logger()
+    logger_2 = logger()
+
+    powertools_logger = Logger(service=service_name(), level=log_level.INFO.value, stream=stdout)
+
+    # WHEN configuration copied from powertools logger to included-but-not-excluded loggers AND our external logger used
+    utils.copy_config_to_registered_loggers(
+        source_logger=powertools_logger, include={logger_1.name, logger_2.name}, exclude={logger_1.name}
+    )
+    logger_2.info(msg)
+    log = capture_logging_output(stdout)
+
+    # THEN
+    assert not logger_1.handlers
+    assert len(logger_2.handlers) == 1
+    assert type(logger_2.handlers[0]) is logging.StreamHandler
+    assert type(logger_2.handlers[0].formatter) is formatter.LambdaPowertoolsFormatter
+    assert logger_2.level == log_level.INFO.value
+    assert log["message"] == msg
+    assert log["level"] == log_level.INFO.name
+
+
+def test_copy_config_to_ext_loggers_clean_old_handlers(stdout, logger, log_level):
+
+    # GIVEN an external logger with a handler and a powertools logger initialized
+    logger = logger()
+    handler = logging.FileHandler("logfile")
+    logger.addHandler(handler)
+    powertools_logger = Logger(service=service_name(), level=log_level.INFO.value, stream=stdout)
+
+    # WHEN configuration copied from powertools logger to ALL external loggers
+    utils.copy_config_to_registered_loggers(source_logger=powertools_logger)
+
+    # THEN
+    assert len(logger.handlers) == 1
+    assert type(logger.handlers[0]) is logging.StreamHandler
+    assert type(logger.handlers[0].formatter) is formatter.LambdaPowertoolsFormatter
+
+
+def test_copy_config_to_ext_loggers_custom_log_level(stdout, logger, log_level):
+
+    msg = "test message"
+
+    # GIVEN an external logger and a powertools logger initialized
+    logger = logger()
+    powertools_logger = Logger(service=service_name(), level=log_level.CRITICAL.value, stream=stdout)
+    level = log_level.WARNING.name
+
+    # WHEN configuration copied from powertools logger to ALL external loggers
+    # AND our external logger used with custom log_level
+    utils.copy_config_to_registered_loggers(source_logger=powertools_logger, include={logger.name}, log_level=level)
+    logger.warning(msg)
+    log = capture_logging_output(stdout)
+
+    # THEN
+    assert len(logger.handlers) == 1
+    assert type(logger.handlers[0]) is logging.StreamHandler
+    assert type(logger.handlers[0].formatter) is formatter.LambdaPowertoolsFormatter
+    assert powertools_logger.level == log_level.CRITICAL.value
+    assert logger.level == log_level.WARNING.value
+    assert log["message"] == msg
+    assert log["level"] == log_level.WARNING.name
diff --git a/tests/functional/test_utilities_batch.py b/tests/functional/test_utilities_batch.py index 3728af3111d..d32a044279b 100644 --- a/tests/functional/test_utilities_batch.py +++ b/tests/functional/test_utilities_batch.py @@ -414,7 +414,8 @@ def test_batch_processor_middleware_with_failure(sqs_event_factory, record_handl # GIVEN first_record = SQSRecord(sqs_event_factory("fail")) second_record = SQSRecord(sqs_event_factory("success")) - event = {"Records": [first_record.raw_event, second_record.raw_event]} + third_record = SQSRecord(sqs_event_factory("fail")) + event = {"Records": [first_record.raw_event, second_record.raw_event, third_record.raw_event]} processor = BatchProcessor(event_type=EventType.SQS) @@ -426,7 +427,7 @@ def lambda_handler(event, context): result = lambda_handler(event, {}) # THEN - assert len(result["batchItemFailures"]) == 1 + assert len(result["batchItemFailures"]) == 2 def test_batch_processor_context_success_only(sqs_event_factory, record_handler): @@ -453,7 +454,8 @@ def test_batch_processor_context_with_failure(sqs_event_factory, record_handler) # GIVEN first_record = SQSRecord(sqs_event_factory("failure")) second_record = SQSRecord(sqs_event_factory("success")) - records = [first_record.raw_event, second_record.raw_event] + third_record = SQSRecord(sqs_event_factory("fail")) + records = [first_record.raw_event, second_record.raw_event, third_record.raw_event] processor = BatchProcessor(event_type=EventType.SQS) # WHEN @@ -462,8 +464,10 @@ def test_batch_processor_context_with_failure(sqs_event_factory, record_handler) # THEN assert processed_messages[1] == ("success", second_record.body, second_record.raw_event) - assert len(batch.fail_messages) == 1 - assert batch.response() == {"batchItemFailures": [{"itemIdentifier": first_record.message_id}]} + assert len(batch.fail_messages) == 2 + assert batch.response() == { + "batchItemFailures": [{"itemIdentifier": first_record.message_id}, {"itemIdentifier": third_record.message_id}] + } def test_batch_processor_kinesis_context_success_only(kinesis_event_factory, kinesis_record_handler): @@ -491,8 +495,9 @@ def test_batch_processor_kinesis_context_with_failure(kinesis_event_factory, kin # GIVEN first_record = KinesisStreamRecord(kinesis_event_factory("failure")) second_record = KinesisStreamRecord(kinesis_event_factory("success")) + third_record = KinesisStreamRecord(kinesis_event_factory("failure")) - records = [first_record.raw_event, second_record.raw_event] + records = [first_record.raw_event, second_record.raw_event, third_record.raw_event] processor = BatchProcessor(event_type=EventType.KinesisDataStreams) # WHEN @@ -501,15 +506,21 @@ def test_batch_processor_kinesis_context_with_failure(kinesis_event_factory, kin # THEN assert processed_messages[1] == ("success", b64_to_str(second_record.kinesis.data), second_record.raw_event) - assert len(batch.fail_messages) == 1 - assert batch.response() == {"batchItemFailures": [{"itemIdentifier": first_record.kinesis.sequence_number}]} + assert len(batch.fail_messages) == 2 + assert batch.response() == { + "batchItemFailures": [ + {"itemIdentifier": first_record.kinesis.sequence_number}, + {"itemIdentifier": third_record.kinesis.sequence_number}, + ] + } def test_batch_processor_kinesis_middleware_with_failure(kinesis_event_factory, kinesis_record_handler): # GIVEN first_record = KinesisStreamRecord(kinesis_event_factory("failure")) second_record = KinesisStreamRecord(kinesis_event_factory("success")) - event = {"Records": [first_record.raw_event, 
second_record.raw_event]} + third_record = KinesisStreamRecord(kinesis_event_factory("failure")) + event = {"Records": [first_record.raw_event, second_record.raw_event, third_record.raw_event]} processor = BatchProcessor(event_type=EventType.KinesisDataStreams) @@ -521,7 +532,7 @@ def lambda_handler(event, context): result = lambda_handler(event, {}) # THEN - assert len(result["batchItemFailures"]) == 1 + assert len(result["batchItemFailures"]) == 2 def test_batch_processor_dynamodb_context_success_only(dynamodb_event_factory, dynamodb_record_handler): @@ -548,7 +559,8 @@ def test_batch_processor_dynamodb_context_with_failure(dynamodb_event_factory, d # GIVEN first_record = dynamodb_event_factory("failure") second_record = dynamodb_event_factory("success") - records = [first_record, second_record] + third_record = dynamodb_event_factory("failure") + records = [first_record, second_record, third_record] processor = BatchProcessor(event_type=EventType.DynamoDBStreams) # WHEN @@ -557,15 +569,21 @@ def test_batch_processor_dynamodb_context_with_failure(dynamodb_event_factory, d # THEN assert processed_messages[1] == ("success", second_record["dynamodb"]["NewImage"]["Message"]["S"], second_record) - assert len(batch.fail_messages) == 1 - assert batch.response() == {"batchItemFailures": [{"itemIdentifier": first_record["dynamodb"]["SequenceNumber"]}]} + assert len(batch.fail_messages) == 2 + assert batch.response() == { + "batchItemFailures": [ + {"itemIdentifier": first_record["dynamodb"]["SequenceNumber"]}, + {"itemIdentifier": third_record["dynamodb"]["SequenceNumber"]}, + ] + } def test_batch_processor_dynamodb_middleware_with_failure(dynamodb_event_factory, dynamodb_record_handler): # GIVEN first_record = dynamodb_event_factory("failure") second_record = dynamodb_event_factory("success") - event = {"Records": [first_record, second_record]} + third_record = dynamodb_event_factory("failure") + event = {"Records": [first_record, second_record, third_record]} processor = BatchProcessor(event_type=EventType.DynamoDBStreams) @@ -577,7 +595,7 @@ def lambda_handler(event, context): result = lambda_handler(event, {}) # THEN - assert len(result["batchItemFailures"]) == 1 + assert len(result["batchItemFailures"]) == 2 def test_batch_processor_context_model(sqs_event_factory, order_event_factory): @@ -639,8 +657,9 @@ def record_handler(record: OrderSqs): order_event = order_event_factory({"type": "success"}) order_event_fail = order_event_factory({"type": "fail"}) first_record = sqs_event_factory(order_event_fail) + third_record = sqs_event_factory(order_event_fail) second_record = sqs_event_factory(order_event) - records = [first_record, second_record] + records = [first_record, second_record, third_record] # WHEN processor = BatchProcessor(event_type=EventType.SQS, model=OrderSqs) @@ -648,8 +667,13 @@ def record_handler(record: OrderSqs): batch.process() # THEN - assert len(batch.fail_messages) == 1 - assert batch.response() == {"batchItemFailures": [{"itemIdentifier": first_record["messageId"]}]} + assert len(batch.fail_messages) == 2 + assert batch.response() == { + "batchItemFailures": [ + {"itemIdentifier": first_record["messageId"]}, + {"itemIdentifier": third_record["messageId"]}, + ] + } def test_batch_processor_dynamodb_context_model(dynamodb_event_factory, order_event_factory): @@ -726,7 +750,8 @@ def record_handler(record: OrderDynamoDBRecord): order_event_fail = order_event_factory({"type": "fail"}) first_record = dynamodb_event_factory(order_event_fail) second_record = 
dynamodb_event_factory(order_event) - records = [first_record, second_record] + third_record = dynamodb_event_factory(order_event_fail) + records = [first_record, second_record, third_record] # WHEN processor = BatchProcessor(event_type=EventType.DynamoDBStreams, model=OrderDynamoDBRecord) @@ -734,8 +759,13 @@ def record_handler(record: OrderDynamoDBRecord): batch.process() # THEN - assert len(batch.fail_messages) == 1 - assert batch.response() == {"batchItemFailures": [{"itemIdentifier": first_record["dynamodb"]["SequenceNumber"]}]} + assert len(batch.fail_messages) == 2 + assert batch.response() == { + "batchItemFailures": [ + {"itemIdentifier": first_record["dynamodb"]["SequenceNumber"]}, + {"itemIdentifier": third_record["dynamodb"]["SequenceNumber"]}, + ] + } def test_batch_processor_kinesis_context_parser_model(kinesis_event_factory, order_event_factory): @@ -807,7 +837,8 @@ def record_handler(record: OrderKinesisRecord): first_record = kinesis_event_factory(order_event_fail) second_record = kinesis_event_factory(order_event) - records = [first_record, second_record] + third_record = kinesis_event_factory(order_event_fail) + records = [first_record, second_record, third_record] # WHEN processor = BatchProcessor(event_type=EventType.KinesisDataStreams, model=OrderKinesisRecord) @@ -815,8 +846,13 @@ def record_handler(record: OrderKinesisRecord): batch.process() # THEN - assert len(batch.fail_messages) == 1 - assert batch.response() == {"batchItemFailures": [{"itemIdentifier": first_record["kinesis"]["sequenceNumber"]}]} + assert len(batch.fail_messages) == 2 + assert batch.response() == { + "batchItemFailures": [ + {"itemIdentifier": first_record["kinesis"]["sequenceNumber"]}, + {"itemIdentifier": third_record["kinesis"]["sequenceNumber"]}, + ] + } def test_batch_processor_error_when_entire_batch_fails(sqs_event_factory, record_handler):