From a35b989cf7df2ce7aace36057dc83189ef79f780 Mon Sep 17 00:00:00 2001 From: heitorlessa Date: Fri, 8 Jul 2022 15:34:57 +0200 Subject: [PATCH 1/5] docs(tracer): match code snippet name with filename --- docs/core/tracer.md | 16 ++++++++-------- examples/tracer/src/tracer_reuse.py | 2 +- ...r_reuse_payment.py => tracer_reuse_module.py} | 0 3 files changed, 9 insertions(+), 9 deletions(-) rename examples/tracer/src/{tracer_reuse_payment.py => tracer_reuse_module.py} (100%) diff --git a/docs/core/tracer.md b/docs/core/tracer.md index 7664231cc31..8fbfc0e29f7 100644 --- a/docs/core/tracer.md +++ b/docs/core/tracer.md @@ -77,19 +77,19 @@ You can trace synchronous functions using the `capture_method` decorator. You can trace asynchronous functions and generator functions (including context managers) using `capture_method`. -=== "Async" +=== "capture_method_async.py" ```python hl_lines="9" --8<-- "examples/tracer/src/capture_method_async.py" ``` -=== "Context manager" +=== "capture_method_context_manager.py" ```python hl_lines="12-13" --8<-- "examples/tracer/src/capture_method_context_manager.py" ``` -=== "Generators" +=== "capture_method_generators.py" ```python hl_lines="9" --8<-- "examples/tracer/src/capture_method_generators.py" @@ -116,13 +116,13 @@ Use **`capture_response=False`** parameter in both `capture_lambda_handler` and 2. You might manipulate **streaming objects that can be read only once**; this prevents subsequent calls from being empty 3. You might return **more than 64K** of data _e.g., `message too long` error_ -=== "sensitive_data_scenario.py" +=== "disable_capture_response.py" ```python hl_lines="8 15" --8<-- "examples/tracer/src/disable_capture_response.py" ``` -=== "streaming_object_scenario.py" +=== "disable_capture_response_streaming_body.py" ```python hl_lines="19" --8<-- "examples/tracer/src/disable_capture_response_streaming_body.py" @@ -192,17 +192,17 @@ Tracer keeps a copy of its configuration after the first initialization. This is Tracer will automatically ignore imported modules that have been patched. -=== "handler.py" +=== "tracer_reuse.py" ```python hl_lines="1 6" --8<-- "examples/tracer/src/tracer_reuse.py" ``` -=== "tracer_reuse_payment.py" +=== "tracer_reuse_module.py" A new instance of Tracer will be created but will reuse the previous Tracer instance configuration, similar to a Singleton. 
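For context, the reuse pattern at play here can be sketched as follows. File and function names are illustrative placeholders rather than the repository's actual snippet, and the service name is assumed to come from the `POWERTOOLS_SERVICE_NAME` environment variable:

```python
# payment_module.py: a module that creates its own Tracer
from aws_lambda_powertools import Tracer

tracer = Tracer()  # reuses the first-initialized Tracer configuration (Singleton-like)

@tracer.capture_method
def collect_payment(charge_id: str) -> str:
    return f"paid-{charge_id}"

# handler.py: imports the module; both share one configuration
from payment_module import collect_payment
from aws_lambda_powertools import Tracer
from aws_lambda_powertools.utilities.typing import LambdaContext

tracer = Tracer()

@tracer.capture_lambda_handler
def lambda_handler(event: dict, context: LambdaContext) -> str:
    return collect_payment(charge_id=event["charge_id"])
```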
```python hl_lines="3" - --8<-- "examples/tracer/src/tracer_reuse_payment.py" + --8<-- "examples/tracer/src/tracer_reuse_module.py" ``` ## Testing your code diff --git a/examples/tracer/src/tracer_reuse.py b/examples/tracer/src/tracer_reuse.py index 5f12f82b714..bdfe7bc9d91 100644 --- a/examples/tracer/src/tracer_reuse.py +++ b/examples/tracer/src/tracer_reuse.py @@ -1,4 +1,4 @@ -from tracer_reuse_payment import collect_payment +from tracer_reuse_module import collect_payment from aws_lambda_powertools import Tracer from aws_lambda_powertools.utilities.typing import LambdaContext diff --git a/examples/tracer/src/tracer_reuse_payment.py b/examples/tracer/src/tracer_reuse_module.py similarity index 100% rename from examples/tracer/src/tracer_reuse_payment.py rename to examples/tracer/src/tracer_reuse_module.py From 869b27d2e2dbf7681e727de469731f7888d61eb6 Mon Sep 17 00:00:00 2001 From: heitorlessa Date: Fri, 8 Jul 2022 15:35:23 +0200 Subject: [PATCH 2/5] docs(logger): match code snippet name with filename --- docs/core/logger.md | 111 +++++++++--------- ...son => append_and_remove_keys_output.json} | 0 ....json => set_correlation_id_jmespath.json} | 0 ...nt.json => set_correlation_id_method.json} | 0 4 files changed, 58 insertions(+), 53 deletions(-) rename examples/logger/src/{append_and_remove_keys.json => append_and_remove_keys_output.json} (100%) rename examples/logger/src/{set_correlation_id_jmespath_event.json => set_correlation_id_jmespath.json} (100%) rename examples/logger/src/{set_correlation_id_method_event.json => set_correlation_id_method.json} (100%) diff --git a/docs/core/logger.md b/docs/core/logger.md index b09cc6c85d3..c699568b349 100644 --- a/docs/core/logger.md +++ b/docs/core/logger.md @@ -48,13 +48,13 @@ Your Logger will include the following keys to your structured logging: You can enrich your structured logs with key Lambda context information via `inject_lambda_context`. -=== "collect.py" +=== "inject_lambda_context.py" ```python hl_lines="7" --8<-- "examples/logger/src/inject_lambda_context.py" ``` -=== "Example CloudWatch Logs excerpt" +=== "inject_lambda_context_output.json" ```json hl_lines="8-12 17-20" --8<-- "examples/logger/src/inject_lambda_context_output.json" @@ -88,19 +88,19 @@ You can set a Correlation ID using `correlation_id_path` param by passing a [JME ???+ tip You can retrieve correlation IDs via `get_correlation_id` method -=== "collect.py" +=== "set_correlation_id.py" ```python hl_lines="7" --8<-- "examples/logger/src/set_correlation_id.py" ``` -=== "Example Event" +=== "set_correlation_id_event.json" ```json hl_lines="3" --8<-- "examples/logger/src/set_correlation_id_event.json" ``` -=== "Example CloudWatch Logs excerpt" +=== "set_correlation_id_output.json" ```json hl_lines="12" --8<-- "examples/logger/src/set_correlation_id_output.json" @@ -110,18 +110,19 @@ You can set a Correlation ID using `correlation_id_path` param by passing a [JME You can also use `set_correlation_id` method to inject it anywhere else in your code. Example below uses [Event Source Data Classes utility](../utilities/data_classes.md) to easily access events properties. 
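In essence, the pattern is the following (a sketch only, assuming an API Gateway proxy event so the data class can read the request ID):

```python
from aws_lambda_powertools import Logger
from aws_lambda_powertools.utilities.data_classes import APIGatewayProxyEvent
from aws_lambda_powertools.utilities.typing import LambdaContext

logger = Logger()

def lambda_handler(event: dict, context: LambdaContext) -> dict:
    request = APIGatewayProxyEvent(event)
    # any string works as a correlation ID; API Gateway's request ID is a common choice
    logger.set_correlation_id(request.request_context.request_id)
    logger.info("Collecting payment")
    return {"statusCode": 200, "body": "ok"}
```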
-=== "collect.py" +=== "set_correlation_id_method.py" ```python hl_lines="11" --8<-- "examples/logger/src/set_correlation_id_method.py" ``` -=== "Example Event" + +=== "set_correlation_id_method.json" ```json hl_lines="3" - --8<-- "examples/logger/src/set_correlation_id_method_event.json" + --8<-- "examples/logger/src/set_correlation_id_method.json" ``` -=== "Example CloudWatch Logs excerpt" +=== "set_correlation_id_method_output.json" ```json hl_lines="7" --8<-- "examples/logger/src/set_correlation_id_method_output.json" @@ -131,19 +132,19 @@ You can also use `set_correlation_id` method to inject it anywhere else in your To ease routine tasks like extracting correlation ID from popular event sources, we provide [built-in JMESPath expressions](#built-in-correlation-id-expressions). -=== "collect.py" +=== "set_correlation_id_jmespath.py" ```python hl_lines="2 8" --8<-- "examples/logger/src/set_correlation_id_jmespath.py" ``` -=== "Example Event" +=== "set_correlation_id_jmespath.json" ```json hl_lines="3" - --8<-- "examples/logger/src/set_correlation_id_jmespath_event.json" + --8<-- "examples/logger/src/set_correlation_id_jmespath.json" ``` -=== "Example CloudWatch Logs excerpt" +=== "set_correlation_id_jmespath_output.json" ```json hl_lines="12" --8<-- "examples/logger/src/set_correlation_id_jmespath_output.json" @@ -166,12 +167,13 @@ You can append additional keys using either mechanism: You can append your own keys to your existing Logger via `append_keys(**additional_key_values)` method. -=== "collect.py" +=== "append_keys.py" ```python hl_lines="12" --8<-- "examples/logger/src/append_keys.py" ``` -=== "Example CloudWatch Logs excerpt" + +=== "append_keys_output.json" ```json hl_lines="7" --8<-- "examples/logger/src/append_keys_output.json" @@ -191,12 +193,13 @@ It accepts any dictionary, and all keyword arguments will be added as part of th ???+ info Any keyword argument added using `extra` will not be persisted for subsequent messages. -=== "extra_parameter.py" +=== "append_keys_extra.py" ```python hl_lines="9" --8<-- "examples/logger/src/append_keys_extra.py" ``` -=== "Example CloudWatch Logs excerpt" + +=== "append_keys_extra_output.json" ```json hl_lines="7" --8<-- "examples/logger/src/append_keys_extra_output.json" @@ -206,13 +209,13 @@ It accepts any dictionary, and all keyword arguments will be added as part of th You can remove any additional key from Logger state using `remove_keys`. -=== "collect.py" +=== "remove_keys.py" ```python hl_lines="11" --8<-- "examples/logger/src/remove_keys.py" ``` -=== "Example CloudWatch Logs excerpt" +=== "remove_keys_output.json" ```json hl_lines="7" --8<-- "examples/logger/src/remove_keys_output.json" @@ -232,19 +235,19 @@ Logger is commonly initialized in the global scope. Due to [Lambda Execution Con You can either avoid running any code as part of Lambda Layers global scope, or override keys with their latest value as part of handler's execution. -=== "collect.py" +=== "clear_state.py" ```python hl_lines="7 10" --8<-- "examples/logger/src/clear_state.py" ``` -=== "#1 request" +=== "clear_state_event_one.json" ```json hl_lines="7" --8<-- "examples/logger/src/clear_state_event_one.json" ``` -=== "#2 request" +=== "clear_state_event_two.json" ```json hl_lines="7" --8<-- "examples/logger/src/clear_state_event_two.json" @@ -257,13 +260,13 @@ Use `logger.exception` method to log contextual information about exceptions. 
Lo ???+ tip You can use your preferred Log Analytics tool to enumerate and visualize exceptions across all your services using `exception_name` key. -=== "collect.py" +=== "logging_exceptions.py" ```python hl_lines="15" --8<-- "examples/logger/src/logging_exceptions.py" ``` -=== "Example CloudWatch Logs excerpt" +=== "logging_exceptions_output.json" ```json hl_lines="7-8" --8<-- "examples/logger/src/logging_exceptions_output.json" @@ -292,19 +295,19 @@ Similar to [Tracer](./tracer.md#reusing-tracer-across-your-code), a new instance Notice in the CloudWatch Logs output how `payment_id` appeared as expected when logging in `collect.py`. -=== "collect.py" +=== "logger_reuse.py" ```python hl_lines="1 9 11 12" --8<-- "examples/logger/src/logger_reuse.py" ``` -=== "payment.py" +=== "logger_reuse_payment.py" ```python hl_lines="3 7" --8<-- "examples/logger/src/logger_reuse_payment.py" ``` -=== "Example CloudWatch Logs excerpt" +=== "logger_reuse_output.json" ```json hl_lines="12" --8<-- "examples/logger/src/logger_reuse_output.json" @@ -313,7 +316,7 @@ Notice in the CloudWatch Logs output how `payment_id` appeared as expected when ???+ note "Note: About Child Loggers" Coming from standard library, you might be used to use `logging.getLogger(__name__)`. This will create a new instance of a Logger with a different name. - In Powertools, you can have the same effect by using `child=True` parameter: `Logger(child=True)`. This creates a new Logger instance named after `service.`. All state changes will be propagated bi-directonally between Child and Parent. + In Powertools, you can have the same effect by using `child=True` parameter: `Logger(child=True)`. This creates a new Logger instance named after `service.`. All state changes will be propagated bi-directionally between Child and Parent. For that reason, there could be side effects depending on the order the Child Logger is instantiated, because Child Loggers don't have a handler. @@ -337,15 +340,15 @@ Sampling decision happens at the Logger initialization. 
This means sampling may ???+ note Open a [feature request](https://github.com/awslabs/aws-lambda-powertools-python/issues/new?assignees=&labels=feature-request%2C+triage&template=feature_request.md&title=) if you want Logger to calculate sampling for every invocation -=== "collect.py" +=== "sampling_debug_logs.py" ```python hl_lines="6 10" - --8<-- "examples/logger/src/logger_reuse.py" + --8<-- "examples/logger/src/sampling_debug_logs.py" ``` -=== "Example CloudWatch Logs excerpt" +=== "sampling_debug_logs_output.json" - ```json hl_lines="3 5 13 16 25" + ```json hl_lines="3 5 13 16 26" --8<-- "examples/logger/src/sampling_debug_logs_output.json" ``` @@ -393,13 +396,13 @@ For child Loggers, we introspect the name of your module where `Logger(child=Tru ???+ danger A common issue when migrating from other Loggers is that `service` might be defined in the parent Logger (no child param), and not defined in the child Logger: -=== "incorrect_logger_inheritance.py" +=== "logging_inheritance_bad.py" ```python hl_lines="1 9" --8<-- "examples/logger/src/logging_inheritance_bad.py" ``` -=== "my_other_module.py" +=== "logging_inheritance_module.py" ```python hl_lines="1 9" --8<-- "examples/logger/src/logging_inheritance_module.py" @@ -412,13 +415,13 @@ In this case, Logger will register a Logger named `payment`, and a Logger named Do this instead: -=== "correct_logger_inheritance.py" +=== "logging_inheritance_good.py" ```python hl_lines="1 9" --8<-- "examples/logger/src/logging_inheritance_good.py" ``` -=== "my_other_module.py" +=== "logging_inheritance_module.py" ```python hl_lines="1 9" --8<-- "examples/logger/src/logging_inheritance_module.py" @@ -435,13 +438,13 @@ You might want to continue to use the same date formatting style, or override `l Logger allows you to either change the format or suppress the following keys altogether at the initialization: `location`, `timestamp`, `level`, `xray_trace_id`. -=== "lambda_handler.py" +=== "overriding_log_records.py" ```python hl_lines="7 10" --8<-- "examples/logger/src/overriding_log_records.py" ``` -=== "Example CloudWatch Logs excerpt" +=== "overriding_log_records_output.json" ```json hl_lines="3 5" --8<-- "examples/logger/src/overriding_log_records_output.json" @@ -451,12 +454,13 @@ Logger allows you to either change the format or suppress the following keys alt You can change the order of [standard Logger keys](#standard-structured-keys) or any keys that will be appended later at runtime via the `log_record_order` parameter. -=== "app.py" +=== "reordering_log_keys.py" ```python hl_lines="5 8" --8<-- "examples/logger/src/reordering_log_keys.py" ``` -=== "Example CloudWatch Logs excerpt" + +=== "reordering_log_keys_output.json" ```json hl_lines="3 10" --8<-- "examples/logger/src/reordering_log_keys_output.json" @@ -466,13 +470,13 @@ You can change the order of [standard Logger keys](#standard-structured-keys) or By default, this Logger and standard logging library emits records using local time timestamp. You can override this behavior via `utc` parameter: -=== "app.py" +=== "setting_utc_timestamp.py" ```python hl_lines="6" --8<-- "examples/logger/src/setting_utc_timestamp.py" ``` -=== "Example CloudWatch Logs excerpt" +=== "setting_utc_timestamp_output.json" ```json hl_lines="6 13" --8<-- "examples/logger/src/setting_utc_timestamp_output.json" @@ -482,13 +486,13 @@ By default, this Logger and standard logging library emits records using local t By default, Logger uses `str` to handle values non-serializable by JSON. 
You can override this behavior via `json_default` parameter by passing a Callable: -=== "app.py" +=== "unserializable_values.py" ```python hl_lines="6 17" --8<-- "examples/logger/src/unserializable_values.py" ``` -=== "Example CloudWatch Logs excerpt" +=== "unserializable_values_output.json" ```json hl_lines="4-6" --8<-- "examples/logger/src/unserializable_values_output.json" @@ -511,13 +515,13 @@ By default, Logger uses [LambdaPowertoolsFormatter](#lambdapowertoolsformatter) For these, you can override the `serialize` method from [LambdaPowertoolsFormatter](#lambdapowertoolsformatter). -=== "custom_formatter.py" +=== "bring_your_own_formatter.py" ```python hl_lines="2 5-6 12" --8<-- "examples/logger/src/bring_your_own_formatter.py" ``` -=== "Example CloudWatch Logs excerpt" +=== "bring_your_own_formatter_output.json" ```json hl_lines="6" --8<-- "examples/logger/src/bring_your_own_formatter_output.json" ``` @@ -529,13 +533,13 @@ For exceptional cases where you want to completely replace our formatter logic, ???+ warning You will need to implement `append_keys`, `clear_state`, override `format`, and optionally `remove_keys` to keep the same feature set Powertools Logger provides. This also means keeping state of logging keys added. -=== "collect.py" +=== "bring_your_own_formatter_from_scratch.py" ```python hl_lines="6 9 11-12 15 19 23 26 38" --8<-- "examples/logger/src/bring_your_own_formatter_from_scratch.py" ``` -=== "Example CloudWatch Logs excerpt" +=== "bring_your_own_formatter_from_scratch_output.json" ```json hl_lines="2-4" --8<-- "examples/logger/src/bring_your_own_formatter_from_scratch_output.json" @@ -615,15 +619,16 @@ You can include any of these logging attributes as key value arguments (`kwargs` You can also add them later anywhere in your code with `append_keys`, or remove them with `remove_keys` methods. -=== "collect.py" +=== "append_and_remove_keys.py" ```python hl_lines="3 8 10" ---8<-- "examples/logger/src/append_and_remove_keys.py" ``` -=== "Example CloudWatch Logs excerpt" + +=== "append_and_remove_keys_output.json" ```json hl_lines="6 15-16" - ---8<-- "examples/logger/src/append_and_remove_keys.json" + ---8<-- "examples/logger/src/append_and_remove_keys_output.json" ``` For log records originating from Powertools Logger, the `name` attribute will be the same as `service`, for log records coming from standard library logger, it will be the name of the logger (i.e. what was used as name argument to `logging.getLogger`). @@ -634,13 +639,13 @@ Keys added with `append_keys` will persist across multiple log messages while ke Here's an example where we persist `payment_id` not `request_id`. Note that `payment_id` remains in both log messages while `booking_id` is only available in the first message. 
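The behavioral difference can be sketched in a few lines; the key names below (`payment_id`, `request_id`) are illustrative:

```python
from aws_lambda_powertools import Logger

logger = Logger(service="payment")

def lambda_handler(event: dict, context) -> str:
    logger.append_keys(payment_id="123")  # persists for every subsequent log statement
    logger.info("Collecting payment", extra={"request_id": "abc"})  # request_id appears in this record only
    logger.info("Payment collected")  # payment_id still present; request_id gone
    return "ok"
```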
-=== "collect.py" +=== "append_keys_vs_extra.py" ```python hl_lines="16 23" ---8<-- "examples/logger/src/append_keys_vs_extra.py" ``` -=== "Example CloudWatch Logs excerpt" +=== "append_keys_vs_extra_output.json" ```json hl_lines="9-10 19" ---8<-- "examples/logger/src/append_keys_vs_extra_output.json" diff --git a/examples/logger/src/append_and_remove_keys.json b/examples/logger/src/append_and_remove_keys_output.json similarity index 100% rename from examples/logger/src/append_and_remove_keys.json rename to examples/logger/src/append_and_remove_keys_output.json diff --git a/examples/logger/src/set_correlation_id_jmespath_event.json b/examples/logger/src/set_correlation_id_jmespath.json similarity index 100% rename from examples/logger/src/set_correlation_id_jmespath_event.json rename to examples/logger/src/set_correlation_id_jmespath.json diff --git a/examples/logger/src/set_correlation_id_method_event.json b/examples/logger/src/set_correlation_id_method.json similarity index 100% rename from examples/logger/src/set_correlation_id_method_event.json rename to examples/logger/src/set_correlation_id_method.json From b23b56393b23866344379c65d3ad2c09c9e5ac89 Mon Sep 17 00:00:00 2001 From: heitorlessa Date: Fri, 8 Jul 2022 15:35:49 +0200 Subject: [PATCH 3/5] docs(metrics): match code snippet name with filename --- docs/core/metrics.md | 34 +++++++++++++++++----------------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/docs/core/metrics.md b/docs/core/metrics.md index 713a53b193c..82f2d0597ae 100644 --- a/docs/core/metrics.md +++ b/docs/core/metrics.md @@ -57,13 +57,13 @@ You can create metrics using `add_metric`, and you can create dimensions for all ???+ tip You can initialize Metrics in any other module too. It'll keep track of your aggregate metrics in memory to optimize costs (one blob instead of multiples). -=== "Metrics" +=== "add_metrics" ```python hl_lines="10" --8<-- "examples/metrics/src/add_metrics.py" ``` -=== "Metrics with custom dimensions" +=== "add_dimension" ```python hl_lines="13" --8<-- "examples/metrics/src/add_dimension.py" @@ -82,13 +82,13 @@ You can create metrics using `add_metric`, and you can create dimensions for all You can call `add_metric()` with the same metric name multiple times. The values will be grouped together in a list. -=== "Metrics" +=== "add_multi_value_metrics" ```python hl_lines="14-15" --8<-- "examples/metrics/src/add_multi_value_metrics.py" ``` -=== "Example CloudWatch Logs excerpt" +=== "add_multi_value_metrics_output.json" ```python hl_lines="15 24-26" --8<-- "examples/metrics/src/add_multi_value_metrics_output.json" @@ -100,13 +100,13 @@ You can use `set_default_dimensions` method, or `default_dimensions` parameter i If you'd like to remove them at some point, you can use `clear_default_dimensions` method. -=== "set_default_dimensions method" +=== "set_default_dimensions.py" ```python hl_lines="9" --8<-- "examples/metrics/src/set_default_dimensions.py" ``` -=== "with log_metrics decorator" +=== "set_default_dimensions_log_metrics.py" ```python hl_lines="9 13" --8<-- "examples/metrics/src/set_default_dimensions_log_metrics.py" @@ -118,13 +118,13 @@ As you finish adding all your metrics, you need to serialize and flush them to s This decorator also **validates**, **serializes**, and **flushes** all your metrics. During metrics validation, if no metrics are provided then a warning will be logged, but no exception will be raised. 
-=== "app.py" +=== "add_metrics.py" ```python hl_lines="8" --8<-- "examples/metrics/src/add_metrics.py" ``` -=== "Example CloudWatch Logs excerpt" +=== "log_metrics_output.json" ```json hl_lines="6 9 14 21-23" --8<-- "examples/metrics/src/log_metrics_output.json" @@ -152,13 +152,13 @@ If you want to ensure at least one metric is always emitted, you can pass `raise You can optionally capture cold start metrics with `log_metrics` decorator via `capture_cold_start_metric` param. -=== "app.py" +=== "capture_cold_start_metric.py" ```python hl_lines="7" --8<-- "examples/metrics/src/capture_cold_start_metric.py" ``` -=== "Example CloudWatch Logs excerpt" +=== "capture_cold_start_metric_output.json" ```json hl_lines="9 15 22 24-25" --8<-- "examples/metrics/src/capture_cold_start_metric_output.json" @@ -183,13 +183,13 @@ You can add high-cardinality data as part of your Metrics log with `add_metadata ???+ info **This will not be available during metrics visualization** - Use **dimensions** for this purpose -=== "app.py" +=== "add_metadata.py" ```python hl_lines="14" --8<-- "examples/metrics/src/add_metadata.py" ``` -=== "Example CloudWatch Logs excerpt" +=== "add_metadata_output.json" ```json hl_lines="22" --8<-- "examples/metrics/src/add_metadata_output.json" @@ -204,13 +204,13 @@ CloudWatch EMF uses the same dimensions across all your metrics. Use `single_met **unique metric = (metric_name + dimension_name + dimension_value)** -=== "app.py" +=== "single_metric.py" ```python hl_lines="11" --8<-- "examples/metrics/src/single_metric.py" ``` -=== "Example CloudWatch Logs excerpt" +=== "single_metric_output.json" ```json hl_lines="15" --8<-- "examples/metrics/src/single_metric_output.json" @@ -257,7 +257,7 @@ Make sure to set `POWERTOOLS_METRICS_NAMESPACE` and `POWERTOOLS_SERVICE_NAME` be You can read standard output and assert whether metrics have been flushed. Here's an example using `pytest` with `capsys` built-in fixture: -=== "Asserting single EMF blob" +=== "assert_single_emf_blob.py" ```python hl_lines="6 9-10 23-34" --8<-- "examples/metrics/src/assert_single_emf_blob.py" @@ -269,7 +269,7 @@ You can read standard output and assert whether metrics have been flushed. Here' --8<-- "examples/metrics/src/add_metrics.py" ``` -=== "Asserting multiple EMF blobs" +=== "assert_multiple_emf_blobs.py" This will be needed when using `capture_cold_start_metric=True`, or when both `Metrics` and `single_metric` are used. @@ -277,7 +277,7 @@ You can read standard output and assert whether metrics have been flushed. Here' --8<-- "examples/metrics/src/assert_multiple_emf_blobs.py" ``` -=== "my_other_module.py" +=== "assert_multiple_emf_blobs_module.py" ```python --8<-- "examples/metrics/src/assert_multiple_emf_blobs_module.py" From 8d357cf0faeb487c77b87168faf58965fde78caa Mon Sep 17 00:00:00 2001 From: heitorlessa Date: Fri, 8 Jul 2022 15:39:42 +0200 Subject: [PATCH 4/5] docs(apigateway): match code snippet name with filename --- docs/core/event_handler/api_gateway.md | 62 +++++++++++++------------- 1 file changed, 31 insertions(+), 31 deletions(-) diff --git a/docs/core/event_handler/api_gateway.md b/docs/core/event_handler/api_gateway.md index 9db219e994e..6d8f441d661 100644 --- a/docs/core/event_handler/api_gateway.md +++ b/docs/core/event_handler/api_gateway.md @@ -48,13 +48,13 @@ Here's an example on how we can handle the `/todos` path. ???+ info We automatically serialize `Dict` responses as JSON, trim whitespace for compact responses, and set content-type to `application/json`. 
-=== "app.py" +=== "getting_started_rest_api_resolver.py" ```python hl_lines="5 11 14 28" --8<-- "examples/event_handler_rest/src/getting_started_rest_api_resolver.py" ``` -=== "Request" +=== "getting_started_rest_api_resolver.json" This utility uses `path` and `httpMethod` to route to the right function. This helps make unit tests and local invocation easier too. @@ -62,7 +62,7 @@ Here's an example on how we can handle the `/todos` path. --8<-- "examples/event_handler_rest/src/getting_started_rest_api_resolver.json" ``` -=== "Response" +=== "getting_started_rest_api_resolver_output.json" ```json --8<-- "examples/event_handler_rest/src/getting_started_rest_api_resolver_output.json" @@ -96,13 +96,13 @@ Each dynamic route you set must be part of your function signature. This allows ???+ note For brevity, we will only include the necessary keys for each sample request for the example to work. -=== "app.py" +=== "dynamic_routes.py" ```python hl_lines="14 16" --8<-- "examples/event_handler_rest/src/dynamic_routes.py" ``` -=== "Request" +=== "dynamic_routes.json" ```json --8<-- "examples/event_handler_rest/src/dynamic_routes.json" @@ -123,13 +123,13 @@ You can also combine nested paths with greedy regex to catch in between routes. ???+ warning We choose the most explicit registered route that matches an incoming event. -=== "app.py" +=== "dynamic_routes_catch_all.py" ```python hl_lines="11" --8<-- "examples/event_handler_rest/src/dynamic_routes_catch_all.py" ``` -=== "Request" +=== "dynamic_routes_catch_all.json" ```json --8<-- "examples/event_handler_rest/src/dynamic_routes_catch_all.json" @@ -139,13 +139,13 @@ You can also combine nested paths with greedy regex to catch in between routes. You can use named decorators to specify the HTTP method that should be handled in your functions. That is, `app.`, where the HTTP method could be `get`, `post`, `put`, `patch`, `delete`, and `options`. -=== "app.py" +=== "http_methods.py" ```python hl_lines="14 17" --8<-- "examples/event_handler_rest/src/http_methods.py" ``` -=== "Request" +=== "http_methods.json" ```json --8<-- "examples/event_handler_rest/src/http_methods.json" @@ -225,13 +225,13 @@ When using [Custom Domain API Mappings feature](https://docs.aws.amazon.com/apig To address this API Gateway behavior, we use `strip_prefixes` parameter to account for these prefixes that are now injected into the path regardless of which type of API Gateway you're using. -=== "app.py" +=== "custom_api_mapping.py" ```python hl_lines="8" --8<-- "examples/event_handler_rest/src/custom_api_mapping.py" ``` -=== "Request" +=== "custom_api_mapping.json" ```json --8<-- "examples/event_handler_rest/src/custom_api_mapping.json" @@ -253,13 +253,13 @@ This will ensure that CORS headers are always returned as part of the response w ???+ tip Optionally disable CORS on a per path basis with `cors=False` parameter. -=== "app.py" +=== "setting_cors.py" ```python hl_lines="5 11-12 34" --8<-- "examples/event_handler_rest/src/setting_cors.py" ``` -=== "Response" +=== "setting_cors_output.json" ```json --8<-- "examples/event_handler_rest/src/setting_cors_output.json" @@ -290,13 +290,13 @@ For convenience, these are the default values when using `CORSConfig` to enable You can use the `Response` class to have full control over the response, for example you might want to add additional headers or set a custom Content-type. 
-=== "app.py" +=== "fine_grained_responses.py" ```python hl_lines="7 24-28" --8<-- "examples/event_handler_rest/src/fine_grained_responses.py" ``` -=== "Response" +=== "fine_grained_responses_output.json" ```json --8<-- "examples/event_handler_rest/src/fine_grained_responses_output.json" @@ -309,19 +309,19 @@ You can compress with gzip and base64 encode your responses via `compress` param ???+ warning The client must send the `Accept-Encoding` header, otherwise a normal response will be sent. -=== "app.py" +=== "compressing_responses.py" ```python hl_lines="14" --8<-- "examples/event_handler_rest/src/compressing_responses.py" ``` -=== "Request" +=== "compressing_responses.json" ```json --8<-- "examples/event_handler_rest/src/compressing_responses.json" ``` -=== "Response" +=== "compressing_responses_output.json" ```json --8<-- "examples/event_handler_rest/src/compressing_responses_output.json" @@ -336,25 +336,25 @@ Like `compress` feature, the client must send the `Accept` header with the corre ???+ warning This feature requires API Gateway to configure binary media types, see [our sample infrastructure](#required-resources) for reference. -=== "app.py" +=== "binary_responses.py" ```python hl_lines="14 20" --8<-- "examples/event_handler_rest/src/binary_responses.py" ``` -=== "logo.svg" +=== "binary_responses_logo.svg" ```xml --8<-- "examples/event_handler_rest/src/binary_responses_logo.svg" ``` -=== "Request" +=== "binary_responses.json" ```json --8<-- "examples/event_handler_rest/src/binary_responses.json" ``` -=== "Response" +=== "binary_responses_output.json" ```json --8<-- "examples/event_handler_rest/src/binary_responses_output.json" @@ -387,9 +387,9 @@ You can instruct API Gateway handler to use a custom serializer to best suit you As you grow the number of routes a given Lambda function should handle, it is natural to split routes into separate files to ease maintenance - That's where the `Router` feature is useful. -Let's assume you have `app.py` as your Lambda function entrypoint and routes in `todos.py`, this is how you'd use the `Router` feature. +Let's assume you have `app.py` as your Lambda function entrypoint and routes in `split_route_module.py`, this is how you'd use the `Router` feature. -=== "todos.py" +=== "split_route_module.py" We import **Router** instead of **APIGatewayRestResolver**; syntax wise is exactly the same. @@ -397,7 +397,7 @@ Let's assume you have `app.py` as your Lambda function entrypoint and routes in --8<-- "examples/event_handler_rest/src/split_route_module.py" ``` -=== "app.py" +=== "split_route.py" We use `include_router` method and include all user routers registered in the `router` global object. @@ -407,17 +407,17 @@ Let's assume you have `app.py` as your Lambda function entrypoint and routes in #### Route prefix -In the previous example, `todos.py` routes had a `/todos` prefix. This might grow over time and become repetitive. +In the previous example, `split_route_module.py` routes had a `/todos` prefix. This might grow over time and become repetitive. -When necessary, you can set a prefix when including a router object. This means you could remove `/todos` prefix in `todos.py` altogether. +When necessary, you can set a prefix when including a router object. This means you could remove `/todos` prefix altogether. 
-=== "app.py" +=== "split_route_prefix.py" ```python hl_lines="12" --8<-- "examples/event_handler_rest/src/split_route_prefix.py" ``` -=== "todos.py" +=== "split_route_prefix_module.py" ```python hl_lines="13 25" --8<-- "examples/event_handler_rest/src/split_route_prefix_module.py" @@ -509,13 +509,13 @@ your development, building, deployment tooling need to accommodate the distinct You can test your routes by passing a proxy event request where `path` and `httpMethod`. -=== "test_app.py" +=== "assert_http_response.py" ```python hl_lines="21-24" --8<-- "examples/event_handler_rest/src/assert_http_response.py" ``` -=== "app.py" +=== "assert_http_response_module.py" ```python --8<-- "examples/event_handler_rest/src/assert_http_response_module.py" From 4806f91d8838fb6d83de3aadafbf452070d8d187 Mon Sep 17 00:00:00 2001 From: heitorlessa Date: Fri, 8 Jul 2022 15:46:08 +0200 Subject: [PATCH 5/5] docs(metrics): fix leftover --- docs/core/metrics.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/core/metrics.md b/docs/core/metrics.md index 82f2d0597ae..843e35b7eb8 100644 --- a/docs/core/metrics.md +++ b/docs/core/metrics.md @@ -57,13 +57,13 @@ You can create metrics using `add_metric`, and you can create dimensions for all ???+ tip You can initialize Metrics in any other module too. It'll keep track of your aggregate metrics in memory to optimize costs (one blob instead of multiples). -=== "add_metrics" +=== "add_metrics.py" ```python hl_lines="10" --8<-- "examples/metrics/src/add_metrics.py" ``` -=== "add_dimension" +=== "add_dimension.py" ```python hl_lines="13" --8<-- "examples/metrics/src/add_dimension.py" @@ -82,7 +82,7 @@ You can create metrics using `add_metric`, and you can create dimensions for all You can call `add_metric()` with the same metric name multiple times. The values will be grouped together in a list. -=== "add_multi_value_metrics" +=== "add_multi_value_metrics.py" ```python hl_lines="14-15" --8<-- "examples/metrics/src/add_multi_value_metrics.py"
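Finally, the multi-value behavior can be sketched as follows (metric names and values are placeholders):

```python
from aws_lambda_powertools import Metrics
from aws_lambda_powertools.metrics import MetricUnit

metrics = Metrics(namespace="ServerlessAirline", service="booking")

@metrics.log_metrics
def lambda_handler(event: dict, context) -> str:
    # calling add_metric with the same name groups the values into a list in the EMF blob
    metrics.add_metric(name="TurbineReads", unit=MetricUnit.Count, value=1)
    metrics.add_metric(name="TurbineReads", unit=MetricUnit.Count, value=8)
    return "ok"
```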