EventSourceMappingArgs

data class EventSourceMappingArgs(val amazonManagedKafkaEventSourceConfig: Output<EventSourceMappingAmazonManagedKafkaEventSourceConfigArgs>? = null, val batchSize: Output<Int>? = null, val bisectBatchOnFunctionError: Output<Boolean>? = null, val destinationConfig: Output<EventSourceMappingDestinationConfigArgs>? = null, val documentDbEventSourceConfig: Output<EventSourceMappingDocumentDbEventSourceConfigArgs>? = null, val enabled: Output<Boolean>? = null, val eventSourceArn: Output<String>? = null, val filterCriteria: Output<EventSourceMappingFilterCriteriaArgs>? = null, val functionName: Output<String>? = null, val functionResponseTypes: Output<List<String>>? = null, val maximumBatchingWindowInSeconds: Output<Int>? = null, val maximumRecordAgeInSeconds: Output<Int>? = null, val maximumRetryAttempts: Output<Int>? = null, val parallelizationFactor: Output<Int>? = null, val queues: Output<List<String>>? = null, val scalingConfig: Output<EventSourceMappingScalingConfigArgs>? = null, val selfManagedEventSource: Output<EventSourceMappingSelfManagedEventSourceArgs>? = null, val selfManagedKafkaEventSourceConfig: Output<EventSourceMappingSelfManagedKafkaEventSourceConfigArgs>? = null, val sourceAccessConfigurations: Output<List<EventSourceMappingSourceAccessConfigurationArgs>>? = null, val startingPosition: Output<String>? = null, val startingPositionTimestamp: Output<String>? = null, val topics: Output<List<String>>? = null, val tumblingWindowInSeconds: Output<Int>? = null) : ConvertibleToJava<EventSourceMappingArgs>

Provides a Lambda event source mapping. This allows Lambda functions to get events from Kinesis, DynamoDB, SQS, Amazon MQ and Managed Streaming for Apache Kafka (MSK). For information about Lambda and how to use it, see What is AWS Lambda?. For information about event source mappings, see CreateEventSourceMapping in the API docs.

Example Usage

DynamoDB

package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.aws.lambda.EventSourceMapping;
import com.pulumi.aws.lambda.EventSourceMappingArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var example = new EventSourceMapping("example", EventSourceMappingArgs.builder()
.eventSourceArn(aws_dynamodb_table.example().stream_arn())
.functionName(aws_lambda_function.example().arn())
.startingPosition("LATEST")
.build());
}
}

Kinesis

package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.aws.lambda.EventSourceMapping;
import com.pulumi.aws.lambda.EventSourceMappingArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var example = new EventSourceMapping("example", EventSourceMappingArgs.builder()
.eventSourceArn(aws_kinesis_stream.example().arn())
.functionName(aws_lambda_function.example().arn())
.startingPosition("LATEST")
.build());
}
}

Managed Streaming for Apache Kafka (MSK)

package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.aws.lambda.EventSourceMapping;
import com.pulumi.aws.lambda.EventSourceMappingArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var example = new EventSourceMapping("example", EventSourceMappingArgs.builder()
.eventSourceArn(aws_msk_cluster.example().arn())
.functionName(aws_lambda_function.example().arn())
.topics("Example")
.startingPosition("TRIM_HORIZON")
.build());
}
}

Self Managed Apache Kafka

package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.aws.lambda.EventSourceMapping;
import com.pulumi.aws.lambda.EventSourceMappingArgs;
import com.pulumi.aws.lambda.inputs.EventSourceMappingSelfManagedEventSourceArgs;
import com.pulumi.aws.lambda.inputs.EventSourceMappingSourceAccessConfigurationArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var example = new EventSourceMapping("example", EventSourceMappingArgs.builder()
.functionName(aws_lambda_function.example().arn())
.topics("Example")
.startingPosition("TRIM_HORIZON")
.selfManagedEventSource(EventSourceMappingSelfManagedEventSourceArgs.builder()
.endpoints(Map.of("KAFKA_BOOTSTRAP_SERVERS", "kafka1.example.com:9092,kafka2.example.com:9092"))
.build())
.sourceAccessConfigurations(
EventSourceMappingSourceAccessConfigurationArgs.builder()
.type("VPC_SUBNET")
.uri("subnet:subnet-example1")
.build(),
EventSourceMappingSourceAccessConfigurationArgs.builder()
.type("VPC_SUBNET")
.uri("subnet:subnet-example2")
.build(),
EventSourceMappingSourceAccessConfigurationArgs.builder()
.type("VPC_SECURITY_GROUP")
.uri("security_group:sg-example")
.build())
.build());
}
}

SQS

package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.aws.lambda.EventSourceMapping;
import com.pulumi.aws.lambda.EventSourceMappingArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var example = new EventSourceMapping("example", EventSourceMappingArgs.builder()
.eventSourceArn(aws_sqs_queue.sqs_queue_test().arn())
.functionName(aws_lambda_function.example().arn())
.build());
}
}

SQS with event filter

package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.aws.lambda.EventSourceMapping;
import com.pulumi.aws.lambda.EventSourceMappingArgs;
import com.pulumi.aws.lambda.inputs.EventSourceMappingFilterCriteriaArgs;
import com.pulumi.aws.lambda.inputs.EventSourceMappingFilterCriteriaFilterArgs;
import static com.pulumi.codegen.internal.Serialization.*;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var example = new EventSourceMapping("example", EventSourceMappingArgs.builder()
.eventSourceArn(aws_sqs_queue.sqs_queue_test().arn())
.functionName(aws_lambda_function.example().arn())
.filterCriteria(EventSourceMappingFilterCriteriaArgs.builder()
.filters(EventSourceMappingFilterCriteriaFilterArgs.builder()
.pattern(serializeJson(
jsonObject(
jsonProperty("body", jsonObject(
jsonProperty("Temperature", jsonArray(jsonObject(
jsonProperty("numeric", jsonArray(
">",
0,
"<=",
100
))
))),
jsonProperty("Location", jsonArray("New York"))
))
)))
.build())
.build())
.build());
}
}

Amazon MQ (ActiveMQ)

package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.aws.lambda.EventSourceMapping;
import com.pulumi.aws.lambda.EventSourceMappingArgs;
import com.pulumi.aws.lambda.inputs.EventSourceMappingSourceAccessConfigurationArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var example = new EventSourceMapping("example", EventSourceMappingArgs.builder()
.batchSize(10)
.eventSourceArn(aws_mq_broker.example().arn())
.enabled(true)
.functionName(aws_lambda_function.example().arn())
.queues("example")
.sourceAccessConfigurations(EventSourceMappingSourceAccessConfigurationArgs.builder()
.type("BASIC_AUTH")
.uri(aws_secretsmanager_secret_version.example().arn())
.build())
.build());
}
}

Amazon MQ (RabbitMQ)

package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.aws.lambda.EventSourceMapping;
import com.pulumi.aws.lambda.EventSourceMappingArgs;
import com.pulumi.aws.lambda.inputs.EventSourceMappingSourceAccessConfigurationArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var example = new EventSourceMapping("example", EventSourceMappingArgs.builder()
.batchSize(1)
.eventSourceArn(aws_mq_broker.example().arn())
.enabled(true)
.functionName(aws_lambda_function.example().arn())
.queues("example")
.sourceAccessConfigurations(
EventSourceMappingSourceAccessConfigurationArgs.builder()
.type("VIRTUAL_HOST")
.uri("/example")
.build(),
EventSourceMappingSourceAccessConfigurationArgs.builder()
.type("BASIC_AUTH")
.uri(aws_secretsmanager_secret_version.example().arn())
.build())
.build());
}
}

Import

Lambda event source mappings can be imported using the UUID (event source mapping identifier), e.g.,

$ pulumi import aws:lambda/eventSourceMapping:EventSourceMapping event_source_mapping 12345kxodurf3443

Constructors

Link copied to clipboard
constructor(amazonManagedKafkaEventSourceConfig: Output<EventSourceMappingAmazonManagedKafkaEventSourceConfigArgs>? = null, batchSize: Output<Int>? = null, bisectBatchOnFunctionError: Output<Boolean>? = null, destinationConfig: Output<EventSourceMappingDestinationConfigArgs>? = null, documentDbEventSourceConfig: Output<EventSourceMappingDocumentDbEventSourceConfigArgs>? = null, enabled: Output<Boolean>? = null, eventSourceArn: Output<String>? = null, filterCriteria: Output<EventSourceMappingFilterCriteriaArgs>? = null, functionName: Output<String>? = null, functionResponseTypes: Output<List<String>>? = null, maximumBatchingWindowInSeconds: Output<Int>? = null, maximumRecordAgeInSeconds: Output<Int>? = null, maximumRetryAttempts: Output<Int>? = null, parallelizationFactor: Output<Int>? = null, queues: Output<List<String>>? = null, scalingConfig: Output<EventSourceMappingScalingConfigArgs>? = null, selfManagedEventSource: Output<EventSourceMappingSelfManagedEventSourceArgs>? = null, selfManagedKafkaEventSourceConfig: Output<EventSourceMappingSelfManagedKafkaEventSourceConfigArgs>? = null, sourceAccessConfigurations: Output<List<EventSourceMappingSourceAccessConfigurationArgs>>? = null, startingPosition: Output<String>? = null, startingPositionTimestamp: Output<String>? = null, topics: Output<List<String>>? = null, tumblingWindowInSeconds: Output<Int>? = null)

Properties

Link copied to clipboard

Additional configuration block for Amazon Managed Kafka sources. Incompatible with "self_managed_event_source" and "self_managed_kafka_event_source_config". Detailed below.

Link copied to clipboard
val batchSize: Output<Int>? = null

The largest number of records that Lambda will retrieve from your event source at the time of invocation. Defaults to 100 for DynamoDB, Kinesis, MQ and MSK, 10 for SQS.

Link copied to clipboard
val bisectBatchOnFunctionError: Output<Boolean>? = null
Link copied to clipboard
Link copied to clipboard
val enabled: Output<Boolean>? = null

Determines if the mapping will be enabled on creation. Defaults to true.

Link copied to clipboard
val eventSourceArn: Output<String>? = null

The event source ARN - this is required for Kinesis stream, DynamoDB stream, SQS queue, MQ broker, MSK cluster or DocumentDB change stream. It is incompatible with a Self Managed Kafka source.

Link copied to clipboard

The criteria to use for event filtering Kinesis stream, DynamoDB stream, SQS queue event sources. Detailed below.

Link copied to clipboard
val functionName: Output<String>? = null

The name or the ARN of the Lambda function that will be subscribing to events.

Link copied to clipboard
val functionResponseTypes: Output<List<String>>? = null

A list of current response type enums applied to the event source mapping for AWS Lambda checkpointing. Only available for SQS and stream sources (DynamoDB and Kinesis). Valid values: ReportBatchItemFailures.

Link copied to clipboard
val maximumBatchingWindowInSeconds: Output<Int>? = null

The maximum amount of time to gather records before invoking the function, in seconds (between 0 and 300). Records will continue to buffer (or accumulate in the case of an SQS queue event source) until either maximum_batching_window_in_seconds expires or batch_size has been met. For streaming event sources, defaults to as soon as records are available in the stream. If the batch it reads from the stream/queue only has one record in it, Lambda only sends one record to the function. Only available for stream sources (DynamoDB and Kinesis) and SQS standard queues.

Link copied to clipboard
val maximumRecordAgeInSeconds: Output<Int>? = null
Link copied to clipboard
val maximumRetryAttempts: Output<Int>? = null
Link copied to clipboard
val parallelizationFactor: Output<Int>? = null
Link copied to clipboard
val queues: Output<List<String>>? = null

The name of the Amazon MQ broker destination queue to consume. Only available for MQ sources. A single queue name must be specified.

Link copied to clipboard

Scaling configuration of the event source. Only available for SQS queues. Detailed below.

Link copied to clipboard

Additional configuration block for Self Managed Kafka sources. Incompatible with "event_source_arn" and "amazon_managed_kafka_event_source_config". Detailed below.

Link copied to clipboard

For Self Managed Kafka sources, the access configuration for the source. If set, configuration must also include self_managed_event_source. Detailed below.

Link copied to clipboard
val startingPosition: Output<String>? = null

The position in the stream where AWS Lambda should start reading. Must be one of AT_TIMESTAMP (Kinesis only), LATEST or TRIM_HORIZON if getting events from Kinesis, DynamoDB, MSK or Self Managed Apache Kafka. Must not be provided if getting events from SQS. More information about these positions can be found in the AWS DynamoDB Streams API Reference and AWS Kinesis API Reference.

Link copied to clipboard
val startingPositionTimestamp: Output<String>? = null

A timestamp in RFC3339 format of the data record which to start reading when using starting_position set to AT_TIMESTAMP. If a record with this exact timestamp does not exist, the next later record is chosen. If the timestamp is older than the current trim horizon, the oldest available record is chosen.

Link copied to clipboard
val topics: Output<List<String>>? = null

The names of the Kafka topics to consume. Only available for MSK sources. A single topic name must be specified.

Link copied to clipboard
val tumblingWindowInSeconds: Output<Int>? = null

The duration in seconds of a processing window for AWS Lambda streaming analytics. The range is between 1 second up to 900 seconds. Only available for stream sources (DynamoDB and Kinesis).

Functions

Link copied to clipboard
open override fun toJava(): EventSourceMappingArgs