Task Set Args
Provides an ECS task set - effectively a task that is expected to run until an error occurs or a user terminates it (typically a webserver or a database). See ECS Task Set section in AWS developer guide.
Example Usage
import * as pulumi from "@pulumi/pulumi";
import * as aws from "@pulumi/aws";
const example = new aws.ecs.TaskSet("example", {
service: exampleAwsEcsService.id,
cluster: exampleAwsEcsCluster.id,
taskDefinition: exampleAwsEcsTaskDefinition.arn,
loadBalancers: [{
targetGroupArn: exampleAwsLbTargetGroup.arn,
containerName: "mongo",
containerPort: 8080,
}],
});
import pulumi
import pulumi_aws as aws
example = aws.ecs.TaskSet("example",
service=example_aws_ecs_service["id"],
cluster=example_aws_ecs_cluster["id"],
task_definition=example_aws_ecs_task_definition["arn"],
load_balancers=[aws.ecs.TaskSetLoadBalancerArgs(
target_group_arn=example_aws_lb_target_group["arn"],
container_name="mongo",
container_port=8080,
)])
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Aws = Pulumi.Aws;
return await Deployment.RunAsync(() =>
{
var example = new Aws.Ecs.TaskSet("example", new()
{
Service = exampleAwsEcsService.Id,
Cluster = exampleAwsEcsCluster.Id,
TaskDefinition = exampleAwsEcsTaskDefinition.Arn,
LoadBalancers = new[]
{
new Aws.Ecs.Inputs.TaskSetLoadBalancerArgs
{
TargetGroupArn = exampleAwsLbTargetGroup.Arn,
ContainerName = "mongo",
ContainerPort = 8080,
},
},
});
});
package main
import (
"github.com/pulumi/pulumi-aws/sdk/v6/go/aws/ecs"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
_, err := ecs.NewTaskSet(ctx, "example", &ecs.TaskSetArgs{
Service: pulumi.Any(exampleAwsEcsService.Id),
Cluster: pulumi.Any(exampleAwsEcsCluster.Id),
TaskDefinition: pulumi.Any(exampleAwsEcsTaskDefinition.Arn),
LoadBalancers: ecs.TaskSetLoadBalancerArray{
&ecs.TaskSetLoadBalancerArgs{
TargetGroupArn: pulumi.Any(exampleAwsLbTargetGroup.Arn),
ContainerName: pulumi.String("mongo"),
ContainerPort: pulumi.Int(8080),
},
},
})
if err != nil {
return err
}
return nil
})
}
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.aws.ecs.TaskSet;
import com.pulumi.aws.ecs.TaskSetArgs;
import com.pulumi.aws.ecs.inputs.TaskSetLoadBalancerArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var example = new TaskSet("example", TaskSetArgs.builder()
.service(exampleAwsEcsService.id())
.cluster(exampleAwsEcsCluster.id())
.taskDefinition(exampleAwsEcsTaskDefinition.arn())
.loadBalancers(TaskSetLoadBalancerArgs.builder()
.targetGroupArn(exampleAwsLbTargetGroup.arn())
.containerName("mongo")
.containerPort(8080)
.build())
.build());
}
}
resources:
example:
type: aws:ecs:TaskSet
properties:
service: ${exampleAwsEcsService.id}
cluster: ${exampleAwsEcsCluster.id}
taskDefinition: ${exampleAwsEcsTaskDefinition.arn}
loadBalancers:
- targetGroupArn: ${exampleAwsLbTargetGroup.arn}
containerName: mongo
containerPort: 8080
Ignoring Changes to Scale
You can utilize the generic resource lifecycle configuration block with ignore_changes to create an ECS task set with an initial scale, then ignore any subsequent changes to that scale caused externally (e.g. Application Autoscaling).
import * as pulumi from "@pulumi/pulumi";
import * as aws from "@pulumi/aws";
const example = new aws.ecs.TaskSet("example", {scale: {
value: 50,
}});
import pulumi
import pulumi_aws as aws
example = aws.ecs.TaskSet("example", scale=aws.ecs.TaskSetScaleArgs(
value=50,
))
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Aws = Pulumi.Aws;
return await Deployment.RunAsync(() =>
{
var example = new Aws.Ecs.TaskSet("example", new()
{
Scale = new Aws.Ecs.Inputs.TaskSetScaleArgs
{
Value = 50,
},
});
});
package main
import (
"github.com/pulumi/pulumi-aws/sdk/v6/go/aws/ecs"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
_, err := ecs.NewTaskSet(ctx, "example", &ecs.TaskSetArgs{
Scale: &ecs.TaskSetScaleArgs{
Value: pulumi.Float64(50),
},
})
if err != nil {
return err
}
return nil
})
}
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.aws.ecs.TaskSet;
import com.pulumi.aws.ecs.TaskSetArgs;
import com.pulumi.aws.ecs.inputs.TaskSetScaleArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
public static void main(String[] args) {
Pulumi.run(App::stack);
}
public static void stack(Context ctx) {
var example = new TaskSet("example", TaskSetArgs.builder()
.scale(TaskSetScaleArgs.builder()
.value(50)
.build())
.build());
}
}
resources:
example:
type: aws:ecs:TaskSet
properties:
scale:
value: 50
Import
Using pulumi import, import ECS Task Sets using the task_set_id, service, and cluster separated by commas (,). For example:
$ pulumi import aws:ecs/taskSet:TaskSet example ecs-svc/7177320696926227436,arn:aws:ecs:us-west-2:123456789101:service/example/example-1234567890,arn:aws:ecs:us-west-2:123456789101:cluster/example
Constructors
Functions
Properties
Whether to allow deleting the task set without waiting for scaling down to 0. You can force a task set to delete even if it's in the process of scaling a resource. Normally, the provider drains all the tasks before deleting the task set. This bypasses that behavior and potentially leaves resources dangling.
The platform version on which to run your service. Only applicable for launch_type set to FARGATE. Defaults to LATEST. More information about Fargate platform versions can be found in the AWS ECS User Guide.
A map of tags to assign to the task set. If configured with a provider default_tags configuration block present, tags with matching keys will overwrite those defined at the provider-level.