CrawlerArgs

data class CrawlerArgs(val catalogTargets: Output<List<CrawlerCatalogTargetArgs>>? = null, val classifiers: Output<List<String>>? = null, val configuration: Output<String>? = null, val databaseName: Output<String>? = null, val deltaTargets: Output<List<CrawlerDeltaTargetArgs>>? = null, val description: Output<String>? = null, val dynamodbTargets: Output<List<CrawlerDynamodbTargetArgs>>? = null, val jdbcTargets: Output<List<CrawlerJdbcTargetArgs>>? = null, val lakeFormationConfiguration: Output<CrawlerLakeFormationConfigurationArgs>? = null, val lineageConfiguration: Output<CrawlerLineageConfigurationArgs>? = null, val mongodbTargets: Output<List<CrawlerMongodbTargetArgs>>? = null, val name: Output<String>? = null, val recrawlPolicy: Output<CrawlerRecrawlPolicyArgs>? = null, val role: Output<String>? = null, val s3Targets: Output<List<CrawlerS3TargetArgs>>? = null, val schedule: Output<String>? = null, val schemaChangePolicy: Output<CrawlerSchemaChangePolicyArgs>? = null, val securityConfiguration: Output<String>? = null, val tablePrefix: Output<String>? = null, val tags: Output<Map<String, String>>? = null) : ConvertibleToJava<CrawlerArgs>

Manages a Glue Crawler. More information can be found in the AWS Glue Developer Guide.

Example Usage

DynamoDB Target Example

package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.aws.glue.Crawler;
import com.pulumi.aws.glue.CrawlerArgs;
import com.pulumi.aws.glue.inputs.CrawlerDynamodbTargetArgs;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        // The catalog database and IAM role referenced below are assumed to be
        // defined elsewhere in the program.
        var example = new Crawler("example", CrawlerArgs.builder()
            .databaseName(aws_glue_catalog_database.example().name())
            .role(aws_iam_role.example().arn())
            .dynamodbTargets(CrawlerDynamodbTargetArgs.builder()
                .path("table-name")
                .build())
            .build());
    }
}

JDBC Target Example

package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.aws.glue.Crawler;
import com.pulumi.aws.glue.CrawlerArgs;
import com.pulumi.aws.glue.inputs.CrawlerJdbcTargetArgs;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        var example = new Crawler("example", CrawlerArgs.builder()
            .databaseName(aws_glue_catalog_database.example().name())
            .role(aws_iam_role.example().arn())
            .jdbcTargets(CrawlerJdbcTargetArgs.builder()
                .connectionName(aws_glue_connection.example().name())
                .path("database-name/%")
                .build())
            .build());
    }
}

S3 Target Example

package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.aws.glue.Crawler;
import com.pulumi.aws.glue.CrawlerArgs;
import com.pulumi.aws.glue.inputs.CrawlerS3TargetArgs;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        var example = new Crawler("example", CrawlerArgs.builder()
            .databaseName(aws_glue_catalog_database.example().name())
            .role(aws_iam_role.example().arn())
            .s3Targets(CrawlerS3TargetArgs.builder()
                .path(String.format("s3://%s", aws_s3_bucket.example().bucket()))
                .build())
            .build());
    }
}

Catalog Target Example

package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.aws.glue.Crawler;
import com.pulumi.aws.glue.CrawlerArgs;
import com.pulumi.aws.glue.inputs.CrawlerCatalogTargetArgs;
import com.pulumi.aws.glue.inputs.CrawlerSchemaChangePolicyArgs;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        var example = new Crawler("example", CrawlerArgs.builder()
            .databaseName(aws_glue_catalog_database.example().name())
            .role(aws_iam_role.example().arn())
            .catalogTargets(CrawlerCatalogTargetArgs.builder()
                .databaseName(aws_glue_catalog_database.example().name())
                .tables(aws_glue_catalog_table.example().name())
                .build())
            .schemaChangePolicy(CrawlerSchemaChangePolicyArgs.builder()
                .deleteBehavior("LOG")
                .build())
            .configuration("""
                {
                  "Version": 1.0,
                  "Grouping": {
                    "TableGroupingPolicy": "CombineCompatibleSchemas"
                  }
                }
                """)
            .build());
    }
}

MongoDB Target Example

package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.aws.glue.Crawler;
import com.pulumi.aws.glue.CrawlerArgs;
import com.pulumi.aws.glue.inputs.CrawlerMongodbTargetArgs;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        var example = new Crawler("example", CrawlerArgs.builder()
            .databaseName(aws_glue_catalog_database.example().name())
            .role(aws_iam_role.example().arn())
            .mongodbTargets(CrawlerMongodbTargetArgs.builder()
                .connectionName(aws_glue_connection.example().name())
                .path("database-name/%")
                .build())
            .build());
    }
}

Configuration Settings Example

package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.aws.glue.Crawler;
import com.pulumi.aws.glue.CrawlerArgs;
import com.pulumi.aws.glue.inputs.CrawlerS3TargetArgs;
import static com.pulumi.codegen.internal.Serialization.*;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        var eventsCrawler = new Crawler("eventsCrawler", CrawlerArgs.builder()
            .databaseName(aws_glue_catalog_database.glue_database().name())
            .schedule("cron(0 1 * * ? *)")
            .role(aws_iam_role.glue_role().arn())
            .tags(var_.tags())
            .configuration(serializeJson(
                jsonObject(
                    jsonProperty("Grouping", jsonObject(
                        jsonProperty("TableGroupingPolicy", "CombineCompatibleSchemas")
                    )),
                    jsonProperty("CrawlerOutput", jsonObject(
                        jsonProperty("Partitions", jsonObject(
                            jsonProperty("AddOrUpdateBehavior", "InheritFromTable")
                        ))
                    )),
                    jsonProperty("Version", 1)
                )))
            .s3Targets(CrawlerS3TargetArgs.builder()
                .path(String.format("s3://%s", aws_s3_bucket.data_lake_bucket().bucket()))
                .build())
            .build());
    }
}

Import

Glue Crawlers can be imported using the crawler name, e.g.,

$ pulumi import aws:glue/crawler:Crawler MyJob MyJob

Constructors

constructor(catalogTargets: Output<List<CrawlerCatalogTargetArgs>>? = null, classifiers: Output<List<String>>? = null, configuration: Output<String>? = null, databaseName: Output<String>? = null, deltaTargets: Output<List<CrawlerDeltaTargetArgs>>? = null, description: Output<String>? = null, dynamodbTargets: Output<List<CrawlerDynamodbTargetArgs>>? = null, jdbcTargets: Output<List<CrawlerJdbcTargetArgs>>? = null, lakeFormationConfiguration: Output<CrawlerLakeFormationConfigurationArgs>? = null, lineageConfiguration: Output<CrawlerLineageConfigurationArgs>? = null, mongodbTargets: Output<List<CrawlerMongodbTargetArgs>>? = null, name: Output<String>? = null, recrawlPolicy: Output<CrawlerRecrawlPolicyArgs>? = null, role: Output<String>? = null, s3Targets: Output<List<CrawlerS3TargetArgs>>? = null, schedule: Output<String>? = null, schemaChangePolicy: Output<CrawlerSchemaChangePolicyArgs>? = null, securityConfiguration: Output<String>? = null, tablePrefix: Output<String>? = null, tags: Output<Map<String, String>>? = null)

Properties

val catalogTargets: Output<List<CrawlerCatalogTargetArgs>>? = null

List of nested AWS Glue Data Catalog target arguments. See Catalog Target below.

val classifiers: Output<List<String>>? = null

List of custom classifiers. By default, all AWS classifiers are included in a crawl, but these custom classifiers always override the default classifiers for a given classification.
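
For instance, a custom classifier can be attached by name. A minimal sketch, in which "my-csv-classifier" is a hypothetical classifier name that must refer to an existing Glue classifier, and the database and role references follow the placeholder style of the examples above:

// "my-csv-classifier" is a hypothetical classifier name; it overrides the
// built-in classifiers for its classification.
var argsWithClassifier = CrawlerArgs.builder()
    .databaseName(aws_glue_catalog_database.example().name())
    .role(aws_iam_role.example().arn())
    .classifiers("my-csv-classifier")
    .build();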

val configuration: Output<String>? = null

JSON string of configuration information. For more details see Setting Crawler Configuration Options.

val databaseName: Output<String>? = null

Glue database where results are written.

val deltaTargets: Output<List<CrawlerDeltaTargetArgs>>? = null

List of nested Delta Lake target arguments. See Delta Target below.

val description: Output<String>? = null

Description of the crawler.

val dynamodbTargets: Output<List<CrawlerDynamodbTargetArgs>>? = null

List of nested DynamoDB target arguments. See Dynamodb Target below.

val jdbcTargets: Output<List<CrawlerJdbcTargetArgs>>? = null

List of nested JDBC target arguments. See JDBC Target below.

val lakeFormationConfiguration: Output<CrawlerLakeFormationConfigurationArgs>? = null

Specifies Lake Formation configuration settings for the crawler. See Lake Formation Configuration below.
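
A minimal sketch of enabling Lake Formation credentials for the crawl, assuming the crawler's role is registered with Lake Formation; the result is passed to CrawlerArgs.builder().lakeFormationConfiguration(...) (the Args type is imported from com.pulumi.aws.glue.inputs, as in the examples above):

// Use Lake Formation credentials for the crawl instead of the role's
// direct permissions.
var lakeFormation = CrawlerLakeFormationConfigurationArgs.builder()
    .useLakeFormationCredentials(true)
    .build();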

val lineageConfiguration: Output<CrawlerLineageConfigurationArgs>? = null

Specifies data lineage configuration settings for the crawler. See Lineage Configuration below.
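
As a sketch, lineage collection is toggled through a single setting; per the AWS Glue API the valid values are "ENABLE" and "DISABLE":

// Enable data lineage collection for this crawler.
var lineage = CrawlerLineageConfigurationArgs.builder()
    .crawlerLineageSettings("ENABLE")
    .build();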

val mongodbTargets: Output<List<CrawlerMongodbTargetArgs>>? = null

List of nested MongoDB target arguments. See MongoDB Target below.

val name: Output<String>? = null

Name of the crawler.

val recrawlPolicy: Output<CrawlerRecrawlPolicyArgs>? = null

A policy that specifies whether to crawl the entire dataset again or only folders that were added since the last crawler run. See Recrawl Policy below.
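
A minimal sketch of an incremental-crawl policy, passed to CrawlerArgs.builder().recrawlPolicy(...); the behavior values come from the AWS Glue API ("CRAWL_EVERYTHING", "CRAWL_NEW_FOLDERS_ONLY", "CRAWL_EVENT_MODE"):

// Only crawl S3 folders added since the last successful run.
var recrawl = CrawlerRecrawlPolicyArgs.builder()
    .recrawlBehavior("CRAWL_NEW_FOLDERS_ONLY")
    .build();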

val role: Output<String>? = null

The IAM role friendly name (including path without leading slash), or ARN of an IAM role, used by the crawler to access other resources.

val s3Targets: Output<List<CrawlerS3TargetArgs>>? = null

List of nested Amazon S3 target arguments. See S3 Target below.

val schedule: Output<String>? = null

A cron expression used to specify the schedule. For more information, see Time-Based Schedules for Jobs and Crawlers. For example, to run something every day at 12:15 UTC, you would specify: cron(15 12 * * ? *).

val schemaChangePolicy: Output<CrawlerSchemaChangePolicyArgs>? = null

Policy for the crawler's update and deletion behavior. See Schema Change Policy below.
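
For example, a policy that updates changed tables in place but only logs, rather than deletes, tables whose source data has disappeared; per the AWS Glue API, deleteBehavior accepts "LOG", "DELETE_FROM_DATABASE", or "DEPRECATE_IN_DATABASE", and updateBehavior accepts "LOG" or "UPDATE_IN_DATABASE":

// Log deletions instead of removing catalog tables; update schemas in place.
var schemaPolicy = CrawlerSchemaChangePolicyArgs.builder()
    .deleteBehavior("LOG")
    .updateBehavior("UPDATE_IN_DATABASE")
    .build();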

val securityConfiguration: Output<String>? = null

The name of the Security Configuration to be used by the crawler.

val tablePrefix: Output<String>? = null

The table prefix used for catalog tables that are created.

val tags: Output<Map<String, String>>? = null

Key-value map of resource tags. If configured with a provider default_tags configuration block present, tags with matching keys will overwrite those defined at the provider level.
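
For instance, tags can be supplied as a literal java.util.Map; the keys and values here are illustrative, and the database and role references follow the placeholder style of the examples above:

// Illustrative tags; keys that match provider default_tags override the
// provider-level values.
var argsWithTags = CrawlerArgs.builder()
    .databaseName(aws_glue_catalog_database.example().name())
    .role(aws_iam_role.example().arn())
    .tags(Map.of("Environment", "dev", "Team", "data"))
    .build();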

Functions

open override fun toJava(): CrawlerArgs