LocationHdfs

class LocationHdfs : KotlinCustomResource

Manages an HDFS Location within AWS DataSync.

NOTE: The DataSync Agents must be available before creating this resource.

Example Usage

TypeScript

import * as pulumi from "@pulumi/pulumi";
import * as aws from "@pulumi/aws";

const example = new aws.datasync.LocationHdfs("example", {
    agentArns: [exampleAwsDatasyncAgent.arn],
    authenticationType: "SIMPLE",
    simpleUser: "example",
    nameNodes: [{
        hostname: exampleAwsInstance.privateDns,
        port: 80,
    }],
});

Python

import pulumi
import pulumi_aws as aws

example = aws.datasync.LocationHdfs("example",
    agent_arns=[example_aws_datasync_agent["arn"]],
    authentication_type="SIMPLE",
    simple_user="example",
    name_nodes=[{
        "hostname": example_aws_instance["privateDns"],
        "port": 80,
    }])

C#

using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Aws = Pulumi.Aws;

return await Deployment.RunAsync(() =>
{
    var example = new Aws.DataSync.LocationHdfs("example", new()
    {
        AgentArns = new[]
        {
            exampleAwsDatasyncAgent.Arn,
        },
        AuthenticationType = "SIMPLE",
        SimpleUser = "example",
        NameNodes = new[]
        {
            new Aws.DataSync.Inputs.LocationHdfsNameNodeArgs
            {
                Hostname = exampleAwsInstance.PrivateDns,
                Port = 80,
            },
        },
    });
});

Go

package main

import (
	"github.com/pulumi/pulumi-aws/sdk/v6/go/aws/datasync"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_, err := datasync.NewLocationHdfs(ctx, "example", &datasync.LocationHdfsArgs{
			AgentArns: pulumi.StringArray{
				exampleAwsDatasyncAgent.Arn,
			},
			AuthenticationType: pulumi.String("SIMPLE"),
			SimpleUser:         pulumi.String("example"),
			NameNodes: datasync.LocationHdfsNameNodeArray{
				&datasync.LocationHdfsNameNodeArgs{
					Hostname: pulumi.Any(exampleAwsInstance.PrivateDns),
					Port:     pulumi.Int(80),
				},
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}

Java

package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.aws.datasync.LocationHdfs;
import com.pulumi.aws.datasync.LocationHdfsArgs;
import com.pulumi.aws.datasync.inputs.LocationHdfsNameNodeArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        var example = new LocationHdfs("example", LocationHdfsArgs.builder()
            .agentArns(exampleAwsDatasyncAgent.arn())
            .authenticationType("SIMPLE")
            .simpleUser("example")
            .nameNodes(LocationHdfsNameNodeArgs.builder()
                .hostname(exampleAwsInstance.privateDns())
                .port(80)
                .build())
            .build());
    }
}

YAML

resources:
  example:
    type: aws:datasync:LocationHdfs
    properties:
      agentArns:
        - ${exampleAwsDatasyncAgent.arn}
      authenticationType: SIMPLE
      simpleUser: example
      nameNodes:
        - hostname: ${exampleAwsInstance.privateDns}
          port: 80
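
Kotlin

The examples above come from the upstream provider documentation, which does not include Kotlin. The snippet below is a minimal, untested sketch of the same configuration using the type-safe builder DSL of the Pulumi Kotlin SDK; the agent ARN and NameNode hostname are placeholder values that would normally come from other resources in your program, and the exact builder method names are assumptions based on how this SDK is generated.

import com.pulumi.aws.datasync.kotlin.locationHdfs
import com.pulumi.kotlin.Pulumi

fun main() {
    Pulumi.run { ctx ->
        // Sketch only: replace the placeholder agent ARN and hostname with real
        // values, e.g. outputs of a datasync.Agent and an ec2.Instance.
        val example = locationHdfs("example") {
            args {
                agentArns("arn:aws:datasync:us-east-1:123456789012:agent/agent-0b0addbeef00000000")
                authenticationType("SIMPLE")
                simpleUser("example")
                nameNodes({
                    hostname("namenode.internal.example.com")
                    port(80)
                })
            }
        }
    }
}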

Kerberos Authentication

TypeScript

import * as pulumi from "@pulumi/pulumi";
import * as aws from "@pulumi/aws";
import * as std from "@pulumi/std";

const example = new aws.datasync.LocationHdfs("example", {
    agentArns: [exampleAwsDatasyncAgent.arn],
    authenticationType: "KERBEROS",
    nameNodes: [{
        hostname: exampleAwsInstance.privateDns,
        port: 80,
    }],
    kerberosPrincipal: "user@example.com",
    kerberosKeytabBase64: std.filebase64({
        input: "user.keytab",
    }).then(invoke => invoke.result),
    kerberosKrb5Conf: std.file({
        input: "krb5.conf",
    }).then(invoke => invoke.result),
});

Python

import pulumi
import pulumi_aws as aws
import pulumi_std as std

example = aws.datasync.LocationHdfs("example",
    agent_arns=[example_aws_datasync_agent["arn"]],
    authentication_type="KERBEROS",
    name_nodes=[{
        "hostname": example_aws_instance["privateDns"],
        "port": 80,
    }],
    kerberos_principal="user@example.com",
    kerberos_keytab_base64=std.filebase64(input="user.keytab").result,
    kerberos_krb5_conf=std.file(input="krb5.conf").result)

C#

using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Aws = Pulumi.Aws;
using Std = Pulumi.Std;

return await Deployment.RunAsync(() =>
{
    var example = new Aws.DataSync.LocationHdfs("example", new()
    {
        AgentArns = new[]
        {
            exampleAwsDatasyncAgent.Arn,
        },
        AuthenticationType = "KERBEROS",
        NameNodes = new[]
        {
            new Aws.DataSync.Inputs.LocationHdfsNameNodeArgs
            {
                Hostname = exampleAwsInstance.PrivateDns,
                Port = 80,
            },
        },
        KerberosPrincipal = "user@example.com",
        KerberosKeytabBase64 = Std.Filebase64.Invoke(new()
        {
            Input = "user.keytab",
        }).Apply(invoke => invoke.Result),
        KerberosKrb5Conf = Std.File.Invoke(new()
        {
            Input = "krb5.conf",
        }).Apply(invoke => invoke.Result),
    });
});

Go

package main

import (
	"github.com/pulumi/pulumi-aws/sdk/v6/go/aws/datasync"
	"github.com/pulumi/pulumi-std/sdk/go/std"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		invokeFilebase64, err := std.Filebase64(ctx, &std.Filebase64Args{
			Input: "user.keytab",
		}, nil)
		if err != nil {
			return err
		}
		invokeFile1, err := std.File(ctx, &std.FileArgs{
			Input: "krb5.conf",
		}, nil)
		if err != nil {
			return err
		}
		_, err = datasync.NewLocationHdfs(ctx, "example", &datasync.LocationHdfsArgs{
			AgentArns: pulumi.StringArray{
				exampleAwsDatasyncAgent.Arn,
			},
			AuthenticationType: pulumi.String("KERBEROS"),
			NameNodes: datasync.LocationHdfsNameNodeArray{
				&datasync.LocationHdfsNameNodeArgs{
					Hostname: pulumi.Any(exampleAwsInstance.PrivateDns),
					Port:     pulumi.Int(80),
				},
			},
			KerberosPrincipal:    pulumi.String("user@example.com"),
			KerberosKeytabBase64: pulumi.String(invokeFilebase64.Result),
			KerberosKrb5Conf:     pulumi.String(invokeFile1.Result),
		})
		if err != nil {
			return err
		}
		return nil
	})
}

Java

package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.aws.datasync.LocationHdfs;
import com.pulumi.aws.datasync.LocationHdfsArgs;
import com.pulumi.aws.datasync.inputs.LocationHdfsNameNodeArgs;
import com.pulumi.std.StdFunctions;
import com.pulumi.std.inputs.Filebase64Args;
import com.pulumi.std.inputs.FileArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        var example = new LocationHdfs("example", LocationHdfsArgs.builder()
            .agentArns(exampleAwsDatasyncAgent.arn())
            .authenticationType("KERBEROS")
            .nameNodes(LocationHdfsNameNodeArgs.builder()
                .hostname(exampleAwsInstance.privateDns())
                .port(80)
                .build())
            .kerberosPrincipal("user@example.com")
            .kerberosKeytabBase64(StdFunctions.filebase64(Filebase64Args.builder()
                .input("user.keytab")
                .build()).result())
            .kerberosKrb5Conf(StdFunctions.file(FileArgs.builder()
                .input("krb5.conf")
                .build()).result())
            .build());
    }
}

YAML

resources:
  example:
    type: aws:datasync:LocationHdfs
    properties:
      agentArns:
        - ${exampleAwsDatasyncAgent.arn}
      authenticationType: KERBEROS
      nameNodes:
        - hostname: ${exampleAwsInstance.privateDns}
          port: 80
      kerberosPrincipal: user@example.com
      kerberosKeytabBase64:
        fn::invoke:
          function: std:filebase64
          arguments:
            input: user.keytab
          return: result
      kerberosKrb5Conf:
        fn::invoke:
          function: std:file
          arguments:
            input: krb5.conf
          return: result
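
Kotlin

As above, a hedged Kotlin sketch of the Kerberos variant. Instead of the std provider functions it reads the keytab and krb5.conf with the Java standard library; the agent ARN and hostname are placeholders, and the builder method names are assumptions based on how the Pulumi Kotlin SDK is generated.

import com.pulumi.aws.datasync.kotlin.locationHdfs
import com.pulumi.kotlin.Pulumi
import java.io.File
import java.util.Base64

fun main() {
    Pulumi.run { ctx ->
        // Base64-encode the binary keytab and read krb5.conf as text.
        val keytabBase64 = Base64.getEncoder().encodeToString(File("user.keytab").readBytes())
        val krb5Conf = File("krb5.conf").readText()

        val example = locationHdfs("example") {
            args {
                agentArns("arn:aws:datasync:us-east-1:123456789012:agent/agent-0b0addbeef00000000") // placeholder
                authenticationType("KERBEROS")
                nameNodes({
                    hostname("namenode.internal.example.com") // placeholder
                    port(80)
                })
                kerberosPrincipal("user@example.com")
                kerberosKeytabBase64(keytabBase64)
                kerberosKrb5Conf(krb5Conf)
            }
        }
    }
}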

Import

Using pulumi import, import aws_datasync_location_hdfs using the Amazon Resource Name (ARN). For example:

$ pulumi import aws:datasync/locationHdfs:LocationHdfs example arn:aws:datasync:us-east-1:123456789012:location/loc-12345678901234567

Properties

val agentArns: Output<List<String>>

A list of DataSync Agent ARNs with which this location will be associated.

val arn: Output<String>

Amazon Resource Name (ARN) of the DataSync Location.

val authenticationType: Output<String>?

The type of authentication used to determine the identity of the user. Valid values are SIMPLE and KERBEROS.

val blockSize: Output<Int>?

The size of data blocks to write into the HDFS cluster. The block size must be a multiple of 512 bytes. The default block size is 128 mebibytes (MiB).

val id: Output<String>
val kerberosKeytab: Output<String>?

The Kerberos key table (keytab) that contains mappings between the defined Kerberos principal and the encrypted keys. Use kerberos_keytab_base64 instead whenever the value is not a valid UTF-8 string. If KERBEROS is specified for authentication_type, this parameter (or kerberos_keytab_base64) is required.

val kerberosKeytabBase64: Output<String>?

Use instead of kerberos_keytab to pass base64-encoded binary data directly. If KERBEROS is specified for authentication_type, this parameter (or kerberos_keytab) is required.

val kerberosKrb5Conf: Output<String>?

The krb5.conf file that contains the Kerberos configuration information. Use kerberos_krb5_conf_base64 instead whenever the value is not a valid UTF-8 string. If KERBEROS is specified for authentication_type, this parameter (or kerberos_krb5_conf_base64) is required.

val kerberosKrb5ConfBase64: Output<String>?

Use instead of kerberos_krb5_conf to pass base64-encoded binary data directly. If KERBEROS is specified for authentication_type, this parameter (or kerberos_krb5_conf) is required.

val kerberosPrincipal: Output<String>?

The Kerberos principal with access to the files and folders on the HDFS cluster. If KERBEROS is specified for authentication_type, this parameter is required.

val kmsKeyProviderUri: Output<String>?

The URI of the HDFS cluster's Key Management Server (KMS).

val nameNodes: Output<List<LocationHdfsNameNode>>

The NameNode that manages the HDFS namespace. The NameNode performs operations such as opening, closing, and renaming files and directories. The NameNode contains the information to map blocks of data to the DataNodes. You can use only one NameNode. See configuration below.

val pulumiChildResources: Set<KotlinResource>
val qopConfiguration: Output<LocationHdfsQopConfiguration>?

The Quality of Protection (QOP) configuration specifies the Remote Procedure Call (RPC) and data transfer protection settings configured on the Hadoop Distributed File System (HDFS) cluster. If qop_configuration isn't specified, rpc_protection and data_transfer_protection default to PRIVACY. If you set one of rpc_protection or data_transfer_protection, the other parameter assumes the same value. See configuration below.

val replicationFactor: Output<Int>?

The number of DataNodes to replicate the data to when writing to the HDFS cluster. By default, data is replicated to three DataNodes.

val simpleUser: Output<String>?

The user name used to identify the client on the host operating system. If SIMPLE is specified for authentication_type, this parameter is required.

val subdirectory: Output<String>?

A subdirectory in the HDFS cluster. This subdirectory is used to read data from or write data to the HDFS cluster. If the subdirectory isn't specified, it will default to /.

val tags: Output<Map<String, String>>?

Key-value pairs of resource tags to assign to the DataSync Location. If configured with a provider default_tags configuration block present, tags with matching keys will overwrite those defined at the provider-level.

val tagsAll: Output<Map<String, String>>

A map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.

val uri: Output<String>
val urn: Output<String>
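
All of the values above are exposed as Pulumi outputs on the resource. As a small, hedged Kotlin sketch (resource construction elided, see the examples above), two of them could be exported from a program like this:

// Inside Pulumi.run { ctx -> ... }, assuming `example` is the LocationHdfs created earlier:
ctx.export("hdfsLocationArn", example.arn)
ctx.export("hdfsLocationUri", example.uri)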