CacheNfsTarget

class CacheNfsTarget : KotlinCustomResource

Manages an NFS Target within an HPC Cache.

NOTE: At the request of the service team, the provider no longer automatically registers the Microsoft.StorageCache Resource Provider for this resource. To register it you can run az provider register --namespace 'Microsoft.StorageCache'.
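Registration is a one-time, subscription-level operation and can take a few minutes to complete. As an illustration (standard Azure CLI commands, run against your own subscription), you can register the provider and then check its state:

$ az provider register --namespace Microsoft.StorageCache
$ az provider show --namespace Microsoft.StorageCache --query registrationState --output tsv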

Example Usage

package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.azure.core.ResourceGroup;
import com.pulumi.azure.core.ResourceGroupArgs;
import com.pulumi.azure.network.VirtualNetwork;
import com.pulumi.azure.network.VirtualNetworkArgs;
import com.pulumi.azure.network.Subnet;
import com.pulumi.azure.network.SubnetArgs;
import com.pulumi.azure.hpc.Cache;
import com.pulumi.azure.hpc.CacheArgs;
import com.pulumi.azure.network.NetworkInterface;
import com.pulumi.azure.network.NetworkInterfaceArgs;
import com.pulumi.azure.network.inputs.NetworkInterfaceIpConfigurationArgs;
import com.pulumi.azure.compute.LinuxVirtualMachine;
import com.pulumi.azure.compute.LinuxVirtualMachineArgs;
import com.pulumi.azure.compute.inputs.LinuxVirtualMachineAdminSshKeyArgs;
import com.pulumi.azure.compute.inputs.LinuxVirtualMachineOsDiskArgs;
import com.pulumi.azure.compute.inputs.LinuxVirtualMachineSourceImageReferenceArgs;
import com.pulumi.azure.hpc.CacheNfsTarget;
import com.pulumi.azure.hpc.CacheNfsTargetArgs;
import com.pulumi.azure.hpc.inputs.CacheNfsTargetNamespaceJunctionArgs;
import com.pulumi.std.StdFunctions;
import com.pulumi.std.inputs.FileArgs;
import com.pulumi.std.inputs.Base64encodeArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        var example = new ResourceGroup("example", ResourceGroupArgs.builder()
            .name("example-resources")
            .location("West Europe")
            .build());

        var exampleVirtualNetwork = new VirtualNetwork("exampleVirtualNetwork", VirtualNetworkArgs.builder()
            .name("examplevn")
            .addressSpaces("10.0.0.0/16")
            .location(example.location())
            .resourceGroupName(example.name())
            .build());

        var exampleHpc = new Subnet("exampleHpc", SubnetArgs.builder()
            .name("examplesubnethpc")
            .resourceGroupName(example.name())
            .virtualNetworkName(exampleVirtualNetwork.name())
            .addressPrefixes("10.0.1.0/24")
            .build());

        var exampleCache = new Cache("exampleCache", CacheArgs.builder()
            .name("examplehpccache")
            .resourceGroupName(example.name())
            .location(example.location())
            .cacheSizeInGb(3072)
            .subnetId(exampleHpc.id())
            .skuName("Standard_2G")
            .build());

        var exampleVm = new Subnet("exampleVm", SubnetArgs.builder()
            .name("examplesubnetvm")
            .resourceGroupName(example.name())
            .virtualNetworkName(exampleVirtualNetwork.name())
            .addressPrefixes("10.0.2.0/24")
            .build());

        var exampleNetworkInterface = new NetworkInterface("exampleNetworkInterface", NetworkInterfaceArgs.builder()
            .name("examplenic")
            .location(example.location())
            .resourceGroupName(example.name())
            .ipConfigurations(NetworkInterfaceIpConfigurationArgs.builder()
                .name("internal")
                .subnetId(exampleVm.id())
                .privateIpAddressAllocation("Dynamic")
                .build())
            .build());

        final var customData = """
            #!/bin/bash
            sudo -i
            apt-get install -y nfs-kernel-server
            mkdir -p /export/a/1
            mkdir -p /export/a/2
            mkdir -p /export/b
            cat << EOF > /etc/exports
            /export/a *(rw,fsid=0,insecure,no_subtree_check,async)
            /export/b *(rw,fsid=0,insecure,no_subtree_check,async)
            EOF
            systemctl start nfs-server
            exportfs -arv
            """;

        var exampleLinuxVirtualMachine = new LinuxVirtualMachine("exampleLinuxVirtualMachine", LinuxVirtualMachineArgs.builder()
            .name("examplevm")
            .resourceGroupName(example.name())
            .location(example.location())
            .size("Standard_F2")
            .adminUsername("adminuser")
            .networkInterfaceIds(exampleNetworkInterface.id())
            .adminSshKeys(LinuxVirtualMachineAdminSshKeyArgs.builder()
                .username("adminuser")
                .publicKey(StdFunctions.file(FileArgs.builder()
                    .input("~/.ssh/id_rsa.pub")
                    .build()).result())
                .build())
            .osDisk(LinuxVirtualMachineOsDiskArgs.builder()
                .caching("ReadWrite")
                .storageAccountType("Standard_LRS")
                .build())
            .sourceImageReference(LinuxVirtualMachineSourceImageReferenceArgs.builder()
                .publisher("Canonical")
                .offer("0001-com-ubuntu-server-jammy")
                .sku("22_04-lts")
                .version("latest")
                .build())
            .customData(StdFunctions.base64encode(Base64encodeArgs.builder()
                .input(customData)
                .build()).result())
            .build());

        var exampleCacheNfsTarget = new CacheNfsTarget("exampleCacheNfsTarget", CacheNfsTargetArgs.builder()
            .name("examplehpcnfstarget")
            .resourceGroupName(example.name())
            .cacheName(exampleCache.name())
            .targetHostName(exampleLinuxVirtualMachine.privateIpAddress())
            .usageModel("READ_HEAVY_INFREQ")
            .namespaceJunctions(
                CacheNfsTargetNamespaceJunctionArgs.builder()
                    .namespacePath("/nfs/a1")
                    .nfsExport("/export/a")
                    .targetPath("1")
                    .build(),
                CacheNfsTargetNamespaceJunctionArgs.builder()
                    .namespacePath("/nfs/b")
                    .nfsExport("/export/b")
                    .build())
            .build());
    }
}
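
Once the target is created, clients mount the cache's namespace through the cache's own mount addresses rather than the NFS server directly. As a hedged follow-up to the program above (it assumes the generated mountAddresses() getter on the Cache resource; the export names are illustrative), the mount information could be surfaced at the end of the stack method:

        // Illustrative only: clients would mount <address>:/nfs/a1 rather than the VM's export.
        ctx.export("cacheMountAddresses", exampleCache.mountAddresses());
        ctx.export("namespacePathA", Output.of("/nfs/a1"));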

Import

An NFS Target within an HPC Cache can be imported using the resource id, e.g.

$ pulumi import azure:hpc/cacheNfsTarget:CacheNfsTarget example /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/group1/providers/Microsoft.StorageCache/caches/cache1/storageTargets/target1

Properties

val cacheName: Output<String>

The name of the HPC Cache to which the HPC Cache NFS Target will be added. Changing this forces a new resource to be created.

val id: Output<String>

val name: Output<String>

The name of the HPC Cache NFS Target. Changing this forces a new resource to be created.

val namespaceJunctions: Output<List<CacheNfsTargetNamespaceJunction>>

Can be specified multiple times to define multiple namespace_junction blocks. Each namespace_junction block supports the fields documented below.

val pulumiChildResources: Set<KotlinResource>

val resourceGroupName: Output<String>

The name of the Resource Group in which to create the HPC Cache NFS Target. Changing this forces a new resource to be created.

val targetHostName: Output<String>

The IP address or fully qualified domain name (FQDN) of the HPC Cache NFS target. Changing this forces a new resource to be created.

val urn: Output<String>

val usageModel: Output<String>

The type of usage of the HPC Cache NFS Target. Possible values are: READ_HEAVY_INFREQ, READ_HEAVY_CHECK_180, READ_ONLY, READ_WRITE, WRITE_WORKLOAD_15, WRITE_AROUND, WRITE_WORKLOAD_CHECK_30, WRITE_WORKLOAD_CHECK_60 and WRITE_WORKLOAD_CLOUDWS.

val verificationTimerInSeconds: Output<Int>?

The amount of time the cache waits before it checks the back-end storage for file updates. Possible values are between 1 and 31536000.

val writeBackTimerInSeconds: Output<Int>?

The amount of time the cache waits after the last file change before it copies the changed file to back-end storage. Possible values are between 1 and 31536000.
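
For write-oriented workloads, usageModel is typically combined with the two timer properties above. The sketch below is illustrative only: it reuses the resources from the Example Usage program and assumes the generated verificationTimerInSeconds and writeBackTimerInSeconds setters on CacheNfsTargetArgs; the resource name, paths and timer values are placeholders.

var writeHeavyTarget = new CacheNfsTarget("writeHeavyTarget", CacheNfsTargetArgs.builder()
    .name("examplewritetarget")
    .resourceGroupName(example.name())
    .cacheName(exampleCache.name())
    .targetHostName(exampleLinuxVirtualMachine.privateIpAddress())
    .usageModel("WRITE_WORKLOAD_CHECK_30")
    // re-check the back-end storage for updates every hour (allowed range 1-31536000 seconds)
    .verificationTimerInSeconds(3600)
    // copy a changed file back to the NFS server 10 minutes after its last change
    .writeBackTimerInSeconds(600)
    .namespaceJunctions(CacheNfsTargetNamespaceJunctionArgs.builder()
        .namespacePath("/nfs/write")
        .nfsExport("/export/b")
        .build())
    .build());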