1. Packages
  2. Spotinst
  3. API Docs
  4. spark
  5. Ocean
Spotinst v3.81.0 published on Monday, Jun 24, 2024 by Pulumi

spotinst.spark.Ocean

Explore with Pulumi AI

spotinst logo
Spotinst v3.81.0 published on Monday, Jun 24, 2024 by Pulumi

    Manages a Spotinst Ocean Spark resource on AWS or GCP.

    Prerequisites

    An existing Ocean cluster is required by this resource. See, for example, the spotinst.aws.Ocean resource for how to create one.

    Example Usage

    import * as pulumi from "@pulumi/pulumi";
    import * as spotinst from "@pulumi/spotinst";
    
    const example = new spotinst.spark.Ocean("example", {
        oceanClusterId: "ocean-cluster-id",
        ingress: {
            controller: {
                managed: true,
            },
            loadBalancer: {
                managed: true,
                targetGroupArn: "arn:aws:elasticloadbalancing:eu-north-1:XXXXXXXXXXXX:targetgroup/my-spark-cluster-nlb-tg/a38c2b83XXXXXXXX",
                serviceAnnotations: {
                    "service.beta.kubernetes.io/aws-load-balancer-security-groups": "sg-XXXXXXXXXXXXXXXXX",
                    "some-service-annotation-2": "some-service-annotation-value-2",
                },
            },
            customEndpoint: {
                enabled: false,
                address: "my-spark-cluster-nlb-8cbb8da7XXXXXXXX.elb.us-east-1.amazonaws.com",
            },
            privateLink: {
                enabled: false,
                vpcEndpointService: "com.amazonaws.vpce.eu-north-1.vpce-svc-XXXXXXXXXXXXXXXXX",
            },
        },
        compute: {
            createVngs: true,
            useTaints: true,
        },
        logCollection: {
            collectAppLogs: true,
        },
        webhook: {
            useHostNetwork: false,
            hostNetworkPorts: [25554],
        },
        spark: {
            additionalAppNamespaces: [
                "extra-spark-app-ns-1",
                "extra-spark-app-ns-2",
            ],
        },
    });
    
    import pulumi
    import pulumi_spotinst as spotinst
    
    example = spotinst.spark.Ocean("example",
        ocean_cluster_id="ocean-cluster-id",
        ingress=spotinst.spark.OceanIngressArgs(
            controller=spotinst.spark.OceanIngressControllerArgs(
                managed=True,
            ),
            load_balancer=spotinst.spark.OceanIngressLoadBalancerArgs(
                managed=True,
                target_group_arn="arn:aws:elasticloadbalancing:eu-north-1:XXXXXXXXXXXX:targetgroup/my-spark-cluster-nlb-tg/a38c2b83XXXXXXXX",
                service_annotations={
                    "service.beta.kubernetes.io/aws-load-balancer-security-groups": "sg-XXXXXXXXXXXXXXXXX",
                    "some-service-annotation-2": "some-service-annotation-value-2",
                },
            ),
            custom_endpoint=spotinst.spark.OceanIngressCustomEndpointArgs(
                enabled=False,
                address="my-spark-cluster-nlb-8cbb8da7XXXXXXXX.elb.us-east-1.amazonaws.com",
            ),
            private_link=spotinst.spark.OceanIngressPrivateLinkArgs(
                enabled=False,
                vpc_endpoint_service="com.amazonaws.vpce.eu-north-1.vpce-svc-XXXXXXXXXXXXXXXXX",
            ),
        ),
        compute=spotinst.spark.OceanComputeArgs(
            create_vngs=True,
            use_taints=True,
        ),
        log_collection=spotinst.spark.OceanLogCollectionArgs(
            collect_app_logs=True,
        ),
        webhook=spotinst.spark.OceanWebhookArgs(
            use_host_network=False,
            host_network_ports=[25554],
        ),
        spark=spotinst.spark.OceanSparkArgs(
            additional_app_namespaces=[
                "extra-spark-app-ns-1",
                "extra-spark-app-ns-2",
            ],
        ))
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-spotinst/sdk/v3/go/spotinst/spark"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		_, err := spark.NewOcean(ctx, "example", &spark.OceanArgs{
    			OceanClusterId: pulumi.String("ocean-cluster-id"),
    			Ingress: &spark.OceanIngressArgs{
    				Controller: &spark.OceanIngressControllerArgs{
    					Managed: pulumi.Bool(true),
    				},
    				LoadBalancer: &spark.OceanIngressLoadBalancerArgs{
    					Managed:        pulumi.Bool(true),
    					TargetGroupArn: pulumi.String("arn:aws:elasticloadbalancing:eu-north-1:XXXXXXXXXXXX:targetgroup/my-spark-cluster-nlb-tg/a38c2b83XXXXXXXX"),
    					ServiceAnnotations: pulumi.StringMap{
    						"service.beta.kubernetes.io/aws-load-balancer-security-groups": pulumi.String("sg-XXXXXXXXXXXXXXXXX"),
    						"some-service-annotation-2":                                    pulumi.String("some-service-annotation-value-2"),
    					},
    				},
    				CustomEndpoint: &spark.OceanIngressCustomEndpointArgs{
    					Enabled: pulumi.Bool(false),
    					Address: pulumi.String("my-spark-cluster-nlb-8cbb8da7XXXXXXXX.elb.us-east-1.amazonaws.com"),
    				},
    				PrivateLink: &spark.OceanIngressPrivateLinkArgs{
    					Enabled:            pulumi.Bool(false),
    					VpcEndpointService: pulumi.String("com.amazonaws.vpce.eu-north-1.vpce-svc-XXXXXXXXXXXXXXXXX"),
    				},
    			},
    			Compute: &spark.OceanComputeArgs{
    				CreateVngs: pulumi.Bool(true),
    				UseTaints:  pulumi.Bool(true),
    			},
    			LogCollection: &spark.OceanLogCollectionArgs{
    				CollectAppLogs: pulumi.Bool(true),
    			},
    			Webhook: &spark.OceanWebhookArgs{
    				UseHostNetwork: pulumi.Bool(false),
    				HostNetworkPorts: pulumi.IntArray{
    					pulumi.Int(25554),
    				},
    			},
    			Spark: &spark.OceanSparkArgs{
    				AdditionalAppNamespaces: pulumi.StringArray{
    					pulumi.String("extra-spark-app-ns-1"),
    					pulumi.String("extra-spark-app-ns-2"),
    				},
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using SpotInst = Pulumi.SpotInst;
    
    return await Deployment.RunAsync(() => 
    {
        var example = new SpotInst.Spark.Ocean("example", new()
        {
            OceanClusterId = "ocean-cluster-id",
            Ingress = new SpotInst.Spark.Inputs.OceanIngressArgs
            {
                Controller = new SpotInst.Spark.Inputs.OceanIngressControllerArgs
                {
                    Managed = true,
                },
                LoadBalancer = new SpotInst.Spark.Inputs.OceanIngressLoadBalancerArgs
                {
                    Managed = true,
                    TargetGroupArn = "arn:aws:elasticloadbalancing:eu-north-1:XXXXXXXXXXXX:targetgroup/my-spark-cluster-nlb-tg/a38c2b83XXXXXXXX",
                    ServiceAnnotations = 
                    {
                        { "service.beta.kubernetes.io/aws-load-balancer-security-groups", "sg-XXXXXXXXXXXXXXXXX" },
                        { "some-service-annotation-2", "some-service-annotation-value-2" },
                    },
                },
                CustomEndpoint = new SpotInst.Spark.Inputs.OceanIngressCustomEndpointArgs
                {
                    Enabled = false,
                    Address = "my-spark-cluster-nlb-8cbb8da7XXXXXXXX.elb.us-east-1.amazonaws.com",
                },
                PrivateLink = new SpotInst.Spark.Inputs.OceanIngressPrivateLinkArgs
                {
                    Enabled = false,
                    VpcEndpointService = "com.amazonaws.vpce.eu-north-1.vpce-svc-XXXXXXXXXXXXXXXXX",
                },
            },
            Compute = new SpotInst.Spark.Inputs.OceanComputeArgs
            {
                CreateVngs = true,
                UseTaints = true,
            },
            LogCollection = new SpotInst.Spark.Inputs.OceanLogCollectionArgs
            {
                CollectAppLogs = true,
            },
            Webhook = new SpotInst.Spark.Inputs.OceanWebhookArgs
            {
                UseHostNetwork = false,
                HostNetworkPorts = new[]
                {
                    25554,
                },
            },
            Spark = new SpotInst.Spark.Inputs.OceanSparkArgs
            {
                AdditionalAppNamespaces = new[]
                {
                    "extra-spark-app-ns-1",
                    "extra-spark-app-ns-2",
                },
            },
        });
    
    });
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.spotinst.spark.Ocean;
    import com.pulumi.spotinst.spark.OceanArgs;
    import com.pulumi.spotinst.spark.inputs.OceanIngressArgs;
    import com.pulumi.spotinst.spark.inputs.OceanIngressControllerArgs;
    import com.pulumi.spotinst.spark.inputs.OceanIngressLoadBalancerArgs;
    import com.pulumi.spotinst.spark.inputs.OceanIngressCustomEndpointArgs;
    import com.pulumi.spotinst.spark.inputs.OceanIngressPrivateLinkArgs;
    import com.pulumi.spotinst.spark.inputs.OceanComputeArgs;
    import com.pulumi.spotinst.spark.inputs.OceanLogCollectionArgs;
    import com.pulumi.spotinst.spark.inputs.OceanWebhookArgs;
    import com.pulumi.spotinst.spark.inputs.OceanSparkArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var example = new Ocean("example", OceanArgs.builder()
                .oceanClusterId("ocean-cluster-id")
                .ingress(OceanIngressArgs.builder()
                    .controller(OceanIngressControllerArgs.builder()
                        .managed(true)
                        .build())
                    .loadBalancer(OceanIngressLoadBalancerArgs.builder()
                        .managed(true)
                        .targetGroupArn("arn:aws:elasticloadbalancing:eu-north-1:XXXXXXXXXXXX:targetgroup/my-spark-cluster-nlb-tg/a38c2b83XXXXXXXX")
                        .serviceAnnotations(Map.ofEntries(
                            Map.entry("service.beta.kubernetes.io/aws-load-balancer-security-groups", "sg-XXXXXXXXXXXXXXXXX"),
                            Map.entry("some-service-annotation-2", "some-service-annotation-value-2")
                        ))
                        .build())
                    .customEndpoint(OceanIngressCustomEndpointArgs.builder()
                        .enabled(false)
                        .address("my-spark-cluster-nlb-8cbb8da7XXXXXXXX.elb.us-east-1.amazonaws.com")
                        .build())
                    .privateLink(OceanIngressPrivateLinkArgs.builder()
                        .enabled(false)
                        .vpcEndpointService("com.amazonaws.vpce.eu-north-1.vpce-svc-XXXXXXXXXXXXXXXXX")
                        .build())
                    .build())
                .compute(OceanComputeArgs.builder()
                    .createVngs(true)
                    .useTaints(true)
                    .build())
                .logCollection(OceanLogCollectionArgs.builder()
                    .collectAppLogs(true)
                    .build())
                .webhook(OceanWebhookArgs.builder()
                    .useHostNetwork(false)
                    .hostNetworkPorts(25554)
                    .build())
                .spark(OceanSparkArgs.builder()
                    .additionalAppNamespaces(                
                        "extra-spark-app-ns-1",
                        "extra-spark-app-ns-2")
                    .build())
                .build());
    
        }
    }
    
    resources:
      example:
        type: spotinst:spark:Ocean
        properties:
          oceanClusterId: ocean-cluster-id
          ingress:
            controller:
              managed: true
            loadBalancer:
              managed: true
              targetGroupArn: arn:aws:elasticloadbalancing:eu-north-1:XXXXXXXXXXXX:targetgroup/my-spark-cluster-nlb-tg/a38c2b83XXXXXXXX
              serviceAnnotations:
                service.beta.kubernetes.io/aws-load-balancer-security-groups: sg-XXXXXXXXXXXXXXXXX
                some-service-annotation-2: some-service-annotation-value-2
            customEndpoint:
              enabled: false
              address: my-spark-cluster-nlb-8cbb8da7XXXXXXXX.elb.us-east-1.amazonaws.com
            privateLink:
              enabled: false
              vpcEndpointService: com.amazonaws.vpce.eu-north-1.vpce-svc-XXXXXXXXXXXXXXXXX
          compute:
            createVngs: true
            useTaints: true
          logCollection:
            collectAppLogs: true
          webhook:
            useHostNetwork: false
            hostNetworkPorts:
              - 25554
          spark:
            additionalAppNamespaces:
              - extra-spark-app-ns-1
              - extra-spark-app-ns-2
    
    To export the ID of the Ocean Spark cluster, declare an output (shown here in Terraform HCL syntax):

    output "ocean_spark_id" {
      value = spotinst_ocean_spark.example.id
    }
    

    Create Ocean Resource

    Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.

    Constructor syntax

    new Ocean(name: string, args: OceanArgs, opts?: CustomResourceOptions);
    @overload
    def Ocean(resource_name: str,
              args: OceanArgs,
              opts: Optional[ResourceOptions] = None)
    
    @overload
    def Ocean(resource_name: str,
              opts: Optional[ResourceOptions] = None,
              ocean_cluster_id: Optional[str] = None,
              compute: Optional[OceanComputeArgs] = None,
              ingress: Optional[OceanIngressArgs] = None,
              log_collection: Optional[OceanLogCollectionArgs] = None,
              spark: Optional[OceanSparkArgs] = None,
              webhook: Optional[OceanWebhookArgs] = None)
    func NewOcean(ctx *Context, name string, args OceanArgs, opts ...ResourceOption) (*Ocean, error)
    public Ocean(string name, OceanArgs args, CustomResourceOptions? opts = null)
    public Ocean(String name, OceanArgs args)
    public Ocean(String name, OceanArgs args, CustomResourceOptions options)
    
    type: spotinst:spark:Ocean
    properties: # The arguments to resource properties.
    options: # Bag of options to control resource's behavior.
    
    

    Parameters

    name string
    The unique name of the resource.
    args OceanArgs
    The arguments to resource properties.
    opts CustomResourceOptions
    Bag of options to control resource's behavior.
    resource_name str
    The unique name of the resource.
    args OceanArgs
    The arguments to resource properties.
    opts ResourceOptions
    Bag of options to control resource's behavior.
    ctx Context
    Context object for the current deployment.
    name string
    The unique name of the resource.
    args OceanArgs
    The arguments to resource properties.
    opts ResourceOption
    Bag of options to control resource's behavior.
    name string
    The unique name of the resource.
    args OceanArgs
    The arguments to resource properties.
    opts CustomResourceOptions
    Bag of options to control resource's behavior.
    name String
    The unique name of the resource.
    args OceanArgs
    The arguments to resource properties.
    options CustomResourceOptions
    Bag of options to control resource's behavior.

    Constructor example

    The following reference example uses placeholder values for all input properties.

    var exampleoceanResourceResourceFromSparkocean = new SpotInst.Spark.Ocean("exampleoceanResourceResourceFromSparkocean", new()
    {
        OceanClusterId = "string",
        Compute = new SpotInst.Spark.Inputs.OceanComputeArgs
        {
            CreateVngs = false,
            UseTaints = false,
        },
        Ingress = new SpotInst.Spark.Inputs.OceanIngressArgs
        {
            Controller = new SpotInst.Spark.Inputs.OceanIngressControllerArgs
            {
                Managed = false,
            },
            CustomEndpoint = new SpotInst.Spark.Inputs.OceanIngressCustomEndpointArgs
            {
                Address = "string",
                Enabled = false,
            },
            LoadBalancer = new SpotInst.Spark.Inputs.OceanIngressLoadBalancerArgs
            {
                Managed = false,
                ServiceAnnotations = 
                {
                    { "string", "string" },
                },
                TargetGroupArn = "string",
            },
            PrivateLink = new SpotInst.Spark.Inputs.OceanIngressPrivateLinkArgs
            {
                Enabled = false,
                VpcEndpointService = "string",
            },
            ServiceAnnotations = 
            {
                { "string", "string" },
            },
        },
        LogCollection = new SpotInst.Spark.Inputs.OceanLogCollectionArgs
        {
            CollectAppLogs = false,
        },
        Spark = new SpotInst.Spark.Inputs.OceanSparkArgs
        {
            AdditionalAppNamespaces = new[]
            {
                "string",
            },
        },
        Webhook = new SpotInst.Spark.Inputs.OceanWebhookArgs
        {
            HostNetworkPorts = new[]
            {
                0,
            },
            UseHostNetwork = false,
        },
    });
    
    example, err := spark.NewOcean(ctx, "exampleoceanResourceResourceFromSparkocean", &spark.OceanArgs{
    	OceanClusterId: pulumi.String("string"),
    	Compute: &spark.OceanComputeArgs{
    		CreateVngs: pulumi.Bool(false),
    		UseTaints:  pulumi.Bool(false),
    	},
    	Ingress: &spark.OceanIngressArgs{
    		Controller: &spark.OceanIngressControllerArgs{
    			Managed: pulumi.Bool(false),
    		},
    		CustomEndpoint: &spark.OceanIngressCustomEndpointArgs{
    			Address: pulumi.String("string"),
    			Enabled: pulumi.Bool(false),
    		},
    		LoadBalancer: &spark.OceanIngressLoadBalancerArgs{
    			Managed: pulumi.Bool(false),
    			ServiceAnnotations: pulumi.StringMap{
    				"string": pulumi.String("string"),
    			},
    			TargetGroupArn: pulumi.String("string"),
    		},
    		PrivateLink: &spark.OceanIngressPrivateLinkArgs{
    			Enabled:            pulumi.Bool(false),
    			VpcEndpointService: pulumi.String("string"),
    		},
    		ServiceAnnotations: pulumi.StringMap{
    			"string": pulumi.String("string"),
    		},
    	},
    	LogCollection: &spark.OceanLogCollectionArgs{
    		CollectAppLogs: pulumi.Bool(false),
    	},
    	Spark: &spark.OceanSparkArgs{
    		AdditionalAppNamespaces: pulumi.StringArray{
    			pulumi.String("string"),
    		},
    	},
    	Webhook: &spark.OceanWebhookArgs{
    		HostNetworkPorts: pulumi.IntArray{
    			pulumi.Int(0),
    		},
    		UseHostNetwork: pulumi.Bool(false),
    	},
    })
    
    var exampleoceanResourceResourceFromSparkocean = new Ocean("exampleoceanResourceResourceFromSparkocean", OceanArgs.builder()
        .oceanClusterId("string")
        .compute(OceanComputeArgs.builder()
            .createVngs(false)
            .useTaints(false)
            .build())
        .ingress(OceanIngressArgs.builder()
            .controller(OceanIngressControllerArgs.builder()
                .managed(false)
                .build())
            .customEndpoint(OceanIngressCustomEndpointArgs.builder()
                .address("string")
                .enabled(false)
                .build())
            .loadBalancer(OceanIngressLoadBalancerArgs.builder()
                .managed(false)
                .serviceAnnotations(Map.of("string", "string"))
                .targetGroupArn("string")
                .build())
            .privateLink(OceanIngressPrivateLinkArgs.builder()
                .enabled(false)
                .vpcEndpointService("string")
                .build())
            .serviceAnnotations(Map.of("string", "string"))
            .build())
        .logCollection(OceanLogCollectionArgs.builder()
            .collectAppLogs(false)
            .build())
        .spark(OceanSparkArgs.builder()
            .additionalAppNamespaces("string")
            .build())
        .webhook(OceanWebhookArgs.builder()
            .hostNetworkPorts(0)
            .useHostNetwork(false)
            .build())
        .build());
    
    exampleocean_resource_resource_from_sparkocean = spotinst.spark.Ocean("exampleoceanResourceResourceFromSparkocean",
        ocean_cluster_id="string",
        compute=spotinst.spark.OceanComputeArgs(
            create_vngs=False,
            use_taints=False,
        ),
        ingress=spotinst.spark.OceanIngressArgs(
            controller=spotinst.spark.OceanIngressControllerArgs(
                managed=False,
            ),
            custom_endpoint=spotinst.spark.OceanIngressCustomEndpointArgs(
                address="string",
                enabled=False,
            ),
            load_balancer=spotinst.spark.OceanIngressLoadBalancerArgs(
                managed=False,
                service_annotations={
                    "string": "string",
                },
                target_group_arn="string",
            ),
            private_link=spotinst.spark.OceanIngressPrivateLinkArgs(
                enabled=False,
                vpc_endpoint_service="string",
            ),
            service_annotations={
                "string": "string",
            },
        ),
        log_collection=spotinst.spark.OceanLogCollectionArgs(
            collect_app_logs=False,
        ),
        spark=spotinst.spark.OceanSparkArgs(
            additional_app_namespaces=["string"],
        ),
        webhook=spotinst.spark.OceanWebhookArgs(
            host_network_ports=[0],
            use_host_network=False,
        ))
    
    const exampleoceanResourceResourceFromSparkocean = new spotinst.spark.Ocean("exampleoceanResourceResourceFromSparkocean", {
        oceanClusterId: "string",
        compute: {
            createVngs: false,
            useTaints: false,
        },
        ingress: {
            controller: {
                managed: false,
            },
            customEndpoint: {
                address: "string",
                enabled: false,
            },
            loadBalancer: {
                managed: false,
                serviceAnnotations: {
                    string: "string",
                },
                targetGroupArn: "string",
            },
            privateLink: {
                enabled: false,
                vpcEndpointService: "string",
            },
            serviceAnnotations: {
                string: "string",
            },
        },
        logCollection: {
            collectAppLogs: false,
        },
        spark: {
            additionalAppNamespaces: ["string"],
        },
        webhook: {
            hostNetworkPorts: [0],
            useHostNetwork: false,
        },
    });
    
    type: spotinst:spark:Ocean
    properties:
        compute:
            createVngs: false
            useTaints: false
        ingress:
            controller:
                managed: false
            customEndpoint:
                address: string
                enabled: false
            loadBalancer:
                managed: false
                serviceAnnotations:
                    string: string
                targetGroupArn: string
            privateLink:
                enabled: false
                vpcEndpointService: string
            serviceAnnotations:
                string: string
        logCollection:
            collectAppLogs: false
        oceanClusterId: string
        spark:
            additionalAppNamespaces:
                - string
        webhook:
            hostNetworkPorts:
                - 0
            useHostNetwork: false
    

    Ocean Resource Properties

    To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.

    Inputs

    The Ocean resource accepts the following input properties:

    oceanClusterId String
    • The ID of the Ocean cluster that Ocean for Apache Spark should be installed on.
    compute OceanCompute
    ingress OceanIngress
    logCollection OceanLogCollection
    spark OceanSpark
    webhook OceanWebhook
    oceanClusterId string
    • The ID of the Ocean cluster that Ocean for Apache Spark should be installed on.
    compute OceanCompute
    ingress OceanIngress
    logCollection OceanLogCollection
    spark OceanSpark
    webhook OceanWebhook
    oceanClusterId String
    • The ID of the Ocean cluster that Ocean for Apache Spark should be installed on.
    compute Property Map
    ingress Property Map
    logCollection Property Map
    spark Property Map
    webhook Property Map

    Outputs

    All input properties are implicitly available as output properties. Additionally, the Ocean resource produces the following output properties:

    Id string
    The provider-assigned unique ID for this managed resource.
    Id string
    The provider-assigned unique ID for this managed resource.
    id String
    The provider-assigned unique ID for this managed resource.
    id string
    The provider-assigned unique ID for this managed resource.
    id str
    The provider-assigned unique ID for this managed resource.
    id String
    The provider-assigned unique ID for this managed resource.

    Look up Existing Ocean Resource

    Get an existing Ocean resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.

    public static get(name: string, id: Input<ID>, state?: OceanState, opts?: CustomResourceOptions): Ocean
    @staticmethod
    def get(resource_name: str,
            id: str,
            opts: Optional[ResourceOptions] = None,
            compute: Optional[OceanComputeArgs] = None,
            ingress: Optional[OceanIngressArgs] = None,
            log_collection: Optional[OceanLogCollectionArgs] = None,
            ocean_cluster_id: Optional[str] = None,
            spark: Optional[OceanSparkArgs] = None,
            webhook: Optional[OceanWebhookArgs] = None) -> Ocean
    func GetOcean(ctx *Context, name string, id IDInput, state *OceanState, opts ...ResourceOption) (*Ocean, error)
    public static Ocean Get(string name, Input<string> id, OceanState? state, CustomResourceOptions? opts = null)
    public static Ocean get(String name, Output<String> id, OceanState state, CustomResourceOptions options)
    Resource lookup is not supported in YAML
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    resource_name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    The following state arguments are supported:
    compute OceanCompute
    ingress OceanIngress
    logCollection OceanLogCollection
    oceanClusterId String
    • The ID of the Ocean cluster that Ocean for Apache Spark should be installed on.
    spark OceanSpark
    webhook OceanWebhook
    compute OceanCompute
    ingress OceanIngress
    logCollection OceanLogCollection
    oceanClusterId string
    • The ID of the Ocean cluster that Ocean for Apache Spark should be installed on.
    spark OceanSpark
    webhook OceanWebhook
    compute Property Map
    ingress Property Map
    logCollection Property Map
    oceanClusterId String
    • The ID of the Ocean cluster that Ocean for Apache Spark should be installed on.
    spark Property Map
    webhook Property Map

    Supporting Types

    OceanCompute, OceanComputeArgs

    CreateVngs bool
    • Enable/disable the creation of Ocean Spark VNGs during cluster creation.
    UseTaints bool
    • Enable/disable Ocean Spark taints on the Ocean Spark VNGs. By default, Ocean Spark uses taints to prevent non-Spark workloads from running on Ocean Spark VNGs.
    CreateVngs bool
    • Enable/disable the creation of Ocean Spark VNGs during cluster creation.
    UseTaints bool
    • Enable/disable Ocean Spark taints on the Ocean Spark VNGs. By default, Ocean Spark uses taints to prevent non-Spark workloads from running on Ocean Spark VNGs.
    createVngs Boolean
    • Enable/disable the creation of Ocean Spark VNGs during cluster creation.
    useTaints Boolean
    • Enable/disable Ocean Spark taints on the Ocean Spark VNGs. By default, Ocean Spark uses taints to prevent non-Spark workloads from running on Ocean Spark VNGs.
    createVngs boolean
    • Enable/disable the creation of Ocean Spark VNGs during cluster creation.
    useTaints boolean
    • Enable/disable Ocean Spark taints on the Ocean Spark VNGs. By default, Ocean Spark uses taints to prevent non-Spark workloads from running on Ocean Spark VNGs.
    create_vngs bool
    • Enable/disable the creation of Ocean Spark VNGs during cluster creation.
    use_taints bool
    • Enable/disable Ocean Spark taints on the Ocean Spark VNGs. By default, Ocean Spark uses taints to prevent non-Spark workloads from running on Ocean Spark VNGs.
    createVngs Boolean
    • Enable/disable the creation of Ocean Spark VNGs during cluster creation.
    useTaints Boolean
    • Enable/disable Ocean Spark taints on the Ocean Spark VNGs. By default, Ocean Spark uses taints to prevent non-Spark workloads from running on Ocean Spark VNGs.

    OceanIngress, OceanIngressArgs

    OceanIngressController, OceanIngressControllerArgs

    Managed bool
    • Should an ingress controller managed by Ocean for Apache Spark be installed on the cluster.
    Managed bool
    • Should an ingress controller managed by Ocean for Apache Spark be installed on the cluster.
    managed Boolean
    • Should an ingress controller managed by Ocean for Apache Spark be installed on the cluster.
    managed boolean
    • Should an ingress controller managed by Ocean for Apache Spark be installed on the cluster.
    managed bool
    • Should an ingress controller managed by Ocean for Apache Spark be installed on the cluster.
    managed Boolean
    • Should an ingress controller managed by Ocean for Apache Spark be installed on the cluster.

    OceanIngressCustomEndpoint, OceanIngressCustomEndpointArgs

    Address string
    • The address the Ocean for Apache Spark control plane will use when addressing the cluster.
    Enabled bool
    • Should the Ocean for Apache Spark control plane address the cluster using a custom endpoint. Use this to specify the DNS address of an externally provisioned (unmanaged) load balancer.
    Address string
    • The address the Ocean for Apache Spark control plane will use when addressing the cluster.
    Enabled bool
    • Should the Ocean for Apache Spark control plane address the cluster using a custom endpoint. Use this to specify the DNS address of an externally provisioned (unmanaged) load balancer.
    address String
    • The address the Ocean for Apache Spark control plane will use when addressing the cluster.
    enabled Boolean
    • Should the Ocean for Apache Spark control plane address the cluster using a custom endpoint. Use this to specify the DNS address of an externally provisioned (unmanaged) load balancer.
    address string
    • The address the Ocean for Apache Spark control plane will use when addressing the cluster.
    enabled boolean
    • Should the Ocean for Apache Spark control plane address the cluster using a custom endpoint. Use this to specify the DNS address of an externally provisioned (unmanaged) load balancer.
    address str
    • The address the Ocean for Apache Spark control plane will use when addressing the cluster.
    enabled bool
    • Should the Ocean for Apache Spark control plane address the cluster using a custom endpoint. Use this to specify the DNS address of an externally provisioned (unmanaged) load balancer.
    address String
    • The address the Ocean for Apache Spark control plane will use when addressing the cluster.
    enabled Boolean
    • Should the Ocean for Apache Spark control plane address the cluster using a custom endpoint. Use this to specify the DNS address of an externally provisioned (unmanaged) load balancer.

    OceanIngressLoadBalancer, OceanIngressLoadBalancerArgs

    Managed bool
    • Should a load balancer managed by Ocean for Apache Spark be provisioned for the cluster. Set this to false if you want to use an existing load balancer (only available on AWS).
    ServiceAnnotations Dictionary<string, string>
    • Annotations to add to the ingress controller load balancer service. This is useful to configure properties of the managed load balancer, like the nature of the load balancer (e.g. ELB, NLB, ALB on AWS), the security groups, or various timeouts.
    TargetGroupArn string
    • The ARN of a target group that the Ocean for Apache Spark ingress controller will be bound to. Set this to use an existing load balancer with Ocean for Apache Spark. Has no effect if using a managed load balancer. Only available on AWS.
    Managed bool
    • Should a load balancer managed by Ocean for Apache Spark be provisioned for the cluster. Set this to false if you want to use an existing load balancer (only available on AWS).
    ServiceAnnotations map[string]string
    • Annotations to add to the ingress controller load balancer service. This is useful to configure properties of the managed load balancer, like the nature of the load balancer (e.g. ELB, NLB, ALB on AWS), the security groups, or various timeouts.
    TargetGroupArn string
    • The ARN of a target group that the Ocean for Apache Spark ingress controller will be bound to. Set this to use an existing load balancer with Ocean for Apache Spark. Has no effect if using a managed load balancer. Only available on AWS.
    managed Boolean
    • Should a load balancer managed by Ocean for Apache Spark be provisioned for the cluster. Set this to false if you want to use an existing load balancer (only available on AWS).
    serviceAnnotations Map<String,String>
    • Annotations to add to the ingress controller load balancer service. This is useful to configure properties of the managed load balancer, like the nature of the load balancer (e.g. ELB, NLB, ALB on AWS), the security groups, or various timeouts.
    targetGroupArn String
    • The ARN of a target group that the Ocean for Apache Spark ingress controller will be bound to. Set this to use an existing load balancer with Ocean for Apache Spark. Has no effect if using a managed load balancer. Only available on AWS.
    managed boolean
    • Should a load balancer managed by Ocean for Apache Spark be provisioned for the cluster. Set this to false if you want to use an existing load balancer (only available on AWS).
    serviceAnnotations {[key: string]: string}
    • Annotations to add to the ingress controller load balancer service. This is useful to configure properties of the managed load balancer, like the nature of the load balancer (e.g. ELB, NLB, ALB on AWS), the security groups, or various timeouts.
    targetGroupArn string
    • The ARN of a target group that the Ocean for Apache Spark ingress controller will be bound to. Set this to use an existing load balancer with Ocean for Apache Spark. Has no effect if using a managed load balancer. Only available on AWS.
    managed bool
    • Should a load balancer managed by Ocean for Apache Spark be provisioned for the cluster. Set this to false if you want to use an existing load balancer (only available on AWS).
    service_annotations Mapping[str, str]
    • Annotations to add to the ingress controller load balancer service. This is useful to configure properties of the managed load balancer, like the nature of the load balancer (e.g. ELB, NLB, ALB on AWS), the security groups, or various timeouts.
    target_group_arn str
    • The ARN of a target group that the Ocean for Apache Spark ingress controller will be bound to. Set this to use an existing load balancer with Ocean for Apache Spark. Has no effect if using a managed load balancer. Only available on AWS.
    managed Boolean
    • Should a load balancer managed by Ocean for Apache Spark be provisioned for the cluster. Set this to false if you want to use an existing load balancer (only available on AWS).
    serviceAnnotations Map<String>
    • Annotations to add to the ingress controller load balancer service. This is useful to configure properties of the managed load balancer, like the nature of the load balancer (e.g. ELB, NLB, ALB on AWS), the security groups, or various timeouts.
    targetGroupArn String
    • The ARN of a target group that the Ocean for Apache Spark ingress controller will be bound to. Set this to use an existing load balancer with Ocean for Apache Spark. Has no effect if using a managed load balancer. Only available on AWS.

    OceanIngressPrivateLink, OceanIngressPrivateLinkArgs

    Enabled bool
    • Should the Ocean for Apache Spark control plane address the cluster via an AWS Private Link. Only available on AWS.
    VpcEndpointService string
    • The name of the VPC Endpoint Service the Ocean for Apache Spark control plane should bind to.
    Enabled bool
    • Should the Ocean for Apache Spark control plane address the cluster via an AWS Private Link. Only available on AWS.
    VpcEndpointService string
    • The name of the VPC Endpoint Service the Ocean for Apache Spark control plane should bind to.
    enabled Boolean
    • Should the Ocean for Apache Spark control plane address the cluster via an AWS Private Link. Only available on AWS.
    vpcEndpointService String
    • The name of the VPC Endpoint Service the Ocean for Apache Spark control plane should bind to.
    enabled boolean
    • Should the Ocean for Apache Spark control plane address the cluster via an AWS Private Link. Only available on AWS.
    vpcEndpointService string
    • The name of the VPC Endpoint Service the Ocean for Apache Spark control plane should bind to.
    enabled bool
    • Should the Ocean for Apache Spark control plane address the cluster via an AWS Private Link. Only available on AWS.
    vpc_endpoint_service str
    • The name of the VPC Endpoint Service the Ocean for Apache Spark control plane should bind to.
    enabled Boolean
    • Should the Ocean for Apache Spark control plane address the cluster via an AWS Private Link. Only available on AWS.
    vpcEndpointService String
    • The name of the VPC Endpoint Service the Ocean for Apache Spark control plane should bind to.

    OceanLogCollection, OceanLogCollectionArgs

    CollectAppLogs bool
    • Enable/Disable collecting driver and executor logs. When enabled, logs are stored by NetApp and can be downloaded from the Spot console web interface. The logs are deleted after 30 days.
    CollectAppLogs bool
    • Enable/Disable collecting driver and executor logs. When enabled, logs are stored by NetApp and can be downloaded from the Spot console web interface. The logs are deleted after 30 days.
    collectAppLogs Boolean
    • Enable/Disable collecting driver and executor logs. When enabled, logs are stored by NetApp and can be downloaded from the Spot console web interface. The logs are deleted after 30 days.
    collectAppLogs boolean
    • Enable/Disable collecting driver and executor logs. When enabled, logs are stored by NetApp and can be downloaded from the Spot console web interface. The logs are deleted after 30 days.
    collect_app_logs bool
    • Enable/Disable collecting driver and executor logs. When enabled, logs are stored by NetApp and can be downloaded from the Spot console web interface. The logs are deleted after 30 days.
    collectAppLogs Boolean
    • Enable/Disable collecting driver and executor logs. When enabled, logs are stored by NetApp and can be downloaded from the Spot console web interface. The logs are deleted after 30 days.

    OceanSpark, OceanSparkArgs

    AdditionalAppNamespaces List<string>
    • List of Kubernetes namespaces that should be configured to run Spark applications, in addition to the default Spark application namespace spark-apps.
    AdditionalAppNamespaces []string
    • List of Kubernetes namespaces that should be configured to run Spark applications, in addition to the default Spark application namespace spark-apps.
    additionalAppNamespaces List<String>
    • List of Kubernetes namespaces that should be configured to run Spark applications, in addition to the default Spark application namespace spark-apps.
    additionalAppNamespaces string[]
    • List of Kubernetes namespaces that should be configured to run Spark applications, in addition to the default Spark application namespace spark-apps.
    additional_app_namespaces Sequence[str]
    • List of Kubernetes namespaces that should be configured to run Spark applications, in addition to the default Spark application namespace spark-apps.
    additionalAppNamespaces List<String>
    • List of Kubernetes namespaces that should be configured to run Spark applications, in addition to the default Spark application namespace spark-apps.

    OceanWebhook, OceanWebhookArgs

    HostNetworkPorts List<int>
    • List of ports allowed to be used on the host network. If empty, the default is 25554.
    UseHostNetwork bool
    • Enable/disable host networking for the Spark Operator. Host networking can be useful when using custom CNI plugins like Calico on EKS.
    HostNetworkPorts []int
    • List of ports allowed to be used on the host network. If empty, the default is 25554.
    UseHostNetwork bool
    • Enable/disable host networking for the Spark Operator. Host networking can be useful when using custom CNI plugins like Calico on EKS.
    hostNetworkPorts List<Integer>
    • List of ports allowed to be used on the host network. If empty, the default is 25554.
    useHostNetwork Boolean
    • Enable/disable host networking for the Spark Operator. Host networking can be useful when using custom CNI plugins like Calico on EKS.
    hostNetworkPorts number[]
    • List of ports allowed to be used on the host network. If empty, the default is 25554.
    useHostNetwork boolean
    • Enable/disable host networking for the Spark Operator. Host networking can be useful when using custom CNI plugins like Calico on EKS.
    host_network_ports Sequence[int]
    • List of ports allowed to be used on the host network. If empty, the default is 25554.
    use_host_network bool
    • Enable/disable host networking for the Spark Operator. Host networking can be useful when using custom CNI plugins like Calico on EKS.
    hostNetworkPorts List<Number>
    • List of ports allowed to be used on the host network. If empty, the default is 25554.
    useHostNetwork Boolean
    • Enable/disable host networking for the Spark Operator. Host networking can be useful when using custom CNI plugins like Calico on EKS.

    Package Details

    Repository
    Spotinst pulumi/pulumi-spotinst
    License
    Apache-2.0
    Notes
    This Pulumi package is based on the spotinst Terraform Provider.
    spotinst logo
    Spotinst v3.81.0 published on Monday, Jun 24, 2024 by Pulumi