confluentcloud.KafkaCluster

Confluent v1.48.0 published on Friday, Jun 21, 2024 by Pulumi
    Example Usage

    Example Kafka clusters on AWS

    import * as pulumi from "@pulumi/pulumi";
    import * as confluentcloud from "@pulumi/confluentcloud";
    
    const development = new confluentcloud.Environment("development", {displayName: "Development"});
    const basic = new confluentcloud.KafkaCluster("basic", {
        displayName: "basic_kafka_cluster",
        availability: "SINGLE_ZONE",
        cloud: "AWS",
        region: "us-east-2",
        basic: {},
        environment: {
            id: development.id,
        },
    });
    const standard = new confluentcloud.KafkaCluster("standard", {
        displayName: "standard_kafka_cluster",
        availability: "SINGLE_ZONE",
        cloud: "AWS",
        region: "us-east-2",
        standard: {},
        environment: {
            id: development.id,
        },
    });
    const dedicated = new confluentcloud.KafkaCluster("dedicated", {
        displayName: "dedicated_kafka_cluster",
        availability: "MULTI_ZONE",
        cloud: "AWS",
        region: "us-east-2",
        dedicated: {
            cku: 2,
        },
        environment: {
            id: development.id,
        },
    });
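    // NOTE: this freight example assumes an Environment ("staging") and a Network ("peering")
    // defined elsewhere in the program; they are not shown in this snippet.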
    const freight = new confluentcloud.KafkaCluster("freight", {
        freights: [{}],
        displayName: "freight_kafka_cluster",
        availability: "HIGH",
        cloud: "AWS",
        region: "us-east-1",
        environment: {
            id: staging.id,
        },
        network: {
            id: peering.id,
        },
    });
    
    import pulumi
    import pulumi_confluentcloud as confluentcloud
    
    development = confluentcloud.Environment("development", display_name="Development")
    basic = confluentcloud.KafkaCluster("basic",
        display_name="basic_kafka_cluster",
        availability="SINGLE_ZONE",
        cloud="AWS",
        region="us-east-2",
        basic=confluentcloud.KafkaClusterBasicArgs(),
        environment=confluentcloud.KafkaClusterEnvironmentArgs(
            id=development.id,
        ))
    standard = confluentcloud.KafkaCluster("standard",
        display_name="standard_kafka_cluster",
        availability="SINGLE_ZONE",
        cloud="AWS",
        region="us-east-2",
        standard=confluentcloud.KafkaClusterStandardArgs(),
        environment=confluentcloud.KafkaClusterEnvironmentArgs(
            id=development.id,
        ))
    dedicated = confluentcloud.KafkaCluster("dedicated",
        display_name="dedicated_kafka_cluster",
        availability="MULTI_ZONE",
        cloud="AWS",
        region="us-east-2",
        dedicated=confluentcloud.KafkaClusterDedicatedArgs(
            cku=2,
        ),
        environment=confluentcloud.KafkaClusterEnvironmentArgs(
            id=development.id,
        ))
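    # NOTE: this freight example assumes `staging` (an environment) and `peering` (a network)
    # defined elsewhere in the program; they are not shown in this snippet.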
    freight = confluentcloud.KafkaCluster("freight",
        freights=[confluentcloud.KafkaClusterFreightArgs()],
        display_name="freight_kafka_cluster",
        availability="HIGH",
        cloud="AWS",
        region="us-east-1",
        environment=confluentcloud.KafkaClusterEnvironmentArgs(
            id=staging["id"],
        ),
        network=confluentcloud.KafkaClusterNetworkArgs(
            id=peering["id"],
        ))
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-confluentcloud/sdk/go/confluentcloud"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		development, err := confluentcloud.NewEnvironment(ctx, "development", &confluentcloud.EnvironmentArgs{
    			DisplayName: pulumi.String("Development"),
    		})
    		if err != nil {
    			return err
    		}
    		_, err = confluentcloud.NewKafkaCluster(ctx, "basic", &confluentcloud.KafkaClusterArgs{
    			DisplayName:  pulumi.String("basic_kafka_cluster"),
    			Availability: pulumi.String("SINGLE_ZONE"),
    			Cloud:        pulumi.String("AWS"),
    			Region:       pulumi.String("us-east-2"),
    			Basic:        nil,
    			Environment: &confluentcloud.KafkaClusterEnvironmentArgs{
    				Id: development.ID(),
    			},
    		})
    		if err != nil {
    			return err
    		}
    		_, err = confluentcloud.NewKafkaCluster(ctx, "standard", &confluentcloud.KafkaClusterArgs{
    			DisplayName:  pulumi.String("standard_kafka_cluster"),
    			Availability: pulumi.String("SINGLE_ZONE"),
    			Cloud:        pulumi.String("AWS"),
    			Region:       pulumi.String("us-east-2"),
    			Standard:     nil,
    			Environment: &confluentcloud.KafkaClusterEnvironmentArgs{
    				Id: development.ID(),
    			},
    		})
    		if err != nil {
    			return err
    		}
    		_, err = confluentcloud.NewKafkaCluster(ctx, "dedicated", &confluentcloud.KafkaClusterArgs{
    			DisplayName:  pulumi.String("dedicated_kafka_cluster"),
    			Availability: pulumi.String("MULTI_ZONE"),
    			Cloud:        pulumi.String("AWS"),
    			Region:       pulumi.String("us-east-2"),
    			Dedicated: &confluentcloud.KafkaClusterDedicatedArgs{
    				Cku: pulumi.Int(2),
    			},
    			Environment: &confluentcloud.KafkaClusterEnvironmentArgs{
    				Id: development.ID(),
    			},
    		})
    		if err != nil {
    			return err
    		}
    		_, err = confluentcloud.NewKafkaCluster(ctx, "freight", &confluentcloud.KafkaClusterArgs{
    			Freights: confluentcloud.KafkaClusterFreightArray{
    				nil,
    			},
    			DisplayName:  pulumi.String("freight_kafka_cluster"),
    			Availability: pulumi.String("HIGH"),
    			Cloud:        pulumi.String("AWS"),
    			Region:       pulumi.String("us-east-1"),
    			Environment: &confluentcloud.KafkaClusterEnvironmentArgs{
    				Id: pulumi.Any(staging.Id),
    			},
    			Network: &confluentcloud.KafkaClusterNetworkArgs{
    				Id: pulumi.Any(peering.Id),
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using ConfluentCloud = Pulumi.ConfluentCloud;
    
    return await Deployment.RunAsync(() => 
    {
        var development = new ConfluentCloud.Environment("development", new()
        {
            DisplayName = "Development",
        });
    
        var basic = new ConfluentCloud.KafkaCluster("basic", new()
        {
            DisplayName = "basic_kafka_cluster",
            Availability = "SINGLE_ZONE",
            Cloud = "AWS",
            Region = "us-east-2",
            Basic = null,
            Environment = new ConfluentCloud.Inputs.KafkaClusterEnvironmentArgs
            {
                Id = development.Id,
            },
        });
    
        var standard = new ConfluentCloud.KafkaCluster("standard", new()
        {
            DisplayName = "standard_kafka_cluster",
            Availability = "SINGLE_ZONE",
            Cloud = "AWS",
            Region = "us-east-2",
            Standard = null,
            Environment = new ConfluentCloud.Inputs.KafkaClusterEnvironmentArgs
            {
                Id = development.Id,
            },
        });
    
        var dedicated = new ConfluentCloud.KafkaCluster("dedicated", new()
        {
            DisplayName = "dedicated_kafka_cluster",
            Availability = "MULTI_ZONE",
            Cloud = "AWS",
            Region = "us-east-2",
            Dedicated = new ConfluentCloud.Inputs.KafkaClusterDedicatedArgs
            {
                Cku = 2,
            },
            Environment = new ConfluentCloud.Inputs.KafkaClusterEnvironmentArgs
            {
                Id = development.Id,
            },
        });
    
        var freight = new ConfluentCloud.KafkaCluster("freight", new()
        {
            Freights = new[]
            {
                null,
            },
            DisplayName = "freight_kafka_cluster",
            Availability = "HIGH",
            Cloud = "AWS",
            Region = "us-east-1",
            Environment = new ConfluentCloud.Inputs.KafkaClusterEnvironmentArgs
            {
                Id = staging.Id,
            },
            Network = new ConfluentCloud.Inputs.KafkaClusterNetworkArgs
            {
                Id = peering.Id,
            },
        });
    
    });
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.confluentcloud.Environment;
    import com.pulumi.confluentcloud.EnvironmentArgs;
    import com.pulumi.confluentcloud.KafkaCluster;
    import com.pulumi.confluentcloud.KafkaClusterArgs;
    import com.pulumi.confluentcloud.inputs.KafkaClusterBasicArgs;
    import com.pulumi.confluentcloud.inputs.KafkaClusterEnvironmentArgs;
    import com.pulumi.confluentcloud.inputs.KafkaClusterStandardArgs;
    import com.pulumi.confluentcloud.inputs.KafkaClusterDedicatedArgs;
    import com.pulumi.confluentcloud.inputs.KafkaClusterFreightArgs;
    import com.pulumi.confluentcloud.inputs.KafkaClusterNetworkArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var development = new Environment("development", EnvironmentArgs.builder()
                .displayName("Development")
                .build());
    
            var basic = new KafkaCluster("basic", KafkaClusterArgs.builder()
                .displayName("basic_kafka_cluster")
                .availability("SINGLE_ZONE")
                .cloud("AWS")
                .region("us-east-2")
                .basic()
                .environment(KafkaClusterEnvironmentArgs.builder()
                    .id(development.id())
                    .build())
                .build());
    
            var standard = new KafkaCluster("standard", KafkaClusterArgs.builder()
                .displayName("standard_kafka_cluster")
                .availability("SINGLE_ZONE")
                .cloud("AWS")
                .region("us-east-2")
                .standard()
                .environment(KafkaClusterEnvironmentArgs.builder()
                    .id(development.id())
                    .build())
                .build());
    
            var dedicated = new KafkaCluster("dedicated", KafkaClusterArgs.builder()
                .displayName("dedicated_kafka_cluster")
                .availability("MULTI_ZONE")
                .cloud("AWS")
                .region("us-east-2")
                .dedicated(KafkaClusterDedicatedArgs.builder()
                    .cku(2)
                    .build())
                .environment(KafkaClusterEnvironmentArgs.builder()
                    .id(development.id())
                    .build())
                .build());
    
            var freight = new KafkaCluster("freight", KafkaClusterArgs.builder()
                .freights()
                .displayName("freight_kafka_cluster")
                .availability("HIGH")
                .cloud("AWS")
                .region("us-east-1")
                .environment(KafkaClusterEnvironmentArgs.builder()
                    .id(staging.id())
                    .build())
                .network(KafkaClusterNetworkArgs.builder()
                    .id(peering.id())
                    .build())
                .build());
    
        }
    }
    
    resources:
      development:
        type: confluentcloud:Environment
        properties:
          displayName: Development
      basic:
        type: confluentcloud:KafkaCluster
        properties:
          displayName: basic_kafka_cluster
          availability: SINGLE_ZONE
          cloud: AWS
          region: us-east-2
          basic: {}
          environment:
            id: ${development.id}
      standard:
        type: confluentcloud:KafkaCluster
        properties:
          displayName: standard_kafka_cluster
          availability: SINGLE_ZONE
          cloud: AWS
          region: us-east-2
          standard: {}
          environment:
            id: ${development.id}
      dedicated:
        type: confluentcloud:KafkaCluster
        properties:
          displayName: dedicated_kafka_cluster
          availability: MULTI_ZONE
          cloud: AWS
          region: us-east-2
          dedicated:
            cku: 2
          environment:
            id: ${development.id}
      freight:
        type: confluentcloud:KafkaCluster
        properties:
          freights:
            - {}
          displayName: freight_kafka_cluster
          availability: HIGH
          cloud: AWS
          region: us-east-1
          environment:
            id: ${staging.id}
          network:
            id: ${peering.id}
    

    Example Kafka clusters on Azure

    import * as pulumi from "@pulumi/pulumi";
    import * as confluentcloud from "@pulumi/confluentcloud";
    
    const development = new confluentcloud.Environment("development", {displayName: "Development"});
    const basic = new confluentcloud.KafkaCluster("basic", {
        displayName: "basic_kafka_cluster",
        availability: "SINGLE_ZONE",
        cloud: "AZURE",
        region: "centralus",
        basic: {},
        environment: {
            id: development.id,
        },
    });
    const standard = new confluentcloud.KafkaCluster("standard", {
        displayName: "standard_kafka_cluster",
        availability: "SINGLE_ZONE",
        cloud: "AZURE",
        region: "centralus",
        standard: {},
        environment: {
            id: development.id,
        },
    });
    const dedicated = new confluentcloud.KafkaCluster("dedicated", {
        displayName: "dedicated_kafka_cluster",
        availability: "MULTI_ZONE",
        cloud: "AZURE",
        region: "centralus",
        dedicated: {
            cku: 2,
        },
        environment: {
            id: development.id,
        },
    });
    
    import pulumi
    import pulumi_confluentcloud as confluentcloud
    
    development = confluentcloud.Environment("development", display_name="Development")
    basic = confluentcloud.KafkaCluster("basic",
        display_name="basic_kafka_cluster",
        availability="SINGLE_ZONE",
        cloud="AZURE",
        region="centralus",
        basic=confluentcloud.KafkaClusterBasicArgs(),
        environment=confluentcloud.KafkaClusterEnvironmentArgs(
            id=development.id,
        ))
    standard = confluentcloud.KafkaCluster("standard",
        display_name="standard_kafka_cluster",
        availability="SINGLE_ZONE",
        cloud="AZURE",
        region="centralus",
        standard=confluentcloud.KafkaClusterStandardArgs(),
        environment=confluentcloud.KafkaClusterEnvironmentArgs(
            id=development.id,
        ))
    dedicated = confluentcloud.KafkaCluster("dedicated",
        display_name="dedicated_kafka_cluster",
        availability="MULTI_ZONE",
        cloud="AZURE",
        region="centralus",
        dedicated=confluentcloud.KafkaClusterDedicatedArgs(
            cku=2,
        ),
        environment=confluentcloud.KafkaClusterEnvironmentArgs(
            id=development.id,
        ))
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-confluentcloud/sdk/go/confluentcloud"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		development, err := confluentcloud.NewEnvironment(ctx, "development", &confluentcloud.EnvironmentArgs{
    			DisplayName: pulumi.String("Development"),
    		})
    		if err != nil {
    			return err
    		}
    		_, err = confluentcloud.NewKafkaCluster(ctx, "basic", &confluentcloud.KafkaClusterArgs{
    			DisplayName:  pulumi.String("basic_kafka_cluster"),
    			Availability: pulumi.String("SINGLE_ZONE"),
    			Cloud:        pulumi.String("AZURE"),
    			Region:       pulumi.String("centralus"),
    			Basic:        nil,
    			Environment: &confluentcloud.KafkaClusterEnvironmentArgs{
    				Id: development.ID(),
    			},
    		})
    		if err != nil {
    			return err
    		}
    		_, err = confluentcloud.NewKafkaCluster(ctx, "standard", &confluentcloud.KafkaClusterArgs{
    			DisplayName:  pulumi.String("standard_kafka_cluster"),
    			Availability: pulumi.String("SINGLE_ZONE"),
    			Cloud:        pulumi.String("AZURE"),
    			Region:       pulumi.String("centralus"),
    			Standard:     nil,
    			Environment: &confluentcloud.KafkaClusterEnvironmentArgs{
    				Id: development.ID(),
    			},
    		})
    		if err != nil {
    			return err
    		}
    		_, err = confluentcloud.NewKafkaCluster(ctx, "dedicated", &confluentcloud.KafkaClusterArgs{
    			DisplayName:  pulumi.String("dedicated_kafka_cluster"),
    			Availability: pulumi.String("MULTI_ZONE"),
    			Cloud:        pulumi.String("AZURE"),
    			Region:       pulumi.String("centralus"),
    			Dedicated: &confluentcloud.KafkaClusterDedicatedArgs{
    				Cku: pulumi.Int(2),
    			},
    			Environment: &confluentcloud.KafkaClusterEnvironmentArgs{
    				Id: development.ID(),
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using ConfluentCloud = Pulumi.ConfluentCloud;
    
    return await Deployment.RunAsync(() => 
    {
        var development = new ConfluentCloud.Environment("development", new()
        {
            DisplayName = "Development",
        });
    
        var basic = new ConfluentCloud.KafkaCluster("basic", new()
        {
            DisplayName = "basic_kafka_cluster",
            Availability = "SINGLE_ZONE",
            Cloud = "AZURE",
            Region = "centralus",
            Basic = null,
            Environment = new ConfluentCloud.Inputs.KafkaClusterEnvironmentArgs
            {
                Id = development.Id,
            },
        });
    
        var standard = new ConfluentCloud.KafkaCluster("standard", new()
        {
            DisplayName = "standard_kafka_cluster",
            Availability = "SINGLE_ZONE",
            Cloud = "AZURE",
            Region = "centralus",
            Standard = null,
            Environment = new ConfluentCloud.Inputs.KafkaClusterEnvironmentArgs
            {
                Id = development.Id,
            },
        });
    
        var dedicated = new ConfluentCloud.KafkaCluster("dedicated", new()
        {
            DisplayName = "dedicated_kafka_cluster",
            Availability = "MULTI_ZONE",
            Cloud = "AZURE",
            Region = "centralus",
            Dedicated = new ConfluentCloud.Inputs.KafkaClusterDedicatedArgs
            {
                Cku = 2,
            },
            Environment = new ConfluentCloud.Inputs.KafkaClusterEnvironmentArgs
            {
                Id = development.Id,
            },
        });
    
    });
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.confluentcloud.Environment;
    import com.pulumi.confluentcloud.EnvironmentArgs;
    import com.pulumi.confluentcloud.KafkaCluster;
    import com.pulumi.confluentcloud.KafkaClusterArgs;
    import com.pulumi.confluentcloud.inputs.KafkaClusterBasicArgs;
    import com.pulumi.confluentcloud.inputs.KafkaClusterEnvironmentArgs;
    import com.pulumi.confluentcloud.inputs.KafkaClusterStandardArgs;
    import com.pulumi.confluentcloud.inputs.KafkaClusterDedicatedArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var development = new Environment("development", EnvironmentArgs.builder()
                .displayName("Development")
                .build());
    
            var basic = new KafkaCluster("basic", KafkaClusterArgs.builder()
                .displayName("basic_kafka_cluster")
                .availability("SINGLE_ZONE")
                .cloud("AZURE")
                .region("centralus")
                .basic()
                .environment(KafkaClusterEnvironmentArgs.builder()
                    .id(development.id())
                    .build())
                .build());
    
            var standard = new KafkaCluster("standard", KafkaClusterArgs.builder()
                .displayName("standard_kafka_cluster")
                .availability("SINGLE_ZONE")
                .cloud("AZURE")
                .region("centralus")
                .standard()
                .environment(KafkaClusterEnvironmentArgs.builder()
                    .id(development.id())
                    .build())
                .build());
    
            var dedicated = new KafkaCluster("dedicated", KafkaClusterArgs.builder()
                .displayName("dedicated_kafka_cluster")
                .availability("MULTI_ZONE")
                .cloud("AZURE")
                .region("centralus")
                .dedicated(KafkaClusterDedicatedArgs.builder()
                    .cku(2)
                    .build())
                .environment(KafkaClusterEnvironmentArgs.builder()
                    .id(development.id())
                    .build())
                .build());
    
        }
    }
    
    resources:
      development:
        type: confluentcloud:Environment
        properties:
          displayName: Development
      basic:
        type: confluentcloud:KafkaCluster
        properties:
          displayName: basic_kafka_cluster
          availability: SINGLE_ZONE
          cloud: AZURE
          region: centralus
          basic: {}
          environment:
            id: ${development.id}
      standard:
        type: confluentcloud:KafkaCluster
        properties:
          displayName: standard_kafka_cluster
          availability: SINGLE_ZONE
          cloud: AZURE
          region: centralus
          standard: {}
          environment:
            id: ${development.id}
      dedicated:
        type: confluentcloud:KafkaCluster
        properties:
          displayName: dedicated_kafka_cluster
          availability: MULTI_ZONE
          cloud: AZURE
          region: centralus
          dedicated:
            cku: 2
          environment:
            id: ${development.id}
    

    Example Kafka clusters on GCP

    import * as pulumi from "@pulumi/pulumi";
    import * as confluentcloud from "@pulumi/confluentcloud";
    
    const development = new confluentcloud.Environment("development", {displayName: "Development"});
    const basic = new confluentcloud.KafkaCluster("basic", {
        displayName: "basic_kafka_cluster",
        availability: "SINGLE_ZONE",
        cloud: "GCP",
        region: "us-central1",
        basic: {},
        environment: {
            id: development.id,
        },
    });
    const standard = new confluentcloud.KafkaCluster("standard", {
        displayName: "standard_kafka_cluster",
        availability: "SINGLE_ZONE",
        cloud: "GCP",
        region: "us-central1",
        standard: {},
        environment: {
            id: development.id,
        },
    });
    const dedicated = new confluentcloud.KafkaCluster("dedicated", {
        displayName: "dedicated_kafka_cluster",
        availability: "MULTI_ZONE",
        cloud: "GCP",
        region: "us-central1",
        dedicated: {
            cku: 2,
        },
        environment: {
            id: development.id,
        },
    });
    
    import pulumi
    import pulumi_confluentcloud as confluentcloud
    
    development = confluentcloud.Environment("development", display_name="Development")
    basic = confluentcloud.KafkaCluster("basic",
        display_name="basic_kafka_cluster",
        availability="SINGLE_ZONE",
        cloud="GCP",
        region="us-central1",
        basic=confluentcloud.KafkaClusterBasicArgs(),
        environment=confluentcloud.KafkaClusterEnvironmentArgs(
            id=development.id,
        ))
    standard = confluentcloud.KafkaCluster("standard",
        display_name="standard_kafka_cluster",
        availability="SINGLE_ZONE",
        cloud="GCP",
        region="us-central1",
        standard=confluentcloud.KafkaClusterStandardArgs(),
        environment=confluentcloud.KafkaClusterEnvironmentArgs(
            id=development.id,
        ))
    dedicated = confluentcloud.KafkaCluster("dedicated",
        display_name="dedicated_kafka_cluster",
        availability="MULTI_ZONE",
        cloud="GCP",
        region="us-central1",
        dedicated=confluentcloud.KafkaClusterDedicatedArgs(
            cku=2,
        ),
        environment=confluentcloud.KafkaClusterEnvironmentArgs(
            id=development.id,
        ))
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-confluentcloud/sdk/go/confluentcloud"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		development, err := confluentcloud.NewEnvironment(ctx, "development", &confluentcloud.EnvironmentArgs{
    			DisplayName: pulumi.String("Development"),
    		})
    		if err != nil {
    			return err
    		}
    		_, err = confluentcloud.NewKafkaCluster(ctx, "basic", &confluentcloud.KafkaClusterArgs{
    			DisplayName:  pulumi.String("basic_kafka_cluster"),
    			Availability: pulumi.String("SINGLE_ZONE"),
    			Cloud:        pulumi.String("GCP"),
    			Region:       pulumi.String("us-central1"),
    			Basic:        nil,
    			Environment: &confluentcloud.KafkaClusterEnvironmentArgs{
    				Id: development.ID(),
    			},
    		})
    		if err != nil {
    			return err
    		}
    		_, err = confluentcloud.NewKafkaCluster(ctx, "standard", &confluentcloud.KafkaClusterArgs{
    			DisplayName:  pulumi.String("standard_kafka_cluster"),
    			Availability: pulumi.String("SINGLE_ZONE"),
    			Cloud:        pulumi.String("GCP"),
    			Region:       pulumi.String("us-central1"),
    			Standard:     nil,
    			Environment: &confluentcloud.KafkaClusterEnvironmentArgs{
    				Id: development.ID(),
    			},
    		})
    		if err != nil {
    			return err
    		}
    		_, err = confluentcloud.NewKafkaCluster(ctx, "dedicated", &confluentcloud.KafkaClusterArgs{
    			DisplayName:  pulumi.String("dedicated_kafka_cluster"),
    			Availability: pulumi.String("MULTI_ZONE"),
    			Cloud:        pulumi.String("GCP"),
    			Region:       pulumi.String("us-central1"),
    			Dedicated: &confluentcloud.KafkaClusterDedicatedArgs{
    				Cku: pulumi.Int(2),
    			},
    			Environment: &confluentcloud.KafkaClusterEnvironmentArgs{
    				Id: development.ID(),
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using ConfluentCloud = Pulumi.ConfluentCloud;
    
    return await Deployment.RunAsync(() => 
    {
        var development = new ConfluentCloud.Environment("development", new()
        {
            DisplayName = "Development",
        });
    
        var basic = new ConfluentCloud.KafkaCluster("basic", new()
        {
            DisplayName = "basic_kafka_cluster",
            Availability = "SINGLE_ZONE",
            Cloud = "GCP",
            Region = "us-central1",
            Basic = null,
            Environment = new ConfluentCloud.Inputs.KafkaClusterEnvironmentArgs
            {
                Id = development.Id,
            },
        });
    
        var standard = new ConfluentCloud.KafkaCluster("standard", new()
        {
            DisplayName = "standard_kafka_cluster",
            Availability = "SINGLE_ZONE",
            Cloud = "GCP",
            Region = "us-central1",
            Standard = null,
            Environment = new ConfluentCloud.Inputs.KafkaClusterEnvironmentArgs
            {
                Id = development.Id,
            },
        });
    
        var dedicated = new ConfluentCloud.KafkaCluster("dedicated", new()
        {
            DisplayName = "dedicated_kafka_cluster",
            Availability = "MULTI_ZONE",
            Cloud = "GCP",
            Region = "us-central1",
            Dedicated = new ConfluentCloud.Inputs.KafkaClusterDedicatedArgs
            {
                Cku = 2,
            },
            Environment = new ConfluentCloud.Inputs.KafkaClusterEnvironmentArgs
            {
                Id = development.Id,
            },
        });
    
    });
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.confluentcloud.Environment;
    import com.pulumi.confluentcloud.EnvironmentArgs;
    import com.pulumi.confluentcloud.KafkaCluster;
    import com.pulumi.confluentcloud.KafkaClusterArgs;
    import com.pulumi.confluentcloud.inputs.KafkaClusterBasicArgs;
    import com.pulumi.confluentcloud.inputs.KafkaClusterEnvironmentArgs;
    import com.pulumi.confluentcloud.inputs.KafkaClusterStandardArgs;
    import com.pulumi.confluentcloud.inputs.KafkaClusterDedicatedArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var development = new Environment("development", EnvironmentArgs.builder()
                .displayName("Development")
                .build());
    
            var basic = new KafkaCluster("basic", KafkaClusterArgs.builder()
                .displayName("basic_kafka_cluster")
                .availability("SINGLE_ZONE")
                .cloud("GCP")
                .region("us-central1")
                .basic()
                .environment(KafkaClusterEnvironmentArgs.builder()
                    .id(development.id())
                    .build())
                .build());
    
            var standard = new KafkaCluster("standard", KafkaClusterArgs.builder()
                .displayName("standard_kafka_cluster")
                .availability("SINGLE_ZONE")
                .cloud("GCP")
                .region("us-central1")
                .standard()
                .environment(KafkaClusterEnvironmentArgs.builder()
                    .id(development.id())
                    .build())
                .build());
    
            var dedicated = new KafkaCluster("dedicated", KafkaClusterArgs.builder()
                .displayName("dedicated_kafka_cluster")
                .availability("MULTI_ZONE")
                .cloud("GCP")
                .region("us-central1")
                .dedicated(KafkaClusterDedicatedArgs.builder()
                    .cku(2)
                    .build())
                .environment(KafkaClusterEnvironmentArgs.builder()
                    .id(development.id())
                    .build())
                .build());
    
        }
    }
    
    resources:
      development:
        type: confluentcloud:Environment
        properties:
          displayName: Development
      basic:
        type: confluentcloud:KafkaCluster
        properties:
          displayName: basic_kafka_cluster
          availability: SINGLE_ZONE
          cloud: GCP
          region: us-central1
          basic: {}
          environment:
            id: ${development.id}
      standard:
        type: confluentcloud:KafkaCluster
        properties:
          displayName: standard_kafka_cluster
          availability: SINGLE_ZONE
          cloud: GCP
          region: us-central1
          standard: {}
          environment:
            id: ${development.id}
      dedicated:
        type: confluentcloud:KafkaCluster
        properties:
          displayName: dedicated_kafka_cluster
          availability: MULTI_ZONE
          cloud: GCP
          region: us-central1
          dedicated:
            cku: 2
          environment:
            id: ${development.id}
    

    Getting Started

    The following end-to-end examples might help you get started with the confluentcloud.KafkaCluster resource; a short provider-configuration sketch follows the list:

    • basic-kafka-acls: Basic Kafka cluster with authorization using ACLs
    • basic-kafka-acls-with-alias: Basic Kafka cluster with authorization using ACLs
    • standard-kafka-acls: Standard Kafka cluster with authorization using ACLs
    • standard-kafka-rbac: Standard Kafka cluster with authorization using RBAC
    • dedicated-public-kafka-acls: Dedicated Kafka cluster that is accessible over the public internet with authorization using ACLs
    • dedicated-public-kafka-rbac: Dedicated Kafka cluster that is accessible over the public internet with authorization using RBAC
    • dedicated-privatelink-aws-kafka-acls: Dedicated Kafka cluster on AWS that is accessible via PrivateLink connections with authorization using ACLs
    • dedicated-privatelink-aws-kafka-rbac: Dedicated Kafka cluster on AWS that is accessible via PrivateLink connections with authorization using RBAC
    • dedicated-privatelink-azure-kafka-rbac: Dedicated Kafka cluster on Azure that is accessible via PrivateLink connections with authorization using RBAC
    • dedicated-privatelink-azure-kafka-acls: Dedicated Kafka cluster on Azure that is accessible via PrivateLink connections with authorization using ACLs
    • dedicated-private-service-connect-gcp-kafka-acls: Dedicated Kafka cluster on GCP that is accessible via Private Service Connect connections with authorization using ACLs
    • dedicated-private-service-connect-gcp-kafka-rbac: Dedicated Kafka cluster on GCP that is accessible via Private Service Connect connections with authorization using RBAC
    • dedicated-vnet-peering-azure-kafka-acls: Dedicated Kafka cluster on Azure that is accessible via VPC Peering connections with authorization using ACLs
    • dedicated-vnet-peering-azure-kafka-rbac: Dedicated Kafka cluster on Azure that is accessible via VPC Peering connections with authorization using RBAC
    • dedicated-vpc-peering-aws-kafka-acls: Dedicated Kafka cluster on AWS that is accessible via VPC Peering connections with authorization using ACLs
    • dedicated-vpc-peering-aws-kafka-rbac: Dedicated Kafka cluster on AWS that is accessible via VPC Peering connections with authorization using RBAC
    • dedicated-vpc-peering-gcp-kafka-acls: Dedicated Kafka cluster on GCP that is accessible via VPC Peering connections with authorization using ACLs
    • dedicated-vpc-peering-gcp-kafka-rbac: Dedicated Kafka cluster on GCP that is accessible via VPC Peering connections with authorization using RBAC
    • dedicated-transit-gateway-attachment-aws-kafka-acls: Dedicated Kafka cluster on AWS that is accessible via Transit Gateway Endpoint with authorization using ACLs
    • dedicated-transit-gateway-attachment-aws-kafka-rbac: Dedicated Kafka cluster on AWS that is accessible via Transit Gateway Endpoint with authorization using RBAC
    • enterprise-privatelinkattachment-aws-kafka-acls: Enterprise Kafka cluster on AWS that is accessible via PrivateLink connections with authorization using ACLs
    • enterprise-privatelinkattachment-azure-kafka-acls: Enterprise Kafka cluster on Azure that is accessible via PrivateLink connections with authorization using ACLs
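
    All of these examples assume the provider has been configured with Confluent Cloud API credentials. A minimal TypeScript sketch of an explicit provider instance, assuming the credentials are stored as Pulumi config secrets (the config key names below are illustrative, not required by the provider):

    import * as pulumi from "@pulumi/pulumi";
    import * as confluentcloud from "@pulumi/confluentcloud";

    // Read Cloud API credentials from Pulumi config, e.g. set with:
    //   pulumi config set --secret confluentApiKey <key>
    //   pulumi config set --secret confluentApiSecret <secret>
    const cfg = new pulumi.Config();

    // Explicit provider instance using the provider's Cloud API credential inputs.
    const confluentProvider = new confluentcloud.Provider("confluent", {
        cloudApiKey: cfg.requireSecret("confluentApiKey"),
        cloudApiSecret: cfg.requireSecret("confluentApiSecret"),
    });

    // Resources from the examples above can then target this provider explicitly.
    const development = new confluentcloud.Environment("development",
        { displayName: "Development" },
        { provider: confluentProvider });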

    Create KafkaCluster Resource

    Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.

    Constructor syntax

    new KafkaCluster(name: string, args: KafkaClusterArgs, opts?: CustomResourceOptions);
    @overload
    def KafkaCluster(resource_name: str,
                     args: KafkaClusterArgs,
                     opts: Optional[ResourceOptions] = None)
    
    @overload
    def KafkaCluster(resource_name: str,
                     opts: Optional[ResourceOptions] = None,
                     availability: Optional[str] = None,
                     cloud: Optional[str] = None,
                     environment: Optional[KafkaClusterEnvironmentArgs] = None,
                     region: Optional[str] = None,
                     basic: Optional[KafkaClusterBasicArgs] = None,
                     byok_key: Optional[KafkaClusterByokKeyArgs] = None,
                     dedicated: Optional[KafkaClusterDedicatedArgs] = None,
                     display_name: Optional[str] = None,
                     enterprises: Optional[Sequence[KafkaClusterEnterpriseArgs]] = None,
                     freights: Optional[Sequence[KafkaClusterFreightArgs]] = None,
                     network: Optional[KafkaClusterNetworkArgs] = None,
                     standard: Optional[KafkaClusterStandardArgs] = None)
    func NewKafkaCluster(ctx *Context, name string, args KafkaClusterArgs, opts ...ResourceOption) (*KafkaCluster, error)
    public KafkaCluster(string name, KafkaClusterArgs args, CustomResourceOptions? opts = null)
    public KafkaCluster(String name, KafkaClusterArgs args)
    public KafkaCluster(String name, KafkaClusterArgs args, CustomResourceOptions options)
    
    type: confluentcloud:KafkaCluster
    properties: # The arguments to resource properties.
    options: # Bag of options to control resource's behavior.
    
    

    Parameters

    name string
    The unique name of the resource.
    args KafkaClusterArgs
    The arguments to resource properties.
    opts CustomResourceOptions
    Bag of options to control resource's behavior.
    resource_name str
    The unique name of the resource.
    args KafkaClusterArgs
    The arguments to resource properties.
    opts ResourceOptions
    Bag of options to control resource's behavior.
    ctx Context
    Context object for the current deployment.
    name string
    The unique name of the resource.
    args KafkaClusterArgs
    The arguments to resource properties.
    opts ResourceOption
    Bag of options to control resource's behavior.
    name string
    The unique name of the resource.
    args KafkaClusterArgs
    The arguments to resource properties.
    opts CustomResourceOptions
    Bag of options to control resource's behavior.
    name String
    The unique name of the resource.
    args KafkaClusterArgs
    The arguments to resource properties.
    options CustomResourceOptions
    Bag of options to control resource's behavior.

    Constructor example

    The following reference example uses placeholder values for all input properties.

    var kafkaClusterResource = new ConfluentCloud.KafkaCluster("kafkaClusterResource", new()
    {
        Availability = "string",
        Cloud = "string",
        Environment = new ConfluentCloud.Inputs.KafkaClusterEnvironmentArgs
        {
            Id = "string",
        },
        Region = "string",
        Basic = null,
        ByokKey = new ConfluentCloud.Inputs.KafkaClusterByokKeyArgs
        {
            Id = "string",
        },
        Dedicated = new ConfluentCloud.Inputs.KafkaClusterDedicatedArgs
        {
            Cku = 0,
            EncryptionKey = "string",
            Zones = new[]
            {
                "string",
            },
        },
        DisplayName = "string",
        Enterprises = new[]
        {
            null,
        },
        Freights = new[]
        {
            new ConfluentCloud.Inputs.KafkaClusterFreightArgs
            {
                Zones = new[]
                {
                    "string",
                },
            },
        },
        Network = new ConfluentCloud.Inputs.KafkaClusterNetworkArgs
        {
            Id = "string",
        },
        Standard = null,
    });
    
    example, err := confluentcloud.NewKafkaCluster(ctx, "kafkaClusterResource", &confluentcloud.KafkaClusterArgs{
    	Availability: pulumi.String("string"),
    	Cloud:        pulumi.String("string"),
    	Environment: &confluentcloud.KafkaClusterEnvironmentArgs{
    		Id: pulumi.String("string"),
    	},
    	Region: pulumi.String("string"),
    	Basic:  nil,
    	ByokKey: &confluentcloud.KafkaClusterByokKeyArgs{
    		Id: pulumi.String("string"),
    	},
    	Dedicated: &confluentcloud.KafkaClusterDedicatedArgs{
    		Cku:           pulumi.Int(0),
    		EncryptionKey: pulumi.String("string"),
    		Zones: pulumi.StringArray{
    			pulumi.String("string"),
    		},
    	},
    	DisplayName: pulumi.String("string"),
    	Enterprises: confluentcloud.KafkaClusterEnterpriseArray{
    		nil,
    	},
    	Freights: confluentcloud.KafkaClusterFreightArray{
    		&confluentcloud.KafkaClusterFreightArgs{
    			Zones: pulumi.StringArray{
    				pulumi.String("string"),
    			},
    		},
    	},
    	Network: &confluentcloud.KafkaClusterNetworkArgs{
    		Id: pulumi.String("string"),
    	},
    	Standard: nil,
    })
    
    var kafkaClusterResource = new KafkaCluster("kafkaClusterResource", KafkaClusterArgs.builder()
        .availability("string")
        .cloud("string")
        .environment(KafkaClusterEnvironmentArgs.builder()
            .id("string")
            .build())
        .region("string")
        .basic()
        .byokKey(KafkaClusterByokKeyArgs.builder()
            .id("string")
            .build())
        .dedicated(KafkaClusterDedicatedArgs.builder()
            .cku(0)
            .encryptionKey("string")
            .zones("string")
            .build())
        .displayName("string")
        .enterprises()
        .freights(KafkaClusterFreightArgs.builder()
            .zones("string")
            .build())
        .network(KafkaClusterNetworkArgs.builder()
            .id("string")
            .build())
        .standard()
        .build());
    
    kafka_cluster_resource = confluentcloud.KafkaCluster("kafkaClusterResource",
        availability="string",
        cloud="string",
        environment=confluentcloud.KafkaClusterEnvironmentArgs(
            id="string",
        ),
        region="string",
        basic=confluentcloud.KafkaClusterBasicArgs(),
        byok_key=confluentcloud.KafkaClusterByokKeyArgs(
            id="string",
        ),
        dedicated=confluentcloud.KafkaClusterDedicatedArgs(
            cku=0,
            encryption_key="string",
            zones=["string"],
        ),
        display_name="string",
        enterprises=[confluentcloud.KafkaClusterEnterpriseArgs()],
        freights=[confluentcloud.KafkaClusterFreightArgs(
            zones=["string"],
        )],
        network=confluentcloud.KafkaClusterNetworkArgs(
            id="string",
        ),
        standard=confluentcloud.KafkaClusterStandardArgs())
    
    const kafkaClusterResource = new confluentcloud.KafkaCluster("kafkaClusterResource", {
        availability: "string",
        cloud: "string",
        environment: {
            id: "string",
        },
        region: "string",
        basic: {},
        byokKey: {
            id: "string",
        },
        dedicated: {
            cku: 0,
            encryptionKey: "string",
            zones: ["string"],
        },
        displayName: "string",
        enterprises: [{}],
        freights: [{
            zones: ["string"],
        }],
        network: {
            id: "string",
        },
        standard: {},
    });
    
    type: confluentcloud:KafkaCluster
    properties:
        availability: string
        basic: {}
        byokKey:
            id: string
        cloud: string
        dedicated:
            cku: 0
            encryptionKey: string
            zones:
                - string
        displayName: string
        enterprises:
            - {}
        environment:
            id: string
        freights:
            - zones:
                - string
        network:
            id: string
        region: string
        standard: {}
    

    KafkaCluster Resource Properties

    To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.

    Inputs

    The KafkaCluster resource accepts the following input properties:

    Availability string
    The availability zone configuration of the Kafka cluster. Accepted values are: SINGLE_ZONE, MULTI_ZONE, LOW, and HIGH.
    Cloud string
    The cloud service provider that runs the Kafka cluster. Accepted values are: AWS, AZURE, and GCP.
    Environment Pulumi.ConfluentCloud.Inputs.KafkaClusterEnvironment
    Environment objects represent an isolated namespace for your Confluent resources for organizational purposes.
    Region string
    The cloud service provider region where the Kafka cluster is running, for example, us-west-2. See Cloud Providers and Regions for a full list of options for AWS, Azure, and GCP.
    Basic Pulumi.ConfluentCloud.Inputs.KafkaClusterBasic
    The configuration of the Basic Kafka cluster.
    ByokKey Pulumi.ConfluentCloud.Inputs.KafkaClusterByokKey
    Dedicated Pulumi.ConfluentCloud.Inputs.KafkaClusterDedicated
    (Optional Configuration Block) The configuration of the Dedicated Kafka cluster.
    DisplayName string
    The name of the Kafka cluster.
    Enterprises List<Pulumi.ConfluentCloud.Inputs.KafkaClusterEnterprise>
    The configuration of the Enterprise Kafka cluster.
    Freights List<Pulumi.ConfluentCloud.Inputs.KafkaClusterFreight>
    The configuration of the Freight Kafka cluster.
    Network Pulumi.ConfluentCloud.Inputs.KafkaClusterNetwork
    Network represents a network (VPC) in Confluent Cloud. All Networks exist within Confluent-managed cloud provider accounts.
    Standard Pulumi.ConfluentCloud.Inputs.KafkaClusterStandard
    The configuration of the Standard Kafka cluster.
    Availability string
    The availability zone configuration of the Kafka cluster. Accepted values are: SINGLE_ZONE, MULTI_ZONE, LOW, and HIGH.
    Cloud string
    The cloud service provider that runs the Kafka cluster. Accepted values are: AWS, AZURE, and GCP.
    Environment KafkaClusterEnvironmentArgs
    Environment objects represent an isolated namespace for your Confluent resources for organizational purposes.
    Region string
    The cloud service provider region where the Kafka cluster is running, for example, us-west-2. See Cloud Providers and Regions for a full list of options for AWS, Azure, and GCP.
    Basic KafkaClusterBasicArgs
    The configuration of the Basic Kafka cluster.
    ByokKey KafkaClusterByokKeyArgs
    Dedicated KafkaClusterDedicatedArgs
    (Optional Configuration Block) The configuration of the Dedicated Kafka cluster.
    DisplayName string
    The name of the Kafka cluster.
    Enterprises []KafkaClusterEnterpriseArgs
    The configuration of the Enterprise Kafka cluster.
    Freights []KafkaClusterFreightArgs
    The configuration of the Freight Kafka cluster.
    Network KafkaClusterNetworkArgs
    Network represents a network (VPC) in Confluent Cloud. All Networks exist within Confluent-managed cloud provider accounts.
    Standard KafkaClusterStandardArgs
    The configuration of the Standard Kafka cluster.
    availability String
    The availability zone configuration of the Kafka cluster. Accepted values are: SINGLE_ZONE, MULTI_ZONE, LOW, and HIGH.
    cloud String
    The cloud service provider that runs the Kafka cluster. Accepted values are: AWS, AZURE, and GCP.
    environment KafkaClusterEnvironment
    Environment objects represent an isolated namespace for your Confluent resources for organizational purposes.
    region String
    The cloud service provider region where the Kafka cluster is running, for example, us-west-2. See Cloud Providers and Regions for a full list of options for AWS, Azure, and GCP.
    basic KafkaClusterBasic
    The configuration of the Basic Kafka cluster.
    byokKey KafkaClusterByokKey
    dedicated KafkaClusterDedicated
    (Optional Configuration Block) The configuration of the Dedicated Kafka cluster.
    displayName String
    The name of the Kafka cluster.
    enterprises List<KafkaClusterEnterprise>
    The configuration of the Enterprise Kafka cluster.
    freights List<KafkaClusterFreight>
    The configuration of the Freight Kafka cluster.
    network KafkaClusterNetwork
    Network represents a network (VPC) in Confluent Cloud. All Networks exist within Confluent-managed cloud provider accounts.
    standard KafkaClusterStandard
    The configuration of the Standard Kafka cluster.
    availability string
    The availability zone configuration of the Kafka cluster. Accepted values are: SINGLE_ZONE, MULTI_ZONE, LOW, and HIGH.
    cloud string
    The cloud service provider that runs the Kafka cluster. Accepted values are: AWS, AZURE, and GCP.
    environment KafkaClusterEnvironment
    Environment objects represent an isolated namespace for your Confluent resources for organizational purposes.
    region string
    The cloud service provider region where the Kafka cluster is running, for example, us-west-2. See Cloud Providers and Regions for a full list of options for AWS, Azure, and GCP.
    basic KafkaClusterBasic
    The configuration of the Basic Kafka cluster.
    byokKey KafkaClusterByokKey
    dedicated KafkaClusterDedicated
    (Optional Configuration Block) The configuration of the Dedicated Kafka cluster.
    displayName string
    The name of the Kafka cluster.
    enterprises KafkaClusterEnterprise[]
    The configuration of the Enterprise Kafka cluster.
    freights KafkaClusterFreight[]
    The configuration of the Freight Kafka cluster.
    network KafkaClusterNetwork
    Network represents a network (VPC) in Confluent Cloud. All Networks exist within Confluent-managed cloud provider accounts.
    standard KafkaClusterStandard
    The configuration of the Standard Kafka cluster.
    availability str
    The availability zone configuration of the Kafka cluster. Accepted values are: SINGLE_ZONE, MULTI_ZONE, LOW, and HIGH.
    cloud str
    The cloud service provider that runs the Kafka cluster. Accepted values are: AWS, AZURE, and GCP.
    environment KafkaClusterEnvironmentArgs
    Environment objects represent an isolated namespace for your Confluent resources for organizational purposes.
    region str
    The cloud service provider region where the Kafka cluster is running, for example, us-west-2. See Cloud Providers and Regions for a full list of options for AWS, Azure, and GCP.
    basic KafkaClusterBasicArgs
    The configuration of the Basic Kafka cluster.
    byok_key KafkaClusterByokKeyArgs
    dedicated KafkaClusterDedicatedArgs
    (Optional Configuration Block) The configuration of the Dedicated Kafka cluster.
    display_name str
    The name of the Kafka cluster.
    enterprises Sequence[KafkaClusterEnterpriseArgs]
    The configuration of the Enterprise Kafka cluster.
    freights Sequence[KafkaClusterFreightArgs]
    The configuration of the Freight Kafka cluster.
    network KafkaClusterNetworkArgs
    Network represents a network (VPC) in Confluent Cloud. All Networks exist within Confluent-managed cloud provider accounts.
    standard KafkaClusterStandardArgs
    The configuration of the Standard Kafka cluster.
    availability String
    The availability zone configuration of the Kafka cluster. Accepted values are: SINGLE_ZONE, MULTI_ZONE, LOW, and HIGH.
    cloud String
    The cloud service provider that runs the Kafka cluster. Accepted values are: AWS, AZURE, and GCP.
    environment Property Map
    Environment objects represent an isolated namespace for your Confluent resources for organizational purposes.
    region String
    The cloud service provider region where the Kafka cluster is running, for example, us-west-2. See Cloud Providers and Regions for a full list of options for AWS, Azure, and GCP.
    basic Property Map
    The configuration of the Basic Kafka cluster.
    byokKey Property Map
    dedicated Property Map
    (Optional Configuration Block) The configuration of the Dedicated Kafka cluster.
    displayName String
    The name of the Kafka cluster.
    enterprises List<Property Map>
    The configuration of the Enterprise Kafka cluster.
    freights List<Property Map>
    The configuration of the Freight Kafka cluster.
    network Property Map
    Network represents a network (VPC) in Confluent Cloud. All Networks exist within Confluent-managed cloud provider accounts.
    standard Property Map
    The configuration of the Standard Kafka cluster.

    Outputs

    All input properties are implicitly available as output properties. Additionally, the KafkaCluster resource produces the following output properties:

    ApiVersion string
    (Required String) An API Version of the schema version of the Kafka cluster, for example, cmk/v2.
    BootstrapEndpoint string
    (Required String) The bootstrap endpoint used by Kafka clients to connect to the Kafka cluster. (e.g., SASL_SSL://pkc-00000.us-central1.gcp.confluent.cloud:9092).
    Id string
    The provider-assigned unique ID for this managed resource.
    Kind string
    (Required String) A kind of the Kafka cluster, for example, Cluster.
    RbacCrn string
    (Required String) The Confluent Resource Name of the Kafka cluster, for example, crn://confluent.cloud/organization=1111aaaa-11aa-11aa-11aa-111111aaaaaa/environment=env-abc123/cloud-cluster=lkc-abc123.
    RestEndpoint string
    (Required String) The REST endpoint of the Kafka cluster (e.g., https://pkc-00000.us-central1.gcp.confluent.cloud:443).
    ApiVersion string
    (Required String) An API Version of the schema version of the Kafka cluster, for example, cmk/v2.
    BootstrapEndpoint string
    (Required String) The bootstrap endpoint used by Kafka clients to connect to the Kafka cluster. (e.g., SASL_SSL://pkc-00000.us-central1.gcp.confluent.cloud:9092).
    Id string
    The provider-assigned unique ID for this managed resource.
    Kind string
    (Required String) A kind of the Kafka cluster, for example, Cluster.
    RbacCrn string
    (Required String) The Confluent Resource Name of the Kafka cluster, for example, crn://confluent.cloud/organization=1111aaaa-11aa-11aa-11aa-111111aaaaaa/environment=env-abc123/cloud-cluster=lkc-abc123.
    RestEndpoint string
    (Required String) The REST endpoint of the Kafka cluster (e.g., https://pkc-00000.us-central1.gcp.confluent.cloud:443).
    apiVersion String
    (Required String) An API Version of the schema version of the Kafka cluster, for example, cmk/v2.
    bootstrapEndpoint String
    (Required String) The bootstrap endpoint used by Kafka clients to connect to the Kafka cluster. (e.g., SASL_SSL://pkc-00000.us-central1.gcp.confluent.cloud:9092).
    id String
    The provider-assigned unique ID for this managed resource.
    kind String
    (Required String) A kind of the Kafka cluster, for example, Cluster.
    rbacCrn String
    (Required String) The Confluent Resource Name of the Kafka cluster, for example, crn://confluent.cloud/organization=1111aaaa-11aa-11aa-11aa-111111aaaaaa/environment=env-abc123/cloud-cluster=lkc-abc123.
    restEndpoint String
    (Required String) The REST endpoint of the Kafka cluster (e.g., https://pkc-00000.us-central1.gcp.confluent.cloud:443).
    apiVersion string
    (Required String) An API Version of the schema version of the Kafka cluster, for example, cmk/v2.
    bootstrapEndpoint string
    (Required String) The bootstrap endpoint used by Kafka clients to connect to the Kafka cluster. (e.g., SASL_SSL://pkc-00000.us-central1.gcp.confluent.cloud:9092).
    id string
    The provider-assigned unique ID for this managed resource.
    kind string
    (Required String) A kind of the Kafka cluster, for example, Cluster.
    rbacCrn string
    (Required String) The Confluent Resource Name of the Kafka cluster, for example, crn://confluent.cloud/organization=1111aaaa-11aa-11aa-11aa-111111aaaaaa/environment=env-abc123/cloud-cluster=lkc-abc123.
    restEndpoint string
    (Required String) The REST endpoint of the Kafka cluster (e.g., https://pkc-00000.us-central1.gcp.confluent.cloud:443).
    api_version str
    (Required String) An API Version of the schema version of the Kafka cluster, for example, cmk/v2.
    bootstrap_endpoint str
    (Required String) The bootstrap endpoint used by Kafka clients to connect to the Kafka cluster. (e.g., SASL_SSL://pkc-00000.us-central1.gcp.confluent.cloud:9092).
    id str
    The provider-assigned unique ID for this managed resource.
    kind str
    (Required String) The kind of the Kafka cluster, for example, Cluster.
    rbac_crn str
    (Required String) The Confluent Resource Name of the Kafka cluster, for example, crn://confluent.cloud/organization=1111aaaa-11aa-11aa-11aa-111111aaaaaa/environment=env-abc123/cloud-cluster=lkc-abc123.
    rest_endpoint str
    (Required String) The REST endpoint of the Kafka cluster (e.g., https://pkc-00000.us-central1.gcp.confluent.cloud:443).
    apiVersion String
    (Required String) The API version of the Kafka cluster schema, for example, cmk/v2.
    bootstrapEndpoint String
    (Required String) The bootstrap endpoint used by Kafka clients to connect to the Kafka cluster (e.g., SASL_SSL://pkc-00000.us-central1.gcp.confluent.cloud:9092).
    id String
    The provider-assigned unique ID for this managed resource.
    kind String
    (Required String) The kind of the Kafka cluster, for example, Cluster.
    rbacCrn String
    (Required String) The Confluent Resource Name of the Kafka cluster, for example, crn://confluent.cloud/organization=1111aaaa-11aa-11aa-11aa-111111aaaaaa/environment=env-abc123/cloud-cluster=lkc-abc123.
    restEndpoint String
    (Required String) The REST endpoint of the Kafka cluster (e.g., https://pkc-00000.us-central1.gcp.confluent.cloud:443).
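
    For reference, these computed attributes can be exported from a Pulumi program once a cluster resource exists. The TypeScript lines below are a minimal sketch, assuming a confluentcloud.KafkaCluster resource named cluster has already been declared (for example, as in the examples above):

    // Export connection details of an assumed existing KafkaCluster resource
    // named `cluster` so they appear in `pulumi stack output`.
    export const kafkaBootstrapEndpoint = cluster.bootstrapEndpoint;
    export const kafkaRestEndpoint = cluster.restEndpoint;
    export const kafkaRbacCrn = cluster.rbacCrn;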

    Look up Existing KafkaCluster Resource

    Get an existing KafkaCluster resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.

    public static get(name: string, id: Input<ID>, state?: KafkaClusterState, opts?: CustomResourceOptions): KafkaCluster
    @staticmethod
    def get(resource_name: str,
            id: str,
            opts: Optional[ResourceOptions] = None,
            api_version: Optional[str] = None,
            availability: Optional[str] = None,
            basic: Optional[KafkaClusterBasicArgs] = None,
            bootstrap_endpoint: Optional[str] = None,
            byok_key: Optional[KafkaClusterByokKeyArgs] = None,
            cloud: Optional[str] = None,
            dedicated: Optional[KafkaClusterDedicatedArgs] = None,
            display_name: Optional[str] = None,
            enterprises: Optional[Sequence[KafkaClusterEnterpriseArgs]] = None,
            environment: Optional[KafkaClusterEnvironmentArgs] = None,
            freights: Optional[Sequence[KafkaClusterFreightArgs]] = None,
            kind: Optional[str] = None,
            network: Optional[KafkaClusterNetworkArgs] = None,
            rbac_crn: Optional[str] = None,
            region: Optional[str] = None,
            rest_endpoint: Optional[str] = None,
            standard: Optional[KafkaClusterStandardArgs] = None) -> KafkaCluster
    func GetKafkaCluster(ctx *Context, name string, id IDInput, state *KafkaClusterState, opts ...ResourceOption) (*KafkaCluster, error)
    public static KafkaCluster Get(string name, Input<string> id, KafkaClusterState? state, CustomResourceOptions? opts = null)
    public static KafkaCluster get(String name, Output<String> id, KafkaClusterState state, CustomResourceOptions options)
    Resource lookup is not supported in YAML
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    resource_name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
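
    For example, a lookup in TypeScript might look like the sketch below. The resource name, the cluster ID lkc-abc123, and the environment ID env-abc123 are placeholders; depending on your setup, the environment may need to be supplied through the state argument so the provider can locate the cluster.

    import * as confluentcloud from "@pulumi/confluentcloud";

    // Look up an already-provisioned Kafka cluster by its ID so it can be
    // referenced (but not managed) by this program.
    const existing = confluentcloud.KafkaCluster.get("existing-kafka", "lkc-abc123", {
        environment: { id: "env-abc123" },
    });

    // Computed attributes of the looked-up cluster are available as outputs.
    export const existingRestEndpoint = existing.restEndpoint;
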
    The following state arguments are supported:
    ApiVersion string
    (Required String) The API version of the Kafka cluster schema, for example, cmk/v2.
    Availability string
    The availability zone configuration of the Kafka cluster. Accepted values are: SINGLE_ZONE, MULTI_ZONE, LOW, and HIGH.
    Basic Pulumi.ConfluentCloud.Inputs.KafkaClusterBasic
    The configuration of the Basic Kafka cluster.
    BootstrapEndpoint string
    (Required String) The bootstrap endpoint used by Kafka clients to connect to the Kafka cluster (e.g., SASL_SSL://pkc-00000.us-central1.gcp.confluent.cloud:9092).
    ByokKey Pulumi.ConfluentCloud.Inputs.KafkaClusterByokKey
    The configuration of the BYOK key that is used to encrypt the data in the Kafka cluster.
    Cloud string
    The cloud service provider that runs the Kafka cluster. Accepted values are: AWS, AZURE, and GCP.
    Dedicated Pulumi.ConfluentCloud.Inputs.KafkaClusterDedicated
    (Optional Configuration Block) The configuration of the Dedicated Kafka cluster. It supports the following:
    DisplayName string
    The name of the Kafka cluster.
    Enterprises List<Pulumi.ConfluentCloud.Inputs.KafkaClusterEnterprise>
    The configuration of the Enterprise Kafka cluster.
    Environment Pulumi.ConfluentCloud.Inputs.KafkaClusterEnvironment
    Environment objects represent an isolated namespace for your Confluent resources for organizational purposes.
    Freights List<Pulumi.ConfluentCloud.Inputs.KafkaClusterFreight>
    The configuration of the Freight Kafka cluster.
    Kind string
    (Required String) The kind of the Kafka cluster, for example, Cluster.
    Network Pulumi.ConfluentCloud.Inputs.KafkaClusterNetwork
    Network represents a network (VPC) in Confluent Cloud. All Networks exist within Confluent-managed cloud provider accounts.
    RbacCrn string
    (Required String) The Confluent Resource Name of the Kafka cluster, for example, crn://confluent.cloud/organization=1111aaaa-11aa-11aa-11aa-111111aaaaaa/environment=env-abc123/cloud-cluster=lkc-abc123.
    Region string
    The cloud service provider region where the Kafka cluster is running, for example, us-west-2. See Cloud Providers and Regions for a full list of options for AWS, Azure, and GCP.
    RestEndpoint string
    (Required String) The REST endpoint of the Kafka cluster (e.g., https://pkc-00000.us-central1.gcp.confluent.cloud:443).
    Standard Pulumi.ConfluentCloud.Inputs.KafkaClusterStandard
    The configuration of the Standard Kafka cluster.
    ApiVersion string
    (Required String) The API version of the Kafka cluster schema, for example, cmk/v2.
    Availability string
    The availability zone configuration of the Kafka cluster. Accepted values are: SINGLE_ZONE, MULTI_ZONE, LOW, and HIGH.
    Basic KafkaClusterBasicArgs
    The configuration of the Basic Kafka cluster.
    BootstrapEndpoint string
    (Required String) The bootstrap endpoint used by Kafka clients to connect to the Kafka cluster (e.g., SASL_SSL://pkc-00000.us-central1.gcp.confluent.cloud:9092).
    ByokKey KafkaClusterByokKeyArgs
    The configuration of the BYOK key that is used to encrypt the data in the Kafka cluster.
    Cloud string
    The cloud service provider that runs the Kafka cluster. Accepted values are: AWS, AZURE, and GCP.
    Dedicated KafkaClusterDedicatedArgs
    (Optional Configuration Block) The configuration of the Dedicated Kafka cluster. It supports the following:
    DisplayName string
    The name of the Kafka cluster.
    Enterprises []KafkaClusterEnterpriseArgs
    The configuration of the Enterprise Kafka cluster.
    Environment KafkaClusterEnvironmentArgs
    Environment objects represent an isolated namespace for your Confluent resources for organizational purposes.
    Freights []KafkaClusterFreightArgs
    The configuration of the Freight Kafka cluster.
    Kind string
    (Required String) The kind of the Kafka cluster, for example, Cluster.
    Network KafkaClusterNetworkArgs
    Network represents a network (VPC) in Confluent Cloud. All Networks exist within Confluent-managed cloud provider accounts.
    RbacCrn string
    (Required String) The Confluent Resource Name of the Kafka cluster, for example, crn://confluent.cloud/organization=1111aaaa-11aa-11aa-11aa-111111aaaaaa/environment=env-abc123/cloud-cluster=lkc-abc123.
    Region string
    The cloud service provider region where the Kafka cluster is running, for example, us-west-2. See Cloud Providers and Regions for a full list of options for AWS, Azure, and GCP.
    RestEndpoint string
    (Required String) The REST endpoint of the Kafka cluster (e.g., https://pkc-00000.us-central1.gcp.confluent.cloud:443).
    Standard KafkaClusterStandardArgs
    The configuration of the Standard Kafka cluster.
    apiVersion String
    (Required String) The API version of the Kafka cluster schema, for example, cmk/v2.
    availability String
    The availability zone configuration of the Kafka cluster. Accepted values are: SINGLE_ZONE, MULTI_ZONE, LOW, and HIGH.
    basic KafkaClusterBasic
    The configuration of the Basic Kafka cluster.
    bootstrapEndpoint String
    (Required String) The bootstrap endpoint used by Kafka clients to connect to the Kafka cluster (e.g., SASL_SSL://pkc-00000.us-central1.gcp.confluent.cloud:9092).
    byokKey KafkaClusterByokKey
    The configuration of the BYOK key that is used to encrypt the data in the Kafka cluster.
    cloud String
    The cloud service provider that runs the Kafka cluster. Accepted values are: AWS, AZURE, and GCP.
    dedicated KafkaClusterDedicated
    (Optional Configuration Block) The configuration of the Dedicated Kafka cluster. It supports the following:
    displayName String
    The name of the Kafka cluster.
    enterprises List<KafkaClusterEnterprise>
    The configuration of the Enterprise Kafka cluster.
    environment KafkaClusterEnvironment
    Environment objects represent an isolated namespace for your Confluent resources for organizational purposes.
    freights List<KafkaClusterFreight>
    The configuration of the Freight Kafka cluster.
    kind String
    (Required String) The kind of the Kafka cluster, for example, Cluster.
    network KafkaClusterNetwork
    Network represents a network (VPC) in Confluent Cloud. All Networks exist within Confluent-managed cloud provider accounts.
    rbacCrn String
    (Required String) The Confluent Resource Name of the Kafka cluster, for example, crn://confluent.cloud/organization=1111aaaa-11aa-11aa-11aa-111111aaaaaa/environment=env-abc123/cloud-cluster=lkc-abc123.
    region String
    The cloud service provider region where the Kafka cluster is running, for example, us-west-2. See Cloud Providers and Regions for a full list of options for AWS, Azure, and GCP.
    restEndpoint String
    (Required String) The REST endpoint of the Kafka cluster (e.g., https://pkc-00000.us-central1.gcp.confluent.cloud:443).
    standard KafkaClusterStandard
    The configuration of the Standard Kafka cluster.
    apiVersion string
    (Required String) The API version of the Kafka cluster schema, for example, cmk/v2.
    availability string
    The availability zone configuration of the Kafka cluster. Accepted values are: SINGLE_ZONE, MULTI_ZONE, LOW, and HIGH.
    basic KafkaClusterBasic
    The configuration of the Basic Kafka cluster.
    bootstrapEndpoint string
    (Required String) The bootstrap endpoint used by Kafka clients to connect to the Kafka cluster (e.g., SASL_SSL://pkc-00000.us-central1.gcp.confluent.cloud:9092).
    byokKey KafkaClusterByokKey
    The configuration of the BYOK key that is used to encrypt the data in the Kafka cluster.
    cloud string
    The cloud service provider that runs the Kafka cluster. Accepted values are: AWS, AZURE, and GCP.
    dedicated KafkaClusterDedicated
    (Optional Configuration Block) The configuration of the Dedicated Kafka cluster. It supports the following:
    displayName string
    The name of the Kafka cluster.
    enterprises KafkaClusterEnterprise[]
    The configuration of the Enterprise Kafka cluster.
    environment KafkaClusterEnvironment
    Environment objects represent an isolated namespace for your Confluent resources for organizational purposes.
    freights KafkaClusterFreight[]
    The configuration of the Freight Kafka cluster.
    kind string
    (Required String) The kind of the Kafka cluster, for example, Cluster.
    network KafkaClusterNetwork
    Network represents a network (VPC) in Confluent Cloud. All Networks exist within Confluent-managed cloud provider accounts.
    rbacCrn string
    (Required String) The Confluent Resource Name of the Kafka cluster, for example, crn://confluent.cloud/organization=1111aaaa-11aa-11aa-11aa-111111aaaaaa/environment=env-abc123/cloud-cluster=lkc-abc123.
    region string
    The cloud service provider region where the Kafka cluster is running, for example, us-west-2. See Cloud Providers and Regions for a full list of options for AWS, Azure, and GCP.
    restEndpoint string
    (Required String) The REST endpoint of the Kafka cluster (e.g., https://pkc-00000.us-central1.gcp.confluent.cloud:443).
    standard KafkaClusterStandard
    The configuration of the Standard Kafka cluster.
    api_version str
    (Required String) The API version of the Kafka cluster schema, for example, cmk/v2.
    availability str
    The availability zone configuration of the Kafka cluster. Accepted values are: SINGLE_ZONE, MULTI_ZONE, LOW, and HIGH.
    basic KafkaClusterBasicArgs
    The configuration of the Basic Kafka cluster.
    bootstrap_endpoint str
    (Required String) The bootstrap endpoint used by Kafka clients to connect to the Kafka cluster (e.g., SASL_SSL://pkc-00000.us-central1.gcp.confluent.cloud:9092).
    byok_key KafkaClusterByokKeyArgs
    The configuration of the BYOK key that is used to encrypt the data in the Kafka cluster.
    cloud str
    The cloud service provider that runs the Kafka cluster. Accepted values are: AWS, AZURE, and GCP.
    dedicated KafkaClusterDedicatedArgs
    (Optional Configuration Block) The configuration of the Dedicated Kafka cluster. It supports the following:
    display_name str
    The name of the Kafka cluster.
    enterprises Sequence[KafkaClusterEnterpriseArgs]
    The configuration of the Enterprise Kafka cluster.
    environment KafkaClusterEnvironmentArgs
    Environment objects represent an isolated namespace for your Confluent resources for organizational purposes.
    freights Sequence[KafkaClusterFreightArgs]
    The configuration of the Freight Kafka cluster.
    kind str
    (Required String) The kind of the Kafka cluster, for example, Cluster.
    network KafkaClusterNetworkArgs
    Network represents a network (VPC) in Confluent Cloud. All Networks exist within Confluent-managed cloud provider accounts.
    rbac_crn str
    (Required String) The Confluent Resource Name of the Kafka cluster, for example, crn://confluent.cloud/organization=1111aaaa-11aa-11aa-11aa-111111aaaaaa/environment=env-abc123/cloud-cluster=lkc-abc123.
    region str
    The cloud service provider region where the Kafka cluster is running, for example, us-west-2. See Cloud Providers and Regions for a full list of options for AWS, Azure, and GCP.
    rest_endpoint str
    (Required String) The REST endpoint of the Kafka cluster (e.g., https://pkc-00000.us-central1.gcp.confluent.cloud:443).
    standard KafkaClusterStandardArgs
    The configuration of the Standard Kafka cluster.
    apiVersion String
    (Required String) The API version of the Kafka cluster schema, for example, cmk/v2.
    availability String
    The availability zone configuration of the Kafka cluster. Accepted values are: SINGLE_ZONE, MULTI_ZONE, LOW, and HIGH.
    basic Property Map
    The configuration of the Basic Kafka cluster.
    bootstrapEndpoint String
    (Required String) The bootstrap endpoint used by Kafka clients to connect to the Kafka cluster (e.g., SASL_SSL://pkc-00000.us-central1.gcp.confluent.cloud:9092).
    byokKey Property Map
    The configuration of the BYOK key that is used to encrypt the data in the Kafka cluster.
    cloud String
    The cloud service provider that runs the Kafka cluster. Accepted values are: AWS, AZURE, and GCP.
    dedicated Property Map
    (Optional Configuration Block) The configuration of the Dedicated Kafka cluster. It supports the following:
    displayName String
    The name of the Kafka cluster.
    enterprises List<Property Map>
    The configuration of the Enterprise Kafka cluster.
    environment Property Map
    Environment objects represent an isolated namespace for your Confluent resources for organizational purposes.
    freights List<Property Map>
    The configuration of the Freight Kafka cluster.
    kind String
    (Required String) The kind of the Kafka cluster, for example, Cluster.
    network Property Map
    Network represents a network (VPC) in Confluent Cloud. All Networks exist within Confluent-managed cloud provider accounts.
    rbacCrn String
    (Required String) The Confluent Resource Name of the Kafka cluster, for example, crn://confluent.cloud/organization=1111aaaa-11aa-11aa-11aa-111111aaaaaa/environment=env-abc123/cloud-cluster=lkc-abc123.
    region String
    The cloud service provider region where the Kafka cluster is running, for example, us-west-2. See Cloud Providers and Regions for a full list of options for AWS, Azure, and GCP.
    restEndpoint String
    (Required String) The REST endpoint of the Kafka cluster (e.g., https://pkc-00000.us-central1.gcp.confluent.cloud:443).
    standard Property Map
    The configuration of the Standard Kafka cluster.

    Supporting Types

    KafkaClusterByokKey, KafkaClusterByokKeyArgs

    Id string
    The ID of the Confluent key that is used to encrypt the data in the Kafka cluster, for example, cck-lye5m.
    Id string
    The ID of the Confluent key that is used to encrypt the data in the Kafka cluster, for example, cck-lye5m.
    id String
    The ID of the Confluent key that is used to encrypt the data in the Kafka cluster, for example, cck-lye5m.
    id string
    The ID of the Confluent key that is used to encrypt the data in the Kafka cluster, for example, cck-lye5m.
    id str
    The ID of the Confluent key that is used to encrypt the data in the Kafka cluster, for example, cck-lye5m.
    id String
    The ID of the Confluent key that is used to encrypt the data in the Kafka cluster, for example, cck-lye5m.
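
    As an illustration only, the sketch below shows how a byokKey block might be attached to a Dedicated cluster in TypeScript. The key ID cck-lye5m is the placeholder used in the description above, and the environment and cluster settings are hypothetical, not a verified configuration:

    import * as confluentcloud from "@pulumi/confluentcloud";

    // Hypothetical environment for the encrypted cluster.
    const secure = new confluentcloud.Environment("secure", { displayName: "Secure" });

    // A Dedicated cluster whose data is encrypted with a pre-registered
    // Confluent BYOK key (the key ID below is a placeholder).
    const encrypted = new confluentcloud.KafkaCluster("encrypted", {
        displayName: "encrypted_kafka_cluster",
        availability: "SINGLE_ZONE",
        cloud: "AWS",
        region: "us-east-2",
        dedicated: { cku: 1 },
        byokKey: { id: "cck-lye5m" },
        environment: { id: secure.id },
    });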

    KafkaClusterDedicated, KafkaClusterDedicatedArgs

    Cku int

    The number of Confluent Kafka Units (CKUs) for Dedicated cluster types. The minimum number of CKUs for SINGLE_ZONE dedicated clusters is 1 whereas MULTI_ZONE dedicated clusters must have 2 CKUs or more.

    Note: Exactly one of the basic, standard, dedicated, enterprise, or freight configuration blocks must be specified.

    Note: The freight block is in an Early Access lifecycle stage.

    Note: The freight Kafka cluster type is currently only available on AWS.

    Warning: You can only upgrade clusters from basic to standard.

    Note: Currently, provisioning of a Dedicated Kafka cluster takes around 25 minutes on average but might take up to 24 hours. If you can't wait for the pulumi up step to finish, you can exit it and import the cluster by using the pulumi import command once it has been provisioned. When the cluster is provisioned, you will receive an email notification, and you can also follow updates on the Target Environment web page of the Confluent Cloud website.

    EncryptionKey string
    The ID of the encryption key that is used to encrypt the data in the Kafka cluster.
    Zones List<string>
    (Required List of String) The list of zones the cluster is in.

    • On AWS, zones are AWS AZ IDs, for example, use1-az3.
    Cku int

    The number of Confluent Kafka Units (CKUs) for Dedicated cluster types. The minimum number of CKUs for SINGLE_ZONE dedicated clusters is 1 whereas MULTI_ZONE dedicated clusters must have 2 CKUs or more.

    Note: Exactly one of the basic, standard, dedicated, enterprise, or freight configuration blocks must be specified.

    Note: The freight block is in an Early Access lifecycle stage.

    Note: The freight Kafka cluster type is currently only available on AWS.

    Warning: You can only upgrade clusters from basic to standard.

    Note: Currently, provisioning of a Dedicated Kafka cluster takes around 25 minutes on average but might take up to 24 hours. If you can't wait for the pulumi up step to finish, you can exit it and import the cluster by using the pulumi import command once it has been provisioned. When the cluster is provisioned, you will receive an email notification, and you can also follow updates on the Target Environment web page of the Confluent Cloud website.

    EncryptionKey string
    The ID of the encryption key that is used to encrypt the data in the Kafka cluster.
    Zones []string
    (Required List of String) The list of zones the cluster is in.

    • On AWS, zones are AWS AZ IDs, for example, use1-az3.
    cku Integer

    The number of Confluent Kafka Units (CKUs) for Dedicated cluster types. The minimum number of CKUs for SINGLE_ZONE dedicated clusters is 1 whereas MULTI_ZONE dedicated clusters must have 2 CKUs or more.

    Note: Exactly one of the basic, standard, dedicated, enterprise, or freight configuration blocks must be specified.

    Note: The freight block is in an Early Access lifecycle stage.

    Note: The freight Kafka cluster type is currently only available on AWS.

    Warning: You can only upgrade clusters from basic to standard.

    Note: Currently, provisioning of a Dedicated Kafka cluster takes around 25 minutes on average but might take up to 24 hours. If you can't wait for the pulumi up step to finish, you can exit it and import the cluster by using the pulumi import command once it has been provisioned. When the cluster is provisioned, you will receive an email notification, and you can also follow updates on the Target Environment web page of the Confluent Cloud website.

    encryptionKey String
    The ID of the encryption key that is used to encrypt the data in the Kafka cluster.
    zones List<String>
    (Required List of String) The list of zones the cluster is in.

    • On AWS, zones are AWS AZ IDs, for example, use1-az3.
    cku number

    The number of Confluent Kafka Units (CKUs) for Dedicated cluster types. The minimum number of CKUs for SINGLE_ZONE dedicated clusters is 1 whereas MULTI_ZONE dedicated clusters must have 2 CKUs or more.

    Note: Exactly one of the basic, standard, dedicated, enterprise, or freight configuration blocks must be specified.

    Note: The freight block is in an Early Access lifecycle stage.

    Note: The freight Kafka cluster type is currently only available on AWS.

    Warning: You can only upgrade clusters from basic to standard.

    Note: Currently, provisioning of a Dedicated Kafka cluster takes around 25 minutes on average but might take up to 24 hours. If you can't wait for the pulumi up step to finish, you can exit it and import the cluster by using the pulumi import command once it has been provisioned. When the cluster is provisioned, you will receive an email notification, and you can also follow updates on the Target Environment web page of the Confluent Cloud website.

    encryptionKey string
    The ID of the encryption key that is used to encrypt the data in the Kafka cluster.
    zones string[]
    (Required List of String) The list of zones the cluster is in.

    • On AWS, zones are AWS AZ IDs, for example, use1-az3.
    cku int

    The number of Confluent Kafka Units (CKUs) for Dedicated cluster types. The minimum number of CKUs for SINGLE_ZONE dedicated clusters is 1 whereas MULTI_ZONE dedicated clusters must have 2 CKUs or more.

    Note: Exactly one of the basic, standard, dedicated, enterprise, or freight configuration blocks must be specified.

    Note: The freight block is in an Early Access lifecycle stage.

    Note: The freight Kafka cluster type is currently only available on AWS.

    Warning: You can only upgrade clusters from basic to standard.

    Note: Currently, provisioning of a Dedicated Kafka cluster takes around 25 minutes on average but might take up to 24 hours. If you can't wait for the pulumi up step to finish, you can exit it and import the cluster by using the pulumi import command once it has been provisioned. When the cluster is provisioned, you will receive an email notification, and you can also follow updates on the Target Environment web page of the Confluent Cloud website.

    encryption_key str
    The ID of the encryption key that is used to encrypt the data in the Kafka cluster.
    zones Sequence[str]
    (Required List of String) The list of zones the cluster is in.

    • On AWS, zones are AWS AZ IDs, for example, use1-az3.
    cku Number

    The number of Confluent Kafka Units (CKUs) for Dedicated cluster types. The minimum number of CKUs for SINGLE_ZONE dedicated clusters is 1 whereas MULTI_ZONE dedicated clusters must have 2 CKUs or more.

    Note: Exactly one of the basic, standard, dedicated, enterprise, or freight configuration blocks must be specified.

    Note: The freight block is in an Early Access lifecycle stage.

    Note: The freight Kafka cluster type is currently only available on AWS.

    Warning: You can only upgrade clusters from basic to standard.

    Note: Currently, provisioning of a Dedicated Kafka cluster takes around 25 minutes on average but might take up to 24 hours. If you can't wait for the pulumi up step to finish, you can exit it and import the cluster by using the pulumi import command once it has been provisioned. When the cluster is provisioned, you will receive an email notification, and you can also follow updates on the Target Environment web page of the Confluent Cloud website.

    encryptionKey String
    The ID of the encryption key that is used to encrypt the data in the Kafka cluster.
    zones List<String>
    (Required List of String) The list of zones the cluster is in.

    • On AWS, zones are AWS AZ IDs, for example, use1-az3.
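
    Because zones is computed, it can be read back from a provisioned Dedicated cluster. A minimal TypeScript sketch, assuming a KafkaCluster resource named dedicated with a dedicated block is already declared in the program:

    // The provider populates the zones of the dedicated block after
    // provisioning; export them to inspect the cluster's placement.
    export const dedicatedZones = dedicated.dedicated.apply(d => d?.zones);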

    KafkaClusterEnvironment, KafkaClusterEnvironmentArgs

    Id string
    The ID of the Environment that the Kafka cluster belongs to, for example, env-abc123.
    Id string
    The ID of the Environment that the Kafka cluster belongs to, for example, env-abc123.
    id String
    The ID of the Environment that the Kafka cluster belongs to, for example, env-abc123.
    id string
    The ID of the Environment that the Kafka cluster belongs to, for example, env-abc123.
    id str
    The ID of the Environment that the Kafka cluster belongs to, for example, env-abc123.
    id String
    The ID of the Environment that the Kafka cluster belongs to, for example, env-abc123.

    KafkaClusterFreight, KafkaClusterFreightArgs

    Zones List<string>
    (Required List of String) The list of zones the cluster is in.

    • On AWS, zones are AWS AZ IDs, for example, use1-az3.
    Zones []string
    (Required List of String) The list of zones the cluster is in.

    • On AWS, zones are AWS AZ IDs, for example, use1-az3.
    zones List<String>
    (Required List of String) The list of zones the cluster is in.

    • On AWS, zones are AWS AZ IDs, for example, use1-az3.
    zones string[]
    (Required List of String) The list of zones the cluster is in.

    • On AWS, zones are AWS AZ IDs, for example, use1-az3.
    zones Sequence[str]
    (Required List of String) The list of zones the cluster is in.

    • On AWS, zones are AWS AZ IDs, for example, use1-az3.
    zones List<String>
    (Required List of String) The list of zones the cluster is in.

    • On AWS, zones are AWS AZ IDs, for example, use1-az3.

    KafkaClusterNetwork, KafkaClusterNetworkArgs

    Id string
    The ID of the Network that the Kafka cluster belongs to, for example, n-abc123.
    Id string
    The ID of the Network that the Kafka cluster belongs to, for example, n-abc123.
    id String
    The ID of the Network that the Kafka cluster belongs to, for example, n-abc123.
    id string
    The ID of the Network that the Kafka cluster belongs to, for example, n-abc123.
    id str
    The ID of the Network that the Kafka cluster belongs to, for example, n-abc123.
    id String
    The ID of the Network that the Kafka cluster belongs to, for example, n-abc123.

    Import

    You can import a Kafka cluster by using the Environment ID and Kafka cluster ID, in the format <Environment ID>/<Kafka cluster ID>, for example:

    $ export CONFLUENT_CLOUD_API_KEY="<cloud_api_key>"

    $ export CONFLUENT_CLOUD_API_SECRET="<cloud_api_secret>"

    $ pulumi import confluentcloud:index/kafkaCluster:KafkaCluster my_kafka env-abc123/lkc-abc123
    

    Warning: Do not forget to delete your terminal command history afterwards for security purposes.

    To learn more about importing existing cloud resources, see Importing resources.

    Package Details

    Repository
    Confluent Cloud pulumi/pulumi-confluentcloud
    License
    Apache-2.0
    Notes
    This Pulumi package is based on the confluent Terraform Provider.