
aiven.Kafka


Aiven v6.18.0 published on Thursday, Jun 27, 2024 by Pulumi

    Creates and manages an Aiven for Apache Kafka® service.

    Example Usage

    import * as pulumi from "@pulumi/pulumi";
    import * as aiven from "@pulumi/aiven";
    
    const exampleKafka = new aiven.Kafka("example_kafka", {
        project: exampleProject.project,
        cloudName: "google-europe-west1",
        plan: "business-4",
        serviceName: "example-kafka",
        maintenanceWindowDow: "monday",
        maintenanceWindowTime: "10:00:00",
        kafkaUserConfig: {
            kafkaRest: true,
            kafkaConnect: true,
            schemaRegistry: true,
            kafkaVersion: "3.5",
            kafka: {
                groupMaxSessionTimeoutMs: 70000,
                logRetentionBytes: 1000000000,
            },
            publicAccess: {
                kafkaRest: true,
                kafkaConnect: true,
            },
        },
    });
    
    import pulumi
    import pulumi_aiven as aiven
    
    example_kafka = aiven.Kafka("example_kafka",
        project=example_project["project"],
        cloud_name="google-europe-west1",
        plan="business-4",
        service_name="example-kafka",
        maintenance_window_dow="monday",
        maintenance_window_time="10:00:00",
        kafka_user_config=aiven.KafkaKafkaUserConfigArgs(
            kafka_rest=True,
            kafka_connect=True,
            schema_registry=True,
            kafka_version="3.5",
            kafka=aiven.KafkaKafkaUserConfigKafkaArgs(
                group_max_session_timeout_ms=70000,
                log_retention_bytes=1000000000,
            ),
            public_access=aiven.KafkaKafkaUserConfigPublicAccessArgs(
                kafka_rest=True,
                kafka_connect=True,
            ),
        ))
    
    package main
    
    import (
    	"github.com/pulumi/pulumi-aiven/sdk/v6/go/aiven"
    	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
    )
    
    func main() {
    	pulumi.Run(func(ctx *pulumi.Context) error {
    		_, err := aiven.NewKafka(ctx, "example_kafka", &aiven.KafkaArgs{
    			Project:               pulumi.Any(exampleProject.Project),
    			CloudName:             pulumi.String("google-europe-west1"),
    			Plan:                  pulumi.String("business-4"),
    			ServiceName:           pulumi.String("example-kafka"),
    			MaintenanceWindowDow:  pulumi.String("monday"),
    			MaintenanceWindowTime: pulumi.String("10:00:00"),
    			KafkaUserConfig: &aiven.KafkaKafkaUserConfigArgs{
    				KafkaRest:      pulumi.Bool(true),
    				KafkaConnect:   pulumi.Bool(true),
    				SchemaRegistry: pulumi.Bool(true),
    				KafkaVersion:   pulumi.String("3.5"),
    				Kafka: &aiven.KafkaKafkaUserConfigKafkaArgs{
    					GroupMaxSessionTimeoutMs: pulumi.Int(70000),
    					LogRetentionBytes:        pulumi.Int(1000000000),
    				},
    				PublicAccess: &aiven.KafkaKafkaUserConfigPublicAccessArgs{
    					KafkaRest:    pulumi.Bool(true),
    					KafkaConnect: pulumi.Bool(true),
    				},
    			},
    		})
    		if err != nil {
    			return err
    		}
    		return nil
    	})
    }
    
    using System.Collections.Generic;
    using System.Linq;
    using Pulumi;
    using Aiven = Pulumi.Aiven;
    
    return await Deployment.RunAsync(() => 
    {
        var exampleKafka = new Aiven.Kafka("example_kafka", new()
        {
            Project = exampleProject.Project,
            CloudName = "google-europe-west1",
            Plan = "business-4",
            ServiceName = "example-kafka",
            MaintenanceWindowDow = "monday",
            MaintenanceWindowTime = "10:00:00",
            KafkaUserConfig = new Aiven.Inputs.KafkaKafkaUserConfigArgs
            {
                KafkaRest = true,
                KafkaConnect = true,
                SchemaRegistry = true,
                KafkaVersion = "3.5",
                Kafka = new Aiven.Inputs.KafkaKafkaUserConfigKafkaArgs
                {
                    GroupMaxSessionTimeoutMs = 70000,
                    LogRetentionBytes = 1000000000,
                },
                PublicAccess = new Aiven.Inputs.KafkaKafkaUserConfigPublicAccessArgs
                {
                    KafkaRest = true,
                    KafkaConnect = true,
                },
            },
        });
    
    });
    
    package generated_program;
    
    import com.pulumi.Context;
    import com.pulumi.Pulumi;
    import com.pulumi.core.Output;
    import com.pulumi.aiven.Kafka;
    import com.pulumi.aiven.KafkaArgs;
    import com.pulumi.aiven.inputs.KafkaKafkaUserConfigArgs;
    import com.pulumi.aiven.inputs.KafkaKafkaUserConfigKafkaArgs;
    import com.pulumi.aiven.inputs.KafkaKafkaUserConfigPublicAccessArgs;
    import java.util.List;
    import java.util.ArrayList;
    import java.util.Map;
    import java.io.File;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    
    public class App {
        public static void main(String[] args) {
            Pulumi.run(App::stack);
        }
    
        public static void stack(Context ctx) {
            var exampleKafka = new Kafka("exampleKafka", KafkaArgs.builder()
                .project(exampleProject.project())
                .cloudName("google-europe-west1")
                .plan("business-4")
                .serviceName("example-kafka")
                .maintenanceWindowDow("monday")
                .maintenanceWindowTime("10:00:00")
                .kafkaUserConfig(KafkaKafkaUserConfigArgs.builder()
                    .kafkaRest(true)
                    .kafkaConnect(true)
                    .schemaRegistry(true)
                    .kafkaVersion("3.5")
                    .kafka(KafkaKafkaUserConfigKafkaArgs.builder()
                        .groupMaxSessionTimeoutMs(70000)
                        .logRetentionBytes(1000000000)
                        .build())
                    .publicAccess(KafkaKafkaUserConfigPublicAccessArgs.builder()
                        .kafkaRest(true)
                        .kafkaConnect(true)
                        .build())
                    .build())
                .build());
    
        }
    }
    
    resources:
      exampleKafka:
        type: aiven:Kafka
        name: example_kafka
        properties:
          project: ${exampleProject.project}
          cloudName: google-europe-west1
          plan: business-4
          serviceName: example-kafka
          maintenanceWindowDow: monday
          maintenanceWindowTime: 10:00:00
          kafkaUserConfig:
            kafkaRest: true
            kafkaConnect: true
            schemaRegistry: true
            kafkaVersion: '3.5'
            kafka:
              groupMaxSessionTimeoutMs: 70000
              logRetentionBytes: 1000000000
            publicAccess:
              kafkaRest: true
              kafkaConnect: true
    

    Create Kafka Resource

    Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.
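
    A minimal sketch in TypeScript, assuming an existing Aiven project named example-project (the plan, region, and service name below are placeholders, not prescriptions):

    import * as aiven from "@pulumi/aiven";
    
    // Only the core inputs are set here; all other settings fall back to provider defaults.
    const kafka = new aiven.Kafka("kafka", {
        project: "example-project",        // name of an existing Aiven project
        cloudName: "google-europe-west1",  // cloud provider and region
        plan: "business-4",                // service plan
        serviceName: "my-kafka",           // service name (cannot be changed later)
    });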

    Constructor syntax

    new Kafka(name: string, args: KafkaArgs, opts?: CustomResourceOptions);
    @overload
    def Kafka(resource_name: str,
              args: KafkaArgs,
              opts: Optional[ResourceOptions] = None)
    
    @overload
    def Kafka(resource_name: str,
              opts: Optional[ResourceOptions] = None,
              plan: Optional[str] = None,
              service_name: Optional[str] = None,
              project: Optional[str] = None,
              maintenance_window_time: Optional[str] = None,
              default_acl: Optional[bool] = None,
              kafkas: Optional[Sequence[KafkaKafkaArgs]] = None,
              karapace: Optional[bool] = None,
              maintenance_window_dow: Optional[str] = None,
              additional_disk_space: Optional[str] = None,
              disk_space: Optional[str] = None,
              kafka_user_config: Optional[KafkaKafkaUserConfigArgs] = None,
              project_vpc_id: Optional[str] = None,
              service_integrations: Optional[Sequence[KafkaServiceIntegrationArgs]] = None,
              cloud_name: Optional[str] = None,
              static_ips: Optional[Sequence[str]] = None,
              tags: Optional[Sequence[KafkaTagArgs]] = None,
              tech_emails: Optional[Sequence[KafkaTechEmailArgs]] = None,
              termination_protection: Optional[bool] = None)
    func NewKafka(ctx *Context, name string, args KafkaArgs, opts ...ResourceOption) (*Kafka, error)
    public Kafka(string name, KafkaArgs args, CustomResourceOptions? opts = null)
    public Kafka(String name, KafkaArgs args)
    public Kafka(String name, KafkaArgs args, CustomResourceOptions options)
    
    type: aiven:Kafka
    properties: # The arguments to resource properties.
    options: # Bag of options to control resource's behavior.
    
    

    Parameters

    name string
    The unique name of the resource.
    args KafkaArgs
    The arguments to resource properties.
    opts CustomResourceOptions
    Bag of options to control resource's behavior.
    resource_name str
    The unique name of the resource.
    args KafkaArgs
    The arguments to resource properties.
    opts ResourceOptions
    Bag of options to control resource's behavior.
    ctx Context
    Context object for the current deployment.
    name string
    The unique name of the resource.
    args KafkaArgs
    The arguments to resource properties.
    opts ResourceOption
    Bag of options to control resource's behavior.
    name string
    The unique name of the resource.
    args KafkaArgs
    The arguments to resource properties.
    opts CustomResourceOptions
    Bag of options to control resource's behavior.
    name String
    The unique name of the resource.
    args KafkaArgs
    The arguments to resource properties.
    options CustomResourceOptions
    Bag of options to control resource's behavior.
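
    Beyond the name and args, the options bag controls resource behavior. A hedged TypeScript sketch (resource and project names are placeholders) that protects the service from accidental deletion and declares an explicit dependency:

    import * as aiven from "@pulumi/aiven";
    
    const exampleProject = new aiven.Project("example_project", {project: "example-project"});
    
    const kafka = new aiven.Kafka("kafka", {
        project: exampleProject.project,
        cloudName: "google-europe-west1",
        plan: "business-4",
        serviceName: "example-kafka",
    }, {
        protect: true,               // block deletion until the option is removed
        dependsOn: [exampleProject], // usually implied by the output reference above
    });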

    Constructor example

    The following reference example uses placeholder values for all input properties.

    var kafkaResource = new Aiven.Kafka("kafkaResource", new()
    {
        Plan = "string",
        ServiceName = "string",
        Project = "string",
        MaintenanceWindowTime = "string",
        DefaultAcl = false,
        KafkaServer = new[]
        {
            new Aiven.Inputs.KafkaKafkaArgs
            {
                AccessCert = "string",
                AccessKey = "string",
                ConnectUri = "string",
                RestUri = "string",
                SchemaRegistryUri = "string",
                Uris = new[]
                {
                    "string",
                },
            },
        },
        MaintenanceWindowDow = "string",
        AdditionalDiskSpace = "string",
        KafkaUserConfig = new Aiven.Inputs.KafkaKafkaUserConfigArgs
        {
            AivenKafkaTopicMessages = false,
            CustomDomain = "string",
            IpFilterObjects = new[]
            {
                new Aiven.Inputs.KafkaKafkaUserConfigIpFilterObjectArgs
                {
                    Network = "string",
                    Description = "string",
                },
            },
            IpFilterStrings = new[]
            {
                "string",
            },
            Kafka = new Aiven.Inputs.KafkaKafkaUserConfigKafkaArgs
            {
                AutoCreateTopicsEnable = false,
                CompressionType = "string",
                ConnectionsMaxIdleMs = 0,
                DefaultReplicationFactor = 0,
                GroupInitialRebalanceDelayMs = 0,
                GroupMaxSessionTimeoutMs = 0,
                GroupMinSessionTimeoutMs = 0,
                LogCleanerDeleteRetentionMs = 0,
                LogCleanerMaxCompactionLagMs = 0,
                LogCleanerMinCleanableRatio = 0,
                LogCleanerMinCompactionLagMs = 0,
                LogCleanupPolicy = "string",
                LogFlushIntervalMessages = 0,
                LogFlushIntervalMs = 0,
                LogIndexIntervalBytes = 0,
                LogIndexSizeMaxBytes = 0,
                LogLocalRetentionBytes = 0,
                LogLocalRetentionMs = 0,
                LogMessageDownconversionEnable = false,
                LogMessageTimestampDifferenceMaxMs = 0,
                LogMessageTimestampType = "string",
                LogPreallocate = false,
                LogRetentionBytes = 0,
                LogRetentionHours = 0,
                LogRetentionMs = 0,
                LogRollJitterMs = 0,
                LogRollMs = 0,
                LogSegmentBytes = 0,
                LogSegmentDeleteDelayMs = 0,
                MaxConnectionsPerIp = 0,
                MaxIncrementalFetchSessionCacheSlots = 0,
                MessageMaxBytes = 0,
                MinInsyncReplicas = 0,
                NumPartitions = 0,
                OffsetsRetentionMinutes = 0,
                ProducerPurgatoryPurgeIntervalRequests = 0,
                ReplicaFetchMaxBytes = 0,
                ReplicaFetchResponseMaxBytes = 0,
                SaslOauthbearerExpectedAudience = "string",
                SaslOauthbearerExpectedIssuer = "string",
                SaslOauthbearerJwksEndpointUrl = "string",
                SaslOauthbearerSubClaimName = "string",
                SocketRequestMaxBytes = 0,
                TransactionPartitionVerificationEnable = false,
                TransactionRemoveExpiredTransactionCleanupIntervalMs = 0,
                TransactionStateLogSegmentBytes = 0,
            },
            KafkaAuthenticationMethods = new Aiven.Inputs.KafkaKafkaUserConfigKafkaAuthenticationMethodsArgs
            {
                Certificate = false,
                Sasl = false,
            },
            KafkaConnect = false,
            KafkaConnectConfig = new Aiven.Inputs.KafkaKafkaUserConfigKafkaConnectConfigArgs
            {
                ConnectorClientConfigOverridePolicy = "string",
                ConsumerAutoOffsetReset = "string",
                ConsumerFetchMaxBytes = 0,
                ConsumerIsolationLevel = "string",
                ConsumerMaxPartitionFetchBytes = 0,
                ConsumerMaxPollIntervalMs = 0,
                ConsumerMaxPollRecords = 0,
                OffsetFlushIntervalMs = 0,
                OffsetFlushTimeoutMs = 0,
                ProducerBatchSize = 0,
                ProducerBufferMemory = 0,
                ProducerCompressionType = "string",
                ProducerLingerMs = 0,
                ProducerMaxRequestSize = 0,
                ScheduledRebalanceMaxDelayMs = 0,
                SessionTimeoutMs = 0,
            },
            KafkaConnectSecretProviders = new[]
            {
                new Aiven.Inputs.KafkaKafkaUserConfigKafkaConnectSecretProviderArgs
                {
                    Name = "string",
                    Aws = new Aiven.Inputs.KafkaKafkaUserConfigKafkaConnectSecretProviderAwsArgs
                    {
                        AuthMethod = "string",
                        Region = "string",
                        AccessKey = "string",
                        SecretKey = "string",
                    },
                    Vault = new Aiven.Inputs.KafkaKafkaUserConfigKafkaConnectSecretProviderVaultArgs
                    {
                        Address = "string",
                        AuthMethod = "string",
                        EngineVersion = 0,
                        Token = "string",
                    },
                },
            },
            KafkaRest = false,
            KafkaRestAuthorization = false,
            KafkaRestConfig = new Aiven.Inputs.KafkaKafkaUserConfigKafkaRestConfigArgs
            {
                ConsumerEnableAutoCommit = false,
                ConsumerRequestMaxBytes = 0,
                ConsumerRequestTimeoutMs = 0,
                NameStrategy = "string",
                NameStrategyValidation = false,
                ProducerAcks = "string",
                ProducerCompressionType = "string",
                ProducerLingerMs = 0,
                ProducerMaxRequestSize = 0,
                SimpleconsumerPoolSizeMax = 0,
            },
            KafkaVersion = "string",
            LetsencryptSaslPrivatelink = false,
            PrivateAccess = new Aiven.Inputs.KafkaKafkaUserConfigPrivateAccessArgs
            {
                Kafka = false,
                KafkaConnect = false,
                KafkaRest = false,
                Prometheus = false,
                SchemaRegistry = false,
            },
            PrivatelinkAccess = new Aiven.Inputs.KafkaKafkaUserConfigPrivatelinkAccessArgs
            {
                Jolokia = false,
                Kafka = false,
                KafkaConnect = false,
                KafkaRest = false,
                Prometheus = false,
                SchemaRegistry = false,
            },
            PublicAccess = new Aiven.Inputs.KafkaKafkaUserConfigPublicAccessArgs
            {
                Kafka = false,
                KafkaConnect = false,
                KafkaRest = false,
                Prometheus = false,
                SchemaRegistry = false,
            },
            SchemaRegistry = false,
            SchemaRegistryConfig = new Aiven.Inputs.KafkaKafkaUserConfigSchemaRegistryConfigArgs
            {
                LeaderEligibility = false,
                TopicName = "string",
            },
            ServiceLog = false,
            StaticIps = false,
            TieredStorage = new Aiven.Inputs.KafkaKafkaUserConfigTieredStorageArgs
            {
                Enabled = false,
            },
        },
        ProjectVpcId = "string",
        ServiceIntegrations = new[]
        {
            new Aiven.Inputs.KafkaServiceIntegrationArgs
            {
                IntegrationType = "string",
                SourceServiceName = "string",
            },
        },
        CloudName = "string",
        StaticIps = new[]
        {
            "string",
        },
        Tags = new[]
        {
            new Aiven.Inputs.KafkaTagArgs
            {
                Key = "string",
                Value = "string",
            },
        },
        TechEmails = new[]
        {
            new Aiven.Inputs.KafkaTechEmailArgs
            {
                Email = "string",
            },
        },
        TerminationProtection = false,
    });
    
    example, err := aiven.NewKafka(ctx, "kafkaResource", &aiven.KafkaArgs{
    	Plan:                  pulumi.String("string"),
    	ServiceName:           pulumi.String("string"),
    	Project:               pulumi.String("string"),
    	MaintenanceWindowTime: pulumi.String("string"),
    	DefaultAcl:            pulumi.Bool(false),
    	Kafkas: aiven.KafkaKafkaArray{
    		&aiven.KafkaKafkaArgs{
    			AccessCert:        pulumi.String("string"),
    			AccessKey:         pulumi.String("string"),
    			ConnectUri:        pulumi.String("string"),
    			RestUri:           pulumi.String("string"),
    			SchemaRegistryUri: pulumi.String("string"),
    			Uris: pulumi.StringArray{
    				pulumi.String("string"),
    			},
    		},
    	},
    	MaintenanceWindowDow: pulumi.String("string"),
    	AdditionalDiskSpace:  pulumi.String("string"),
    	KafkaUserConfig: &aiven.KafkaKafkaUserConfigArgs{
    		AivenKafkaTopicMessages: pulumi.Bool(false),
    		CustomDomain:            pulumi.String("string"),
    		IpFilterObjects: aiven.KafkaKafkaUserConfigIpFilterObjectArray{
    			&aiven.KafkaKafkaUserConfigIpFilterObjectArgs{
    				Network:     pulumi.String("string"),
    				Description: pulumi.String("string"),
    			},
    		},
    		IpFilterStrings: pulumi.StringArray{
    			pulumi.String("string"),
    		},
    		Kafka: &aiven.KafkaKafkaUserConfigKafkaArgs{
    			AutoCreateTopicsEnable:                               pulumi.Bool(false),
    			CompressionType:                                      pulumi.String("string"),
    			ConnectionsMaxIdleMs:                                 pulumi.Int(0),
    			DefaultReplicationFactor:                             pulumi.Int(0),
    			GroupInitialRebalanceDelayMs:                         pulumi.Int(0),
    			GroupMaxSessionTimeoutMs:                             pulumi.Int(0),
    			GroupMinSessionTimeoutMs:                             pulumi.Int(0),
    			LogCleanerDeleteRetentionMs:                          pulumi.Int(0),
    			LogCleanerMaxCompactionLagMs:                         pulumi.Int(0),
    			LogCleanerMinCleanableRatio:                          pulumi.Float64(0),
    			LogCleanerMinCompactionLagMs:                         pulumi.Int(0),
    			LogCleanupPolicy:                                     pulumi.String("string"),
    			LogFlushIntervalMessages:                             pulumi.Int(0),
    			LogFlushIntervalMs:                                   pulumi.Int(0),
    			LogIndexIntervalBytes:                                pulumi.Int(0),
    			LogIndexSizeMaxBytes:                                 pulumi.Int(0),
    			LogLocalRetentionBytes:                               pulumi.Int(0),
    			LogLocalRetentionMs:                                  pulumi.Int(0),
    			LogMessageDownconversionEnable:                       pulumi.Bool(false),
    			LogMessageTimestampDifferenceMaxMs:                   pulumi.Int(0),
    			LogMessageTimestampType:                              pulumi.String("string"),
    			LogPreallocate:                                       pulumi.Bool(false),
    			LogRetentionBytes:                                    pulumi.Int(0),
    			LogRetentionHours:                                    pulumi.Int(0),
    			LogRetentionMs:                                       pulumi.Int(0),
    			LogRollJitterMs:                                      pulumi.Int(0),
    			LogRollMs:                                            pulumi.Int(0),
    			LogSegmentBytes:                                      pulumi.Int(0),
    			LogSegmentDeleteDelayMs:                              pulumi.Int(0),
    			MaxConnectionsPerIp:                                  pulumi.Int(0),
    			MaxIncrementalFetchSessionCacheSlots:                 pulumi.Int(0),
    			MessageMaxBytes:                                      pulumi.Int(0),
    			MinInsyncReplicas:                                    pulumi.Int(0),
    			NumPartitions:                                        pulumi.Int(0),
    			OffsetsRetentionMinutes:                              pulumi.Int(0),
    			ProducerPurgatoryPurgeIntervalRequests:               pulumi.Int(0),
    			ReplicaFetchMaxBytes:                                 pulumi.Int(0),
    			ReplicaFetchResponseMaxBytes:                         pulumi.Int(0),
    			SaslOauthbearerExpectedAudience:                      pulumi.String("string"),
    			SaslOauthbearerExpectedIssuer:                        pulumi.String("string"),
    			SaslOauthbearerJwksEndpointUrl:                       pulumi.String("string"),
    			SaslOauthbearerSubClaimName:                          pulumi.String("string"),
    			SocketRequestMaxBytes:                                pulumi.Int(0),
    			TransactionPartitionVerificationEnable:               pulumi.Bool(false),
    			TransactionRemoveExpiredTransactionCleanupIntervalMs: pulumi.Int(0),
    			TransactionStateLogSegmentBytes:                      pulumi.Int(0),
    		},
    		KafkaAuthenticationMethods: &aiven.KafkaKafkaUserConfigKafkaAuthenticationMethodsArgs{
    			Certificate: pulumi.Bool(false),
    			Sasl:        pulumi.Bool(false),
    		},
    		KafkaConnect: pulumi.Bool(false),
    		KafkaConnectConfig: &aiven.KafkaKafkaUserConfigKafkaConnectConfigArgs{
    			ConnectorClientConfigOverridePolicy: pulumi.String("string"),
    			ConsumerAutoOffsetReset:             pulumi.String("string"),
    			ConsumerFetchMaxBytes:               pulumi.Int(0),
    			ConsumerIsolationLevel:              pulumi.String("string"),
    			ConsumerMaxPartitionFetchBytes:      pulumi.Int(0),
    			ConsumerMaxPollIntervalMs:           pulumi.Int(0),
    			ConsumerMaxPollRecords:              pulumi.Int(0),
    			OffsetFlushIntervalMs:               pulumi.Int(0),
    			OffsetFlushTimeoutMs:                pulumi.Int(0),
    			ProducerBatchSize:                   pulumi.Int(0),
    			ProducerBufferMemory:                pulumi.Int(0),
    			ProducerCompressionType:             pulumi.String("string"),
    			ProducerLingerMs:                    pulumi.Int(0),
    			ProducerMaxRequestSize:              pulumi.Int(0),
    			ScheduledRebalanceMaxDelayMs:        pulumi.Int(0),
    			SessionTimeoutMs:                    pulumi.Int(0),
    		},
    		KafkaConnectSecretProviders: aiven.KafkaKafkaUserConfigKafkaConnectSecretProviderArray{
    			&aiven.KafkaKafkaUserConfigKafkaConnectSecretProviderArgs{
    				Name: pulumi.String("string"),
    				Aws: &aiven.KafkaKafkaUserConfigKafkaConnectSecretProviderAwsArgs{
    					AuthMethod: pulumi.String("string"),
    					Region:     pulumi.String("string"),
    					AccessKey:  pulumi.String("string"),
    					SecretKey:  pulumi.String("string"),
    				},
    				Vault: &aiven.KafkaKafkaUserConfigKafkaConnectSecretProviderVaultArgs{
    					Address:       pulumi.String("string"),
    					AuthMethod:    pulumi.String("string"),
    					EngineVersion: pulumi.Int(0),
    					Token:         pulumi.String("string"),
    				},
    			},
    		},
    		KafkaRest:              pulumi.Bool(false),
    		KafkaRestAuthorization: pulumi.Bool(false),
    		KafkaRestConfig: &aiven.KafkaKafkaUserConfigKafkaRestConfigArgs{
    			ConsumerEnableAutoCommit:  pulumi.Bool(false),
    			ConsumerRequestMaxBytes:   pulumi.Int(0),
    			ConsumerRequestTimeoutMs:  pulumi.Int(0),
    			NameStrategy:              pulumi.String("string"),
    			NameStrategyValidation:    pulumi.Bool(false),
    			ProducerAcks:              pulumi.String("string"),
    			ProducerCompressionType:   pulumi.String("string"),
    			ProducerLingerMs:          pulumi.Int(0),
    			ProducerMaxRequestSize:    pulumi.Int(0),
    			SimpleconsumerPoolSizeMax: pulumi.Int(0),
    		},
    		KafkaVersion:               pulumi.String("string"),
    		LetsencryptSaslPrivatelink: pulumi.Bool(false),
    		PrivateAccess: &aiven.KafkaKafkaUserConfigPrivateAccessArgs{
    			Kafka:          pulumi.Bool(false),
    			KafkaConnect:   pulumi.Bool(false),
    			KafkaRest:      pulumi.Bool(false),
    			Prometheus:     pulumi.Bool(false),
    			SchemaRegistry: pulumi.Bool(false),
    		},
    		PrivatelinkAccess: &aiven.KafkaKafkaUserConfigPrivatelinkAccessArgs{
    			Jolokia:        pulumi.Bool(false),
    			Kafka:          pulumi.Bool(false),
    			KafkaConnect:   pulumi.Bool(false),
    			KafkaRest:      pulumi.Bool(false),
    			Prometheus:     pulumi.Bool(false),
    			SchemaRegistry: pulumi.Bool(false),
    		},
    		PublicAccess: &aiven.KafkaKafkaUserConfigPublicAccessArgs{
    			Kafka:          pulumi.Bool(false),
    			KafkaConnect:   pulumi.Bool(false),
    			KafkaRest:      pulumi.Bool(false),
    			Prometheus:     pulumi.Bool(false),
    			SchemaRegistry: pulumi.Bool(false),
    		},
    		SchemaRegistry: pulumi.Bool(false),
    		SchemaRegistryConfig: &aiven.KafkaKafkaUserConfigSchemaRegistryConfigArgs{
    			LeaderEligibility: pulumi.Bool(false),
    			TopicName:         pulumi.String("string"),
    		},
    		ServiceLog: pulumi.Bool(false),
    		StaticIps:  pulumi.Bool(false),
    		TieredStorage: &aiven.KafkaKafkaUserConfigTieredStorageArgs{
    			Enabled: pulumi.Bool(false),
    		},
    	},
    	ProjectVpcId: pulumi.String("string"),
    	ServiceIntegrations: aiven.KafkaServiceIntegrationArray{
    		&aiven.KafkaServiceIntegrationArgs{
    			IntegrationType:   pulumi.String("string"),
    			SourceServiceName: pulumi.String("string"),
    		},
    	},
    	CloudName: pulumi.String("string"),
    	StaticIps: pulumi.StringArray{
    		pulumi.String("string"),
    	},
    	Tags: aiven.KafkaTagArray{
    		&aiven.KafkaTagArgs{
    			Key:   pulumi.String("string"),
    			Value: pulumi.String("string"),
    		},
    	},
    	TechEmails: aiven.KafkaTechEmailArray{
    		&aiven.KafkaTechEmailArgs{
    			Email: pulumi.String("string"),
    		},
    	},
    	TerminationProtection: pulumi.Bool(false),
    })
    
    var kafkaResource = new Kafka("kafkaResource", KafkaArgs.builder()
        .plan("string")
        .serviceName("string")
        .project("string")
        .maintenanceWindowTime("string")
        .defaultAcl(false)
        .kafkas(KafkaKafkaArgs.builder()
            .accessCert("string")
            .accessKey("string")
            .connectUri("string")
            .restUri("string")
            .schemaRegistryUri("string")
            .uris("string")
            .build())
        .maintenanceWindowDow("string")
        .additionalDiskSpace("string")
        .kafkaUserConfig(KafkaKafkaUserConfigArgs.builder()
            .aivenKafkaTopicMessages(false)
            .customDomain("string")
            .ipFilterObjects(KafkaKafkaUserConfigIpFilterObjectArgs.builder()
                .network("string")
                .description("string")
                .build())
            .ipFilterStrings("string")
            .kafka(KafkaKafkaUserConfigKafkaArgs.builder()
                .autoCreateTopicsEnable(false)
                .compressionType("string")
                .connectionsMaxIdleMs(0)
                .defaultReplicationFactor(0)
                .groupInitialRebalanceDelayMs(0)
                .groupMaxSessionTimeoutMs(0)
                .groupMinSessionTimeoutMs(0)
                .logCleanerDeleteRetentionMs(0)
                .logCleanerMaxCompactionLagMs(0)
                .logCleanerMinCleanableRatio(0)
                .logCleanerMinCompactionLagMs(0)
                .logCleanupPolicy("string")
                .logFlushIntervalMessages(0)
                .logFlushIntervalMs(0)
                .logIndexIntervalBytes(0)
                .logIndexSizeMaxBytes(0)
                .logLocalRetentionBytes(0)
                .logLocalRetentionMs(0)
                .logMessageDownconversionEnable(false)
                .logMessageTimestampDifferenceMaxMs(0)
                .logMessageTimestampType("string")
                .logPreallocate(false)
                .logRetentionBytes(0)
                .logRetentionHours(0)
                .logRetentionMs(0)
                .logRollJitterMs(0)
                .logRollMs(0)
                .logSegmentBytes(0)
                .logSegmentDeleteDelayMs(0)
                .maxConnectionsPerIp(0)
                .maxIncrementalFetchSessionCacheSlots(0)
                .messageMaxBytes(0)
                .minInsyncReplicas(0)
                .numPartitions(0)
                .offsetsRetentionMinutes(0)
                .producerPurgatoryPurgeIntervalRequests(0)
                .replicaFetchMaxBytes(0)
                .replicaFetchResponseMaxBytes(0)
                .saslOauthbearerExpectedAudience("string")
                .saslOauthbearerExpectedIssuer("string")
                .saslOauthbearerJwksEndpointUrl("string")
                .saslOauthbearerSubClaimName("string")
                .socketRequestMaxBytes(0)
                .transactionPartitionVerificationEnable(false)
                .transactionRemoveExpiredTransactionCleanupIntervalMs(0)
                .transactionStateLogSegmentBytes(0)
                .build())
            .kafkaAuthenticationMethods(KafkaKafkaUserConfigKafkaAuthenticationMethodsArgs.builder()
                .certificate(false)
                .sasl(false)
                .build())
            .kafkaConnect(false)
            .kafkaConnectConfig(KafkaKafkaUserConfigKafkaConnectConfigArgs.builder()
                .connectorClientConfigOverridePolicy("string")
                .consumerAutoOffsetReset("string")
                .consumerFetchMaxBytes(0)
                .consumerIsolationLevel("string")
                .consumerMaxPartitionFetchBytes(0)
                .consumerMaxPollIntervalMs(0)
                .consumerMaxPollRecords(0)
                .offsetFlushIntervalMs(0)
                .offsetFlushTimeoutMs(0)
                .producerBatchSize(0)
                .producerBufferMemory(0)
                .producerCompressionType("string")
                .producerLingerMs(0)
                .producerMaxRequestSize(0)
                .scheduledRebalanceMaxDelayMs(0)
                .sessionTimeoutMs(0)
                .build())
            .kafkaConnectSecretProviders(KafkaKafkaUserConfigKafkaConnectSecretProviderArgs.builder()
                .name("string")
                .aws(KafkaKafkaUserConfigKafkaConnectSecretProviderAwsArgs.builder()
                    .authMethod("string")
                    .region("string")
                    .accessKey("string")
                    .secretKey("string")
                    .build())
                .vault(KafkaKafkaUserConfigKafkaConnectSecretProviderVaultArgs.builder()
                    .address("string")
                    .authMethod("string")
                    .engineVersion(0)
                    .token("string")
                    .build())
                .build())
            .kafkaRest(false)
            .kafkaRestAuthorization(false)
            .kafkaRestConfig(KafkaKafkaUserConfigKafkaRestConfigArgs.builder()
                .consumerEnableAutoCommit(false)
                .consumerRequestMaxBytes(0)
                .consumerRequestTimeoutMs(0)
                .nameStrategy("string")
                .nameStrategyValidation(false)
                .producerAcks("string")
                .producerCompressionType("string")
                .producerLingerMs(0)
                .producerMaxRequestSize(0)
                .simpleconsumerPoolSizeMax(0)
                .build())
            .kafkaVersion("string")
            .letsencryptSaslPrivatelink(false)
            .privateAccess(KafkaKafkaUserConfigPrivateAccessArgs.builder()
                .kafka(false)
                .kafkaConnect(false)
                .kafkaRest(false)
                .prometheus(false)
                .schemaRegistry(false)
                .build())
            .privatelinkAccess(KafkaKafkaUserConfigPrivatelinkAccessArgs.builder()
                .jolokia(false)
                .kafka(false)
                .kafkaConnect(false)
                .kafkaRest(false)
                .prometheus(false)
                .schemaRegistry(false)
                .build())
            .publicAccess(KafkaKafkaUserConfigPublicAccessArgs.builder()
                .kafka(false)
                .kafkaConnect(false)
                .kafkaRest(false)
                .prometheus(false)
                .schemaRegistry(false)
                .build())
            .schemaRegistry(false)
            .schemaRegistryConfig(KafkaKafkaUserConfigSchemaRegistryConfigArgs.builder()
                .leaderEligibility(false)
                .topicName("string")
                .build())
            .serviceLog(false)
            .staticIps(false)
            .tieredStorage(KafkaKafkaUserConfigTieredStorageArgs.builder()
                .enabled(false)
                .build())
            .build())
        .projectVpcId("string")
        .serviceIntegrations(KafkaServiceIntegrationArgs.builder()
            .integrationType("string")
            .sourceServiceName("string")
            .build())
        .cloudName("string")
        .staticIps("string")
        .tags(KafkaTagArgs.builder()
            .key("string")
            .value("string")
            .build())
        .techEmails(KafkaTechEmailArgs.builder()
            .email("string")
            .build())
        .terminationProtection(false)
        .build());
    
    kafka_resource = aiven.Kafka("kafkaResource",
        plan="string",
        service_name="string",
        project="string",
        maintenance_window_time="string",
        default_acl=False,
        kafkas=[aiven.KafkaKafkaArgs(
            access_cert="string",
            access_key="string",
            connect_uri="string",
            rest_uri="string",
            schema_registry_uri="string",
            uris=["string"],
        )],
        maintenance_window_dow="string",
        additional_disk_space="string",
        kafka_user_config=aiven.KafkaKafkaUserConfigArgs(
            aiven_kafka_topic_messages=False,
            custom_domain="string",
            ip_filter_objects=[aiven.KafkaKafkaUserConfigIpFilterObjectArgs(
                network="string",
                description="string",
            )],
            ip_filter_strings=["string"],
            kafka=aiven.KafkaKafkaUserConfigKafkaArgs(
                auto_create_topics_enable=False,
                compression_type="string",
                connections_max_idle_ms=0,
                default_replication_factor=0,
                group_initial_rebalance_delay_ms=0,
                group_max_session_timeout_ms=0,
                group_min_session_timeout_ms=0,
                log_cleaner_delete_retention_ms=0,
                log_cleaner_max_compaction_lag_ms=0,
                log_cleaner_min_cleanable_ratio=0,
                log_cleaner_min_compaction_lag_ms=0,
                log_cleanup_policy="string",
                log_flush_interval_messages=0,
                log_flush_interval_ms=0,
                log_index_interval_bytes=0,
                log_index_size_max_bytes=0,
                log_local_retention_bytes=0,
                log_local_retention_ms=0,
                log_message_downconversion_enable=False,
                log_message_timestamp_difference_max_ms=0,
                log_message_timestamp_type="string",
                log_preallocate=False,
                log_retention_bytes=0,
                log_retention_hours=0,
                log_retention_ms=0,
                log_roll_jitter_ms=0,
                log_roll_ms=0,
                log_segment_bytes=0,
                log_segment_delete_delay_ms=0,
                max_connections_per_ip=0,
                max_incremental_fetch_session_cache_slots=0,
                message_max_bytes=0,
                min_insync_replicas=0,
                num_partitions=0,
                offsets_retention_minutes=0,
                producer_purgatory_purge_interval_requests=0,
                replica_fetch_max_bytes=0,
                replica_fetch_response_max_bytes=0,
                sasl_oauthbearer_expected_audience="string",
                sasl_oauthbearer_expected_issuer="string",
                sasl_oauthbearer_jwks_endpoint_url="string",
                sasl_oauthbearer_sub_claim_name="string",
                socket_request_max_bytes=0,
                transaction_partition_verification_enable=False,
                transaction_remove_expired_transaction_cleanup_interval_ms=0,
                transaction_state_log_segment_bytes=0,
            ),
            kafka_authentication_methods=aiven.KafkaKafkaUserConfigKafkaAuthenticationMethodsArgs(
                certificate=False,
                sasl=False,
            ),
            kafka_connect=False,
            kafka_connect_config=aiven.KafkaKafkaUserConfigKafkaConnectConfigArgs(
                connector_client_config_override_policy="string",
                consumer_auto_offset_reset="string",
                consumer_fetch_max_bytes=0,
                consumer_isolation_level="string",
                consumer_max_partition_fetch_bytes=0,
                consumer_max_poll_interval_ms=0,
                consumer_max_poll_records=0,
                offset_flush_interval_ms=0,
                offset_flush_timeout_ms=0,
                producer_batch_size=0,
                producer_buffer_memory=0,
                producer_compression_type="string",
                producer_linger_ms=0,
                producer_max_request_size=0,
                scheduled_rebalance_max_delay_ms=0,
                session_timeout_ms=0,
            ),
            kafka_connect_secret_providers=[aiven.KafkaKafkaUserConfigKafkaConnectSecretProviderArgs(
                name="string",
                aws=aiven.KafkaKafkaUserConfigKafkaConnectSecretProviderAwsArgs(
                    auth_method="string",
                    region="string",
                    access_key="string",
                    secret_key="string",
                ),
                vault=aiven.KafkaKafkaUserConfigKafkaConnectSecretProviderVaultArgs(
                    address="string",
                    auth_method="string",
                    engine_version=0,
                    token="string",
                ),
            )],
            kafka_rest=False,
            kafka_rest_authorization=False,
            kafka_rest_config=aiven.KafkaKafkaUserConfigKafkaRestConfigArgs(
                consumer_enable_auto_commit=False,
                consumer_request_max_bytes=0,
                consumer_request_timeout_ms=0,
                name_strategy="string",
                name_strategy_validation=False,
                producer_acks="string",
                producer_compression_type="string",
                producer_linger_ms=0,
                producer_max_request_size=0,
                simpleconsumer_pool_size_max=0,
            ),
            kafka_version="string",
            letsencrypt_sasl_privatelink=False,
            private_access=aiven.KafkaKafkaUserConfigPrivateAccessArgs(
                kafka=False,
                kafka_connect=False,
                kafka_rest=False,
                prometheus=False,
                schema_registry=False,
            ),
            privatelink_access=aiven.KafkaKafkaUserConfigPrivatelinkAccessArgs(
                jolokia=False,
                kafka=False,
                kafka_connect=False,
                kafka_rest=False,
                prometheus=False,
                schema_registry=False,
            ),
            public_access=aiven.KafkaKafkaUserConfigPublicAccessArgs(
                kafka=False,
                kafka_connect=False,
                kafka_rest=False,
                prometheus=False,
                schema_registry=False,
            ),
            schema_registry=False,
            schema_registry_config=aiven.KafkaKafkaUserConfigSchemaRegistryConfigArgs(
                leader_eligibility=False,
                topic_name="string",
            ),
            service_log=False,
            static_ips=False,
            tiered_storage=aiven.KafkaKafkaUserConfigTieredStorageArgs(
                enabled=False,
            ),
        ),
        project_vpc_id="string",
        service_integrations=[aiven.KafkaServiceIntegrationArgs(
            integration_type="string",
            source_service_name="string",
        )],
        cloud_name="string",
        static_ips=["string"],
        tags=[aiven.KafkaTagArgs(
            key="string",
            value="string",
        )],
        tech_emails=[aiven.KafkaTechEmailArgs(
            email="string",
        )],
        termination_protection=False)
    
    const kafkaResource = new aiven.Kafka("kafkaResource", {
        plan: "string",
        serviceName: "string",
        project: "string",
        maintenanceWindowTime: "string",
        defaultAcl: false,
        kafkas: [{
            accessCert: "string",
            accessKey: "string",
            connectUri: "string",
            restUri: "string",
            schemaRegistryUri: "string",
            uris: ["string"],
        }],
        maintenanceWindowDow: "string",
        additionalDiskSpace: "string",
        kafkaUserConfig: {
            aivenKafkaTopicMessages: false,
            customDomain: "string",
            ipFilterObjects: [{
                network: "string",
                description: "string",
            }],
            ipFilterStrings: ["string"],
            kafka: {
                autoCreateTopicsEnable: false,
                compressionType: "string",
                connectionsMaxIdleMs: 0,
                defaultReplicationFactor: 0,
                groupInitialRebalanceDelayMs: 0,
                groupMaxSessionTimeoutMs: 0,
                groupMinSessionTimeoutMs: 0,
                logCleanerDeleteRetentionMs: 0,
                logCleanerMaxCompactionLagMs: 0,
                logCleanerMinCleanableRatio: 0,
                logCleanerMinCompactionLagMs: 0,
                logCleanupPolicy: "string",
                logFlushIntervalMessages: 0,
                logFlushIntervalMs: 0,
                logIndexIntervalBytes: 0,
                logIndexSizeMaxBytes: 0,
                logLocalRetentionBytes: 0,
                logLocalRetentionMs: 0,
                logMessageDownconversionEnable: false,
                logMessageTimestampDifferenceMaxMs: 0,
                logMessageTimestampType: "string",
                logPreallocate: false,
                logRetentionBytes: 0,
                logRetentionHours: 0,
                logRetentionMs: 0,
                logRollJitterMs: 0,
                logRollMs: 0,
                logSegmentBytes: 0,
                logSegmentDeleteDelayMs: 0,
                maxConnectionsPerIp: 0,
                maxIncrementalFetchSessionCacheSlots: 0,
                messageMaxBytes: 0,
                minInsyncReplicas: 0,
                numPartitions: 0,
                offsetsRetentionMinutes: 0,
                producerPurgatoryPurgeIntervalRequests: 0,
                replicaFetchMaxBytes: 0,
                replicaFetchResponseMaxBytes: 0,
                saslOauthbearerExpectedAudience: "string",
                saslOauthbearerExpectedIssuer: "string",
                saslOauthbearerJwksEndpointUrl: "string",
                saslOauthbearerSubClaimName: "string",
                socketRequestMaxBytes: 0,
                transactionPartitionVerificationEnable: false,
                transactionRemoveExpiredTransactionCleanupIntervalMs: 0,
                transactionStateLogSegmentBytes: 0,
            },
            kafkaAuthenticationMethods: {
                certificate: false,
                sasl: false,
            },
            kafkaConnect: false,
            kafkaConnectConfig: {
                connectorClientConfigOverridePolicy: "string",
                consumerAutoOffsetReset: "string",
                consumerFetchMaxBytes: 0,
                consumerIsolationLevel: "string",
                consumerMaxPartitionFetchBytes: 0,
                consumerMaxPollIntervalMs: 0,
                consumerMaxPollRecords: 0,
                offsetFlushIntervalMs: 0,
                offsetFlushTimeoutMs: 0,
                producerBatchSize: 0,
                producerBufferMemory: 0,
                producerCompressionType: "string",
                producerLingerMs: 0,
                producerMaxRequestSize: 0,
                scheduledRebalanceMaxDelayMs: 0,
                sessionTimeoutMs: 0,
            },
            kafkaConnectSecretProviders: [{
                name: "string",
                aws: {
                    authMethod: "string",
                    region: "string",
                    accessKey: "string",
                    secretKey: "string",
                },
                vault: {
                    address: "string",
                    authMethod: "string",
                    engineVersion: 0,
                    token: "string",
                },
            }],
            kafkaRest: false,
            kafkaRestAuthorization: false,
            kafkaRestConfig: {
                consumerEnableAutoCommit: false,
                consumerRequestMaxBytes: 0,
                consumerRequestTimeoutMs: 0,
                nameStrategy: "string",
                nameStrategyValidation: false,
                producerAcks: "string",
                producerCompressionType: "string",
                producerLingerMs: 0,
                producerMaxRequestSize: 0,
                simpleconsumerPoolSizeMax: 0,
            },
            kafkaVersion: "string",
            letsencryptSaslPrivatelink: false,
            privateAccess: {
                kafka: false,
                kafkaConnect: false,
                kafkaRest: false,
                prometheus: false,
                schemaRegistry: false,
            },
            privatelinkAccess: {
                jolokia: false,
                kafka: false,
                kafkaConnect: false,
                kafkaRest: false,
                prometheus: false,
                schemaRegistry: false,
            },
            publicAccess: {
                kafka: false,
                kafkaConnect: false,
                kafkaRest: false,
                prometheus: false,
                schemaRegistry: false,
            },
            schemaRegistry: false,
            schemaRegistryConfig: {
                leaderEligibility: false,
                topicName: "string",
            },
            serviceLog: false,
            staticIps: false,
            tieredStorage: {
                enabled: false,
            },
        },
        projectVpcId: "string",
        serviceIntegrations: [{
            integrationType: "string",
            sourceServiceName: "string",
        }],
        cloudName: "string",
        staticIps: ["string"],
        tags: [{
            key: "string",
            value: "string",
        }],
        techEmails: [{
            email: "string",
        }],
        terminationProtection: false,
    });
    
    type: aiven:Kafka
    properties:
        additionalDiskSpace: string
        cloudName: string
        defaultAcl: false
        kafkaUserConfig:
            aivenKafkaTopicMessages: false
            customDomain: string
            ipFilterObjects:
                - description: string
                  network: string
            ipFilterStrings:
                - string
            kafka:
                autoCreateTopicsEnable: false
                compressionType: string
                connectionsMaxIdleMs: 0
                defaultReplicationFactor: 0
                groupInitialRebalanceDelayMs: 0
                groupMaxSessionTimeoutMs: 0
                groupMinSessionTimeoutMs: 0
                logCleanerDeleteRetentionMs: 0
                logCleanerMaxCompactionLagMs: 0
                logCleanerMinCleanableRatio: 0
                logCleanerMinCompactionLagMs: 0
                logCleanupPolicy: string
                logFlushIntervalMessages: 0
                logFlushIntervalMs: 0
                logIndexIntervalBytes: 0
                logIndexSizeMaxBytes: 0
                logLocalRetentionBytes: 0
                logLocalRetentionMs: 0
                logMessageDownconversionEnable: false
                logMessageTimestampDifferenceMaxMs: 0
                logMessageTimestampType: string
                logPreallocate: false
                logRetentionBytes: 0
                logRetentionHours: 0
                logRetentionMs: 0
                logRollJitterMs: 0
                logRollMs: 0
                logSegmentBytes: 0
                logSegmentDeleteDelayMs: 0
                maxConnectionsPerIp: 0
                maxIncrementalFetchSessionCacheSlots: 0
                messageMaxBytes: 0
                minInsyncReplicas: 0
                numPartitions: 0
                offsetsRetentionMinutes: 0
                producerPurgatoryPurgeIntervalRequests: 0
                replicaFetchMaxBytes: 0
                replicaFetchResponseMaxBytes: 0
                saslOauthbearerExpectedAudience: string
                saslOauthbearerExpectedIssuer: string
                saslOauthbearerJwksEndpointUrl: string
                saslOauthbearerSubClaimName: string
                socketRequestMaxBytes: 0
                transactionPartitionVerificationEnable: false
                transactionRemoveExpiredTransactionCleanupIntervalMs: 0
                transactionStateLogSegmentBytes: 0
            kafkaAuthenticationMethods:
                certificate: false
                sasl: false
            kafkaConnect: false
            kafkaConnectConfig:
                connectorClientConfigOverridePolicy: string
                consumerAutoOffsetReset: string
                consumerFetchMaxBytes: 0
                consumerIsolationLevel: string
                consumerMaxPartitionFetchBytes: 0
                consumerMaxPollIntervalMs: 0
                consumerMaxPollRecords: 0
                offsetFlushIntervalMs: 0
                offsetFlushTimeoutMs: 0
                producerBatchSize: 0
                producerBufferMemory: 0
                producerCompressionType: string
                producerLingerMs: 0
                producerMaxRequestSize: 0
                scheduledRebalanceMaxDelayMs: 0
                sessionTimeoutMs: 0
            kafkaConnectSecretProviders:
                - aws:
                    accessKey: string
                    authMethod: string
                    region: string
                    secretKey: string
                  name: string
                  vault:
                    address: string
                    authMethod: string
                    engineVersion: 0
                    token: string
            kafkaRest: false
            kafkaRestAuthorization: false
            kafkaRestConfig:
                consumerEnableAutoCommit: false
                consumerRequestMaxBytes: 0
                consumerRequestTimeoutMs: 0
                nameStrategy: string
                nameStrategyValidation: false
                producerAcks: string
                producerCompressionType: string
                producerLingerMs: 0
                producerMaxRequestSize: 0
                simpleconsumerPoolSizeMax: 0
            kafkaVersion: string
            letsencryptSaslPrivatelink: false
            privateAccess:
                kafka: false
                kafkaConnect: false
                kafkaRest: false
                prometheus: false
                schemaRegistry: false
            privatelinkAccess:
                jolokia: false
                kafka: false
                kafkaConnect: false
                kafkaRest: false
                prometheus: false
                schemaRegistry: false
            publicAccess:
                kafka: false
                kafkaConnect: false
                kafkaRest: false
                prometheus: false
                schemaRegistry: false
            schemaRegistry: false
            schemaRegistryConfig:
                leaderEligibility: false
                topicName: string
            serviceLog: false
            staticIps: false
            tieredStorage:
                enabled: false
        kafkas:
            - accessCert: string
              accessKey: string
              connectUri: string
              restUri: string
              schemaRegistryUri: string
              uris:
                - string
        maintenanceWindowDow: string
        maintenanceWindowTime: string
        plan: string
        project: string
        projectVpcId: string
        serviceIntegrations:
            - integrationType: string
              sourceServiceName: string
        serviceName: string
        staticIps:
            - string
        tags:
            - key: string
              value: string
        techEmails:
            - email: string
        terminationProtection: false
    

    Kafka Resource Properties

    To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.
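
    As a brief TypeScript sketch (placeholder values), every input you set is also available as an output on the created resource, so it can be exported from the stack or passed to other resources:

    import * as aiven from "@pulumi/aiven";
    
    const kafka = new aiven.Kafka("kafka", {
        project: "example-project",
        cloudName: "google-europe-west1",
        plan: "business-4",
        serviceName: "example-kafka",
    });
    
    // Inputs are echoed back as outputs; `id` is available on every resource.
    export const kafkaServiceName = kafka.serviceName;
    export const kafkaId = kafka.id;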

    Inputs

    The Kafka resource accepts the following input properties:

    Plan string
    Defines what kind of computing resources are allocated for the service. It can be changed after creation, though there are some restrictions when moving to a smaller plan: the new plan must have enough disk space to store all current data, and switching to a plan with fewer nodes might not be supported. The basic plan names are hobbyist, startup-x, business-x and premium-x, where x is (roughly) the amount of memory on each node (other attributes such as the number of CPUs and the amount of disk space also vary, but naming is based on memory). The available options can be seen on the Aiven pricing page.
    Project string
    The name of the project this resource belongs to. To set up proper dependencies, pass this value as a reference to the project resource rather than as a literal string. Changing this property forces recreation of the resource.
    ServiceName string
    Specifies the actual name of the service. The name cannot be changed later without destroying and re-creating the service, so the name should be picked based on the intended service usage rather than current attributes.
    AdditionalDiskSpace string
    Additional disk space. Possible values depend on the service type, the cloud provider and the project. Reducing this value will result in the service rebalancing.
    CloudName string
    Defines the cloud provider and region where the service is hosted. This can be changed freely after the service is created, but changing the value will trigger a potentially lengthy migration process for the service. The format is the cloud provider name (aws, azure, do, google, upcloud, etc.), a dash, and the cloud provider-specific region name. These are documented in each cloud provider's own support articles, like here for Google and here for AWS.
    DefaultAcl bool
    Create a default wildcard Kafka ACL.
    DiskSpace string
    Service disk space. Possible values depend on the service type, the cloud provider and the project. Reducing this value will result in the service rebalancing.

    Deprecated: This will be removed in v5.0.0. Please use additional_disk_space to specify the space to be added to the default disk_space defined by the plan.

    KafkaServer List<KafkaKafka>
    Kafka server connection details.
    KafkaUserConfig KafkaKafkaUserConfig
    Kafka user configurable settings
    Karapace bool
    Switch the service to use Karapace for schema registry and REST proxy.

    Deprecated: Usage of this field is discouraged.

    MaintenanceWindowDow string
    Day of week when maintenance operations should be performed. One of monday, tuesday, wednesday, etc.
    MaintenanceWindowTime string
    Time of day when maintenance operations should be performed. UTC time in HH:mm:ss format.
    ProjectVpcId string
    Specifies the VPC the service should run in. If the value is not set the service is not run inside a VPC. When set, the value should be given as a reference to set up dependencies correctly and the VPC must be in the same cloud and region as the service itself. Project can be freely moved to and from VPC after creation but doing so triggers migration to new servers so the operation can take significant amount of time to complete if the service has a lot of data.
    ServiceIntegrations List<KafkaServiceIntegration>
    Service integrations to specify when creating a service. Not applied after initial service creation
    StaticIps List<string>
    Static IPs that are going to be associated with this service. Please assign a value using the 'toset' function. Once a static ip resource is in the 'assigned' state it cannot be unbound from the node again
    Tags List<KafkaTag>
    Tags are key-value pairs that allow you to categorize services.
    TechEmails List<KafkaTechEmail>
    The email addresses for service contacts, who will receive important alerts and updates about this service. You can also set email contacts at the project level.
    TerminationProtection bool
    Prevents the service from being deleted. It is recommended to set this to true for all production services to prevent unintentional service deletion. This does not shield against deleting databases or topics but for services with backups much of the content can at least be restored from backup in case accidental deletion is done.
    Plan string
    Defines what kind of computing resources are allocated for the service. It can be changed after creation, though there are some restrictions when going to a smaller plan: the new plan must have a sufficient amount of disk space to store all current data, and switching to a plan with fewer nodes might not be supported. The basic plan names are hobbyist, startup-x, business-x and premium-x where x is (roughly) the amount of memory on each node (other attributes like the number of CPUs and the amount of disk space also vary, but naming is based on memory). The available options can be seen on the Aiven pricing page.
    Project string
    The name of the project this resource belongs to. To set up proper dependencies please refer to this variable as a reference. Changing this property forces recreation of the resource.
    ServiceName string
    Specifies the actual name of the service. The name cannot be changed later without destroying and re-creating the service so name should be picked based on intended service usage rather than current attributes.
    AdditionalDiskSpace string
    Additional disk space. Possible values depend on the service type, the cloud provider and the project. Reducing this value will result in the service rebalancing.
    CloudName string
    Defines the cloud provider and region where the service is hosted. This can be changed freely after the service is created, but changing the value will trigger a potentially lengthy migration process for the service. The format is the cloud provider name (aws, azure, do, google, upcloud, etc.), a dash, and the cloud provider-specific region name. These are documented in each cloud provider's own support articles, like here for Google and here for AWS.
    DefaultAcl bool
    Create a default wildcard Kafka ACL.
    DiskSpace string
    Service disk space. Possible values depend on the service type, the cloud provider and the project. Reducing this value will result in the service rebalancing.

    Deprecated: This will be removed in v5.0.0. Please use additional_disk_space to specify the space to be added to the default disk_space defined by the plan.

    KafkaUserConfig KafkaKafkaUserConfigArgs
    Kafka user configurable settings
    Kafkas []KafkaKafkaArgs
    Kafka server connection details.
    Karapace bool
    Switch the service to use Karapace for schema registry and REST proxy.

    Deprecated: Usage of this field is discouraged.

    MaintenanceWindowDow string
    Day of week when maintenance operations should be performed. One of monday, tuesday, wednesday, etc.
    MaintenanceWindowTime string
    Time of day when maintenance operations should be performed. UTC time in HH:mm:ss format.
    ProjectVpcId string
    Specifies the VPC the service should run in. If the value is not set the service is not run inside a VPC. When set, the value should be given as a reference to set up dependencies correctly and the VPC must be in the same cloud and region as the service itself. Project can be freely moved to and from VPC after creation but doing so triggers migration to new servers so the operation can take significant amount of time to complete if the service has a lot of data.
    ServiceIntegrations []KafkaServiceIntegrationArgs
    Service integrations to specify when creating a service. Not applied after initial service creation
    StaticIps []string
    Static IPs that are going to be associated with this service. Please assign a value using the 'toset' function. Once a static ip resource is in the 'assigned' state it cannot be unbound from the node again
    Tags []KafkaTagArgs
    Tags are key-value pairs that allow you to categorize services.
    TechEmails []KafkaTechEmailArgs
    The email addresses for service contacts, who will receive important alerts and updates about this service. You can also set email contacts at the project level.
    TerminationProtection bool
    Prevents the service from being deleted. It is recommended to set this to true for all production services to prevent unintentional service deletion. This does not shield against deleting databases or topics but for services with backups much of the content can at least be restored from backup in case accidental deletion is done.
    plan String
    Defines what kind of computing resources are allocated for the service. It can be changed after creation, though there are some restrictions when going to a smaller plan: the new plan must have a sufficient amount of disk space to store all current data, and switching to a plan with fewer nodes might not be supported. The basic plan names are hobbyist, startup-x, business-x and premium-x where x is (roughly) the amount of memory on each node (other attributes like the number of CPUs and the amount of disk space also vary, but naming is based on memory). The available options can be seen on the Aiven pricing page.
    project String
    The name of the project this resource belongs to. To set up proper dependencies please refer to this variable as a reference. Changing this property forces recreation of the resource.
    serviceName String
    Specifies the actual name of the service. The name cannot be changed later without destroying and re-creating the service so name should be picked based on intended service usage rather than current attributes.
    additionalDiskSpace String
    Additional disk space. Possible values depend on the service type, the cloud provider and the project. Reducing this value will result in the service rebalancing.
    cloudName String
    Defines the cloud provider and region where the service is hosted. This can be changed freely after the service is created, but changing the value will trigger a potentially lengthy migration process for the service. The format is the cloud provider name (aws, azure, do, google, upcloud, etc.), a dash, and the cloud provider-specific region name. These are documented in each cloud provider's own support articles, like here for Google and here for AWS.
    defaultAcl Boolean
    Create a default wildcard Kafka ACL.
    diskSpace String
    Service disk space. Possible values depend on the service type, the cloud provider and the project. Reducing this value will result in the service rebalancing.

    Deprecated: This will be removed in v5.0.0. Please use additional_disk_space to specify the space to be added to the default disk_space defined by the plan.

    kafkaUserConfig KafkaKafkaUserConfig
    Kafka user configurable settings
    kafkas List<KafkaKafka>
    Kafka server connection details.
    karapace Boolean
    Switch the service to use Karapace for schema registry and REST proxy.

    Deprecated: Usage of this field is discouraged.

    maintenanceWindowDow String
    Day of week when maintenance operations should be performed. One of monday, tuesday, wednesday, etc.
    maintenanceWindowTime String
    Time of day when maintenance operations should be performed. UTC time in HH:mm:ss format.
    projectVpcId String
    Specifies the VPC the service should run in. If the value is not set the service is not run inside a VPC. When set, the value should be given as a reference to set up dependencies correctly and the VPC must be in the same cloud and region as the service itself. Project can be freely moved to and from VPC after creation but doing so triggers migration to new servers so the operation can take significant amount of time to complete if the service has a lot of data.
    serviceIntegrations List<KafkaServiceIntegration>
    Service integrations to specify when creating a service. Not applied after initial service creation
    staticIps List<String>
    Static IPs that are going to be associated with this service. Please assign a value using the 'toset' function. Once a static ip resource is in the 'assigned' state it cannot be unbound from the node again
    tags List<KafkaTag>
    Tags are key-value pairs that allow you to categorize services.
    techEmails List<KafkaTechEmail>
    The email addresses for service contacts, who will receive important alerts and updates about this service. You can also set email contacts at the project level.
    terminationProtection Boolean
    Prevents the service from being deleted. It is recommended to set this to true for all production services to prevent unintentional service deletion. This does not shield against deleting databases or topics but for services with backups much of the content can at least be restored from backup in case accidental deletion is done.
    plan string
    Defines what kind of computing resources are allocated for the service. It can be changed after creation, though there are some restrictions when going to a smaller plan: the new plan must have a sufficient amount of disk space to store all current data, and switching to a plan with fewer nodes might not be supported. The basic plan names are hobbyist, startup-x, business-x and premium-x where x is (roughly) the amount of memory on each node (other attributes like the number of CPUs and the amount of disk space also vary, but naming is based on memory). The available options can be seen on the Aiven pricing page.
    project string
    The name of the project this resource belongs to. To set up proper dependencies please refer to this variable as a reference. Changing this property forces recreation of the resource.
    serviceName string
    Specifies the actual name of the service. The name cannot be changed later without destroying and re-creating the service so name should be picked based on intended service usage rather than current attributes.
    additionalDiskSpace string
    Additional disk space. Possible values depend on the service type, the cloud provider and the project. Reducing this value will result in the service rebalancing.
    cloudName string
    Defines the cloud provider and region where the service is hosted. This can be changed freely after the service is created, but changing the value will trigger a potentially lengthy migration process for the service. The format is the cloud provider name (aws, azure, do, google, upcloud, etc.), a dash, and the cloud provider-specific region name. These are documented in each cloud provider's own support articles, like here for Google and here for AWS.
    defaultAcl boolean
    Create a default wildcard Kafka ACL.
    diskSpace string
    Service disk space. Possible values depend on the service type, the cloud provider and the project. Reducing this value will result in the service rebalancing.

    Deprecated: This will be removed in v5.0.0. Please use additional_disk_space to specify the space to be added to the default disk_space defined by the plan.

    kafkaUserConfig KafkaKafkaUserConfig
    Kafka user configurable settings
    kafkas KafkaKafka[]
    Kafka server connection details.
    karapace boolean
    Switch the service to use Karapace for schema registry and REST proxy.

    Deprecated: Usage of this field is discouraged.

    maintenanceWindowDow string
    Day of week when maintenance operations should be performed. One of monday, tuesday, wednesday, etc.
    maintenanceWindowTime string
    Time of day when maintenance operations should be performed. UTC time in HH:mm:ss format.
    projectVpcId string
    Specifies the VPC the service should run in. If the value is not set the service is not run inside a VPC. When set, the value should be given as a reference to set up dependencies correctly and the VPC must be in the same cloud and region as the service itself. Project can be freely moved to and from VPC after creation but doing so triggers migration to new servers so the operation can take significant amount of time to complete if the service has a lot of data.
    serviceIntegrations KafkaServiceIntegration[]
    Service integrations to specify when creating a service. Not applied after initial service creation
    staticIps string[]
    Static IPs that are going to be associated with this service. Please assign a value using the 'toset' function. Once a static ip resource is in the 'assigned' state it cannot be unbound from the node again
    tags KafkaTag[]
    Tags are key-value pairs that allow you to categorize services.
    techEmails KafkaTechEmail[]
    The email addresses for service contacts, who will receive important alerts and updates about this service. You can also set email contacts at the project level.
    terminationProtection boolean
    Prevents the service from being deleted. It is recommended to set this to true for all production services to prevent unintentional service deletion. This does not shield against deleting databases or topics but for services with backups much of the content can at least be restored from backup in case accidental deletion is done.
    plan str
    Defines what kind of computing resources are allocated for the service. It can be changed after creation, though there are some restrictions when going to a smaller plan: the new plan must have a sufficient amount of disk space to store all current data, and switching to a plan with fewer nodes might not be supported. The basic plan names are hobbyist, startup-x, business-x and premium-x where x is (roughly) the amount of memory on each node (other attributes like the number of CPUs and the amount of disk space also vary, but naming is based on memory). The available options can be seen on the Aiven pricing page.
    project str
    The name of the project this resource belongs to. To set up proper dependencies please refer to this variable as a reference. Changing this property forces recreation of the resource.
    service_name str
    Specifies the actual name of the service. The name cannot be changed later without destroying and re-creating the service so name should be picked based on intended service usage rather than current attributes.
    additional_disk_space str
    Additional disk space. Possible values depend on the service type, the cloud provider and the project. Reducing this value will result in the service rebalancing.
    cloud_name str
    Defines the cloud provider and region where the service is hosted. This can be changed freely after the service is created, but changing the value will trigger a potentially lengthy migration process for the service. The format is the cloud provider name (aws, azure, do, google, upcloud, etc.), a dash, and the cloud provider-specific region name. These are documented in each cloud provider's own support articles, like here for Google and here for AWS.
    default_acl bool
    Create a default wildcard Kafka ACL.
    disk_space str
    Service disk space. Possible values depend on the service type, the cloud provider and the project. Reducing this value will result in the service rebalancing.

    Deprecated: This will be removed in v5.0.0. Please use additional_disk_space to specify the space to be added to the default disk_space defined by the plan.

    kafka_user_config KafkaKafkaUserConfigArgs
    Kafka user configurable settings
    kafkas Sequence[KafkaKafkaArgs]
    Kafka server connection details.
    karapace bool
    Switch the service to use Karapace for schema registry and REST proxy.

    Deprecated: Usage of this field is discouraged.

    maintenance_window_dow str
    Day of week when maintenance operations should be performed. One of monday, tuesday, wednesday, etc.
    maintenance_window_time str
    Time of day when maintenance operations should be performed. UTC time in HH:mm:ss format.
    project_vpc_id str
    Specifies the VPC the service should run in. If the value is not set the service is not run inside a VPC. When set, the value should be given as a reference to set up dependencies correctly and the VPC must be in the same cloud and region as the service itself. Project can be freely moved to and from VPC after creation but doing so triggers migration to new servers so the operation can take significant amount of time to complete if the service has a lot of data.
    service_integrations Sequence[KafkaServiceIntegrationArgs]
    Service integrations to specify when creating a service. Not applied after initial service creation
    static_ips Sequence[str]
    Static IPs that are going to be associated with this service. Please assign a value using the 'toset' function. Once a static ip resource is in the 'assigned' state it cannot be unbound from the node again
    tags Sequence[KafkaTagArgs]
    Tags are key-value pairs that allow you to categorize services.
    tech_emails Sequence[KafkaTechEmailArgs]
    The email addresses for service contacts, who will receive important alerts and updates about this service. You can also set email contacts at the project level.
    termination_protection bool
    Prevents the service from being deleted. It is recommended to set this to true for all production services to prevent unintentional service deletion. This does not shield against deleting databases or topics but for services with backups much of the content can at least be restored from backup in case accidental deletion is done.
    plan String
    Defines what kind of computing resources are allocated for the service. It can be changed after creation, though there are some restrictions when going to a smaller plan: the new plan must have a sufficient amount of disk space to store all current data, and switching to a plan with fewer nodes might not be supported. The basic plan names are hobbyist, startup-x, business-x and premium-x where x is (roughly) the amount of memory on each node (other attributes like the number of CPUs and the amount of disk space also vary, but naming is based on memory). The available options can be seen on the Aiven pricing page.
    project String
    The name of the project this resource belongs to. To set up proper dependencies please refer to this variable as a reference. Changing this property forces recreation of the resource.
    serviceName String
    Specifies the actual name of the service. The name cannot be changed later without destroying and re-creating the service so name should be picked based on intended service usage rather than current attributes.
    additionalDiskSpace String
    Additional disk space. Possible values depend on the service type, the cloud provider and the project. Reducing this value will result in the service rebalancing.
    cloudName String
    Defines the cloud provider and region where the service is hosted. This can be changed freely after the service is created, but changing the value will trigger a potentially lengthy migration process for the service. The format is the cloud provider name (aws, azure, do, google, upcloud, etc.), a dash, and the cloud provider-specific region name. These are documented in each cloud provider's own support articles, like here for Google and here for AWS.
    defaultAcl Boolean
    Create a default wildcard Kafka ACL.
    diskSpace String
    Service disk space. Possible values depend on the service type, the cloud provider and the project. Reducing this value will result in the service rebalancing.

    Deprecated: This will be removed in v5.0.0. Please use additional_disk_space to specify the space to be added to the default disk_space defined by the plan.

    kafkaUserConfig Property Map
    Kafka user configurable settings
    kafkas List<Property Map>
    Kafka server connection details.
    karapace Boolean
    Switch the service to use Karapace for schema registry and REST proxy.

    Deprecated: Usage of this field is discouraged.

    maintenanceWindowDow String
    Day of week when maintenance operations should be performed. One of monday, tuesday, wednesday, etc.
    maintenanceWindowTime String
    Time of day when maintenance operations should be performed. UTC time in HH:mm:ss format.
    projectVpcId String
    Specifies the VPC the service should run in. If the value is not set the service is not run inside a VPC. When set, the value should be given as a reference to set up dependencies correctly and the VPC must be in the same cloud and region as the service itself. Project can be freely moved to and from VPC after creation but doing so triggers migration to new servers so the operation can take significant amount of time to complete if the service has a lot of data.
    serviceIntegrations List<Property Map>
    Service integrations to specify when creating a service. Not applied after initial service creation
    staticIps List<String>
    Static IPs that are going to be associated with this service. Please assign a value using the 'toset' function. Once a static ip resource is in the 'assigned' state it cannot be unbound from the node again
    tags List<Property Map>
    Tags are key-value pairs that allow you to categorize services.
    techEmails List<Property Map>
    The email addresses for service contacts, who will receive important alerts and updates about this service. You can also set email contacts at the project level.
    terminationProtection Boolean
    Prevents the service from being deleted. It is recommended to set this to true for all production services to prevent unintentional service deletion. This does not shield against deleting databases or topics but for services with backups much of the content can at least be restored from backup in case accidental deletion is done.
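
    A minimal TypeScript sketch of some of the optional inputs above, such as terminationProtection, defaultAcl, tags, and techEmails. The project reference (exampleProject), plan name, and email address are placeholders rather than values from this reference; adjust them to your environment.

    import * as aiven from "@pulumi/aiven";
    
    // Hypothetical service demonstrating optional inputs.
    // Assumes an aiven.Project resource named `exampleProject` exists elsewhere in the program.
    const guardedKafka = new aiven.Kafka("guarded_kafka", {
        project: exampleProject.project,   // reference the project so dependencies are set up correctly
        cloudName: "google-europe-west1",
        plan: "startup-2",                 // assumed plan name; see the Aiven pricing page for options
        serviceName: "guarded-kafka",
        defaultAcl: false,                 // do not create the default wildcard Kafka ACL
        terminationProtection: true,       // recommended for production services
        tags: [{ key: "env", value: "prod" }],
        techEmails: [{ email: "ops@example.com" }],
    });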

    Outputs

    All input properties are implicitly available as output properties. Additionally, the Kafka resource produces the following output properties:

    Components List<KafkaComponent>
    Service component information objects
    DiskSpaceCap string
    The maximum disk space of the service, possible values depend on the service type, the cloud provider and the project.
    DiskSpaceDefault string
    The default disk space of the service, possible values depend on the service type, the cloud provider and the project. It's also the minimum value for disk_space.
    DiskSpaceStep string
    The default disk space step of the service, possible values depend on the service type, the cloud provider and the project. disk_space needs to increase from disk_space_default in increments of this size.
    DiskSpaceUsed string
    Disk space that service is currently using
    Id string
    The provider-assigned unique ID for this managed resource.
    ServiceHost string
    The hostname of the service.
    ServicePassword string
    Password used for connecting to the service, if applicable
    ServicePort int
    The port of the service
    ServiceType string
    Aiven internal service type code
    ServiceUri string
    URI for connecting to the service. Service specific info is under "kafka", "pg", etc.
    ServiceUsername string
    Username used for connecting to the service, if applicable
    State string
    Service state. One of POWEROFF, REBALANCING, REBUILDING or RUNNING
    Components []KafkaComponent
    Service component information objects
    DiskSpaceCap string
    The maximum disk space of the service, possible values depend on the service type, the cloud provider and the project.
    DiskSpaceDefault string
    The default disk space of the service, possible values depend on the service type, the cloud provider and the project. It's also the minimum value for disk_space.
    DiskSpaceStep string
    The default disk space step of the service, possible values depend on the service type, the cloud provider and the project. disk_space needs to increase from disk_space_default in increments of this size.
    DiskSpaceUsed string
    Disk space that service is currently using
    Id string
    The provider-assigned unique ID for this managed resource.
    ServiceHost string
    The hostname of the service.
    ServicePassword string
    Password used for connecting to the service, if applicable
    ServicePort int
    The port of the service
    ServiceType string
    Aiven internal service type code
    ServiceUri string
    URI for connecting to the service. Service specific info is under "kafka", "pg", etc.
    ServiceUsername string
    Username used for connecting to the service, if applicable
    State string
    Service state. One of POWEROFF, REBALANCING, REBUILDING or RUNNING
    components List<KafkaComponent>
    Service component information objects
    diskSpaceCap String
    The maximum disk space of the service, possible values depend on the service type, the cloud provider and the project.
    diskSpaceDefault String
    The default disk space of the service, possible values depend on the service type, the cloud provider and the project. It's also the minimum value for disk_space.
    diskSpaceStep String
    The default disk space step of the service, possible values depend on the service type, the cloud provider and the project. disk_space needs to increase from disk_space_default in increments of this size.
    diskSpaceUsed String
    Disk space that service is currently using
    id String
    The provider-assigned unique ID for this managed resource.
    serviceHost String
    The hostname of the service.
    servicePassword String
    Password used for connecting to the service, if applicable
    servicePort Integer
    The port of the service
    serviceType String
    Aiven internal service type code
    serviceUri String
    URI for connecting to the service. Service specific info is under "kafka", "pg", etc.
    serviceUsername String
    Username used for connecting to the service, if applicable
    state String
    Service state. One of POWEROFF, REBALANCING, REBUILDING or RUNNING
    components KafkaComponent[]
    Service component information objects
    diskSpaceCap string
    The maximum disk space of the service, possible values depend on the service type, the cloud provider and the project.
    diskSpaceDefault string
    The default disk space of the service, possible values depend on the service type, the cloud provider and the project. It's also the minimum value for disk_space.
    diskSpaceStep string
    The default disk space step of the service, possible values depend on the service type, the cloud provider and the project. disk_space needs to increase from disk_space_default in increments of this size.
    diskSpaceUsed string
    Disk space that service is currently using
    id string
    The provider-assigned unique ID for this managed resource.
    serviceHost string
    The hostname of the service.
    servicePassword string
    Password used for connecting to the service, if applicable
    servicePort number
    The port of the service
    serviceType string
    Aiven internal service type code
    serviceUri string
    URI for connecting to the service. Service specific info is under "kafka", "pg", etc.
    serviceUsername string
    Username used for connecting to the service, if applicable
    state string
    Service state. One of POWEROFF, REBALANCING, REBUILDING or RUNNING
    components Sequence[KafkaComponent]
    Service component information objects
    disk_space_cap str
    The maximum disk space of the service, possible values depend on the service type, the cloud provider and the project.
    disk_space_default str
    The default disk space of the service, possible values depend on the service type, the cloud provider and the project. It's also the minimum value for disk_space.
    disk_space_step str
    The default disk space step of the service, possible values depend on the service type, the cloud provider and the project. disk_space needs to increase from disk_space_default in increments of this size.
    disk_space_used str
    Disk space that service is currently using
    id str
    The provider-assigned unique ID for this managed resource.
    service_host str
    The hostname of the service.
    service_password str
    Password used for connecting to the service, if applicable
    service_port int
    The port of the service
    service_type str
    Aiven internal service type code
    service_uri str
    URI for connecting to the service. Service specific info is under "kafka", "pg", etc.
    service_username str
    Username used for connecting to the service, if applicable
    state str
    Service state. One of POWEROFF, REBALANCING, REBUILDING or RUNNING
    components List<Property Map>
    Service component information objects
    diskSpaceCap String
    The maximum disk space of the service, possible values depend on the service type, the cloud provider and the project.
    diskSpaceDefault String
    The default disk space of the service, possible values depend on the service type, the cloud provider and the project. It's also the minimum value for disk_space.
    diskSpaceStep String
    The default disk space step of the service, possible values depend on the service type, the cloud provider and the project. disk_space needs to increase from disk_space_default in increments of this size.
    diskSpaceUsed String
    Disk space that service is currently using
    id String
    The provider-assigned unique ID for this managed resource.
    serviceHost String
    The hostname of the service.
    servicePassword String
    Password used for connecting to the service, if applicable
    servicePort Number
    The port of the service
    serviceType String
    Aiven internal service type code
    serviceUri String
    URI for connecting to the service. Service specific info is under "kafka", "pg", etc.
    serviceUsername String
    Username used for connecting to the service, if applicable
    state String
    Service state. One of POWEROFF, REBALANCING, REBUILDING or RUNNING
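
    As a small illustration of the outputs above, a TypeScript program can export connection details from a Kafka resource. The resource variable below (guardedKafka, or any other aiven.Kafka instance) is assumed to exist elsewhere in the program; servicePassword and serviceUri contain credentials, so avoid exporting them in plain text.

    // Export selected output properties from an aiven.Kafka resource defined in the program.
    export const kafkaServiceHost = guardedKafka.serviceHost;  // hostname of the service
    export const kafkaServicePort = guardedKafka.servicePort;  // port of the service
    export const kafkaState = guardedKafka.state;              // POWEROFF, REBALANCING, REBUILDING or RUNNING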

    Look up Existing Kafka Resource

    Get an existing Kafka resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.

    public static get(name: string, id: Input<ID>, state?: KafkaState, opts?: CustomResourceOptions): Kafka
    @staticmethod
    def get(resource_name: str,
            id: str,
            opts: Optional[ResourceOptions] = None,
            additional_disk_space: Optional[str] = None,
            cloud_name: Optional[str] = None,
            components: Optional[Sequence[KafkaComponentArgs]] = None,
            default_acl: Optional[bool] = None,
            disk_space: Optional[str] = None,
            disk_space_cap: Optional[str] = None,
            disk_space_default: Optional[str] = None,
            disk_space_step: Optional[str] = None,
            disk_space_used: Optional[str] = None,
            kafka_user_config: Optional[KafkaKafkaUserConfigArgs] = None,
            kafkas: Optional[Sequence[KafkaKafkaArgs]] = None,
            karapace: Optional[bool] = None,
            maintenance_window_dow: Optional[str] = None,
            maintenance_window_time: Optional[str] = None,
            plan: Optional[str] = None,
            project: Optional[str] = None,
            project_vpc_id: Optional[str] = None,
            service_host: Optional[str] = None,
            service_integrations: Optional[Sequence[KafkaServiceIntegrationArgs]] = None,
            service_name: Optional[str] = None,
            service_password: Optional[str] = None,
            service_port: Optional[int] = None,
            service_type: Optional[str] = None,
            service_uri: Optional[str] = None,
            service_username: Optional[str] = None,
            state: Optional[str] = None,
            static_ips: Optional[Sequence[str]] = None,
            tags: Optional[Sequence[KafkaTagArgs]] = None,
            tech_emails: Optional[Sequence[KafkaTechEmailArgs]] = None,
            termination_protection: Optional[bool] = None) -> Kafka
    func GetKafka(ctx *Context, name string, id IDInput, state *KafkaState, opts ...ResourceOption) (*Kafka, error)
    public static Kafka Get(string name, Input<string> id, KafkaState? state, CustomResourceOptions? opts = null)
    public static Kafka get(String name, Output<String> id, KafkaState state, CustomResourceOptions options)
    Resource lookup is not supported in YAML
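
    For example, in TypeScript an existing service can be rehydrated with the static get method. The ID format shown (project/service_name) is an assumption based on the usual Aiven resource identifier; verify it against your own stack before relying on it.

    import * as aiven from "@pulumi/aiven";
    
    // Look up an existing Kafka service by name and provider ID (assumed format: <project>/<service_name>).
    const existingKafka = aiven.Kafka.get("existing-kafka", "my-project/example-kafka");
    
    // The looked-up resource exposes the same output properties as a managed one.
    export const existingKafkaHost = existingKafka.serviceHost;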
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    resource_name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    name
    The unique name of the resulting resource.
    id
    The unique provider ID of the resource to lookup.
    state
    Any extra arguments used during the lookup.
    opts
    A bag of options that control this resource's behavior.
    The following state arguments are supported:
    AdditionalDiskSpace string
    Additional disk space. Possible values depend on the service type, the cloud provider and the project. Reducing this value will result in the service rebalancing.
    CloudName string
    Defines the cloud provider and region where the service is hosted. This can be changed freely after the service is created, but changing the value will trigger a potentially lengthy migration process for the service. The format is the cloud provider name (aws, azure, do, google, upcloud, etc.), a dash, and the cloud provider-specific region name. These are documented in each cloud provider's own support articles, like here for Google and here for AWS.
    Components List<KafkaComponent>
    Service component information objects
    DefaultAcl bool
    Create a default wildcard Kafka ACL.
    DiskSpace string
    Service disk space. Possible values depend on the service type, the cloud provider and the project. Reducing this value will result in the service rebalancing.

    Deprecated: This will be removed in v5.0.0. Please use additional_disk_space to specify the space to be added to the default disk_space defined by the plan.

    DiskSpaceCap string
    The maximum disk space of the service, possible values depend on the service type, the cloud provider and the project.
    DiskSpaceDefault string
    The default disk space of the service, possible values depend on the service type, the cloud provider and the project. It's also the minimum value for disk_space.
    DiskSpaceStep string
    The default disk space step of the service, possible values depend on the service type, the cloud provider and the project. disk_space needs to increase from disk_space_default in increments of this size.
    DiskSpaceUsed string
    Disk space that service is currently using
    KafkaServer List<KafkaKafka>
    Kafka server connection details.
    KafkaUserConfig KafkaKafkaUserConfig
    Kafka user configurable settings
    Karapace bool
    Switch the service to use Karapace for schema registry and REST proxy.

    Deprecated: Usage of this field is discouraged.

    MaintenanceWindowDow string
    Day of week when maintenance operations should be performed. One of monday, tuesday, wednesday, etc.
    MaintenanceWindowTime string
    Time of day when maintenance operations should be performed. UTC time in HH:mm:ss format.
    Plan string
    Defines what kind of computing resources are allocated for the service. It can be changed after creation, though there are some restrictions when going to a smaller plan: the new plan must have a sufficient amount of disk space to store all current data, and switching to a plan with fewer nodes might not be supported. The basic plan names are hobbyist, startup-x, business-x and premium-x where x is (roughly) the amount of memory on each node (other attributes like the number of CPUs and the amount of disk space also vary, but naming is based on memory). The available options can be seen on the Aiven pricing page.
    Project string
    The name of the project this resource belongs to. To set up proper dependencies please refer to this variable as a reference. Changing this property forces recreation of the resource.
    ProjectVpcId string
    Specifies the VPC the service should run in. If the value is not set the service is not run inside a VPC. When set, the value should be given as a reference to set up dependencies correctly and the VPC must be in the same cloud and region as the service itself. Project can be freely moved to and from VPC after creation but doing so triggers migration to new servers so the operation can take significant amount of time to complete if the service has a lot of data.
    ServiceHost string
    The hostname of the service.
    ServiceIntegrations List<KafkaServiceIntegration>
    Service integrations to specify when creating a service. Not applied after initial service creation
    ServiceName string
    Specifies the actual name of the service. The name cannot be changed later without destroying and re-creating the service so name should be picked based on intended service usage rather than current attributes.
    ServicePassword string
    Password used for connecting to the service, if applicable
    ServicePort int
    The port of the service
    ServiceType string
    Aiven internal service type code
    ServiceUri string
    URI for connecting to the service. Service specific info is under "kafka", "pg", etc.
    ServiceUsername string
    Username used for connecting to the service, if applicable
    State string
    Service state. One of POWEROFF, REBALANCING, REBUILDING or RUNNING
    StaticIps List<string>
    Static IPs that are going to be associated with this service. Please assign a value using the 'toset' function. Once a static ip resource is in the 'assigned' state it cannot be unbound from the node again
    Tags List<KafkaTag>
    Tags are key-value pairs that allow you to categorize services.
    TechEmails List<KafkaTechEmail>
    The email addresses for service contacts, who will receive important alerts and updates about this service. You can also set email contacts at the project level.
    TerminationProtection bool
    Prevents the service from being deleted. It is recommended to set this to true for all production services to prevent unintentional service deletion. This does not shield against deleting databases or topics but for services with backups much of the content can at least be restored from backup in case accidental deletion is done.
    AdditionalDiskSpace string
    Additional disk space. Possible values depend on the service type, the cloud provider and the project. Reducing this value will result in the service rebalancing.
    CloudName string
    Defines the cloud provider and region where the service is hosted. This can be changed freely after the service is created, but changing the value will trigger a potentially lengthy migration process for the service. The format is the cloud provider name (aws, azure, do, google, upcloud, etc.), a dash, and the cloud provider-specific region name. These are documented in each cloud provider's own support articles, like here for Google and here for AWS.
    Components []KafkaComponentArgs
    Service component information objects
    DefaultAcl bool
    Create a default wildcard Kafka ACL.
    DiskSpace string
    Service disk space. Possible values depend on the service type, the cloud provider and the project. Reducing this value will result in the service rebalancing.

    Deprecated: This will be removed in v5.0.0. Please use additional_disk_space to specify the space to be added to the default disk_space defined by the plan.

    DiskSpaceCap string
    The maximum disk space of the service, possible values depend on the service type, the cloud provider and the project.
    DiskSpaceDefault string
    The default disk space of the service, possible values depend on the service type, the cloud provider and the project. It's also the minimum value for disk_space.
    DiskSpaceStep string
    The default disk space step of the service, possible values depend on the service type, the cloud provider and the project. disk_space needs to increase from disk_space_default in increments of this size.
    DiskSpaceUsed string
    Disk space that service is currently using
    KafkaUserConfig KafkaKafkaUserConfigArgs
    Kafka user configurable settings
    Kafkas []KafkaKafkaArgs
    Kafka server connection details.
    Karapace bool
    Switch the service to use Karapace for schema registry and REST proxy.

    Deprecated: Usage of this field is discouraged.

    MaintenanceWindowDow string
    Day of week when maintenance operations should be performed. One of monday, tuesday, wednesday, etc.
    MaintenanceWindowTime string
    Time of day when maintenance operations should be performed. UTC time in HH:mm:ss format.
    Plan string
    Defines what kind of computing resources are allocated for the service. It can be changed after creation, though there are some restrictions when going to a smaller plan: the new plan must have a sufficient amount of disk space to store all current data, and switching to a plan with fewer nodes might not be supported. The basic plan names are hobbyist, startup-x, business-x and premium-x where x is (roughly) the amount of memory on each node (other attributes like the number of CPUs and the amount of disk space also vary, but naming is based on memory). The available options can be seen on the Aiven pricing page.
    Project string
    The name of the project this resource belongs to. To set up proper dependencies please refer to this variable as a reference. Changing this property forces recreation of the resource.
    ProjectVpcId string
    Specifies the VPC the service should run in. If the value is not set the service is not run inside a VPC. When set, the value should be given as a reference to set up dependencies correctly and the VPC must be in the same cloud and region as the service itself. Project can be freely moved to and from VPC after creation but doing so triggers migration to new servers so the operation can take significant amount of time to complete if the service has a lot of data.
    ServiceHost string
    The hostname of the service.
    ServiceIntegrations []KafkaServiceIntegrationArgs
    Service integrations to specify when creating a service. Not applied after initial service creation
    ServiceName string
    Specifies the actual name of the service. The name cannot be changed later without destroying and re-creating the service so name should be picked based on intended service usage rather than current attributes.
    ServicePassword string
    Password used for connecting to the service, if applicable
    ServicePort int
    The port of the service
    ServiceType string
    Aiven internal service type code
    ServiceUri string
    URI for connecting to the service. Service specific info is under "kafka", "pg", etc.
    ServiceUsername string
    Username used for connecting to the service, if applicable
    State string
    Service state. One of POWEROFF, REBALANCING, REBUILDING or RUNNING
    StaticIps []string
    Static IPs that are going to be associated with this service. Please assign a value using the 'toset' function. Once a static ip resource is in the 'assigned' state it cannot be unbound from the node again
    Tags []KafkaTagArgs
    Tags are key-value pairs that allow you to categorize services.
    TechEmails []KafkaTechEmailArgs
    The email addresses for service contacts, who will receive important alerts and updates about this service. You can also set email contacts at the project level.
    TerminationProtection bool
    Prevents the service from being deleted. It is recommended to set this to true for all production services to prevent unintentional service deletion. This does not shield against deleting databases or topics but for services with backups much of the content can at least be restored from backup in case accidental deletion is done.
    additionalDiskSpace String
    Additional disk space. Possible values depend on the service type, the cloud provider and the project. Reducing this value will result in the service rebalancing.
    cloudName String
    Defines the cloud provider and region where the service is hosted. This can be changed freely after the service is created, but changing the value will trigger a potentially lengthy migration process for the service. The format is the cloud provider name (aws, azure, do, google, upcloud, etc.), a dash, and the cloud provider-specific region name. These are documented in each cloud provider's own support articles, like here for Google and here for AWS.
    components List<KafkaComponent>
    Service component information objects
    defaultAcl Boolean
    Create a default wildcard Kafka ACL.
    diskSpace String
    Service disk space. Possible values depend on the service type, the cloud provider and the project. Reducing this value will result in the service rebalancing.

    Deprecated: This will be removed in v5.0.0. Please use additional_disk_space to specify the space to be added to the default disk_space defined by the plan.

    diskSpaceCap String
    The maximum disk space of the service, possible values depend on the service type, the cloud provider and the project.
    diskSpaceDefault String
    The default disk space of the service, possible values depend on the service type, the cloud provider and the project. It's also the minimum value for disk_space.
    diskSpaceStep String
    The default disk space step of the service, possible values depend on the service type, the cloud provider and the project. disk_space needs to increase from disk_space_default in increments of this size.
    diskSpaceUsed String
    Disk space that service is currently using
    kafkaUserConfig KafkaKafkaUserConfig
    Kafka user configurable settings
    kafkas List<KafkaKafka>
    Kafka server connection details.
    karapace Boolean
    Switch the service to use Karapace for schema registry and REST proxy.

    Deprecated: Usage of this field is discouraged.

    maintenanceWindowDow String
    Day of week when maintenance operations should be performed. One of monday, tuesday, wednesday, etc.
    maintenanceWindowTime String
    Time of day when maintenance operations should be performed. UTC time in HH:mm:ss format.
    plan String
    Defines what kind of computing resources are allocated for the service. It can be changed after creation, though there are some restrictions when going to a smaller plan: the new plan must have a sufficient amount of disk space to store all current data, and switching to a plan with fewer nodes might not be supported. The basic plan names are hobbyist, startup-x, business-x and premium-x where x is (roughly) the amount of memory on each node (other attributes like the number of CPUs and the amount of disk space also vary, but naming is based on memory). The available options can be seen on the Aiven pricing page.
    project String
    The name of the project this resource belongs to. To set up proper dependencies please refer to this variable as a reference. Changing this property forces recreation of the resource.
    projectVpcId String
    Specifies the VPC the service should run in. If the value is not set the service is not run inside a VPC. When set, the value should be given as a reference to set up dependencies correctly and the VPC must be in the same cloud and region as the service itself. Project can be freely moved to and from VPC after creation but doing so triggers migration to new servers so the operation can take significant amount of time to complete if the service has a lot of data.
    serviceHost String
    The hostname of the service.
    serviceIntegrations List<KafkaServiceIntegration>
    Service integrations to specify when creating a service. Not applied after initial service creation
    serviceName String
    Specifies the actual name of the service. The name cannot be changed later without destroying and re-creating the service so name should be picked based on intended service usage rather than current attributes.
    servicePassword String
    Password used for connecting to the service, if applicable
    servicePort Integer
    The port of the service
    serviceType String
    Aiven internal service type code
    serviceUri String
    URI for connecting to the service. Service specific info is under "kafka", "pg", etc.
    serviceUsername String
    Username used for connecting to the service, if applicable
    state String
    Service state. One of POWEROFF, REBALANCING, REBUILDING or RUNNING
    staticIps List<String>
    Static IPs that are going to be associated with this service. Please assign a value using the 'toset' function. Once a static ip resource is in the 'assigned' state it cannot be unbound from the node again
    tags List<KafkaTag>
    Tags are key-value pairs that allow you to categorize services.
    techEmails List<KafkaTechEmail>
    The email addresses for service contacts, who will receive important alerts and updates about this service. You can also set email contacts at the project level.
    terminationProtection Boolean
    Prevents the service from being deleted. It is recommended to set this to true for all production services to prevent unintentional service deletion. This does not shield against deleting databases or topics but for services with backups much of the content can at least be restored from backup in case accidental deletion is done.
    additionalDiskSpace string
    Additional disk space. Possible values depend on the service type, the cloud provider and the project. Reducing this value will result in the service rebalancing.
    cloudName string
    Defines the cloud provider and region where the service is hosted. This can be changed freely after the service is created, but changing the value will trigger a potentially lengthy migration process for the service. The format is the cloud provider name (aws, azure, do, google, upcloud, etc.), a dash, and the cloud provider-specific region name. These are documented in each cloud provider's own support articles, like here for Google and here for AWS.
    components KafkaComponent[]
    Service component information objects
    defaultAcl boolean
    Create a default wildcard Kafka ACL.
    diskSpace string
    Service disk space. Possible values depend on the service type, the cloud provider and the project. Reducing this value will result in the service rebalancing.

    Deprecated: This will be removed in v5.0.0. Please use additional_disk_space to specify the space to be added to the default disk_space defined by the plan.

    diskSpaceCap string
    The maximum disk space of the service, possible values depend on the service type, the cloud provider and the project.
    diskSpaceDefault string
    The default disk space of the service, possible values depend on the service type, the cloud provider and the project. It's also the minimum value for disk_space.
    diskSpaceStep string
    The default disk space step of the service, possible values depend on the service type, the cloud provider and the project. disk_space needs to increase from disk_space_default in increments of this size.
    diskSpaceUsed string
    Disk space that service is currently using
    kafkaUserConfig KafkaKafkaUserConfig
    Kafka user configurable settings
    kafkas KafkaKafka[]
    Kafka server connection details.
    karapace boolean
    Switch the service to use Karapace for schema registry and REST proxy.

    Deprecated: Usage of this field is discouraged.

    maintenanceWindowDow string
    Day of week when maintenance operations should be performed. One of monday, tuesday, wednesday, etc.
    maintenanceWindowTime string
    Time of day when maintenance operations should be performed. UTC time in HH:mm:ss format.
    plan string
    Defines what kind of computing resources are allocated for the service. It can be changed after creation, though there are some restrictions when going to a smaller plan: the new plan must have a sufficient amount of disk space to store all current data, and switching to a plan with fewer nodes might not be supported. The basic plan names are hobbyist, startup-x, business-x and premium-x where x is (roughly) the amount of memory on each node (other attributes like the number of CPUs and the amount of disk space also vary, but naming is based on memory). The available options can be seen on the Aiven pricing page.
    project string
    The name of the project this resource belongs to. To set up proper dependencies please refer to this variable as a reference. Changing this property forces recreation of the resource.
    projectVpcId string
    Specifies the VPC the service should run in. If the value is not set the service is not run inside a VPC. When set, the value should be given as a reference to set up dependencies correctly and the VPC must be in the same cloud and region as the service itself. Project can be freely moved to and from VPC after creation but doing so triggers migration to new servers so the operation can take significant amount of time to complete if the service has a lot of data.
    serviceHost string
    The hostname of the service.
    serviceIntegrations KafkaServiceIntegration[]
    Service integrations to specify when creating a service. Not applied after initial service creation
    serviceName string
    Specifies the actual name of the service. The name cannot be changed later without destroying and re-creating the service so name should be picked based on intended service usage rather than current attributes.
    servicePassword string
    Password used for connecting to the service, if applicable
    servicePort number
    The port of the service
    serviceType string
    Aiven internal service type code
    serviceUri string
    URI for connecting to the service. Service specific info is under "kafka", "pg", etc.
    serviceUsername string
    Username used for connecting to the service, if applicable
    state string
    Service state. One of POWEROFF, REBALANCING, REBUILDING or RUNNING
    staticIps string[]
    Static IPs that are going to be associated with this service. Please assign a value using the 'toset' function. Once a static IP resource is in the 'assigned' state, it cannot be unbound from the node again.
    tags KafkaTag[]
    Tags are key-value pairs that allow you to categorize services.
    techEmails KafkaTechEmail[]
    The email addresses for service contacts, who will receive important alerts and updates about this service. You can also set email contacts at the project level.
    terminationProtection boolean
    Prevents the service from being deleted. It is recommended to set this to true for all production services to prevent unintentional service deletion. This does not shield against deleting databases or topics, but for services with backups much of the content can at least be restored from backup if it is deleted accidentally.
    additional_disk_space str
    Additional disk space. Possible values depend on the service type, the cloud provider and the project. Reducing the value will trigger service rebalancing.
    cloud_name str
    Defines the cloud provider and region where the service is hosted. This can be changed freely after the service is created, but changing the value will trigger a potentially lengthy migration process for the service. The format is the cloud provider name (aws, azure, do, google, upcloud, etc.), a dash, and the cloud provider specific region name. These are documented in each cloud provider's own support articles, like here for Google and here for AWS.
    components Sequence[KafkaComponentArgs]
    Service component information objects
    default_acl bool
    Create a default wildcard Kafka ACL.
    disk_space str
    Service disk space. Possible values depend on the service type, the cloud provider and the project. Reducing the value will trigger service rebalancing.

    Deprecated: This will be removed in v5.0.0. Please use additional_disk_space to specify the space to be added to the default disk_space defined by the plan.

    disk_space_cap str
    The maximum disk space of the service; possible values depend on the service type, the cloud provider and the project.
    disk_space_default str
    The default disk space of the service; possible values depend on the service type, the cloud provider and the project. It's also the minimum value for disk_space.
    disk_space_step str
    The default disk space step of the service; possible values depend on the service type, the cloud provider and the project. disk_space must be increased from disk_space_default in increments of this size.
    disk_space_used str
    Disk space that the service is currently using.
    kafka_user_config KafkaKafkaUserConfigArgs
    Kafka user configurable settings
    kafkas Sequence[KafkaKafkaArgs]
    Kafka server connection details.
    karapace bool
    Switch the service to use Karapace for schema registry and REST proxy.

    Deprecated: Usage of this field is discouraged.

    maintenance_window_dow str
    Day of week when maintenance operations should be performed. One of monday, tuesday, wednesday, etc.
    maintenance_window_time str
    Time of day when maintenance operations should be performed. UTC time in HH:mm:ss format.
    plan str
    Defines what kind of computing resources are allocated for the service. It can be changed after creation, though there are some restrictions when going to a smaller plan: the new plan must have enough disk space to store all current data, and switching to a plan with fewer nodes might not be supported. The basic plan names are hobbyist, startup-x, business-x and premium-x, where x is (roughly) the amount of memory on each node (other attributes such as the number of CPUs and the amount of disk space also vary, but the naming is based on memory). The available options can be seen on the Aiven pricing page.
    project str
    The name of the project this resource belongs to. To set up proper dependencies please refer to this variable as a reference. Changing this property forces recreation of the resource.
    project_vpc_id str
    Specifies the VPC the service should run in. If the value is not set, the service does not run inside a VPC. When set, the value should be given as a reference to set up dependencies correctly, and the VPC must be in the same cloud and region as the service itself. The service can be freely moved in and out of a VPC after creation, but doing so triggers a migration to new servers, so the operation can take a significant amount of time to complete if the service has a lot of data.
    service_host str
    The hostname of the service.
    service_integrations Sequence[KafkaServiceIntegrationArgs]
    Service integrations to specify when creating a service. Not applied after initial service creation
    service_name str
    Specifies the actual name of the service. The name cannot be changed later without destroying and re-creating the service, so the name should be picked based on the intended service usage rather than current attributes.
    service_password str
    Password used for connecting to the service, if applicable
    service_port int
    The port of the service
    service_type str
    Aiven internal service type code
    service_uri str
    URI for connecting to the service. Service specific info is under "kafka", "pg", etc.
    service_username str
    Username used for connecting to the service, if applicable
    state str
    Service state. One of POWEROFF, REBALANCING, REBUILDING or RUNNING
    static_ips Sequence[str]
    Static IPs that are going to be associated with this service. Please assign a value using the 'toset' function. Once a static IP resource is in the 'assigned' state, it cannot be unbound from the node again.
    tags Sequence[KafkaTagArgs]
    Tags are key-value pairs that allow you to categorize services.
    tech_emails Sequence[KafkaTechEmailArgs]
    The email addresses for service contacts, who will receive important alerts and updates about this service. You can also set email contacts at the project level.
    termination_protection bool
    Prevents the service from being deleted. It is recommended to set this to true for all production services to prevent unintentional service deletion. This does not shield against deleting databases or topics, but for services with backups much of the content can at least be restored from backup if it is deleted accidentally.
    additionalDiskSpace String
    Additional disk space. Possible values depend on the service type, the cloud provider and the project. Reducing the value will trigger service rebalancing.
    cloudName String
    Defines the cloud provider and region where the service is hosted. This can be changed freely after the service is created, but changing the value will trigger a potentially lengthy migration process for the service. The format is the cloud provider name (aws, azure, do, google, upcloud, etc.), a dash, and the cloud provider specific region name. These are documented in each cloud provider's own support articles, like here for Google and here for AWS.
    components List<Property Map>
    Service component information objects
    defaultAcl Boolean
    Create a default wildcard Kafka ACL.
    diskSpace String
    Service disk space. Possible values depend on the service type, the cloud provider and the project. Reducing the value will trigger service rebalancing.

    Deprecated: This will be removed in v5.0.0. Please use additional_disk_space to specify the space to be added to the default disk_space defined by the plan.

    diskSpaceCap String
    The maximum disk space of the service; possible values depend on the service type, the cloud provider and the project.
    diskSpaceDefault String
    The default disk space of the service; possible values depend on the service type, the cloud provider and the project. It's also the minimum value for disk_space.
    diskSpaceStep String
    The default disk space step of the service; possible values depend on the service type, the cloud provider and the project. disk_space must be increased from disk_space_default in increments of this size.
    diskSpaceUsed String
    Disk space that the service is currently using.
    kafkaUserConfig Property Map
    Kafka user configurable settings
    kafkas List<Property Map>
    Kafka server connection details.
    karapace Boolean
    Switch the service to use Karapace for schema registry and REST proxy.

    Deprecated: Usage of this field is discouraged.

    maintenanceWindowDow String
    Day of week when maintenance operations should be performed. One of monday, tuesday, wednesday, etc.
    maintenanceWindowTime String
    Time of day when maintenance operations should be performed. UTC time in HH:mm:ss format.
    plan String
    Defines what kind of computing resources are allocated for the service. It can be changed after creation, though there are some restrictions when going to a smaller plan: the new plan must have enough disk space to store all current data, and switching to a plan with fewer nodes might not be supported. The basic plan names are hobbyist, startup-x, business-x and premium-x, where x is (roughly) the amount of memory on each node (other attributes such as the number of CPUs and the amount of disk space also vary, but the naming is based on memory). The available options can be seen on the Aiven pricing page.
    project String
    The name of the project this resource belongs to. To set up proper dependencies please refer to this variable as a reference. Changing this property forces recreation of the resource.
    projectVpcId String
    Specifies the VPC the service should run in. If the value is not set, the service does not run inside a VPC. When set, the value should be given as a reference to set up dependencies correctly, and the VPC must be in the same cloud and region as the service itself. The service can be freely moved in and out of a VPC after creation, but doing so triggers a migration to new servers, so the operation can take a significant amount of time to complete if the service has a lot of data.
    serviceHost String
    The hostname of the service.
    serviceIntegrations List<Property Map>
    Service integrations to specify when creating a service. Not applied after initial service creation
    serviceName String
    Specifies the actual name of the service. The name cannot be changed later without destroying and re-creating the service, so the name should be picked based on the intended service usage rather than current attributes.
    servicePassword String
    Password used for connecting to the service, if applicable
    servicePort Number
    The port of the service
    serviceType String
    Aiven internal service type code
    serviceUri String
    URI for connecting to the service. Service specific info is under "kafka", "pg", etc.
    serviceUsername String
    Username used for connecting to the service, if applicable
    state String
    Service state. One of POWEROFF, REBALANCING, REBUILDING or RUNNING
    staticIps List<String>
    Static IPs that are going to be associated with this service. Please assign a value using the 'toset' function. Once a static IP resource is in the 'assigned' state, it cannot be unbound from the node again.
    tags List<Property Map>
    Tags are key-value pairs that allow you to categorize services.
    techEmails List<Property Map>
    The email addresses for service contacts, who will receive important alerts and updates about this service. You can also set email contacts at the project level.
    terminationProtection Boolean
    Prevents the service from being deleted. It is recommended to set this to true for all production services to prevent unintentional service deletion. This does not shield against deleting databases or topics, but for services with backups much of the content can at least be restored from backup if it is deleted accidentally.
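
    A minimal sketch (TypeScript, matching the example usage) that combines several of the arguments described above: additional disk space, a maintenance window, termination protection and tags. The project, plan and tag values are placeholders, and the KafkaTag key/value shape is assumed from the provider schema.

    import * as aiven from "@pulumi/aiven";

    const protectedKafka = new aiven.Kafka("protected_kafka", {
        project: "my-project",                      // placeholder project name
        cloudName: "google-europe-west1",
        plan: "business-4",
        serviceName: "protected-kafka",
        maintenanceWindowDow: "sunday",
        maintenanceWindowTime: "02:00:00",          // UTC, HH:mm:ss
        additionalDiskSpace: "100GiB",              // added on top of the plan's default disk space (placeholder value)
        terminationProtection: true,                // block accidental deletion of the service
        tags: [{ key: "environment", value: "production" }], // assumed key/value tag shape
    });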

    Supporting Types

    KafkaComponent, KafkaComponentArgs

    Component string
    Service component name
    ConnectionUri string
    Connection info for connecting to the service component. This is a combination of host and port.
    Host string
    Host name for connecting to the service component
    KafkaAuthenticationMethod string
    Kafka authentication method. This is a value specific to the 'kafka' service component
    Port int
    Port number for connecting to the service component
    Route string
    Network access route
    Ssl bool
    Whether the endpoint is encrypted or accepts plaintext. By default, endpoints are always encrypted; this property is only included for service components that may disable encryption.
    Usage string
    DNS usage name
    Component string
    Service component name
    ConnectionUri string
    Connection info for connecting to the service component. This is a combination of host and port.
    Host string
    Host name for connecting to the service component
    KafkaAuthenticationMethod string
    Kafka authentication method. This is a value specific to the 'kafka' service component
    Port int
    Port number for connecting to the service component
    Route string
    Network access route
    Ssl bool
    Whether the endpoint is encrypted or accepts plaintext. By default, endpoints are always encrypted; this property is only included for service components that may disable encryption.
    Usage string
    DNS usage name
    component String
    Service component name
    connectionUri String
    Connection info for connecting to the service component. This is a combination of host and port.
    host String
    Host name for connecting to the service component
    kafkaAuthenticationMethod String
    Kafka authentication method. This is a value specific to the 'kafka' service component
    port Integer
    Port number for connecting to the service component
    route String
    Network access route
    ssl Boolean
    Whether the endpoint is encrypted or accepts plaintext. By default, endpoints are always encrypted; this property is only included for service components that may disable encryption.
    usage String
    DNS usage name
    component string
    Service component name
    connectionUri string
    Connection info for connecting to the service component. This is a combination of host and port.
    host string
    Host name for connecting to the service component
    kafkaAuthenticationMethod string
    Kafka authentication method. This is a value specific to the 'kafka' service component
    port number
    Port number for connecting to the service component
    route string
    Network access route
    ssl boolean
    Whether the endpoint is encrypted or accepts plaintext. By default, endpoints are always encrypted; this property is only included for service components that may disable encryption.
    usage string
    DNS usage name
    component str
    Service component name
    connection_uri str
    Connection info for connecting to the service component. This is a combination of host and port.
    host str
    Host name for connecting to the service component
    kafka_authentication_method str
    Kafka authentication method. This is a value specific to the 'kafka' service component
    port int
    Port number for connecting to the service component
    route str
    Network access route
    ssl bool
    Whether the endpoint is encrypted or accepts plaintext. By default, endpoints are always encrypted; this property is only included for service components that may disable encryption.
    usage str
    DNS usage name
    component String
    Service component name
    connectionUri String
    Connection info for connecting to the service component. This is a combination of host and port.
    host String
    Host name for connecting to the service component
    kafkaAuthenticationMethod String
    Kafka authentication method. This is a value specific to the 'kafka' service component
    port Number
    Port number for connecting to the service component
    route String
    Network access route
    ssl Boolean
    Whether the endpoint is encrypted or accepts plaintext. By default, endpoints are always encrypted; this property is only included for service components that may disable encryption.
    usage String
    DNS usage name
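
    The components attribute is exported by the resource, so the endpoints described above can be surfaced as stack outputs. A hedged sketch, assuming exampleKafka is the aiven.Kafka resource from the example usage:

    import * as aiven from "@pulumi/aiven";

    declare const exampleKafka: aiven.Kafka; // the resource created in the example usage

    // Map each service component to a small summary object.
    export const kafkaEndpoints = exampleKafka.components.apply(components =>
        components.map(c => ({
            component: c.component,              // e.g. "kafka", "kafka_connect"
            endpoint: `${c.host}:${c.port}`,
            route: c.route,                      // network access route
            encrypted: c.ssl,
        })));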

    KafkaKafka, KafkaKafkaArgs

    AccessCert string
    The Kafka client certificate.
    AccessKey string
    The Kafka client certificate key.
    ConnectUri string
    The Kafka Connect URI.
    RestUri string
    The Kafka REST URI.
    SchemaRegistryUri string
    The Schema Registry URI.
    Uris List<string>
    Kafka server URIs.
    AccessCert string
    The Kafka client certificate.
    AccessKey string
    The Kafka client certificate key.
    ConnectUri string
    The Kafka Connect URI.
    RestUri string
    The Kafka REST URI.
    SchemaRegistryUri string
    The Schema Registry URI.
    Uris []string
    Kafka server URIs.
    accessCert String
    The Kafka client certificate.
    accessKey String
    The Kafka client certificate key.
    connectUri String
    The Kafka Connect URI.
    restUri String
    The Kafka REST URI.
    schemaRegistryUri String
    The Schema Registry URI.
    uris List<String>
    Kafka server URIs.
    accessCert string
    The Kafka client certificate.
    accessKey string
    The Kafka client certificate key.
    connectUri string
    The Kafka Connect URI.
    restUri string
    The Kafka REST URI.
    schemaRegistryUri string
    The Schema Registry URI.
    uris string[]
    Kafka server URIs.
    access_cert str
    The Kafka client certificate.
    access_key str
    The Kafka client certificate key.
    connect_uri str
    The Kafka Connect URI.
    rest_uri str
    The Kafka REST URI.
    schema_registry_uri str
    The Schema Registry URI.
    uris Sequence[str]
    Kafka server URIs.
    accessCert String
    The Kafka client certificate.
    accessKey String
    The Kafka client certificate key.
    connectUri String
    The Kafka Connect URI.
    restUri String
    The Kafka REST URI.
    schemaRegistryUri String
    The Schema Registry URI.
    uris List<String>
    Kafka server URIs.
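
    Because the access certificate and key are credentials, it is worth exporting them as secrets. A hedged sketch, assuming exampleKafka is the resource created in the example usage:

    import * as pulumi from "@pulumi/pulumi";
    import * as aiven from "@pulumi/aiven";

    declare const exampleKafka: aiven.Kafka; // the resource created in the example usage

    // Broker URIs are safe to show; the client certificate and key are marked as secrets.
    export const kafkaUris = exampleKafka.kafkas.apply(k => k[0]?.uris);
    export const kafkaClientCert = pulumi.secret(exampleKafka.kafkas.apply(k => k[0]?.accessCert));
    export const kafkaClientKey = pulumi.secret(exampleKafka.kafkas.apply(k => k[0]?.accessKey));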

    KafkaKafkaUserConfig, KafkaKafkaUserConfigArgs

    AdditionalBackupRegions string
    Additional Cloud Regions for Backup Replication.

    Deprecated: This property is deprecated.

    AivenKafkaTopicMessages bool
    Allow access to read Kafka topic messages in the Aiven Console and REST API.
    CustomDomain string
    Serve the web frontend using a custom CNAME pointing to the Aiven DNS name. Example: grafana.example.org.
    IpFilterObjects List<KafkaKafkaUserConfigIpFilterObject>
    Allow incoming connections from CIDR address block, e.g. 10.20.0.0/16
    IpFilterStrings List<string>
    Allow incoming connections from CIDR address block, e.g. 10.20.0.0/16.
    IpFilters List<string>
    Allow incoming connections from CIDR address block, e.g. 10.20.0.0/16.

    Deprecated: Use ip_filter_string instead.

    Kafka KafkaKafkaUserConfigKafka
    Kafka broker configuration values
    KafkaAuthenticationMethods KafkaKafkaUserConfigKafkaAuthenticationMethods
    Kafka authentication methods
    KafkaConnect bool
    Enable Kafka Connect service. Default: false.
    KafkaConnectConfig KafkaKafkaUserConfigKafkaConnectConfig
    Kafka Connect configuration values
    KafkaConnectSecretProviders List<KafkaKafkaUserConfigKafkaConnectSecretProvider>
    KafkaRest bool
    Enable Kafka-REST service. Default: false.
    KafkaRestAuthorization bool
    Enable authorization in Kafka-REST service.
    KafkaRestConfig KafkaKafkaUserConfigKafkaRestConfig
    Kafka REST configuration
    KafkaVersion string
    Enum: 3.1, 3.2, 3.3, 3.4, 3.5, 3.6, 3.7, and newer. Kafka major version.
    LetsencryptSaslPrivatelink bool
    Use Letsencrypt CA for Kafka SASL via Privatelink.
    PrivateAccess KafkaKafkaUserConfigPrivateAccess
    Allow access to selected service ports from private networks
    PrivatelinkAccess KafkaKafkaUserConfigPrivatelinkAccess
    Allow access to selected service components through Privatelink
    PublicAccess KafkaKafkaUserConfigPublicAccess
    Allow access to selected service ports from the public Internet
    SchemaRegistry bool
    Enable Schema-Registry service. Default: false.
    SchemaRegistryConfig KafkaKafkaUserConfigSchemaRegistryConfig
    Schema Registry configuration
    ServiceLog bool
    Store logs for the service so that they are available in the HTTP API and console.
    StaticIps bool
    Use static public IP addresses.
    TieredStorage KafkaKafkaUserConfigTieredStorage
    Tiered storage configuration
    AdditionalBackupRegions string
    Additional Cloud Regions for Backup Replication.

    Deprecated: This property is deprecated.

    AivenKafkaTopicMessages bool
    Allow access to read Kafka topic messages in the Aiven Console and REST API.
    CustomDomain string
    Serve the web frontend using a custom CNAME pointing to the Aiven DNS name. Example: grafana.example.org.
    IpFilterObjects []KafkaKafkaUserConfigIpFilterObject
    Allow incoming connections from CIDR address block, e.g. 10.20.0.0/16
    IpFilterStrings []string
    Allow incoming connections from CIDR address block, e.g. 10.20.0.0/16.
    IpFilters []string
    Allow incoming connections from CIDR address block, e.g. 10.20.0.0/16.

    Deprecated: Use ip_filter_string instead.

    Kafka KafkaKafkaUserConfigKafka
    Kafka broker configuration values
    KafkaAuthenticationMethods KafkaKafkaUserConfigKafkaAuthenticationMethods
    Kafka authentication methods
    KafkaConnect bool
    Enable Kafka Connect service. Default: false.
    KafkaConnectConfig KafkaKafkaUserConfigKafkaConnectConfig
    Kafka Connect configuration values
    KafkaConnectSecretProviders []KafkaKafkaUserConfigKafkaConnectSecretProvider
    KafkaRest bool
    Enable Kafka-REST service. Default: false.
    KafkaRestAuthorization bool
    Enable authorization in Kafka-REST service.
    KafkaRestConfig KafkaKafkaUserConfigKafkaRestConfig
    Kafka REST configuration
    KafkaVersion string
    Enum: 3.1, 3.2, 3.3, 3.4, 3.5, 3.6, 3.7, and newer. Kafka major version.
    LetsencryptSaslPrivatelink bool
    Use Letsencrypt CA for Kafka SASL via Privatelink.
    PrivateAccess KafkaKafkaUserConfigPrivateAccess
    Allow access to selected service ports from private networks
    PrivatelinkAccess KafkaKafkaUserConfigPrivatelinkAccess
    Allow access to selected service components through Privatelink
    PublicAccess KafkaKafkaUserConfigPublicAccess
    Allow access to selected service ports from the public Internet
    SchemaRegistry bool
    Enable Schema-Registry service. Default: false.
    SchemaRegistryConfig KafkaKafkaUserConfigSchemaRegistryConfig
    Schema Registry configuration
    ServiceLog bool
    Store logs for the service so that they are available in the HTTP API and console.
    StaticIps bool
    Use static public IP addresses.
    TieredStorage KafkaKafkaUserConfigTieredStorage
    Tiered storage configuration
    additionalBackupRegions String
    Additional Cloud Regions for Backup Replication.

    Deprecated: This property is deprecated.

    aivenKafkaTopicMessages Boolean
    Allow access to read Kafka topic messages in the Aiven Console and REST API.
    customDomain String
    Serve the web frontend using a custom CNAME pointing to the Aiven DNS name. Example: grafana.example.org.
    ipFilterObjects List<KafkaKafkaUserConfigIpFilterObject>
    Allow incoming connections from CIDR address block, e.g. 10.20.0.0/16
    ipFilterStrings List<String>
    Allow incoming connections from CIDR address block, e.g. 10.20.0.0/16.
    ipFilters List<String>
    Allow incoming connections from CIDR address block, e.g. 10.20.0.0/16.

    Deprecated: Use ip_filter_string instead.

    kafka KafkaKafkaUserConfigKafka
    Kafka broker configuration values
    kafkaAuthenticationMethods KafkaKafkaUserConfigKafkaAuthenticationMethods
    Kafka authentication methods
    kafkaConnect Boolean
    Enable Kafka Connect service. Default: false.
    kafkaConnectConfig KafkaKafkaUserConfigKafkaConnectConfig
    Kafka Connect configuration values
    kafkaConnectSecretProviders List<KafkaKafkaUserConfigKafkaConnectSecretProvider>
    kafkaRest Boolean
    Enable Kafka-REST service. Default: false.
    kafkaRestAuthorization Boolean
    Enable authorization in Kafka-REST service.
    kafkaRestConfig KafkaKafkaUserConfigKafkaRestConfig
    Kafka REST configuration
    kafkaVersion String
    Enum: 3.1, 3.2, 3.3, 3.4, 3.5, 3.6, 3.7, and newer. Kafka major version.
    letsencryptSaslPrivatelink Boolean
    Use Letsencrypt CA for Kafka SASL via Privatelink.
    privateAccess KafkaKafkaUserConfigPrivateAccess
    Allow access to selected service ports from private networks
    privatelinkAccess KafkaKafkaUserConfigPrivatelinkAccess
    Allow access to selected service components through Privatelink
    publicAccess KafkaKafkaUserConfigPublicAccess
    Allow access to selected service ports from the public Internet
    schemaRegistry Boolean
    Enable Schema-Registry service. Default: false.
    schemaRegistryConfig KafkaKafkaUserConfigSchemaRegistryConfig
    Schema Registry configuration
    serviceLog Boolean
    Store logs for the service so that they are available in the HTTP API and console.
    staticIps Boolean
    Use static public IP addresses.
    tieredStorage KafkaKafkaUserConfigTieredStorage
    Tiered storage configuration
    additionalBackupRegions string
    Additional Cloud Regions for Backup Replication.

    Deprecated: This property is deprecated.

    aivenKafkaTopicMessages boolean
    Allow access to read Kafka topic messages in the Aiven Console and REST API.
    customDomain string
    Serve the web frontend using a custom CNAME pointing to the Aiven DNS name. Example: grafana.example.org.
    ipFilterObjects KafkaKafkaUserConfigIpFilterObject[]
    Allow incoming connections from CIDR address block, e.g. 10.20.0.0/16
    ipFilterStrings string[]
    Allow incoming connections from CIDR address block, e.g. 10.20.0.0/16.
    ipFilters string[]
    Allow incoming connections from CIDR address block, e.g. 10.20.0.0/16.

    Deprecated: Use ip_filter_string instead.

    kafka KafkaKafkaUserConfigKafka
    Kafka broker configuration values
    kafkaAuthenticationMethods KafkaKafkaUserConfigKafkaAuthenticationMethods
    Kafka authentication methods
    kafkaConnect boolean
    Enable Kafka Connect service. Default: false.
    kafkaConnectConfig KafkaKafkaUserConfigKafkaConnectConfig
    Kafka Connect configuration values
    kafkaConnectSecretProviders KafkaKafkaUserConfigKafkaConnectSecretProvider[]
    kafkaRest boolean
    Enable Kafka-REST service. Default: false.
    kafkaRestAuthorization boolean
    Enable authorization in Kafka-REST service.
    kafkaRestConfig KafkaKafkaUserConfigKafkaRestConfig
    Kafka REST configuration
    kafkaVersion string
    Enum: 3.1, 3.2, 3.3, 3.4, 3.5, 3.6, 3.7, and newer. Kafka major version.
    letsencryptSaslPrivatelink boolean
    Use Letsencrypt CA for Kafka SASL via Privatelink.
    privateAccess KafkaKafkaUserConfigPrivateAccess
    Allow access to selected service ports from private networks
    privatelinkAccess KafkaKafkaUserConfigPrivatelinkAccess
    Allow access to selected service components through Privatelink
    publicAccess KafkaKafkaUserConfigPublicAccess
    Allow access to selected service ports from the public Internet
    schemaRegistry boolean
    Enable Schema-Registry service. Default: false.
    schemaRegistryConfig KafkaKafkaUserConfigSchemaRegistryConfig
    Schema Registry configuration
    serviceLog boolean
    Store logs for the service so that they are available in the HTTP API and console.
    staticIps boolean
    Use static public IP addresses.
    tieredStorage KafkaKafkaUserConfigTieredStorage
    Tiered storage configuration
    additional_backup_regions str
    Additional Cloud Regions for Backup Replication.

    Deprecated: This property is deprecated.

    aiven_kafka_topic_messages bool
    Allow access to read Kafka topic messages in the Aiven Console and REST API.
    custom_domain str
    Serve the web frontend using a custom CNAME pointing to the Aiven DNS name. Example: grafana.example.org.
    ip_filter_objects Sequence[KafkaKafkaUserConfigIpFilterObject]
    Allow incoming connections from CIDR address block, e.g. 10.20.0.0/16
    ip_filter_strings Sequence[str]
    Allow incoming connections from CIDR address block, e.g. 10.20.0.0/16.
    ip_filters Sequence[str]
    Allow incoming connections from CIDR address block, e.g. 10.20.0.0/16.

    Deprecated: Use ip_filter_string instead.

    kafka KafkaKafkaUserConfigKafka
    Kafka broker configuration values
    kafka_authentication_methods KafkaKafkaUserConfigKafkaAuthenticationMethods
    Kafka authentication methods
    kafka_connect bool
    Enable Kafka Connect service. Default: false.
    kafka_connect_config KafkaKafkaUserConfigKafkaConnectConfig
    Kafka Connect configuration values
    kafka_connect_secret_providers Sequence[KafkaKafkaUserConfigKafkaConnectSecretProvider]
    kafka_rest bool
    Enable Kafka-REST service. Default: false.
    kafka_rest_authorization bool
    Enable authorization in Kafka-REST service.
    kafka_rest_config KafkaKafkaUserConfigKafkaRestConfig
    Kafka REST configuration
    kafka_version str
    Enum: 3.1, 3.2, 3.3, 3.4, 3.5, 3.6, 3.7, and newer. Kafka major version.
    letsencrypt_sasl_privatelink bool
    Use Letsencrypt CA for Kafka SASL via Privatelink.
    private_access KafkaKafkaUserConfigPrivateAccess
    Allow access to selected service ports from private networks
    privatelink_access KafkaKafkaUserConfigPrivatelinkAccess
    Allow access to selected service components through Privatelink
    public_access KafkaKafkaUserConfigPublicAccess
    Allow access to selected service ports from the public Internet
    schema_registry bool
    Enable Schema-Registry service. Default: false.
    schema_registry_config KafkaKafkaUserConfigSchemaRegistryConfig
    Schema Registry configuration
    service_log bool
    Store logs for the service so that they are available in the HTTP API and console.
    static_ips bool
    Use static public IP addresses.
    tiered_storage KafkaKafkaUserConfigTieredStorage
    Tiered storage configuration
    additionalBackupRegions String
    Additional Cloud Regions for Backup Replication.

    Deprecated: This property is deprecated.

    aivenKafkaTopicMessages Boolean
    Allow access to read Kafka topic messages in the Aiven Console and REST API.
    customDomain String
    Serve the web frontend using a custom CNAME pointing to the Aiven DNS name. Example: grafana.example.org.
    ipFilterObjects List<Property Map>
    Allow incoming connections from CIDR address block, e.g. 10.20.0.0/16
    ipFilterStrings List<String>
    Allow incoming connections from CIDR address block, e.g. 10.20.0.0/16.
    ipFilters List<String>
    Allow incoming connections from CIDR address block, e.g. 10.20.0.0/16.

    Deprecated: Use ip_filter_string instead.

    kafka Property Map
    Kafka broker configuration values
    kafkaAuthenticationMethods Property Map
    Kafka authentication methods
    kafkaConnect Boolean
    Enable Kafka Connect service. Default: false.
    kafkaConnectConfig Property Map
    Kafka Connect configuration values
    kafkaConnectSecretProviders List<Property Map>
    kafkaRest Boolean
    Enable Kafka-REST service. Default: false.
    kafkaRestAuthorization Boolean
    Enable authorization in Kafka-REST service.
    kafkaRestConfig Property Map
    Kafka REST configuration
    kafkaVersion String
    Enum: 3.1, 3.2, 3.3, 3.4, 3.5, 3.6, 3.7, and newer. Kafka major version.
    letsencryptSaslPrivatelink Boolean
    Use Letsencrypt CA for Kafka SASL via Privatelink.
    privateAccess Property Map
    Allow access to selected service ports from private networks
    privatelinkAccess Property Map
    Allow access to selected service components through Privatelink
    publicAccess Property Map
    Allow access to selected service ports from the public Internet
    schemaRegistry Boolean
    Enable Schema-Registry service. Default: false.
    schemaRegistryConfig Property Map
    Schema Registry configuration
    serviceLog Boolean
    Store logs for the service so that they are available in the HTTP API and console.
    staticIps Boolean
    Use static public IP addresses.
    tieredStorage Property Map
    Tiered storage configuration
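
    A hedged sketch of a kafkaUserConfig that combines several of the toggles above (service logs, console topic message access, public Kafka REST access and tiered storage). The project and plan values are placeholders, and the tieredStorage "enabled" field name is assumed from the provider schema.

    import * as aiven from "@pulumi/aiven";

    const configuredKafka = new aiven.Kafka("configured_kafka", {
        project: "my-project",                  // placeholder project name
        cloudName: "google-europe-west1",
        plan: "business-4",
        serviceName: "configured-kafka",
        kafkaUserConfig: {
            kafkaVersion: "3.7",
            kafkaRest: true,
            schemaRegistry: true,
            serviceLog: true,                   // keep service logs available in the HTTP API and console
            aivenKafkaTopicMessages: true,      // allow reading topic messages in the Aiven Console
            publicAccess: {
                kafkaRest: true,                // expose only Kafka REST publicly
            },
            tieredStorage: {
                enabled: true,                  // assumed field name for enabling tiered storage
            },
        },
    });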

    KafkaKafkaUserConfigIpFilterObject, KafkaKafkaUserConfigIpFilterObjectArgs

    Network string
    CIDR address block. Example: 10.20.0.0/16.
    Description string
    Description for IP filter list entry. Example: Production service IP range.
    Network string
    CIDR address block. Example: 10.20.0.0/16.
    Description string
    Description for IP filter list entry. Example: Production service IP range.
    network String
    CIDR address block. Example: 10.20.0.0/16.
    description String
    Description for IP filter list entry. Example: Production service IP range.
    network string
    CIDR address block. Example: 10.20.0.0/16.
    description string
    Description for IP filter list entry. Example: Production service IP range.
    network str
    CIDR address block. Example: 10.20.0.0/16.
    description str
    Description for IP filter list entry. Example: Production service IP range.
    network String
    CIDR address block. Example: 10.20.0.0/16.
    description String
    Description for IP filter list entry. Example: Production service IP range.
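
    A hedged sketch restricting incoming connections with ipFilterObjects, using the network and description fields documented above. The CIDR blocks and descriptions are placeholders.

    import * as aiven from "@pulumi/aiven";

    const restrictedKafka = new aiven.Kafka("restricted_kafka", {
        project: "my-project",                  // placeholder project name
        cloudName: "google-europe-west1",
        plan: "business-4",
        serviceName: "restricted-kafka",
        kafkaUserConfig: {
            ipFilterObjects: [
                { network: "10.20.0.0/16", description: "Production service IP range" },
                { network: "192.168.1.0/24", description: "Office VPN" },
            ],
        },
    });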

    KafkaKafkaUserConfigKafka, KafkaKafkaUserConfigKafkaArgs

    AutoCreateTopicsEnable bool
    Enable auto creation of topics.
    CompressionType string
    Enum: gzip, snappy, lz4, zstd, uncompressed, producer. Specify the final compression type for a given topic. This configuration accepts the standard compression codecs (gzip, snappy, lz4, zstd). It additionally accepts uncompressed which is equivalent to no compression; and producer which means retain the original compression codec set by the producer.
    ConnectionsMaxIdleMs int
    Idle connections timeout: the server socket processor threads close the connections that idle for longer than this. Example: 540000.
    DefaultReplicationFactor int
    Replication factor for autocreated topics.
    GroupInitialRebalanceDelayMs int
    The amount of time, in milliseconds, the group coordinator will wait for more consumers to join a new group before performing the first rebalance. A longer delay means potentially fewer rebalances, but increases the time until processing begins. The default value for this is 3 seconds. During development and testing it might be desirable to set this to 0 in order to not delay test execution time. Example: 3000.
    GroupMaxSessionTimeoutMs int
    The maximum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. Example: 1800000.
    GroupMinSessionTimeoutMs int
    The minimum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. Example: 6000.
    LogCleanerDeleteRetentionMs int
    How long are delete records retained? Example: 86400000.
    LogCleanerMaxCompactionLagMs int
    The maximum amount of time a message will remain uncompacted. Only applicable for logs that are being compacted.
    LogCleanerMinCleanableRatio double
    Controls log compactor frequency. Larger value means more frequent compactions but also more space wasted for logs. Consider setting log.cleaner.max.compaction.lag.ms to enforce compactions sooner, instead of setting a very high value for this option. Example: 0.5.
    LogCleanerMinCompactionLagMs int
    The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted.
    LogCleanupPolicy string
    Enum: delete, compact, compact,delete. The default cleanup policy for segments beyond the retention window.
    LogFlushIntervalMessages int
    The number of messages accumulated on a log partition before messages are flushed to disk. Example: 9223372036854775807.
    LogFlushIntervalMs int
    The maximum time in ms that a message in any topic is kept in memory before flushed to disk. If not set, the value in log.flush.scheduler.interval.ms is used.
    LogIndexIntervalBytes int
    The interval with which Kafka adds an entry to the offset index. Example: 4096.
    LogIndexSizeMaxBytes int
    The maximum size in bytes of the offset index. Example: 10485760.
    LogLocalRetentionBytes int
    The maximum size local log segments can grow to for a partition before they become eligible for deletion. If set to -2, the value of log.retention.bytes is used. The effective value should always be less than or equal to the log.retention.bytes value.
    LogLocalRetentionMs int
    The number of milliseconds to keep local log segments before they become eligible for deletion. If set to -2, the value of log.retention.ms is used. The effective value should always be less than or equal to the log.retention.ms value.
    LogMessageDownconversionEnable bool
    This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests.
    LogMessageTimestampDifferenceMaxMs int
    The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message.
    LogMessageTimestampType string
    Enum: CreateTime, LogAppendTime. Define whether the timestamp in the message is message create time or log append time.
    LogPreallocate bool
    Whether to preallocate the file when creating a new segment.
    LogRetentionBytes int
    The maximum size of the log before deleting messages.
    LogRetentionHours int
    The number of hours to keep a log file before deleting it.
    LogRetentionMs int
    The number of milliseconds to keep a log file before deleting it. If not set, the value in log.retention.minutes is used. If set to -1, no time limit is applied.
    LogRollJitterMs int
    The maximum jitter to subtract from logRollTimeMillis (in milliseconds). If not set, the value in log.roll.jitter.hours is used.
    LogRollMs int
    The maximum time before a new log segment is rolled out (in milliseconds).
    LogSegmentBytes int
    The maximum size of a single log file.
    LogSegmentDeleteDelayMs int
    The amount of time to wait before deleting a file from the filesystem. Example: 60000.
    MaxConnectionsPerIp int
    The maximum number of connections allowed from each ip address (defaults to 2147483647).
    MaxIncrementalFetchSessionCacheSlots int
    The maximum number of incremental fetch sessions that the broker will maintain. Example: 1000.
    MessageMaxBytes int
    The maximum size of message that the server can receive. Example: 1048588.
    MinInsyncReplicas int
    When a producer sets acks to all (or -1), min.insync.replicas specifies the minimum number of replicas that must acknowledge a write for the write to be considered successful. Example: 1.
    NumPartitions int
    Number of partitions for autocreated topics.
    OffsetsRetentionMinutes int
    Log retention window in minutes for offsets topic. Example: 10080.
    ProducerPurgatoryPurgeIntervalRequests int
    The purge interval (in number of requests) of the producer request purgatory (defaults to 1000).
    ReplicaFetchMaxBytes int
    The number of bytes of messages to attempt to fetch for each partition (defaults to 1048576). This is not an absolute maximum; if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made.
    ReplicaFetchResponseMaxBytes int
    Maximum bytes expected for the entire fetch response (defaults to 10485760). Records are fetched in batches, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made. As such, this is not an absolute maximum.
    SaslOauthbearerExpectedAudience string
    The (optional) comma-delimited setting for the broker to use to verify that the JWT was issued for one of the expected audiences.
    SaslOauthbearerExpectedIssuer string
    Optional setting for the broker to use to verify that the JWT was created by the expected issuer.
    SaslOauthbearerJwksEndpointUrl string
    OIDC JWKS endpoint URL. By setting this the SASL SSL OAuth2/OIDC authentication is enabled. See also other options for SASL OAuth2/OIDC.
    SaslOauthbearerSubClaimName string
    Name of the scope from which to extract the subject claim from the JWT. Defaults to sub.
    SocketRequestMaxBytes int
    The maximum number of bytes in a socket request (defaults to 104857600).
    TransactionPartitionVerificationEnable bool
    Enable verification that checks that the partition has been added to the transaction before writing transactional records to the partition.
    TransactionRemoveExpiredTransactionCleanupIntervalMs int
    The interval at which to remove transactions that have expired due to transactional.id.expiration.ms passing (defaults to 3600000 (1 hour)).
    TransactionStateLogSegmentBytes int
    The transaction topic segment bytes should be kept relatively small in order to facilitate faster log compaction and cache loads (defaults to 104857600 (100 mebibytes)).
    AutoCreateTopicsEnable bool
    Enable auto creation of topics.
    CompressionType string
    Enum: gzip, snappy, lz4, zstd, uncompressed, producer. Specify the final compression type for a given topic. This configuration accepts the standard compression codecs (gzip, snappy, lz4, zstd). It additionally accepts uncompressed which is equivalent to no compression; and producer which means retain the original compression codec set by the producer.
    ConnectionsMaxIdleMs int
    Idle connections timeout: the server socket processor threads close the connections that idle for longer than this. Example: 540000.
    DefaultReplicationFactor int
    Replication factor for autocreated topics.
    GroupInitialRebalanceDelayMs int
    The amount of time, in milliseconds, the group coordinator will wait for more consumers to join a new group before performing the first rebalance. A longer delay means potentially fewer rebalances, but increases the time until processing begins. The default value for this is 3 seconds. During development and testing it might be desirable to set this to 0 in order to not delay test execution time. Example: 3000.
    GroupMaxSessionTimeoutMs int
    The maximum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. Example: 1800000.
    GroupMinSessionTimeoutMs int
    The minimum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. Example: 6000.
    LogCleanerDeleteRetentionMs int
    How long are delete records retained? Example: 86400000.
    LogCleanerMaxCompactionLagMs int
    The maximum amount of time a message will remain uncompacted. Only applicable for logs that are being compacted.
    LogCleanerMinCleanableRatio float64
    Controls log compactor frequency. Larger value means more frequent compactions but also more space wasted for logs. Consider setting log.cleaner.max.compaction.lag.ms to enforce compactions sooner, instead of setting a very high value for this option. Example: 0.5.
    LogCleanerMinCompactionLagMs int
    The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted.
    LogCleanupPolicy string
    Enum: delete, compact, compact,delete. The default cleanup policy for segments beyond the retention window.
    LogFlushIntervalMessages int
    The number of messages accumulated on a log partition before messages are flushed to disk. Example: 9223372036854775807.
    LogFlushIntervalMs int
    The maximum time in ms that a message in any topic is kept in memory before flushed to disk. If not set, the value in log.flush.scheduler.interval.ms is used.
    LogIndexIntervalBytes int
    The interval with which Kafka adds an entry to the offset index. Example: 4096.
    LogIndexSizeMaxBytes int
    The maximum size in bytes of the offset index. Example: 10485760.
    LogLocalRetentionBytes int
    The maximum size local log segments can grow to for a partition before they become eligible for deletion. If set to -2, the value of log.retention.bytes is used. The effective value should always be less than or equal to the log.retention.bytes value.
    LogLocalRetentionMs int
    The number of milliseconds to keep local log segments before they become eligible for deletion. If set to -2, the value of log.retention.ms is used. The effective value should always be less than or equal to the log.retention.ms value.
    LogMessageDownconversionEnable bool
    This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests.
    LogMessageTimestampDifferenceMaxMs int
    The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message.
    LogMessageTimestampType string
    Enum: CreateTime, LogAppendTime. Define whether the timestamp in the message is message create time or log append time.
    LogPreallocate bool
    Whether to preallocate the file when creating a new segment.
    LogRetentionBytes int
    The maximum size of the log before deleting messages.
    LogRetentionHours int
    The number of hours to keep a log file before deleting it.
    LogRetentionMs int
    The number of milliseconds to keep a log file before deleting it. If not set, the value in log.retention.minutes is used. If set to -1, no time limit is applied.
    LogRollJitterMs int
    The maximum jitter to subtract from logRollTimeMillis (in milliseconds). If not set, the value in log.roll.jitter.hours is used.
    LogRollMs int
    The maximum time before a new log segment is rolled out (in milliseconds).
    LogSegmentBytes int
    The maximum size of a single log file.
    LogSegmentDeleteDelayMs int
    The amount of time to wait before deleting a file from the filesystem. Example: 60000.
    MaxConnectionsPerIp int
    The maximum number of connections allowed from each ip address (defaults to 2147483647).
    MaxIncrementalFetchSessionCacheSlots int
    The maximum number of incremental fetch sessions that the broker will maintain. Example: 1000.
    MessageMaxBytes int
    The maximum size of message that the server can receive. Example: 1048588.
    MinInsyncReplicas int
    When a producer sets acks to all (or -1), min.insync.replicas specifies the minimum number of replicas that must acknowledge a write for the write to be considered successful. Example: 1.
    NumPartitions int
    Number of partitions for autocreated topics.
    OffsetsRetentionMinutes int
    Log retention window in minutes for offsets topic. Example: 10080.
    ProducerPurgatoryPurgeIntervalRequests int
    The purge interval (in number of requests) of the producer request purgatory (defaults to 1000).
    ReplicaFetchMaxBytes int
    The number of bytes of messages to attempt to fetch for each partition (defaults to 1048576). This is not an absolute maximum; if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made.
    ReplicaFetchResponseMaxBytes int
    Maximum bytes expected for the entire fetch response (defaults to 10485760). Records are fetched in batches, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made. As such, this is not an absolute maximum.
    SaslOauthbearerExpectedAudience string
    The (optional) comma-delimited setting for the broker to use to verify that the JWT was issued for one of the expected audiences.
    SaslOauthbearerExpectedIssuer string
    Optional setting for the broker to use to verify that the JWT was created by the expected issuer.
    SaslOauthbearerJwksEndpointUrl string
    OIDC JWKS endpoint URL. By setting this the SASL SSL OAuth2/OIDC authentication is enabled. See also other options for SASL OAuth2/OIDC.
    SaslOauthbearerSubClaimName string
    Name of the scope from which to extract the subject claim from the JWT. Defaults to sub.
    SocketRequestMaxBytes int
    The maximum number of bytes in a socket request (defaults to 104857600).
    TransactionPartitionVerificationEnable bool
    Enable verification that checks that the partition has been added to the transaction before writing transactional records to the partition.
    TransactionRemoveExpiredTransactionCleanupIntervalMs int
    The interval at which to remove transactions that have expired due to transactional.id.expiration.ms passing (defaults to 3600000 (1 hour)).
    TransactionStateLogSegmentBytes int
    The transaction topic segment bytes should be kept relatively small in order to facilitate faster log compaction and cache loads (defaults to 104857600 (100 mebibytes)).
    autoCreateTopicsEnable Boolean
    Enable auto creation of topics.
    compressionType String
    Enum: gzip, snappy, lz4, zstd, uncompressed, producer. Specify the final compression type for a given topic. This configuration accepts the standard compression codecs (gzip, snappy, lz4, zstd). It additionally accepts uncompressed which is equivalent to no compression; and producer which means retain the original compression codec set by the producer.
    connectionsMaxIdleMs Integer
    Idle connections timeout: the server socket processor threads close the connections that idle for longer than this. Example: 540000.
    defaultReplicationFactor Integer
    Replication factor for autocreated topics.
    groupInitialRebalanceDelayMs Integer
    The amount of time, in milliseconds, the group coordinator will wait for more consumers to join a new group before performing the first rebalance. A longer delay means potentially fewer rebalances, but increases the time until processing begins. The default value for this is 3 seconds. During development and testing it might be desirable to set this to 0 in order to not delay test execution time. Example: 3000.
    groupMaxSessionTimeoutMs Integer
    The maximum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. Example: 1800000.
    groupMinSessionTimeoutMs Integer
    The minimum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. Example: 6000.
    logCleanerDeleteRetentionMs Integer
    How long are delete records retained? Example: 86400000.
    logCleanerMaxCompactionLagMs Integer
    The maximum amount of time a message will remain uncompacted. Only applicable for logs that are being compacted.
    logCleanerMinCleanableRatio Double
    Controls log compactor frequency. Larger value means more frequent compactions but also more space wasted for logs. Consider setting log.cleaner.max.compaction.lag.ms to enforce compactions sooner, instead of setting a very high value for this option. Example: 0.5.
    logCleanerMinCompactionLagMs Integer
    The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted.
    logCleanupPolicy String
    Enum: delete, compact, compact,delete. The default cleanup policy for segments beyond the retention window.
    logFlushIntervalMessages Integer
    The number of messages accumulated on a log partition before messages are flushed to disk. Example: 9223372036854775807.
    logFlushIntervalMs Integer
    The maximum time in ms that a message in any topic is kept in memory before flushed to disk. If not set, the value in log.flush.scheduler.interval.ms is used.
    logIndexIntervalBytes Integer
    The interval with which Kafka adds an entry to the offset index. Example: 4096.
    logIndexSizeMaxBytes Integer
    The maximum size in bytes of the offset index. Example: 10485760.
    logLocalRetentionBytes Integer
    The maximum size local log segments can grow to for a partition before they become eligible for deletion. If set to -2, the value of log.retention.bytes is used. The effective value should always be less than or equal to the log.retention.bytes value.
    logLocalRetentionMs Integer
    The number of milliseconds to keep local log segments before they become eligible for deletion. If set to -2, the value of log.retention.ms is used. The effective value should always be less than or equal to the log.retention.ms value.
    logMessageDownconversionEnable Boolean
    This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests.
    logMessageTimestampDifferenceMaxMs Integer
    The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message.
    logMessageTimestampType String
    Enum: CreateTime, LogAppendTime. Define whether the timestamp in the message is message create time or log append time.
    logPreallocate Boolean
    Whether to preallocate the file when creating a new segment.
    logRetentionBytes Integer
    The maximum size of the log before deleting messages.
    logRetentionHours Integer
    The number of hours to keep a log file before deleting it.
    logRetentionMs Integer
    The number of milliseconds to keep a log file before deleting it. If not set, the value in log.retention.minutes is used. If set to -1, no time limit is applied.
    logRollJitterMs Integer
    The maximum jitter to subtract from logRollTimeMillis (in milliseconds). If not set, the value in log.roll.jitter.hours is used.
    logRollMs Integer
    The maximum time before a new log segment is rolled out (in milliseconds).
    logSegmentBytes Integer
    The maximum size of a single log file.
    logSegmentDeleteDelayMs Integer
    The amount of time to wait before deleting a file from the filesystem. Example: 60000.
    maxConnectionsPerIp Integer
    The maximum number of connections allowed from each ip address (defaults to 2147483647).
    maxIncrementalFetchSessionCacheSlots Integer
    The maximum number of incremental fetch sessions that the broker will maintain. Example: 1000.
    messageMaxBytes Integer
    The maximum size of message that the server can receive. Example: 1048588.
    minInsyncReplicas Integer
    When a producer sets acks to all (or -1), min.insync.replicas specifies the minimum number of replicas that must acknowledge a write for the write to be considered successful. Example: 1.
    numPartitions Integer
    Number of partitions for autocreated topics.
    offsetsRetentionMinutes Integer
    Log retention window in minutes for offsets topic. Example: 10080.
    producerPurgatoryPurgeIntervalRequests Integer
    The purge interval (in number of requests) of the producer request purgatory (defaults to 1000).
    replicaFetchMaxBytes Integer
    The number of bytes of messages to attempt to fetch for each partition (defaults to 1048576). This is not an absolute maximum; if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made.
    replicaFetchResponseMaxBytes Integer
    Maximum bytes expected for the entire fetch response (defaults to 10485760). Records are fetched in batches, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made. As such, this is not an absolute maximum.
    saslOauthbearerExpectedAudience String
    The (optional) comma-delimited setting for the broker to use to verify that the JWT was issued for one of the expected audiences.
    saslOauthbearerExpectedIssuer String
    Optional setting for the broker to use to verify that the JWT was created by the expected issuer.
    saslOauthbearerJwksEndpointUrl String
    OIDC JWKS endpoint URL. Setting this enables SASL SSL OAuth2/OIDC authentication. See also other options for SASL OAuth2/OIDC.
    saslOauthbearerSubClaimName String
    Name of the scope from which to extract the subject claim from the JWT. Defaults to sub.
    socketRequestMaxBytes Integer
    The maximum number of bytes in a socket request (defaults to 104857600).
    transactionPartitionVerificationEnable Boolean
    Enable verification that checks that the partition has been added to the transaction before writing transactional records to the partition.
    transactionRemoveExpiredTransactionCleanupIntervalMs Integer
    The interval at which to remove transactions that have expired due to transactional.id.expiration.ms passing (defaults to 3600000 (1 hour)).
    transactionStateLogSegmentBytes Integer
    The transaction topic segment bytes should be kept relatively small in order to facilitate faster log compaction and cache loads (defaults to 104857600 (100 mebibytes)).
    autoCreateTopicsEnable boolean
    Enable auto creation of topics.
    compressionType string
    Enum: gzip, snappy, lz4, zstd, uncompressed, producer. Specify the final compression type for a given topic. This configuration accepts the standard compression codecs (gzip, snappy, lz4, zstd). It additionally accepts uncompressed which is equivalent to no compression; and producer which means retain the original compression codec set by the producer.
    connectionsMaxIdleMs number
    Idle connections timeout: the server socket processor threads close the connections that idle for longer than this. Example: 540000.
    defaultReplicationFactor number
    Replication factor for autocreated topics.
    groupInitialRebalanceDelayMs number
    The amount of time, in milliseconds, the group coordinator will wait for more consumers to join a new group before performing the first rebalance. A longer delay means potentially fewer rebalances, but increases the time until processing begins. The default value for this is 3 seconds. During development and testing it might be desirable to set this to 0 in order to not delay test execution time. Example: 3000.
    groupMaxSessionTimeoutMs number
    The maximum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. Example: 1800000.
    groupMinSessionTimeoutMs number
    The minimum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. Example: 6000.
    logCleanerDeleteRetentionMs number
    The amount of time delete records (tombstones) are retained. Example: 86400000.
    logCleanerMaxCompactionLagMs number
    The maximum amount of time a message will remain uncompacted. Only applicable for logs that are being compacted.
    logCleanerMinCleanableRatio number
    Controls log compactor frequency. Larger value means more frequent compactions but also more space wasted for logs. Consider setting log.cleaner.max.compaction.lag.ms to enforce compactions sooner, instead of setting a very high value for this option. Example: 0.5.
    logCleanerMinCompactionLagMs number
    The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted.
    logCleanupPolicy string
    Enum: delete, compact, compact,delete. The default cleanup policy for segments beyond the retention window.
    logFlushIntervalMessages number
    The number of messages accumulated on a log partition before messages are flushed to disk. Example: 9223372036854775807.
    logFlushIntervalMs number
    The maximum time in ms that a message in any topic is kept in memory before being flushed to disk. If not set, the value in log.flush.scheduler.interval.ms is used.
    logIndexIntervalBytes number
    The interval with which Kafka adds an entry to the offset index. Example: 4096.
    logIndexSizeMaxBytes number
    The maximum size in bytes of the offset index. Example: 10485760.
    logLocalRetentionBytes number
    The maximum size of local log segments that can grow for a partition before it gets eligible for deletion. If set to -2, the value of log.retention.bytes is used. The effective value should always be less than or equal to log.retention.bytes value.
    logLocalRetentionMs number
    The number of milliseconds to keep the local log segments before it gets eligible for deletion. If set to -2, the value of log.retention.ms is used. The effective value should always be less than or equal to log.retention.ms value.
    logMessageDownconversionEnable boolean
    This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests.
    logMessageTimestampDifferenceMaxMs number
    The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message.
    logMessageTimestampType string
    Enum: CreateTime, LogAppendTime. Define whether the timestamp in the message is message create time or log append time.
    logPreallocate boolean
    Whether to preallocate the file when creating a new segment.
    logRetentionBytes number
    The maximum size of the log before deleting messages.
    logRetentionHours number
    The number of hours to keep a log file before deleting it.
    logRetentionMs number
    The number of milliseconds to keep a log file before deleting it. If not set, the value in log.retention.minutes is used. If set to -1, no time limit is applied.
    logRollJitterMs number
    The maximum jitter to subtract from logRollTimeMillis (in milliseconds). If not set, the value in log.roll.jitter.hours is used.
    logRollMs number
    The maximum time before a new log segment is rolled out (in milliseconds).
    logSegmentBytes number
    The maximum size of a single log file.
    logSegmentDeleteDelayMs number
    The amount of time to wait before deleting a file from the filesystem. Example: 60000.
    maxConnectionsPerIp number
    The maximum number of connections allowed from each IP address (defaults to 2147483647).
    maxIncrementalFetchSessionCacheSlots number
    The maximum number of incremental fetch sessions that the broker will maintain. Example: 1000.
    messageMaxBytes number
    The maximum size of message that the server can receive. Example: 1048588.
    minInsyncReplicas number
    When a producer sets acks to all (or -1), min.insync.replicas specifies the minimum number of replicas that must acknowledge a write for the write to be considered successful. Example: 1.
    numPartitions number
    Number of partitions for autocreated topics.
    offsetsRetentionMinutes number
    Log retention window in minutes for offsets topic. Example: 10080.
    producerPurgatoryPurgeIntervalRequests number
    The purge interval (in number of requests) of the producer request purgatory (defaults to 1000).
    replicaFetchMaxBytes number
    The number of bytes of messages to attempt to fetch for each partition (defaults to 1048576). This is not an absolute maximum; if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made.
    replicaFetchResponseMaxBytes number
    Maximum bytes expected for the entire fetch response (defaults to 10485760). Records are fetched in batches, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made. As such, this is not an absolute maximum.
    saslOauthbearerExpectedAudience string
    The (optional) comma-delimited setting for the broker to use to verify that the JWT was issued for one of the expected audiences.
    saslOauthbearerExpectedIssuer string
    Optional setting for the broker to use to verify that the JWT was created by the expected issuer.
    saslOauthbearerJwksEndpointUrl string
    OIDC JWKS endpoint URL. Setting this enables SASL SSL OAuth2/OIDC authentication. See also other options for SASL OAuth2/OIDC.
    saslOauthbearerSubClaimName string
    Name of the scope from which to extract the subject claim from the JWT. Defaults to sub.
    socketRequestMaxBytes number
    The maximum number of bytes in a socket request (defaults to 104857600).
    transactionPartitionVerificationEnable boolean
    Enable verification that checks that the partition has been added to the transaction before writing transactional records to the partition.
    transactionRemoveExpiredTransactionCleanupIntervalMs number
    The interval at which to remove transactions that have expired due to transactional.id.expiration.ms passing (defaults to 3600000 (1 hour)).
    transactionStateLogSegmentBytes number
    The transaction topic segment bytes should be kept relatively small in order to facilitate faster log compaction and cache loads (defaults to 104857600 (100 mebibytes)).
    auto_create_topics_enable bool
    Enable auto creation of topics.
    compression_type str
    Enum: gzip, snappy, lz4, zstd, uncompressed, producer. Specify the final compression type for a given topic. This configuration accepts the standard compression codecs (gzip, snappy, lz4, zstd). It additionally accepts uncompressed which is equivalent to no compression; and producer which means retain the original compression codec set by the producer.
    connections_max_idle_ms int
    Idle connections timeout: the server socket processor threads close the connections that idle for longer than this. Example: 540000.
    default_replication_factor int
    Replication factor for autocreated topics.
    group_initial_rebalance_delay_ms int
    The amount of time, in milliseconds, the group coordinator will wait for more consumers to join a new group before performing the first rebalance. A longer delay means potentially fewer rebalances, but increases the time until processing begins. The default value for this is 3 seconds. During development and testing it might be desirable to set this to 0 in order to not delay test execution time. Example: 3000.
    group_max_session_timeout_ms int
    The maximum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. Example: 1800000.
    group_min_session_timeout_ms int
    The minimum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. Example: 6000.
    log_cleaner_delete_retention_ms int
    The amount of time delete records (tombstones) are retained. Example: 86400000.
    log_cleaner_max_compaction_lag_ms int
    The maximum amount of time a message will remain uncompacted. Only applicable for logs that are being compacted.
    log_cleaner_min_cleanable_ratio float
    Controls log compactor frequency. Larger value means more frequent compactions but also more space wasted for logs. Consider setting log.cleaner.max.compaction.lag.ms to enforce compactions sooner, instead of setting a very high value for this option. Example: 0.5.
    log_cleaner_min_compaction_lag_ms int
    The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted.
    log_cleanup_policy str
    Enum: delete, compact, compact,delete. The default cleanup policy for segments beyond the retention window.
    log_flush_interval_messages int
    The number of messages accumulated on a log partition before messages are flushed to disk. Example: 9223372036854775807.
    log_flush_interval_ms int
    The maximum time in ms that a message in any topic is kept in memory before being flushed to disk. If not set, the value in log.flush.scheduler.interval.ms is used.
    log_index_interval_bytes int
    The interval with which Kafka adds an entry to the offset index. Example: 4096.
    log_index_size_max_bytes int
    The maximum size in bytes of the offset index. Example: 10485760.
    log_local_retention_bytes int
    The maximum size of local log segments that can grow for a partition before it gets eligible for deletion. If set to -2, the value of log.retention.bytes is used. The effective value should always be less than or equal to log.retention.bytes value.
    log_local_retention_ms int
    The number of milliseconds to keep the local log segments before it gets eligible for deletion. If set to -2, the value of log.retention.ms is used. The effective value should always be less than or equal to log.retention.ms value.
    log_message_downconversion_enable bool
    This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests.
    log_message_timestamp_difference_max_ms int
    The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message.
    log_message_timestamp_type str
    Enum: CreateTime, LogAppendTime. Define whether the timestamp in the message is message create time or log append time.
    log_preallocate bool
    Whether to preallocate the file when creating a new segment.
    log_retention_bytes int
    The maximum size of the log before deleting messages.
    log_retention_hours int
    The number of hours to keep a log file before deleting it.
    log_retention_ms int
    The number of milliseconds to keep a log file before deleting it. If not set, the value in log.retention.minutes is used. If set to -1, no time limit is applied.
    log_roll_jitter_ms int
    The maximum jitter to subtract from logRollTimeMillis (in milliseconds). If not set, the value in log.roll.jitter.hours is used.
    log_roll_ms int
    The maximum time before a new log segment is rolled out (in milliseconds).
    log_segment_bytes int
    The maximum size of a single log file.
    log_segment_delete_delay_ms int
    The amount of time to wait before deleting a file from the filesystem. Example: 60000.
    max_connections_per_ip int
    The maximum number of connections allowed from each IP address (defaults to 2147483647).
    max_incremental_fetch_session_cache_slots int
    The maximum number of incremental fetch sessions that the broker will maintain. Example: 1000.
    message_max_bytes int
    The maximum size of message that the server can receive. Example: 1048588.
    min_insync_replicas int
    When a producer sets acks to all (or -1), min.insync.replicas specifies the minimum number of replicas that must acknowledge a write for the write to be considered successful. Example: 1.
    num_partitions int
    Number of partitions for autocreated topics.
    offsets_retention_minutes int
    Log retention window in minutes for offsets topic. Example: 10080.
    producer_purgatory_purge_interval_requests int
    The purge interval (in number of requests) of the producer request purgatory (defaults to 1000).
    replica_fetch_max_bytes int
    The number of bytes of messages to attempt to fetch for each partition (defaults to 1048576). This is not an absolute maximum; if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made.
    replica_fetch_response_max_bytes int
    Maximum bytes expected for the entire fetch response (defaults to 10485760). Records are fetched in batches, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made. As such, this is not an absolute maximum.
    sasl_oauthbearer_expected_audience str
    The (optional) comma-delimited setting for the broker to use to verify that the JWT was issued for one of the expected audiences.
    sasl_oauthbearer_expected_issuer str
    Optional setting for the broker to use to verify that the JWT was created by the expected issuer.
    sasl_oauthbearer_jwks_endpoint_url str
    OIDC JWKS endpoint URL. Setting this enables SASL SSL OAuth2/OIDC authentication. See also other options for SASL OAuth2/OIDC.
    sasl_oauthbearer_sub_claim_name str
    Name of the scope from which to extract the subject claim from the JWT. Defaults to sub.
    socket_request_max_bytes int
    The maximum number of bytes in a socket request (defaults to 104857600).
    transaction_partition_verification_enable bool
    Enable verification that checks that the partition has been added to the transaction before writing transactional records to the partition.
    transaction_remove_expired_transaction_cleanup_interval_ms int
    The interval at which to remove transactions that have expired due to transactional.id.expiration.ms passing (defaults to 3600000 (1 hour)).
    transaction_state_log_segment_bytes int
    The transaction topic segment bytes should be kept relatively small in order to facilitate faster log compaction and cache loads (defaults to 104857600 (100 mebibytes)).
    autoCreateTopicsEnable Boolean
    Enable auto creation of topics.
    compressionType String
    Enum: gzip, snappy, lz4, zstd, uncompressed, producer. Specify the final compression type for a given topic. This configuration accepts the standard compression codecs (gzip, snappy, lz4, zstd). It additionally accepts uncompressed which is equivalent to no compression; and producer which means retain the original compression codec set by the producer.
    connectionsMaxIdleMs Number
    Idle connections timeout: the server socket processor threads close the connections that idle for longer than this. Example: 540000.
    defaultReplicationFactor Number
    Replication factor for autocreated topics.
    groupInitialRebalanceDelayMs Number
    The amount of time, in milliseconds, the group coordinator will wait for more consumers to join a new group before performing the first rebalance. A longer delay means potentially fewer rebalances, but increases the time until processing begins. The default value for this is 3 seconds. During development and testing it might be desirable to set this to 0 in order to not delay test execution time. Example: 3000.
    groupMaxSessionTimeoutMs Number
    The maximum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. Example: 1800000.
    groupMinSessionTimeoutMs Number
    The minimum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. Example: 6000.
    logCleanerDeleteRetentionMs Number
    The amount of time delete records (tombstones) are retained. Example: 86400000.
    logCleanerMaxCompactionLagMs Number
    The maximum amount of time a message will remain uncompacted. Only applicable for logs that are being compacted.
    logCleanerMinCleanableRatio Number
    Controls log compactor frequency. Larger value means more frequent compactions but also more space wasted for logs. Consider setting log.cleaner.max.compaction.lag.ms to enforce compactions sooner, instead of setting a very high value for this option. Example: 0.5.
    logCleanerMinCompactionLagMs Number
    The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted.
    logCleanupPolicy String
    Enum: delete, compact, compact,delete. The default cleanup policy for segments beyond the retention window.
    logFlushIntervalMessages Number
    The number of messages accumulated on a log partition before messages are flushed to disk. Example: 9223372036854775807.
    logFlushIntervalMs Number
    The maximum time in ms that a message in any topic is kept in memory before being flushed to disk. If not set, the value in log.flush.scheduler.interval.ms is used.
    logIndexIntervalBytes Number
    The interval with which Kafka adds an entry to the offset index. Example: 4096.
    logIndexSizeMaxBytes Number
    The maximum size in bytes of the offset index. Example: 10485760.
    logLocalRetentionBytes Number
    The maximum size of local log segments that can grow for a partition before it gets eligible for deletion. If set to -2, the value of log.retention.bytes is used. The effective value should always be less than or equal to log.retention.bytes value.
    logLocalRetentionMs Number
    The number of milliseconds to keep the local log segments before it gets eligible for deletion. If set to -2, the value of log.retention.ms is used. The effective value should always be less than or equal to log.retention.ms value.
    logMessageDownconversionEnable Boolean
    This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests.
    logMessageTimestampDifferenceMaxMs Number
    The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message.
    logMessageTimestampType String
    Enum: CreateTime, LogAppendTime. Define whether the timestamp in the message is message create time or log append time.
    logPreallocate Boolean
    Whether to preallocate the file when creating a new segment.
    logRetentionBytes Number
    The maximum size of the log before deleting messages.
    logRetentionHours Number
    The number of hours to keep a log file before deleting it.
    logRetentionMs Number
    The number of milliseconds to keep a log file before deleting it. If not set, the value in log.retention.minutes is used. If set to -1, no time limit is applied.
    logRollJitterMs Number
    The maximum jitter to subtract from logRollTimeMillis (in milliseconds). If not set, the value in log.roll.jitter.hours is used.
    logRollMs Number
    The maximum time before a new log segment is rolled out (in milliseconds).
    logSegmentBytes Number
    The maximum size of a single log file.
    logSegmentDeleteDelayMs Number
    The amount of time to wait before deleting a file from the filesystem. Example: 60000.
    maxConnectionsPerIp Number
    The maximum number of connections allowed from each IP address (defaults to 2147483647).
    maxIncrementalFetchSessionCacheSlots Number
    The maximum number of incremental fetch sessions that the broker will maintain. Example: 1000.
    messageMaxBytes Number
    The maximum size of message that the server can receive. Example: 1048588.
    minInsyncReplicas Number
    When a producer sets acks to all (or -1), min.insync.replicas specifies the minimum number of replicas that must acknowledge a write for the write to be considered successful. Example: 1.
    numPartitions Number
    Number of partitions for autocreated topics.
    offsetsRetentionMinutes Number
    Log retention window in minutes for offsets topic. Example: 10080.
    producerPurgatoryPurgeIntervalRequests Number
    The purge interval (in number of requests) of the producer request purgatory (defaults to 1000).
    replicaFetchMaxBytes Number
    The number of bytes of messages to attempt to fetch for each partition (defaults to 1048576). This is not an absolute maximum; if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made.
    replicaFetchResponseMaxBytes Number
    Maximum bytes expected for the entire fetch response (defaults to 10485760). Records are fetched in batches, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made. As such, this is not an absolute maximum.
    saslOauthbearerExpectedAudience String
    The (optional) comma-delimited setting for the broker to use to verify that the JWT was issued for one of the expected audiences.
    saslOauthbearerExpectedIssuer String
    Optional setting for the broker to use to verify that the JWT was created by the expected issuer.
    saslOauthbearerJwksEndpointUrl String
    OIDC JWKS endpoint URL. Setting this enables SASL SSL OAuth2/OIDC authentication. See also other options for SASL OAuth2/OIDC.
    saslOauthbearerSubClaimName String
    Name of the scope from which to extract the subject claim from the JWT. Defaults to sub.
    socketRequestMaxBytes Number
    The maximum number of bytes in a socket request (defaults to 104857600).
    transactionPartitionVerificationEnable Boolean
    Enable verification that checks that the partition has been added to the transaction before writing transactional records to the partition.
    transactionRemoveExpiredTransactionCleanupIntervalMs Number
    The interval at which to remove transactions that have expired due to transactional.id.expiration.ms passing (defaults to 3600000 (1 hour)).
    transactionStateLogSegmentBytes Number
    The transaction topic segment bytes should be kept relatively small in order to facilitate faster log compaction and cache loads (defaults to 104857600 (100 mebibytes)).
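
    A minimal sketch of how a few of the broker-level options listed above map onto the nested kafka block, in TypeScript. The project, cloud, plan, and service names are placeholders, and the chosen values are illustrative rather than recommendations.

    import * as aiven from "@pulumi/aiven";

    // Placeholder project/cloud/plan/service values; adjust for your environment.
    const tunedKafka = new aiven.Kafka("tuned_kafka", {
        project: "my-project",
        cloudName: "google-europe-west1",
        plan: "business-4",
        serviceName: "tuned-kafka",
        kafkaUserConfig: {
            kafka: {
                logRetentionMs: 604800000,   // keep log files for 7 days
                logSegmentBytes: 268435456,  // roll log segments at 256 MiB
                minInsyncReplicas: 2,        // replicas that must ack when producers use acks=all
                numPartitions: 6,            // default partition count for auto-created topics
            },
        },
    });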

    KafkaKafkaUserConfigKafkaAuthenticationMethods, KafkaKafkaUserConfigKafkaAuthenticationMethodsArgs

    Certificate bool
    Enable certificate/SSL authentication. Default: true.
    Sasl bool
    Enable SASL authentication. Default: false.
    Certificate bool
    Enable certificate/SSL authentication. Default: true.
    Sasl bool
    Enable SASL authentication. Default: false.
    certificate Boolean
    Enable certificate/SSL authentication. Default: true.
    sasl Boolean
    Enable SASL authentication. Default: false.
    certificate boolean
    Enable certificate/SSL authentication. Default: true.
    sasl boolean
    Enable SASL authentication. Default: false.
    certificate bool
    Enable certificate/SSL authentication. Default: true.
    sasl bool
    Enable SASL authentication. Default: false.
    certificate Boolean
    Enable certificate/SSL authentication. Default: true.
    sasl Boolean
    Enable SASL authentication. Default: false.
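
    A minimal sketch of enabling SASL alongside the default certificate authentication via this block, assuming it is set as kafkaAuthenticationMethods under kafkaUserConfig and using placeholder project and service names.

    import * as aiven from "@pulumi/aiven";

    // Placeholder project/service values; the kafkaAuthenticationMethods block is the relevant part.
    const saslKafka = new aiven.Kafka("sasl_kafka", {
        project: "my-project",
        cloudName: "google-europe-west1",
        plan: "business-4",
        serviceName: "sasl-kafka",
        kafkaUserConfig: {
            kafkaAuthenticationMethods: {
                certificate: true, // keep certificate/SSL authentication (default: true)
                sasl: true,        // additionally allow SASL authentication
            },
        },
    });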

    KafkaKafkaUserConfigKafkaConnectConfig, KafkaKafkaUserConfigKafkaConnectConfigArgs

    ConnectorClientConfigOverridePolicy string
    Enum: None, All. Defines what client configurations can be overridden by the connector. Default is None.
    ConsumerAutoOffsetReset string
    Enum: earliest, latest. What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server. Default is earliest.
    ConsumerFetchMaxBytes int
    Records are fetched in batches by the consumer, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that the consumer can make progress. As such, this is not an absolute maximum. Example: 52428800.
    ConsumerIsolationLevel string
    Enum: read_uncommitted, read_committed. Transaction read isolation level. read_uncommitted is the default, but read_committed can be used if consume-exactly-once behavior is desired.
    ConsumerMaxPartitionFetchBytes int
    Records are fetched in batches by the consumer. If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress. Example: 1048576.
    ConsumerMaxPollIntervalMs int
    The maximum delay in milliseconds between invocations of poll() when using consumer group management (defaults to 300000).
    ConsumerMaxPollRecords int
    The maximum number of records returned in a single call to poll() (defaults to 500).
    OffsetFlushIntervalMs int
    The interval at which to try committing offsets for tasks (defaults to 60000).
    OffsetFlushTimeoutMs int
    Maximum number of milliseconds to wait for records to flush and partition offset data to be committed to offset storage before cancelling the process and restoring the offset data to be committed in a future attempt (defaults to 5000).
    ProducerBatchSize int
    This setting gives the upper bound of the batch size to be sent. If there are fewer than this many bytes accumulated for this partition, the producer will linger for the linger.ms time waiting for more records to show up. A batch size of zero will disable batching entirely (defaults to 16384).
    ProducerBufferMemory int
    The total bytes of memory the producer can use to buffer records waiting to be sent to the broker (defaults to 33554432).
    ProducerCompressionType string
    Enum: gzip, snappy, lz4, zstd, none. Specify the default compression type for producers. This configuration accepts the standard compression codecs (gzip, snappy, lz4, zstd). It additionally accepts none which is the default and equivalent to no compression.
    ProducerLingerMs int
    This setting gives the upper bound on the delay for batching: once there is batch.size worth of records for a partition it will be sent immediately regardless of this setting, however if there are fewer than this many bytes accumulated for this partition the producer will linger for the specified time waiting for more records to show up. Defaults to 0.
    ProducerMaxRequestSize int
    This setting will limit the number of record batches the producer will send in a single request to avoid sending huge requests. Example: 1048576.
    ScheduledRebalanceMaxDelayMs int
    The maximum delay that is scheduled in order to wait for the return of one or more departed workers before rebalancing and reassigning their connectors and tasks to the group. During this period the connectors and tasks of the departed workers remain unassigned. Defaults to 5 minutes.
    SessionTimeoutMs int
    The timeout in milliseconds used to detect failures when using Kafka’s group management facilities (defaults to 10000).
    ConnectorClientConfigOverridePolicy string
    Enum: None, All. Defines what client configurations can be overridden by the connector. Default is None.
    ConsumerAutoOffsetReset string
    Enum: earliest, latest. What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server. Default is earliest.
    ConsumerFetchMaxBytes int
    Records are fetched in batches by the consumer, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that the consumer can make progress. As such, this is not an absolute maximum. Example: 52428800.
    ConsumerIsolationLevel string
    Enum: read_uncommitted, read_committed. Transaction read isolation level. read_uncommitted is the default, but read_committed can be used if consume-exactly-once behavior is desired.
    ConsumerMaxPartitionFetchBytes int
    Records are fetched in batches by the consumer. If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress. Example: 1048576.
    ConsumerMaxPollIntervalMs int
    The maximum delay in milliseconds between invocations of poll() when using consumer group management (defaults to 300000).
    ConsumerMaxPollRecords int
    The maximum number of records returned in a single call to poll() (defaults to 500).
    OffsetFlushIntervalMs int
    The interval at which to try committing offsets for tasks (defaults to 60000).
    OffsetFlushTimeoutMs int
    Maximum number of milliseconds to wait for records to flush and partition offset data to be committed to offset storage before cancelling the process and restoring the offset data to be committed in a future attempt (defaults to 5000).
    ProducerBatchSize int
    This setting gives the upper bound of the batch size to be sent. If there are fewer than this many bytes accumulated for this partition, the producer will linger for the linger.ms time waiting for more records to show up. A batch size of zero will disable batching entirely (defaults to 16384).
    ProducerBufferMemory int
    The total bytes of memory the producer can use to buffer records waiting to be sent to the broker (defaults to 33554432).
    ProducerCompressionType string
    Enum: gzip, snappy, lz4, zstd, none. Specify the default compression type for producers. This configuration accepts the standard compression codecs (gzip, snappy, lz4, zstd). It additionally accepts none which is the default and equivalent to no compression.
    ProducerLingerMs int
    This setting gives the upper bound on the delay for batching: once there is batch.size worth of records for a partition it will be sent immediately regardless of this setting, however if there are fewer than this many bytes accumulated for this partition the producer will linger for the specified time waiting for more records to show up. Defaults to 0.
    ProducerMaxRequestSize int
    This setting will limit the number of record batches the producer will send in a single request to avoid sending huge requests. Example: 1048576.
    ScheduledRebalanceMaxDelayMs int
    The maximum delay that is scheduled in order to wait for the return of one or more departed workers before rebalancing and reassigning their connectors and tasks to the group. During this period the connectors and tasks of the departed workers remain unassigned. Defaults to 5 minutes.
    SessionTimeoutMs int
    The timeout in milliseconds used to detect failures when using Kafka’s group management facilities (defaults to 10000).
    connectorClientConfigOverridePolicy String
    Enum: None, All. Defines what client configurations can be overridden by the connector. Default is None.
    consumerAutoOffsetReset String
    Enum: earliest, latest. What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server. Default is earliest.
    consumerFetchMaxBytes Integer
    Records are fetched in batches by the consumer, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that the consumer can make progress. As such, this is not an absolute maximum. Example: 52428800.
    consumerIsolationLevel String
    Enum: read_uncommitted, read_committed. Transaction read isolation level. read_uncommitted is the default, but read_committed can be used if consume-exactly-once behavior is desired.
    consumerMaxPartitionFetchBytes Integer
    Records are fetched in batches by the consumer. If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress. Example: 1048576.
    consumerMaxPollIntervalMs Integer
    The maximum delay in milliseconds between invocations of poll() when using consumer group management (defaults to 300000).
    consumerMaxPollRecords Integer
    The maximum number of records returned in a single call to poll() (defaults to 500).
    offsetFlushIntervalMs Integer
    The interval at which to try committing offsets for tasks (defaults to 60000).
    offsetFlushTimeoutMs Integer
    Maximum number of milliseconds to wait for records to flush and partition offset data to be committed to offset storage before cancelling the process and restoring the offset data to be committed in a future attempt (defaults to 5000).
    producerBatchSize Integer
    This setting gives the upper bound of the batch size to be sent. If there are fewer than this many bytes accumulated for this partition, the producer will linger for the linger.ms time waiting for more records to show up. A batch size of zero will disable batching entirely (defaults to 16384).
    producerBufferMemory Integer
    The total bytes of memory the producer can use to buffer records waiting to be sent to the broker (defaults to 33554432).
    producerCompressionType String
    Enum: gzip, snappy, lz4, zstd, none. Specify the default compression type for producers. This configuration accepts the standard compression codecs (gzip, snappy, lz4, zstd). It additionally accepts none which is the default and equivalent to no compression.
    producerLingerMs Integer
    This setting gives the upper bound on the delay for batching: once there is batch.size worth of records for a partition it will be sent immediately regardless of this setting, however if there are fewer than this many bytes accumulated for this partition the producer will linger for the specified time waiting for more records to show up. Defaults to 0.
    producerMaxRequestSize Integer
    This setting will limit the number of record batches the producer will send in a single request to avoid sending huge requests. Example: 1048576.
    scheduledRebalanceMaxDelayMs Integer
    The maximum delay that is scheduled in order to wait for the return of one or more departed workers before rebalancing and reassigning their connectors and tasks to the group. During this period the connectors and tasks of the departed workers remain unassigned. Defaults to 5 minutes.
    sessionTimeoutMs Integer
    The timeout in milliseconds used to detect failures when using Kafka’s group management facilities (defaults to 10000).
    connectorClientConfigOverridePolicy string
    Enum: None, All. Defines what client configurations can be overridden by the connector. Default is None.
    consumerAutoOffsetReset string
    Enum: earliest, latest. What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server. Default is earliest.
    consumerFetchMaxBytes number
    Records are fetched in batches by the consumer, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that the consumer can make progress. As such, this is not an absolute maximum. Example: 52428800.
    consumerIsolationLevel string
    Enum: read_uncommitted, read_committed. Transaction read isolation level. read_uncommitted is the default, but read_committed can be used if consume-exactly-once behavior is desired.
    consumerMaxPartitionFetchBytes number
    Records are fetched in batches by the consumer. If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress. Example: 1048576.
    consumerMaxPollIntervalMs number
    The maximum delay in milliseconds between invocations of poll() when using consumer group management (defaults to 300000).
    consumerMaxPollRecords number
    The maximum number of records returned in a single call to poll() (defaults to 500).
    offsetFlushIntervalMs number
    The interval at which to try committing offsets for tasks (defaults to 60000).
    offsetFlushTimeoutMs number
    Maximum number of milliseconds to wait for records to flush and partition offset data to be committed to offset storage before cancelling the process and restoring the offset data to be committed in a future attempt (defaults to 5000).
    producerBatchSize number
    This setting gives the upper bound of the batch size to be sent. If there are fewer than this many bytes accumulated for this partition, the producer will linger for the linger.ms time waiting for more records to show up. A batch size of zero will disable batching entirely (defaults to 16384).
    producerBufferMemory number
    The total bytes of memory the producer can use to buffer records waiting to be sent to the broker (defaults to 33554432).
    producerCompressionType string
    Enum: gzip, snappy, lz4, zstd, none. Specify the default compression type for producers. This configuration accepts the standard compression codecs (gzip, snappy, lz4, zstd). It additionally accepts none which is the default and equivalent to no compression.
    producerLingerMs number
    This setting gives the upper bound on the delay for batching: once there is batch.size worth of records for a partition it will be sent immediately regardless of this setting, however if there are fewer than this many bytes accumulated for this partition the producer will linger for the specified time waiting for more records to show up. Defaults to 0.
    producerMaxRequestSize number
    This setting will limit the number of record batches the producer will send in a single request to avoid sending huge requests. Example: 1048576.
    scheduledRebalanceMaxDelayMs number
    The maximum delay that is scheduled in order to wait for the return of one or more departed workers before rebalancing and reassigning their connectors and tasks to the group. During this period the connectors and tasks of the departed workers remain unassigned. Defaults to 5 minutes.
    sessionTimeoutMs number
    The timeout in milliseconds used to detect failures when using Kafka’s group management facilities (defaults to 10000).
    connector_client_config_override_policy str
    Enum: None, All. Defines what client configurations can be overridden by the connector. Default is None.
    consumer_auto_offset_reset str
    Enum: earliest, latest. What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server. Default is earliest.
    consumer_fetch_max_bytes int
    Records are fetched in batches by the consumer, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that the consumer can make progress. As such, this is not an absolute maximum. Example: 52428800.
    consumer_isolation_level str
    Enum: read_uncommitted, read_committed. Transaction read isolation level. read_uncommitted is the default, but read_committed can be used if consume-exactly-once behavior is desired.
    consumer_max_partition_fetch_bytes int
    Records are fetched in batches by the consumer. If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress. Example: 1048576.
    consumer_max_poll_interval_ms int
    The maximum delay in milliseconds between invocations of poll() when using consumer group management (defaults to 300000).
    consumer_max_poll_records int
    The maximum number of records returned in a single call to poll() (defaults to 500).
    offset_flush_interval_ms int
    The interval at which to try committing offsets for tasks (defaults to 60000).
    offset_flush_timeout_ms int
    Maximum number of milliseconds to wait for records to flush and partition offset data to be committed to offset storage before cancelling the process and restoring the offset data to be committed in a future attempt (defaults to 5000).
    producer_batch_size int
    This setting gives the upper bound of the batch size to be sent. If there are fewer than this many bytes accumulated for this partition, the producer will linger for the linger.ms time waiting for more records to show up. A batch size of zero will disable batching entirely (defaults to 16384).
    producer_buffer_memory int
    The total bytes of memory the producer can use to buffer records waiting to be sent to the broker (defaults to 33554432).
    producer_compression_type str
    Enum: gzip, snappy, lz4, zstd, none. Specify the default compression type for producers. This configuration accepts the standard compression codecs (gzip, snappy, lz4, zstd). It additionally accepts none which is the default and equivalent to no compression.
    producer_linger_ms int
    This setting gives the upper bound on the delay for batching: once there is batch.size worth of records for a partition it will be sent immediately regardless of this setting, however if there are fewer than this many bytes accumulated for this partition the producer will linger for the specified time waiting for more records to show up. Defaults to 0.
    producer_max_request_size int
    This setting will limit the number of record batches the producer will send in a single request to avoid sending huge requests. Example: 1048576.
    scheduled_rebalance_max_delay_ms int
    The maximum delay that is scheduled in order to wait for the return of one or more departed workers before rebalancing and reassigning their connectors and tasks to the group. During this period the connectors and tasks of the departed workers remain unassigned. Defaults to 5 minutes.
    session_timeout_ms int
    The timeout in milliseconds used to detect failures when using Kafka’s group management facilities (defaults to 10000).
    connectorClientConfigOverridePolicy String
    Enum: None, All. Defines what client configurations can be overridden by the connector. Default is None.
    consumerAutoOffsetReset String
    Enum: earliest, latest. What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server. Default is earliest.
    consumerFetchMaxBytes Number
    Records are fetched in batches by the consumer, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that the consumer can make progress. As such, this is not an absolute maximum. Example: 52428800.
    consumerIsolationLevel String
    Enum: read_uncommitted, read_committed. Transaction read isolation level. read_uncommitted is the default, but read_committed can be used if consume-exactly-once behavior is desired.
    consumerMaxPartitionFetchBytes Number
    Records are fetched in batches by the consumer. If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress. Example: 1048576.
    consumerMaxPollIntervalMs Number
    The maximum delay in milliseconds between invocations of poll() when using consumer group management (defaults to 300000).
    consumerMaxPollRecords Number
    The maximum number of records returned in a single call to poll() (defaults to 500).
    offsetFlushIntervalMs Number
    The interval at which to try committing offsets for tasks (defaults to 60000).
    offsetFlushTimeoutMs Number
    Maximum number of milliseconds to wait for records to flush and partition offset data to be committed to offset storage before cancelling the process and restoring the offset data to be committed in a future attempt (defaults to 5000).
    producerBatchSize Number
    This setting gives the upper bound of the batch size to be sent. If there are fewer than this many bytes accumulated for this partition, the producer will linger for the linger.ms time waiting for more records to show up. A batch size of zero will disable batching entirely (defaults to 16384).
    producerBufferMemory Number
    The total bytes of memory the producer can use to buffer records waiting to be sent to the broker (defaults to 33554432).
    producerCompressionType String
    Enum: gzip, snappy, lz4, zstd, none. Specify the default compression type for producers. This configuration accepts the standard compression codecs (gzip, snappy, lz4, zstd). It additionally accepts none which is the default and equivalent to no compression.
    producerLingerMs Number
    This setting gives the upper bound on the delay for batching: once there is batch.size worth of records for a partition it will be sent immediately regardless of this setting, however if there are fewer than this many bytes accumulated for this partition the producer will linger for the specified time waiting for more records to show up. Defaults to 0.
    producerMaxRequestSize Number
    This setting will limit the number of record batches the producer will send in a single request to avoid sending huge requests. Example: 1048576.
    scheduledRebalanceMaxDelayMs Number
    The maximum delay that is scheduled in order to wait for the return of one or more departed workers before rebalancing and reassigning their connectors and tasks to the group. During this period the connectors and tasks of the departed workers remain unassigned. Defaults to 5 minutes.
    sessionTimeoutMs Number
    The timeout in milliseconds used to detect failures when using Kafka’s group management facilities (defaults to 10000).
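
    A minimal sketch of tuning the bundled Kafka Connect workers through this block, with placeholder project and service names; the settings shown are illustrative, not recommendations.

    import * as aiven from "@pulumi/aiven";

    // Placeholder project/service values; Connect settings below are illustrative only.
    const connectKafka = new aiven.Kafka("connect_kafka", {
        project: "my-project",
        cloudName: "google-europe-west1",
        plan: "business-4",
        serviceName: "connect-kafka",
        kafkaUserConfig: {
            kafkaConnect: true, // run Kafka Connect alongside the Kafka service
            kafkaConnectConfig: {
                consumerIsolationLevel: "read_committed", // only consume committed records
                consumerMaxPollRecords: 1000,             // raise from the default of 500
                offsetFlushIntervalMs: 30000,             // commit connector offsets every 30 seconds
            },
        },
    });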

    KafkaKafkaUserConfigKafkaConnectSecretProvider, KafkaKafkaUserConfigKafkaConnectSecretProviderArgs

    Name string
    Name of the secret provider. Used to reference secrets in connector config.
    Aws KafkaKafkaUserConfigKafkaConnectSecretProviderAws
    AWS config for Secret Provider
    Vault KafkaKafkaUserConfigKafkaConnectSecretProviderVault
    Vault Config for Secret Provider
    Name string
    Name of the secret provider. Used to reference secrets in connector config.
    Aws KafkaKafkaUserConfigKafkaConnectSecretProviderAws
    AWS config for Secret Provider
    Vault KafkaKafkaUserConfigKafkaConnectSecretProviderVault
    Vault Config for Secret Provider
    name String
    Name of the secret provider. Used to reference secrets in connector config.
    aws KafkaKafkaUserConfigKafkaConnectSecretProviderAws
    AWS config for Secret Provider
    vault KafkaKafkaUserConfigKafkaConnectSecretProviderVault
    Vault Config for Secret Provider
    name string
    Name of the secret provider. Used to reference secrets in connector config.
    aws KafkaKafkaUserConfigKafkaConnectSecretProviderAws
    AWS config for Secret Provider
    vault KafkaKafkaUserConfigKafkaConnectSecretProviderVault
    Vault Config for Secret Provider
    name str
    Name of the secret provider. Used to reference secrets in connector config.
    aws KafkaKafkaUserConfigKafkaConnectSecretProviderAws
    AWS config for Secret Provider
    vault KafkaKafkaUserConfigKafkaConnectSecretProviderVault
    Vault Config for Secret Provider
    name String
    Name of the secret provider. Used to reference secrets in connector config.
    aws Property Map
    AWS config for Secret Provider
    vault Property Map
    Vault Config for Secret Provider
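
    A minimal sketch of declaring a secret provider for Kafka Connect, assuming the user config exposes these entries as a kafkaConnectSecretProviders list (check the inputs above for the exact attribute name). The aws and vault sub-blocks are detailed in the sections that follow; the AWS credentials here are placeholders read from Pulumi config rather than hardcoded.

    import * as pulumi from "@pulumi/pulumi";
    import * as aiven from "@pulumi/aiven";

    const cfg = new pulumi.Config();

    // Placeholder names; kafkaConnectSecretProviders is an assumed attribute name for this block.
    const secretsKafka = new aiven.Kafka("secrets_kafka", {
        project: "my-project",
        cloudName: "google-europe-west1",
        plan: "business-4",
        serviceName: "secrets-kafka",
        kafkaUserConfig: {
            kafkaConnect: true,
            kafkaConnectSecretProviders: [{
                name: "aws-secrets", // referenced from connector configuration
                aws: {
                    authMethod: "credentials",
                    region: "eu-west-1",                          // region holding the secrets
                    accessKey: cfg.require("awsAccessKey"),       // placeholder config key
                    secretKey: cfg.requireSecret("awsSecretKey"), // placeholder config key, kept secret
                },
            }],
        },
    });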

    KafkaKafkaUserConfigKafkaConnectSecretProviderAws, KafkaKafkaUserConfigKafkaConnectSecretProviderAwsArgs

    AuthMethod string
    Enum: credentials. Auth method of the AWS secret provider.
    Region string
    Region used to look up secrets with AWS Secrets Manager.
    AccessKey string
    Access key used to authenticate with AWS.
    SecretKey string
    Secret key used to authenticate with AWS.
    AuthMethod string
    Enum: credentials. Auth method of the AWS secret provider.
    Region string
    Region used to look up secrets with AWS Secrets Manager.
    AccessKey string
    Access key used to authenticate with AWS.
    SecretKey string
    Secret key used to authenticate with AWS.
    authMethod String
    Enum: credentials. Auth method of the AWS secret provider.
    region String
    Region used to look up secrets with AWS Secrets Manager.
    accessKey String
    Access key used to authenticate with AWS.
    secretKey String
    Secret key used to authenticate with AWS.
    authMethod string
    Enum: credentials. Auth method of the AWS secret provider.
    region string
    Region used to look up secrets with AWS Secrets Manager.
    accessKey string
    Access key used to authenticate with AWS.
    secretKey string
    Secret key used to authenticate with AWS.
    auth_method str
    Enum: credentials. Auth method of the AWS secret provider.
    region str
    Region used to look up secrets with AWS Secrets Manager.
    access_key str
    Access key used to authenticate with AWS.
    secret_key str
    Secret key used to authenticate with AWS.
    authMethod String
    Enum: credentials. Auth method of the AWS secret provider.
    region String
    Region used to look up secrets with AWS Secrets Manager.
    accessKey String
    Access key used to authenticate with AWS.
    secretKey String
    Secret key used to authenticate with AWS.

    KafkaKafkaUserConfigKafkaConnectSecretProviderVault, KafkaKafkaUserConfigKafkaConnectSecretProviderVaultArgs

    Address string
    Address of the Vault server.
    AuthMethod string
    Enum: token. Auth method of the vault secret provider.
    EngineVersion int
    Enum: 1, 2, and newer. KV Secrets Engine version of the Vault server instance.
    Token string
    Token used to authenticate with Vault when the auth method is token.
    Address string
    Address of the Vault server.
    AuthMethod string
    Enum: token. Auth method of the vault secret provider.
    EngineVersion int
    Enum: 1, 2, and newer. KV Secrets Engine version of the Vault server instance.
    Token string
    Token used to authenticate with Vault when the auth method is token.
    address String
    Address of the Vault server.
    authMethod String
    Enum: token. Auth method of the vault secret provider.
    engineVersion Integer
    Enum: 1, 2, and newer. KV Secrets Engine version of the Vault server instance.
    token String
    Token used to authenticate with Vault when the auth method is token.
    address string
    Address of the Vault server.
    authMethod string
    Enum: token. Auth method of the vault secret provider.
    engineVersion number
    Enum: 1, 2, and newer. KV Secrets Engine version of the Vault server instance.
    token string
    Token used to authenticate with Vault when the auth method is token.
    address str
    Address of the Vault server.
    auth_method str
    Enum: token. Auth method of the vault secret provider.
    engine_version int
    Enum: 1, 2, and newer. KV Secrets Engine version of the Vault server instance.
    token str
    Token used to authenticate with Vault when the auth method is token.
    address String
    Address of the Vault server.
    authMethod String
    Enum: token. Auth method of the vault secret provider.
    engineVersion Number
    Enum: 1, 2, and newer. KV Secrets Engine version of the Vault server instance.
    token String
    Token used to authenticate with Vault when the auth method is token.
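
    A minimal sketch of the Vault-backed variant of the same secret provider block, again assuming the kafkaConnectSecretProviders attribute name; the Vault address is a placeholder and the token is read from Pulumi config as a secret.

    import * as pulumi from "@pulumi/pulumi";
    import * as aiven from "@pulumi/aiven";

    const cfg = new pulumi.Config();

    // Placeholder Vault address; kafkaConnectSecretProviders is an assumed attribute name for this block.
    const vaultKafka = new aiven.Kafka("vault_kafka", {
        project: "my-project",
        cloudName: "google-europe-west1",
        plan: "business-4",
        serviceName: "vault-kafka",
        kafkaUserConfig: {
            kafkaConnect: true,
            kafkaConnectSecretProviders: [{
                name: "vault-secrets", // referenced from connector configuration
                vault: {
                    address: "https://vault.example.com:8200",
                    authMethod: "token",
                    engineVersion: 2,                       // KV Secrets Engine v2
                    token: cfg.requireSecret("vaultToken"), // placeholder config key, kept secret
                },
            }],
        },
    });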

    KafkaKafkaUserConfigKafkaRestConfig, KafkaKafkaUserConfigKafkaRestConfigArgs

    ConsumerEnableAutoCommit bool
    If true, the consumer's offset will be periodically committed to Kafka in the background. Default: true.
    ConsumerRequestMaxBytes int
    Maximum number of bytes in unencoded message keys and values returned by a single request. Default: 67108864.
    ConsumerRequestTimeoutMs int
    Enum: 1000, 15000, 30000. The maximum total time to wait for messages for a request if the maximum number of messages has not yet been reached. Default: 1000.
    NameStrategy string
    Enum: topic_name, record_name, topic_record_name. Name strategy to use when selecting subject for storing schemas. Default: topic_name.
    NameStrategyValidation bool
    If true, validate that given schema is registered under expected subject name by the used name strategy when producing messages. Default: true.
    ProducerAcks string
    Enum: all, -1, 0, 1. The number of acknowledgments the producer requires the leader to have received before considering a request complete. If set to all or -1, the leader will wait for the full set of in-sync replicas to acknowledge the record. Default: 1.
    ProducerCompressionType string
    Enum: gzip, snappy, lz4, zstd, none. Specify the default compression type for producers. This configuration accepts the standard compression codecs (gzip, snappy, lz4, zstd). It additionally accepts none which is the default and equivalent to no compression.
    ProducerLingerMs int
    Wait for up to the given delay to allow batching records together. Default: 0.
    ProducerMaxRequestSize int
    The maximum size of a request in bytes. Note that Kafka broker can also cap the record batch size. Default: 1048576.
    SimpleconsumerPoolSizeMax int
    Maximum number of SimpleConsumers that can be instantiated per broker. Default: 25.
    ConsumerEnableAutoCommit bool
    If true, the consumer's offset will be periodically committed to Kafka in the background. Default: true.
    ConsumerRequestMaxBytes int
    Maximum number of bytes in unencoded message keys and values returned by a single request. Default: 67108864.
    ConsumerRequestTimeoutMs int
    Enum: 1000, 15000, 30000. The maximum total time to wait for messages for a request if the maximum number of messages has not yet been reached. Default: 1000.
    NameStrategy string
    Enum: topic_name, record_name, topic_record_name. Name strategy to use when selecting subject for storing schemas. Default: topic_name.
    NameStrategyValidation bool
    If true, validate that the given schema is registered under the subject name expected by the configured name strategy when producing messages. Default: true.
    ProducerAcks string
    Enum: all, -1, 0, 1. The number of acknowledgments the producer requires the leader to have received before considering a request complete. If set to all or -1, the leader will wait for the full set of in-sync replicas to acknowledge the record. Default: 1.
    ProducerCompressionType string
    Enum: gzip, snappy, lz4, zstd, none. Specify the default compression type for producers. This configuration accepts the standard compression codecs (gzip, snappy, lz4, zstd). It additionally accepts none which is the default and equivalent to no compression.
    ProducerLingerMs int
    Wait for up to the given delay to allow batching records together. Default: 0.
    ProducerMaxRequestSize int
    The maximum size of a request in bytes. Note that Kafka broker can also cap the record batch size. Default: 1048576.
    SimpleconsumerPoolSizeMax int
    Maximum number of SimpleConsumers that can be instantiated per broker. Default: 25.
    consumerEnableAutoCommit Boolean
    If true the consumer's offset will be periodically committed to Kafka in the background. Default: true.
    consumerRequestMaxBytes Integer
    Maximum number of bytes in unencoded message keys and values returned by a single request. Default: 67108864.
    consumerRequestTimeoutMs Integer
    Enum: 1000, 15000, 30000. The maximum total time to wait for messages for a request if the maximum number of messages has not yet been reached. Default: 1000.
    nameStrategy String
    Enum: topic_name, record_name, topic_record_name. Name strategy to use when selecting subject for storing schemas. Default: topic_name.
    nameStrategyValidation Boolean
    If true, validate that the given schema is registered under the subject name expected by the configured name strategy when producing messages. Default: true.
    producerAcks String
    Enum: all, -1, 0, 1. The number of acknowledgments the producer requires the leader to have received before considering a request complete. If set to all or -1, the leader will wait for the full set of in-sync replicas to acknowledge the record. Default: 1.
    producerCompressionType String
    Enum: gzip, snappy, lz4, zstd, none. Specify the default compression type for producers. This configuration accepts the standard compression codecs (gzip, snappy, lz4, zstd). It additionally accepts none which is the default and equivalent to no compression.
    producerLingerMs Integer
    Wait for up to the given delay to allow batching records together. Default: 0.
    producerMaxRequestSize Integer
    The maximum size of a request in bytes. Note that Kafka broker can also cap the record batch size. Default: 1048576.
    simpleconsumerPoolSizeMax Integer
    Maximum number of SimpleConsumers that can be instantiated per broker. Default: 25.
    consumerEnableAutoCommit boolean
    If true the consumer's offset will be periodically committed to Kafka in the background. Default: true.
    consumerRequestMaxBytes number
    Maximum number of bytes in unencoded message keys and values returned by a single request. Default: 67108864.
    consumerRequestTimeoutMs number
    Enum: 1000, 15000, 30000. The maximum total time to wait for messages for a request if the maximum number of messages has not yet been reached. Default: 1000.
    nameStrategy string
    Enum: topic_name, record_name, topic_record_name. Name strategy to use when selecting subject for storing schemas. Default: topic_name.
    nameStrategyValidation boolean
    If true, validate that the given schema is registered under the subject name expected by the configured name strategy when producing messages. Default: true.
    producerAcks string
    Enum: all, -1, 0, 1. The number of acknowledgments the producer requires the leader to have received before considering a request complete. If set to all or -1, the leader will wait for the full set of in-sync replicas to acknowledge the record. Default: 1.
    producerCompressionType string
    Enum: gzip, snappy, lz4, zstd, none. Specify the default compression type for producers. This configuration accepts the standard compression codecs (gzip, snappy, lz4, zstd). It additionally accepts none which is the default and equivalent to no compression.
    producerLingerMs number
    Wait for up to the given delay to allow batching records together. Default: 0.
    producerMaxRequestSize number
    The maximum size of a request in bytes. Note that Kafka broker can also cap the record batch size. Default: 1048576.
    simpleconsumerPoolSizeMax number
    Maximum number of SimpleConsumers that can be instantiated per broker. Default: 25.
    consumer_enable_auto_commit bool
    If true the consumer's offset will be periodically committed to Kafka in the background. Default: true.
    consumer_request_max_bytes int
    Maximum number of bytes in unencoded message keys and values returned by a single request. Default: 67108864.
    consumer_request_timeout_ms int
    Enum: 1000, 15000, 30000. The maximum total time to wait for messages for a request if the maximum number of messages has not yet been reached. Default: 1000.
    name_strategy str
    Enum: topic_name, record_name, topic_record_name. Name strategy to use when selecting subject for storing schemas. Default: topic_name.
    name_strategy_validation bool
    If true, validate that the given schema is registered under the subject name expected by the configured name strategy when producing messages. Default: true.
    producer_acks str
    Enum: all, -1, 0, 1. The number of acknowledgments the producer requires the leader to have received before considering a request complete. If set to all or -1, the leader will wait for the full set of in-sync replicas to acknowledge the record. Default: 1.
    producer_compression_type str
    Enum: gzip, snappy, lz4, zstd, none. Specify the default compression type for producers. This configuration accepts the standard compression codecs (gzip, snappy, lz4, zstd). It additionally accepts none which is the default and equivalent to no compression.
    producer_linger_ms int
    Wait for up to the given delay to allow batching records together. Default: 0.
    producer_max_request_size int
    The maximum size of a request in bytes. Note that Kafka broker can also cap the record batch size. Default: 1048576.
    simpleconsumer_pool_size_max int
    Maximum number of SimpleConsumers that can be instantiated per broker. Default: 25.
    consumerEnableAutoCommit Boolean
    If true the consumer's offset will be periodically committed to Kafka in the background. Default: true.
    consumerRequestMaxBytes Number
    Maximum number of bytes in unencoded message keys and values returned by a single request. Default: 67108864.
    consumerRequestTimeoutMs Number
    Enum: 1000, 15000, 30000. The maximum total time to wait for messages for a request if the maximum number of messages has not yet been reached. Default: 1000.
    nameStrategy String
    Enum: topic_name, record_name, topic_record_name. Name strategy to use when selecting subject for storing schemas. Default: topic_name.
    nameStrategyValidation Boolean
    If true, validate that the given schema is registered under the subject name expected by the configured name strategy when producing messages. Default: true.
    producerAcks String
    Enum: all, -1, 0, 1. The number of acknowledgments the producer requires the leader to have received before considering a request complete. If set to all or -1, the leader will wait for the full set of in-sync replicas to acknowledge the record. Default: 1.
    producerCompressionType String
    Enum: gzip, snappy, lz4, zstd, none. Specify the default compression type for producers. This configuration accepts the standard compression codecs (gzip, snappy, lz4, zstd). It additionally accepts none which is the default and equivalent to no compression.
    producerLingerMs Number
    Wait for up to the given delay to allow batching records together. Default: 0.
    producerMaxRequestSize Number
    The maximum size of a request in bytes. Note that Kafka broker can also cap the record batch size. Default: 1048576.
    simpleconsumerPoolSizeMax Number
    Maximum number of SimpleConsumers that can be instantiated per broker. Default: 25.
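
    For example, the REST proxy settings above map onto the kafkaRestConfig block of the service's user config. A minimal sketch; the project name and the specific values are illustrative, not recommendations.

    import * as aiven from "@pulumi/aiven";

    const kafkaWithRest = new aiven.Kafka("kafka_with_rest", {
        project: "my-project",              // hypothetical project
        cloudName: "google-europe-west1",
        plan: "business-4",
        serviceName: "kafka-with-rest",
        kafkaUserConfig: {
            kafkaRest: true,                // enable the REST proxy itself
            kafkaRestConfig: {
                producerAcks: "all",        // wait for all in-sync replicas
                producerCompressionType: "zstd",
                producerLingerMs: 5,        // small batching delay
                consumerRequestTimeoutMs: 30000,
                nameStrategy: "topic_name",
                nameStrategyValidation: true,
            },
        },
    });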

    KafkaKafkaUserConfigPrivateAccess, KafkaKafkaUserConfigPrivateAccessArgs

    Kafka bool
    Allow clients to connect to kafka with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
    KafkaConnect bool
    Allow clients to connect to kafka_connect with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
    KafkaRest bool
    Allow clients to connect to kafka_rest with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
    Prometheus bool
    Allow clients to connect to prometheus with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
    SchemaRegistry bool
    Allow clients to connect to schema_registry with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
    Kafka bool
    Allow clients to connect to kafka with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
    KafkaConnect bool
    Allow clients to connect to kafka_connect with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
    KafkaRest bool
    Allow clients to connect to kafka_rest with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
    Prometheus bool
    Allow clients to connect to prometheus with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
    SchemaRegistry bool
    Allow clients to connect to schema_registry with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
    kafka Boolean
    Allow clients to connect to kafka with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
    kafkaConnect Boolean
    Allow clients to connect to kafka_connect with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
    kafkaRest Boolean
    Allow clients to connect to kafka_rest with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
    prometheus Boolean
    Allow clients to connect to prometheus with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
    schemaRegistry Boolean
    Allow clients to connect to schema_registry with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
    kafka boolean
    Allow clients to connect to kafka with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
    kafkaConnect boolean
    Allow clients to connect to kafka_connect with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
    kafkaRest boolean
    Allow clients to connect to kafka_rest with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
    prometheus boolean
    Allow clients to connect to prometheus with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
    schemaRegistry boolean
    Allow clients to connect to schema_registry with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
    kafka bool
    Allow clients to connect to kafka with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
    kafka_connect bool
    Allow clients to connect to kafka_connect with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
    kafka_rest bool
    Allow clients to connect to kafka_rest with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
    prometheus bool
    Allow clients to connect to prometheus with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
    schema_registry bool
    Allow clients to connect to schema_registry with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
    kafka Boolean
    Allow clients to connect to kafka with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
    kafkaConnect Boolean
    Allow clients to connect to kafka_connect with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
    kafkaRest Boolean
    Allow clients to connect to kafka_rest with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
    prometheus Boolean
    Allow clients to connect to prometheus with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
    schemaRegistry Boolean
    Allow clients to connect to schema_registry with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
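
    As a sketch, these flags live under privateAccess in the user config and only take effect in network locations where private DNS access is available, typically for a service placed in a project VPC. The project name and network CIDR below are assumptions.

    import * as aiven from "@pulumi/aiven";

    // Assumed project VPC the service is deployed into
    const exampleVpc = new aiven.ProjectVpc("example_vpc", {
        project: "my-project",              // hypothetical project
        cloudName: "google-europe-west1",
        networkCidr: "10.0.0.0/24",         // hypothetical CIDR
    });

    const privateKafka = new aiven.Kafka("private_kafka", {
        project: "my-project",
        cloudName: "google-europe-west1",
        plan: "business-4",
        serviceName: "private-kafka",
        projectVpcId: exampleVpc.id,
        kafkaUserConfig: {
            privateAccess: {
                kafka: true,        // private DNS name for the Kafka endpoint
                prometheus: true,   // private DNS name for metrics scraping
            },
        },
    });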

    KafkaKafkaUserConfigPrivatelinkAccess, KafkaKafkaUserConfigPrivatelinkAccessArgs

    Jolokia bool
    Enable jolokia.
    Kafka bool
    Enable kafka.
    KafkaConnect bool
    Enable kafka_connect.
    KafkaRest bool
    Enable kafka_rest.
    Prometheus bool
    Enable prometheus.
    SchemaRegistry bool
    Enable schema_registry.
    Jolokia bool
    Enable jolokia.
    Kafka bool
    Enable kafka.
    KafkaConnect bool
    Enable kafka_connect.
    KafkaRest bool
    Enable kafka_rest.
    Prometheus bool
    Enable prometheus.
    SchemaRegistry bool
    Enable schema_registry.
    jolokia Boolean
    Enable jolokia.
    kafka Boolean
    Enable kafka.
    kafkaConnect Boolean
    Enable kafka_connect.
    kafkaRest Boolean
    Enable kafka_rest.
    prometheus Boolean
    Enable prometheus.
    schemaRegistry Boolean
    Enable schema_registry.
    jolokia boolean
    Enable jolokia.
    kafka boolean
    Enable kafka.
    kafkaConnect boolean
    Enable kafka_connect.
    kafkaRest boolean
    Enable kafka_rest.
    prometheus boolean
    Enable prometheus.
    schemaRegistry boolean
    Enable schema_registry.
    jolokia bool
    Enable jolokia.
    kafka bool
    Enable kafka.
    kafka_connect bool
    Enable kafka_connect.
    kafka_rest bool
    Enable kafka_rest.
    prometheus bool
    Enable prometheus.
    schema_registry bool
    Enable schema_registry.
    jolokia Boolean
    Enable jolokia.
    kafka Boolean
    Enable kafka.
    kafkaConnect Boolean
    Enable kafka_connect.
    kafkaRest Boolean
    Enable kafka_rest.
    prometheus Boolean
    Enable prometheus.
    schemaRegistry Boolean
    Enable schema_registry.
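
    Similarly, the privatelinkAccess flags control which service components are exposed over a private link connection. A minimal sketch, assuming a privatelink-capable cloud and plan; the project and service names are illustrative.

    import * as aiven from "@pulumi/aiven";

    const privatelinkKafka = new aiven.Kafka("privatelink_kafka", {
        project: "my-project",              // hypothetical project
        cloudName: "aws-eu-west-1",
        plan: "business-4",
        serviceName: "privatelink-kafka",
        kafkaUserConfig: {
            privatelinkAccess: {
                kafka: true,            // expose brokers over the private link
                schemaRegistry: true,   // expose Karapace / Schema Registry as well
                prometheus: false,
            },
        },
    });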

    KafkaKafkaUserConfigPublicAccess, KafkaKafkaUserConfigPublicAccessArgs

    Kafka bool
    Allow clients to connect to kafka from the public internet for service nodes that are in a project VPC or another type of private network.
    KafkaConnect bool
    Allow clients to connect to kafka_connect from the public internet for service nodes that are in a project VPC or another type of private network.
    KafkaRest bool
    Allow clients to connect to kafka_rest from the public internet for service nodes that are in a project VPC or another type of private network.
    Prometheus bool
    Allow clients to connect to prometheus from the public internet for service nodes that are in a project VPC or another type of private network.
    SchemaRegistry bool
    Allow clients to connect to schema_registry from the public internet for service nodes that are in a project VPC or another type of private network.
    Kafka bool
    Allow clients to connect to kafka from the public internet for service nodes that are in a project VPC or another type of private network.
    KafkaConnect bool
    Allow clients to connect to kafka_connect from the public internet for service nodes that are in a project VPC or another type of private network.
    KafkaRest bool
    Allow clients to connect to kafka_rest from the public internet for service nodes that are in a project VPC or another type of private network.
    Prometheus bool
    Allow clients to connect to prometheus from the public internet for service nodes that are in a project VPC or another type of private network.
    SchemaRegistry bool
    Allow clients to connect to schema_registry from the public internet for service nodes that are in a project VPC or another type of private network.
    kafka Boolean
    Allow clients to connect to kafka from the public internet for service nodes that are in a project VPC or another type of private network.
    kafkaConnect Boolean
    Allow clients to connect to kafka_connect from the public internet for service nodes that are in a project VPC or another type of private network.
    kafkaRest Boolean
    Allow clients to connect to kafka_rest from the public internet for service nodes that are in a project VPC or another type of private network.
    prometheus Boolean
    Allow clients to connect to prometheus from the public internet for service nodes that are in a project VPC or another type of private network.
    schemaRegistry Boolean
    Allow clients to connect to schema_registry from the public internet for service nodes that are in a project VPC or another type of private network.
    kafka boolean
    Allow clients to connect to kafka from the public internet for service nodes that are in a project VPC or another type of private network.
    kafkaConnect boolean
    Allow clients to connect to kafka_connect from the public internet for service nodes that are in a project VPC or another type of private network.
    kafkaRest boolean
    Allow clients to connect to kafka_rest from the public internet for service nodes that are in a project VPC or another type of private network.
    prometheus boolean
    Allow clients to connect to prometheus from the public internet for service nodes that are in a project VPC or another type of private network.
    schemaRegistry boolean
    Allow clients to connect to schema_registry from the public internet for service nodes that are in a project VPC or another type of private network.
    kafka bool
    Allow clients to connect to kafka from the public internet for service nodes that are in a project VPC or another type of private network.
    kafka_connect bool
    Allow clients to connect to kafka_connect from the public internet for service nodes that are in a project VPC or another type of private network.
    kafka_rest bool
    Allow clients to connect to kafka_rest from the public internet for service nodes that are in a project VPC or another type of private network.
    prometheus bool
    Allow clients to connect to prometheus from the public internet for service nodes that are in a project VPC or another type of private network.
    schema_registry bool
    Allow clients to connect to schema_registry from the public internet for service nodes that are in a project VPC or another type of private network.
    kafka Boolean
    Allow clients to connect to kafka from the public internet for service nodes that are in a project VPC or another type of private network.
    kafkaConnect Boolean
    Allow clients to connect to kafka_connect from the public internet for service nodes that are in a project VPC or another type of private network.
    kafkaRest Boolean
    Allow clients to connect to kafka_rest from the public internet for service nodes that are in a project VPC or another type of private network.
    prometheus Boolean
    Allow clients to connect to prometheus from the public internet for service nodes that are in a project VPC or another type of private network.
    schemaRegistry Boolean
    Allow clients to connect to schema_registry from the public internet for service nodes that are in a project VPC or another type of private network.
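
    These flags matter for services in a project VPC or other private network, where components are otherwise not reachable from the internet. As a sketch, exposing only the Prometheus endpoint publicly while the brokers stay private; names and values are illustrative.

    import * as aiven from "@pulumi/aiven";

    const vpcKafka = new aiven.Kafka("vpc_kafka", {
        project: "my-project",              // hypothetical project
        cloudName: "google-europe-west1",
        plan: "business-4",
        serviceName: "vpc-kafka",
        kafkaUserConfig: {
            publicAccess: {
                kafka: false,       // brokers stay on the private network
                prometheus: true,   // metrics endpoint reachable from the internet
            },
        },
    });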

    KafkaKafkaUserConfigSchemaRegistryConfig, KafkaKafkaUserConfigSchemaRegistryConfigArgs

    LeaderEligibility bool
    If true, Karapace / Schema Registry on the service nodes can participate in leader election. It might be needed to disable this when the schemas topic is replicated to a secondary cluster and Karapace / Schema Registry there must not participate in leader election. Defaults to true.
    TopicName string
    The durable, single-partition topic that acts as the log for the schema data. This topic must be compacted to avoid losing data due to the retention policy. Note that changing this configuration in an existing Schema Registry / Karapace setup makes previous schemas inaccessible, may render data encoded with them unreadable, and puts the schema ID sequence out of order. The switch is only possible while Schema Registry / Karapace is disabled. Defaults to _schemas.
    LeaderEligibility bool
    If true, Karapace / Schema Registry on the service nodes can participate in leader election. It might be needed to disable this when the schemas topic is replicated to a secondary cluster and Karapace / Schema Registry there must not participate in leader election. Defaults to true.
    TopicName string
    The durable, single-partition topic that acts as the log for the schema data. This topic must be compacted to avoid losing data due to the retention policy. Note that changing this configuration in an existing Schema Registry / Karapace setup makes previous schemas inaccessible, may render data encoded with them unreadable, and puts the schema ID sequence out of order. The switch is only possible while Schema Registry / Karapace is disabled. Defaults to _schemas.
    leaderEligibility Boolean
    If true, Karapace / Schema Registry on the service nodes can participate in leader election. It might be needed to disable this when the schemas topic is replicated to a secondary cluster and Karapace / Schema Registry there must not participate in leader election. Defaults to true.
    topicName String
    The durable, single-partition topic that acts as the log for the schema data. This topic must be compacted to avoid losing data due to the retention policy. Note that changing this configuration in an existing Schema Registry / Karapace setup makes previous schemas inaccessible, may render data encoded with them unreadable, and puts the schema ID sequence out of order. The switch is only possible while Schema Registry / Karapace is disabled. Defaults to _schemas.
    leaderEligibility boolean
    If true, Karapace / Schema Registry on the service nodes can participate in leader election. It might be needed to disable this when the schemas topic is replicated to a secondary cluster and Karapace / Schema Registry there must not participate in leader election. Defaults to true.
    topicName string
    The durable, single-partition topic that acts as the log for the schema data. This topic must be compacted to avoid losing data due to the retention policy. Note that changing this configuration in an existing Schema Registry / Karapace setup makes previous schemas inaccessible, may render data encoded with them unreadable, and puts the schema ID sequence out of order. The switch is only possible while Schema Registry / Karapace is disabled. Defaults to _schemas.
    leader_eligibility bool
    If true, Karapace / Schema Registry on the service nodes can participate in leader election. It might be needed to disable this when the schemas topic is replicated to a secondary cluster and Karapace / Schema Registry there must not participate in leader election. Defaults to true.
    topic_name str
    The durable, single-partition topic that acts as the log for the schema data. This topic must be compacted to avoid losing data due to the retention policy. Note that changing this configuration in an existing Schema Registry / Karapace setup makes previous schemas inaccessible, may render data encoded with them unreadable, and puts the schema ID sequence out of order. The switch is only possible while Schema Registry / Karapace is disabled. Defaults to _schemas.
    leaderEligibility Boolean
    If true, Karapace / Schema Registry on the service nodes can participate in leader election. It might be needed to disable this when the schemas topic is replicated to a secondary cluster and Karapace / Schema Registry there must not participate in leader election. Defaults to true.
    topicName String
    The durable, single-partition topic that acts as the log for the schema data. This topic must be compacted to avoid losing data due to the retention policy. Note that changing this configuration in an existing Schema Registry / Karapace setup makes previous schemas inaccessible, may render data encoded with them unreadable, and puts the schema ID sequence out of order. The switch is only possible while Schema Registry / Karapace is disabled. Defaults to _schemas.
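
    For example, Karapace / Schema Registry behaviour is tuned through schemaRegistryConfig, which only has an effect when schemaRegistry is enabled. A minimal sketch; the project name is illustrative, and as noted above the topic name should not be changed on an existing registry without disabling it first.

    import * as aiven from "@pulumi/aiven";

    const kafkaWithSr = new aiven.Kafka("kafka_with_sr", {
        project: "my-project",              // hypothetical project
        cloudName: "google-europe-west1",
        plan: "business-4",
        serviceName: "kafka-with-sr",
        kafkaUserConfig: {
            schemaRegistry: true,
            schemaRegistryConfig: {
                leaderEligibility: false,   // e.g. on a secondary cluster that replicates schemas
                topicName: "_schemas",      // the default, shown explicitly
            },
        },
    });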

    KafkaKafkaUserConfigTieredStorage, KafkaKafkaUserConfigTieredStorageArgs

    Enabled bool
    Whether to enable the tiered storage functionality.
    LocalCache KafkaKafkaUserConfigTieredStorageLocalCache
    Local cache configuration

    Deprecated: This property is deprecated.

    Enabled bool
    Whether to enable the tiered storage functionality.
    LocalCache KafkaKafkaUserConfigTieredStorageLocalCache
    Local cache configuration

    Deprecated: This property is deprecated.

    enabled Boolean
    Whether to enable the tiered storage functionality.
    localCache KafkaKafkaUserConfigTieredStorageLocalCache
    Local cache configuration

    Deprecated: This property is deprecated.

    enabled boolean
    Whether to enable the tiered storage functionality.
    localCache KafkaKafkaUserConfigTieredStorageLocalCache
    Local cache configuration

    Deprecated: This property is deprecated.

    enabled bool
    Whether to enable the tiered storage functionality.
    local_cache KafkaKafkaUserConfigTieredStorageLocalCache
    Local cache configuration

    Deprecated: This property is deprecated.

    enabled Boolean
    Whether to enable the tiered storage functionality.
    localCache Property Map
    Local cache configuration

    Deprecated: This property is deprecated.
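
    As a sketch, tiered storage is switched on through the tieredStorage block; the deprecated localCache settings are best omitted. Availability depends on the plan and Kafka version, so treat the values below as illustrative.

    import * as aiven from "@pulumi/aiven";

    const tieredKafka = new aiven.Kafka("tiered_kafka", {
        project: "my-project",              // hypothetical project
        cloudName: "google-europe-west1",
        plan: "business-4",
        serviceName: "tiered-kafka",
        kafkaUserConfig: {
            tieredStorage: {
                enabled: true,      // offload older log segments to object storage
            },
        },
    });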

    KafkaKafkaUserConfigTieredStorageLocalCache, KafkaKafkaUserConfigTieredStorageLocalCacheArgs

    Size int
    Local cache size in bytes. Example: 1073741824.

    Deprecated: This property is deprecated.

    Size int
    Local cache size in bytes. Example: 1073741824.

    Deprecated: This property is deprecated.

    size Integer
    Local cache size in bytes. Example: 1073741824.

    Deprecated: This property is deprecated.

    size number
    Local cache size in bytes. Example: 1073741824.

    Deprecated: This property is deprecated.

    size int
    Local cache size in bytes. Example: 1073741824.

    Deprecated: This property is deprecated.

    size Number
    Local cache size in bytes. Example: 1073741824.

    Deprecated: This property is deprecated.

    KafkaServiceIntegration, KafkaServiceIntegrationArgs

    IntegrationType string
    Type of the service integration. The only supported value at the moment is read_replica
    SourceServiceName string
    Name of the source service
    IntegrationType string
    Type of the service integration. The only supported value at the moment is read_replica
    SourceServiceName string
    Name of the source service
    integrationType String
    Type of the service integration. The only supported value at the moment is read_replica
    sourceServiceName String
    Name of the source service
    integrationType string
    Type of the service integration. The only supported value at the moment is read_replica
    sourceServiceName string
    Name of the source service
    integration_type str
    Type of the service integration. The only supported value at the moment is read_replica
    source_service_name str
    Name of the source service
    integrationType String
    Type of the service integration. The only supported value at the moment is read_replica
    sourceServiceName String
    Name of the source service
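
    For example, a read_replica integration can be declared inline when the service is created, pointing at the source service by name. Both service names and the project below are illustrative.

    import * as aiven from "@pulumi/aiven";

    const replicaKafka = new aiven.Kafka("replica_kafka", {
        project: "my-project",              // hypothetical project
        cloudName: "google-europe-west1",
        plan: "business-4",
        serviceName: "kafka-replica",
        serviceIntegrations: [{
            integrationType: "read_replica",    // currently the only supported value
            sourceServiceName: "kafka-primary", // hypothetical source service
        }],
    });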

    KafkaTag, KafkaTagArgs

    Key string
    Service tag key
    Value string
    Service tag value
    Key string
    Service tag key
    Value string
    Service tag value
    key String
    Service tag key
    value String
    Service tag value
    key string
    Service tag key
    value string
    Service tag value
    key str
    Service tag key
    value str
    Service tag value
    key String
    Service tag key
    value String
    Service tag value
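
    For example, tags are attached to the service as a list of key/value pairs. The keys and values below are illustrative.

    import * as aiven from "@pulumi/aiven";

    const taggedKafka = new aiven.Kafka("tagged_kafka", {
        project: "my-project",              // hypothetical project
        cloudName: "google-europe-west1",
        plan: "business-4",
        serviceName: "tagged-kafka",
        tags: [
            { key: "environment", value: "staging" },
            { key: "team", value: "data-platform" },
        ],
    });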

    KafkaTechEmail, KafkaTechEmailArgs

    Email string
    An email address to contact for technical issues
    Email string
    An email address to contact for technical issues
    email String
    An email address to contact for technical issues
    email string
    An email address to contact for technical issues
    email str
    An email address to contact for technical issues
    email String
    An email address to contact for technical issues
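
    For example, technical contacts are attached as a list of email objects. The address and project below are illustrative.

    import * as aiven from "@pulumi/aiven";

    const kafkaWithContacts = new aiven.Kafka("kafka_with_contacts", {
        project: "my-project",              // hypothetical project
        cloudName: "google-europe-west1",
        plan: "business-4",
        serviceName: "kafka-with-contacts",
        techEmails: [
            { email: "platform-team@example.com" },  // hypothetical contact address
        ],
    });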

    Import

    $ pulumi import aiven:index/kafka:Kafka example_kafka PROJECT/SERVICE_NAME
    

    To learn more about importing existing cloud resources, see Importing resources.
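
    For instance, with a project named my-project and the service named example-kafka (both names are illustrative placeholders for PROJECT and SERVICE_NAME), the command would be:

    $ pulumi import aiven:index/kafka:Kafka example_kafka my-project/example-kafka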

    Package Details

    Repository: Aiven pulumi/pulumi-aiven
    License: Apache-2.0
    Notes: This Pulumi package is based on the aiven Terraform Provider.