Aiven v6.37.0 published on Thursday, Apr 10, 2025 by Pulumi

aiven.Kafka


Creates and manages an Aiven for Apache Kafka® service.

Example Usage

import * as pulumi from "@pulumi/pulumi";
import * as aiven from "@pulumi/aiven";

const exampleKafka = new aiven.Kafka("example_kafka", {
    project: exampleProject.project,
    cloudName: "google-europe-west1",
    plan: "business-4",
    serviceName: "example-kafka",
    maintenanceWindowDow: "monday",
    maintenanceWindowTime: "10:00:00",
    kafkaUserConfig: {
        kafkaRest: true,
        kafkaConnect: true,
        schemaRegistry: true,
        kafkaVersion: "3.8",
        kafka: {
            groupMaxSessionTimeoutMs: 70000,
            logRetentionBytes: 1000000000,
        },
        publicAccess: {
            kafkaRest: true,
            kafkaConnect: true,
        },
    },
});
import pulumi
import pulumi_aiven as aiven

example_kafka = aiven.Kafka("example_kafka",
    project=example_project["project"],
    cloud_name="google-europe-west1",
    plan="business-4",
    service_name="example-kafka",
    maintenance_window_dow="monday",
    maintenance_window_time="10:00:00",
    kafka_user_config={
        "kafka_rest": True,
        "kafka_connect": True,
        "schema_registry": True,
        "kafka_version": "3.8",
        "kafka": {
            "group_max_session_timeout_ms": 70000,
            "log_retention_bytes": 1000000000,
        },
        "public_access": {
            "kafka_rest": True,
            "kafka_connect": True,
        },
    })
package main

import (
	"github.com/pulumi/pulumi-aiven/sdk/v6/go/aiven"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_, err := aiven.NewKafka(ctx, "example_kafka", &aiven.KafkaArgs{
			Project:               pulumi.Any(exampleProject.Project),
			CloudName:             pulumi.String("google-europe-west1"),
			Plan:                  pulumi.String("business-4"),
			ServiceName:           pulumi.String("example-kafka"),
			MaintenanceWindowDow:  pulumi.String("monday"),
			MaintenanceWindowTime: pulumi.String("10:00:00"),
			KafkaUserConfig: &aiven.KafkaKafkaUserConfigArgs{
				KafkaRest:      pulumi.Bool(true),
				KafkaConnect:   pulumi.Bool(true),
				SchemaRegistry: pulumi.Bool(true),
				KafkaVersion:   pulumi.String("3.8"),
				Kafka: &aiven.KafkaKafkaUserConfigKafkaArgs{
					GroupMaxSessionTimeoutMs: pulumi.Int(70000),
					LogRetentionBytes:        pulumi.Int(1000000000),
				},
				PublicAccess: &aiven.KafkaKafkaUserConfigPublicAccessArgs{
					KafkaRest:    pulumi.Bool(true),
					KafkaConnect: pulumi.Bool(true),
				},
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Aiven = Pulumi.Aiven;

return await Deployment.RunAsync(() => 
{
    var exampleKafka = new Aiven.Kafka("example_kafka", new()
    {
        Project = exampleProject.Project,
        CloudName = "google-europe-west1",
        Plan = "business-4",
        ServiceName = "example-kafka",
        MaintenanceWindowDow = "monday",
        MaintenanceWindowTime = "10:00:00",
        KafkaUserConfig = new Aiven.Inputs.KafkaKafkaUserConfigArgs
        {
            KafkaRest = true,
            KafkaConnect = true,
            SchemaRegistry = true,
            KafkaVersion = "3.8",
            Kafka = new Aiven.Inputs.KafkaKafkaUserConfigKafkaArgs
            {
                GroupMaxSessionTimeoutMs = 70000,
                LogRetentionBytes = 1000000000,
            },
            PublicAccess = new Aiven.Inputs.KafkaKafkaUserConfigPublicAccessArgs
            {
                KafkaRest = true,
                KafkaConnect = true,
            },
        },
    });

});
package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.aiven.Kafka;
import com.pulumi.aiven.KafkaArgs;
import com.pulumi.aiven.inputs.KafkaKafkaUserConfigArgs;
import com.pulumi.aiven.inputs.KafkaKafkaUserConfigKafkaArgs;
import com.pulumi.aiven.inputs.KafkaKafkaUserConfigPublicAccessArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        var exampleKafka = new Kafka("exampleKafka", KafkaArgs.builder()
            .project(exampleProject.project())
            .cloudName("google-europe-west1")
            .plan("business-4")
            .serviceName("example-kafka")
            .maintenanceWindowDow("monday")
            .maintenanceWindowTime("10:00:00")
            .kafkaUserConfig(KafkaKafkaUserConfigArgs.builder()
                .kafkaRest(true)
                .kafkaConnect(true)
                .schemaRegistry(true)
                .kafkaVersion("3.8")
                .kafka(KafkaKafkaUserConfigKafkaArgs.builder()
                    .groupMaxSessionTimeoutMs(70000)
                    .logRetentionBytes(1000000000)
                    .build())
                .publicAccess(KafkaKafkaUserConfigPublicAccessArgs.builder()
                    .kafkaRest(true)
                    .kafkaConnect(true)
                    .build())
                .build())
            .build());

    }
}
resources:
  exampleKafka:
    type: aiven:Kafka
    name: example_kafka
    properties:
      project: ${exampleProject.project}
      cloudName: google-europe-west1
      plan: business-4
      serviceName: example-kafka
      maintenanceWindowDow: monday
      maintenanceWindowTime: '10:00:00'
      kafkaUserConfig:
        kafkaRest: true
        kafkaConnect: true
        schemaRegistry: true
        kafkaVersion: '3.8'
        kafka:
          groupMaxSessionTimeoutMs: 70000
          logRetentionBytes: 1000000000
        publicAccess:
          kafkaRest: true
          kafkaConnect: true

Create Kafka Resource

Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.

Constructor syntax

new Kafka(name: string, args: KafkaArgs, opts?: CustomResourceOptions);
@overload
def Kafka(resource_name: str,
          args: KafkaArgs,
          opts: Optional[ResourceOptions] = None)

@overload
def Kafka(resource_name: str,
          opts: Optional[ResourceOptions] = None,
          plan: Optional[str] = None,
          service_name: Optional[str] = None,
          project: Optional[str] = None,
          maintenance_window_time: Optional[str] = None,
          default_acl: Optional[bool] = None,
          kafkas: Optional[Sequence[KafkaKafkaArgs]] = None,
          karapace: Optional[bool] = None,
          maintenance_window_dow: Optional[str] = None,
          additional_disk_space: Optional[str] = None,
          disk_space: Optional[str] = None,
          kafka_user_config: Optional[KafkaKafkaUserConfigArgs] = None,
          project_vpc_id: Optional[str] = None,
          service_integrations: Optional[Sequence[KafkaServiceIntegrationArgs]] = None,
          cloud_name: Optional[str] = None,
          static_ips: Optional[Sequence[str]] = None,
          tags: Optional[Sequence[KafkaTagArgs]] = None,
          tech_emails: Optional[Sequence[KafkaTechEmailArgs]] = None,
          termination_protection: Optional[bool] = None)
func NewKafka(ctx *Context, name string, args KafkaArgs, opts ...ResourceOption) (*Kafka, error)
public Kafka(string name, KafkaArgs args, CustomResourceOptions? opts = null)
public Kafka(String name, KafkaArgs args)
public Kafka(String name, KafkaArgs args, CustomResourceOptions options)
type: aiven:Kafka
properties: # The arguments to resource properties.
options: # Bag of options to control resource's behavior.

Parameters

name This property is required. string
The unique name of the resource.
args This property is required. KafkaArgs
The arguments to resource properties.
opts CustomResourceOptions
Bag of options to control resource's behavior.
resource_name This property is required. str
The unique name of the resource.
args This property is required. KafkaArgs
The arguments to resource properties.
opts ResourceOptions
Bag of options to control resource's behavior.
ctx Context
Context object for the current deployment.
name This property is required. string
The unique name of the resource.
args This property is required. KafkaArgs
The arguments to resource properties.
opts ResourceOption
Bag of options to control resource's behavior.
name This property is required. string
The unique name of the resource.
args This property is required. KafkaArgs
The arguments to resource properties.
opts CustomResourceOptions
Bag of options to control resource's behavior.
name This property is required. String
The unique name of the resource.
args This property is required. KafkaArgs
The arguments to resource properties.
options CustomResourceOptions
Bag of options to control resource's behavior.
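
The opts (or options) parameter accepts standard Pulumi resource options such as protect, dependsOn, and ignoreChanges. A minimal Python sketch, using placeholder project, plan, and service names rather than values from your environment:

import pulumi
import pulumi_aiven as aiven

# Pass resource options alongside the resource arguments.
kafka = aiven.Kafka(
    "example-kafka",
    project="my-project",
    cloud_name="google-europe-west1",
    plan="business-4",
    service_name="example-kafka",
    opts=pulumi.ResourceOptions(
        protect=True,             # block accidental deletion through Pulumi
        ignore_changes=["tags"],  # leave tags unmanaged after creation
    ),
)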

Constructor example

The following reference example uses placeholder values for all input properties.

var kafkaResource = new Aiven.Kafka("kafkaResource", new()
{
    Plan = "string",
    ServiceName = "string",
    Project = "string",
    MaintenanceWindowTime = "string",
    DefaultAcl = false,
    Kafkas = new[]
    {
        new Aiven.Inputs.KafkaKafkaArgs
        {
            AccessCert = "string",
            AccessKey = "string",
            ConnectUri = "string",
            RestUri = "string",
            SchemaRegistryUri = "string",
            Uris = new[]
            {
                "string",
            },
        },
    },
    MaintenanceWindowDow = "string",
    AdditionalDiskSpace = "string",
    KafkaUserConfig = new Aiven.Inputs.KafkaKafkaUserConfigArgs
    {
        AivenKafkaTopicMessages = false,
        CustomDomain = "string",
        FollowerFetching = new Aiven.Inputs.KafkaKafkaUserConfigFollowerFetchingArgs
        {
            Enabled = false,
        },
        IpFilterObjects = new[]
        {
            new Aiven.Inputs.KafkaKafkaUserConfigIpFilterObjectArgs
            {
                Network = "string",
                Description = "string",
            },
        },
        IpFilterStrings = new[]
        {
            "string",
        },
        Kafka = new Aiven.Inputs.KafkaKafkaUserConfigKafkaArgs
        {
            AutoCreateTopicsEnable = false,
            CompressionType = "string",
            ConnectionsMaxIdleMs = 0,
            DefaultReplicationFactor = 0,
            GroupInitialRebalanceDelayMs = 0,
            GroupMaxSessionTimeoutMs = 0,
            GroupMinSessionTimeoutMs = 0,
            LogCleanerDeleteRetentionMs = 0,
            LogCleanerMaxCompactionLagMs = 0,
            LogCleanerMinCleanableRatio = 0,
            LogCleanerMinCompactionLagMs = 0,
            LogCleanupPolicy = "string",
            LogFlushIntervalMessages = 0,
            LogFlushIntervalMs = 0,
            LogIndexIntervalBytes = 0,
            LogIndexSizeMaxBytes = 0,
            LogLocalRetentionBytes = 0,
            LogLocalRetentionMs = 0,
            LogMessageDownconversionEnable = false,
            LogMessageTimestampDifferenceMaxMs = 0,
            LogMessageTimestampType = "string",
            LogPreallocate = false,
            LogRetentionBytes = 0,
            LogRetentionHours = 0,
            LogRetentionMs = 0,
            LogRollJitterMs = 0,
            LogRollMs = 0,
            LogSegmentBytes = 0,
            LogSegmentDeleteDelayMs = 0,
            MaxConnectionsPerIp = 0,
            MaxIncrementalFetchSessionCacheSlots = 0,
            MessageMaxBytes = 0,
            MinInsyncReplicas = 0,
            NumPartitions = 0,
            OffsetsRetentionMinutes = 0,
            ProducerPurgatoryPurgeIntervalRequests = 0,
            ReplicaFetchMaxBytes = 0,
            ReplicaFetchResponseMaxBytes = 0,
            SaslOauthbearerExpectedAudience = "string",
            SaslOauthbearerExpectedIssuer = "string",
            SaslOauthbearerJwksEndpointUrl = "string",
            SaslOauthbearerSubClaimName = "string",
            SocketRequestMaxBytes = 0,
            TransactionPartitionVerificationEnable = false,
            TransactionRemoveExpiredTransactionCleanupIntervalMs = 0,
            TransactionStateLogSegmentBytes = 0,
        },
        KafkaAuthenticationMethods = new Aiven.Inputs.KafkaKafkaUserConfigKafkaAuthenticationMethodsArgs
        {
            Certificate = false,
            Sasl = false,
        },
        KafkaConnect = false,
        KafkaConnectConfig = new Aiven.Inputs.KafkaKafkaUserConfigKafkaConnectConfigArgs
        {
            ConnectorClientConfigOverridePolicy = "string",
            ConsumerAutoOffsetReset = "string",
            ConsumerFetchMaxBytes = 0,
            ConsumerIsolationLevel = "string",
            ConsumerMaxPartitionFetchBytes = 0,
            ConsumerMaxPollIntervalMs = 0,
            ConsumerMaxPollRecords = 0,
            OffsetFlushIntervalMs = 0,
            OffsetFlushTimeoutMs = 0,
            ProducerBatchSize = 0,
            ProducerBufferMemory = 0,
            ProducerCompressionType = "string",
            ProducerLingerMs = 0,
            ProducerMaxRequestSize = 0,
            ScheduledRebalanceMaxDelayMs = 0,
            SessionTimeoutMs = 0,
        },
        KafkaConnectSecretProviders = new[]
        {
            new Aiven.Inputs.KafkaKafkaUserConfigKafkaConnectSecretProviderArgs
            {
                Name = "string",
                Aws = new Aiven.Inputs.KafkaKafkaUserConfigKafkaConnectSecretProviderAwsArgs
                {
                    AuthMethod = "string",
                    Region = "string",
                    AccessKey = "string",
                    SecretKey = "string",
                },
                Vault = new Aiven.Inputs.KafkaKafkaUserConfigKafkaConnectSecretProviderVaultArgs
                {
                    Address = "string",
                    AuthMethod = "string",
                    EngineVersion = 0,
                    PrefixPathDepth = 0,
                    Token = "string",
                },
            },
        },
        KafkaRest = false,
        KafkaRestAuthorization = false,
        KafkaRestConfig = new Aiven.Inputs.KafkaKafkaUserConfigKafkaRestConfigArgs
        {
            ConsumerEnableAutoCommit = false,
            ConsumerIdleDisconnectTimeout = 0,
            ConsumerRequestMaxBytes = 0,
            ConsumerRequestTimeoutMs = 0,
            NameStrategy = "string",
            NameStrategyValidation = false,
            ProducerAcks = "string",
            ProducerCompressionType = "string",
            ProducerLingerMs = 0,
            ProducerMaxRequestSize = 0,
            SimpleconsumerPoolSizeMax = 0,
        },
        KafkaSaslMechanisms = new Aiven.Inputs.KafkaKafkaUserConfigKafkaSaslMechanismsArgs
        {
            Plain = false,
            ScramSha256 = false,
            ScramSha512 = false,
        },
        KafkaVersion = "string",
        LetsencryptSaslPrivatelink = false,
        PrivateAccess = new Aiven.Inputs.KafkaKafkaUserConfigPrivateAccessArgs
        {
            Kafka = false,
            KafkaConnect = false,
            KafkaRest = false,
            Prometheus = false,
            SchemaRegistry = false,
        },
        PrivatelinkAccess = new Aiven.Inputs.KafkaKafkaUserConfigPrivatelinkAccessArgs
        {
            Jolokia = false,
            Kafka = false,
            KafkaConnect = false,
            KafkaRest = false,
            Prometheus = false,
            SchemaRegistry = false,
        },
        PublicAccess = new Aiven.Inputs.KafkaKafkaUserConfigPublicAccessArgs
        {
            Kafka = false,
            KafkaConnect = false,
            KafkaRest = false,
            Prometheus = false,
            SchemaRegistry = false,
        },
        SchemaRegistry = false,
        SchemaRegistryConfig = new Aiven.Inputs.KafkaKafkaUserConfigSchemaRegistryConfigArgs
        {
            LeaderEligibility = false,
            RetriableErrorsSilenced = false,
            SchemaReaderStrictMode = false,
            TopicName = "string",
        },
        ServiceLog = false,
        SingleZone = new Aiven.Inputs.KafkaKafkaUserConfigSingleZoneArgs
        {
            Enabled = false,
        },
        StaticIps = false,
        TieredStorage = new Aiven.Inputs.KafkaKafkaUserConfigTieredStorageArgs
        {
            Enabled = false,
        },
    },
    ProjectVpcId = "string",
    ServiceIntegrations = new[]
    {
        new Aiven.Inputs.KafkaServiceIntegrationArgs
        {
            IntegrationType = "string",
            SourceServiceName = "string",
        },
    },
    CloudName = "string",
    StaticIps = new[]
    {
        "string",
    },
    Tags = new[]
    {
        new Aiven.Inputs.KafkaTagArgs
        {
            Key = "string",
            Value = "string",
        },
    },
    TechEmails = new[]
    {
        new Aiven.Inputs.KafkaTechEmailArgs
        {
            Email = "string",
        },
    },
    TerminationProtection = false,
});
example, err := aiven.NewKafka(ctx, "kafkaResource", &aiven.KafkaArgs{
	Plan:                  pulumi.String("string"),
	ServiceName:           pulumi.String("string"),
	Project:               pulumi.String("string"),
	MaintenanceWindowTime: pulumi.String("string"),
	DefaultAcl:            pulumi.Bool(false),
	Kafkas: aiven.KafkaKafkaArray{
		&aiven.KafkaKafkaArgs{
			AccessCert:        pulumi.String("string"),
			AccessKey:         pulumi.String("string"),
			ConnectUri:        pulumi.String("string"),
			RestUri:           pulumi.String("string"),
			SchemaRegistryUri: pulumi.String("string"),
			Uris: pulumi.StringArray{
				pulumi.String("string"),
			},
		},
	},
	MaintenanceWindowDow: pulumi.String("string"),
	AdditionalDiskSpace:  pulumi.String("string"),
	KafkaUserConfig: &aiven.KafkaKafkaUserConfigArgs{
		AivenKafkaTopicMessages: pulumi.Bool(false),
		CustomDomain:            pulumi.String("string"),
		FollowerFetching: &aiven.KafkaKafkaUserConfigFollowerFetchingArgs{
			Enabled: pulumi.Bool(false),
		},
		IpFilterObjects: aiven.KafkaKafkaUserConfigIpFilterObjectArray{
			&aiven.KafkaKafkaUserConfigIpFilterObjectArgs{
				Network:     pulumi.String("string"),
				Description: pulumi.String("string"),
			},
		},
		IpFilterStrings: pulumi.StringArray{
			pulumi.String("string"),
		},
		Kafka: &aiven.KafkaKafkaUserConfigKafkaArgs{
			AutoCreateTopicsEnable:                               pulumi.Bool(false),
			CompressionType:                                      pulumi.String("string"),
			ConnectionsMaxIdleMs:                                 pulumi.Int(0),
			DefaultReplicationFactor:                             pulumi.Int(0),
			GroupInitialRebalanceDelayMs:                         pulumi.Int(0),
			GroupMaxSessionTimeoutMs:                             pulumi.Int(0),
			GroupMinSessionTimeoutMs:                             pulumi.Int(0),
			LogCleanerDeleteRetentionMs:                          pulumi.Int(0),
			LogCleanerMaxCompactionLagMs:                         pulumi.Int(0),
			LogCleanerMinCleanableRatio:                          pulumi.Float64(0),
			LogCleanerMinCompactionLagMs:                         pulumi.Int(0),
			LogCleanupPolicy:                                     pulumi.String("string"),
			LogFlushIntervalMessages:                             pulumi.Int(0),
			LogFlushIntervalMs:                                   pulumi.Int(0),
			LogIndexIntervalBytes:                                pulumi.Int(0),
			LogIndexSizeMaxBytes:                                 pulumi.Int(0),
			LogLocalRetentionBytes:                               pulumi.Int(0),
			LogLocalRetentionMs:                                  pulumi.Int(0),
			LogMessageDownconversionEnable:                       pulumi.Bool(false),
			LogMessageTimestampDifferenceMaxMs:                   pulumi.Int(0),
			LogMessageTimestampType:                              pulumi.String("string"),
			LogPreallocate:                                       pulumi.Bool(false),
			LogRetentionBytes:                                    pulumi.Int(0),
			LogRetentionHours:                                    pulumi.Int(0),
			LogRetentionMs:                                       pulumi.Int(0),
			LogRollJitterMs:                                      pulumi.Int(0),
			LogRollMs:                                            pulumi.Int(0),
			LogSegmentBytes:                                      pulumi.Int(0),
			LogSegmentDeleteDelayMs:                              pulumi.Int(0),
			MaxConnectionsPerIp:                                  pulumi.Int(0),
			MaxIncrementalFetchSessionCacheSlots:                 pulumi.Int(0),
			MessageMaxBytes:                                      pulumi.Int(0),
			MinInsyncReplicas:                                    pulumi.Int(0),
			NumPartitions:                                        pulumi.Int(0),
			OffsetsRetentionMinutes:                              pulumi.Int(0),
			ProducerPurgatoryPurgeIntervalRequests:               pulumi.Int(0),
			ReplicaFetchMaxBytes:                                 pulumi.Int(0),
			ReplicaFetchResponseMaxBytes:                         pulumi.Int(0),
			SaslOauthbearerExpectedAudience:                      pulumi.String("string"),
			SaslOauthbearerExpectedIssuer:                        pulumi.String("string"),
			SaslOauthbearerJwksEndpointUrl:                       pulumi.String("string"),
			SaslOauthbearerSubClaimName:                          pulumi.String("string"),
			SocketRequestMaxBytes:                                pulumi.Int(0),
			TransactionPartitionVerificationEnable:               pulumi.Bool(false),
			TransactionRemoveExpiredTransactionCleanupIntervalMs: pulumi.Int(0),
			TransactionStateLogSegmentBytes:                      pulumi.Int(0),
		},
		KafkaAuthenticationMethods: &aiven.KafkaKafkaUserConfigKafkaAuthenticationMethodsArgs{
			Certificate: pulumi.Bool(false),
			Sasl:        pulumi.Bool(false),
		},
		KafkaConnect: pulumi.Bool(false),
		KafkaConnectConfig: &aiven.KafkaKafkaUserConfigKafkaConnectConfigArgs{
			ConnectorClientConfigOverridePolicy: pulumi.String("string"),
			ConsumerAutoOffsetReset:             pulumi.String("string"),
			ConsumerFetchMaxBytes:               pulumi.Int(0),
			ConsumerIsolationLevel:              pulumi.String("string"),
			ConsumerMaxPartitionFetchBytes:      pulumi.Int(0),
			ConsumerMaxPollIntervalMs:           pulumi.Int(0),
			ConsumerMaxPollRecords:              pulumi.Int(0),
			OffsetFlushIntervalMs:               pulumi.Int(0),
			OffsetFlushTimeoutMs:                pulumi.Int(0),
			ProducerBatchSize:                   pulumi.Int(0),
			ProducerBufferMemory:                pulumi.Int(0),
			ProducerCompressionType:             pulumi.String("string"),
			ProducerLingerMs:                    pulumi.Int(0),
			ProducerMaxRequestSize:              pulumi.Int(0),
			ScheduledRebalanceMaxDelayMs:        pulumi.Int(0),
			SessionTimeoutMs:                    pulumi.Int(0),
		},
		KafkaConnectSecretProviders: aiven.KafkaKafkaUserConfigKafkaConnectSecretProviderArray{
			&aiven.KafkaKafkaUserConfigKafkaConnectSecretProviderArgs{
				Name: pulumi.String("string"),
				Aws: &aiven.KafkaKafkaUserConfigKafkaConnectSecretProviderAwsArgs{
					AuthMethod: pulumi.String("string"),
					Region:     pulumi.String("string"),
					AccessKey:  pulumi.String("string"),
					SecretKey:  pulumi.String("string"),
				},
				Vault: &aiven.KafkaKafkaUserConfigKafkaConnectSecretProviderVaultArgs{
					Address:         pulumi.String("string"),
					AuthMethod:      pulumi.String("string"),
					EngineVersion:   pulumi.Int(0),
					PrefixPathDepth: pulumi.Int(0),
					Token:           pulumi.String("string"),
				},
			},
		},
		KafkaRest:              pulumi.Bool(false),
		KafkaRestAuthorization: pulumi.Bool(false),
		KafkaRestConfig: &aiven.KafkaKafkaUserConfigKafkaRestConfigArgs{
			ConsumerEnableAutoCommit:      pulumi.Bool(false),
			ConsumerIdleDisconnectTimeout: pulumi.Int(0),
			ConsumerRequestMaxBytes:       pulumi.Int(0),
			ConsumerRequestTimeoutMs:      pulumi.Int(0),
			NameStrategy:                  pulumi.String("string"),
			NameStrategyValidation:        pulumi.Bool(false),
			ProducerAcks:                  pulumi.String("string"),
			ProducerCompressionType:       pulumi.String("string"),
			ProducerLingerMs:              pulumi.Int(0),
			ProducerMaxRequestSize:        pulumi.Int(0),
			SimpleconsumerPoolSizeMax:     pulumi.Int(0),
		},
		KafkaSaslMechanisms: &aiven.KafkaKafkaUserConfigKafkaSaslMechanismsArgs{
			Plain:       pulumi.Bool(false),
			ScramSha256: pulumi.Bool(false),
			ScramSha512: pulumi.Bool(false),
		},
		KafkaVersion:               pulumi.String("string"),
		LetsencryptSaslPrivatelink: pulumi.Bool(false),
		PrivateAccess: &aiven.KafkaKafkaUserConfigPrivateAccessArgs{
			Kafka:          pulumi.Bool(false),
			KafkaConnect:   pulumi.Bool(false),
			KafkaRest:      pulumi.Bool(false),
			Prometheus:     pulumi.Bool(false),
			SchemaRegistry: pulumi.Bool(false),
		},
		PrivatelinkAccess: &aiven.KafkaKafkaUserConfigPrivatelinkAccessArgs{
			Jolokia:        pulumi.Bool(false),
			Kafka:          pulumi.Bool(false),
			KafkaConnect:   pulumi.Bool(false),
			KafkaRest:      pulumi.Bool(false),
			Prometheus:     pulumi.Bool(false),
			SchemaRegistry: pulumi.Bool(false),
		},
		PublicAccess: &aiven.KafkaKafkaUserConfigPublicAccessArgs{
			Kafka:          pulumi.Bool(false),
			KafkaConnect:   pulumi.Bool(false),
			KafkaRest:      pulumi.Bool(false),
			Prometheus:     pulumi.Bool(false),
			SchemaRegistry: pulumi.Bool(false),
		},
		SchemaRegistry: pulumi.Bool(false),
		SchemaRegistryConfig: &aiven.KafkaKafkaUserConfigSchemaRegistryConfigArgs{
			LeaderEligibility:       pulumi.Bool(false),
			RetriableErrorsSilenced: pulumi.Bool(false),
			SchemaReaderStrictMode:  pulumi.Bool(false),
			TopicName:               pulumi.String("string"),
		},
		ServiceLog: pulumi.Bool(false),
		SingleZone: &aiven.KafkaKafkaUserConfigSingleZoneArgs{
			Enabled: pulumi.Bool(false),
		},
		StaticIps: pulumi.Bool(false),
		TieredStorage: &aiven.KafkaKafkaUserConfigTieredStorageArgs{
			Enabled: pulumi.Bool(false),
		},
	},
	ProjectVpcId: pulumi.String("string"),
	ServiceIntegrations: aiven.KafkaServiceIntegrationArray{
		&aiven.KafkaServiceIntegrationArgs{
			IntegrationType:   pulumi.String("string"),
			SourceServiceName: pulumi.String("string"),
		},
	},
	CloudName: pulumi.String("string"),
	StaticIps: pulumi.StringArray{
		pulumi.String("string"),
	},
	Tags: aiven.KafkaTagArray{
		&aiven.KafkaTagArgs{
			Key:   pulumi.String("string"),
			Value: pulumi.String("string"),
		},
	},
	TechEmails: aiven.KafkaTechEmailArray{
		&aiven.KafkaTechEmailArgs{
			Email: pulumi.String("string"),
		},
	},
	TerminationProtection: pulumi.Bool(false),
})
var kafkaResource = new Kafka("kafkaResource", KafkaArgs.builder()
    .plan("string")
    .serviceName("string")
    .project("string")
    .maintenanceWindowTime("string")
    .defaultAcl(false)
    .kafkas(KafkaKafkaArgs.builder()
        .accessCert("string")
        .accessKey("string")
        .connectUri("string")
        .restUri("string")
        .schemaRegistryUri("string")
        .uris("string")
        .build())
    .maintenanceWindowDow("string")
    .additionalDiskSpace("string")
    .kafkaUserConfig(KafkaKafkaUserConfigArgs.builder()
        .aivenKafkaTopicMessages(false)
        .customDomain("string")
        .followerFetching(KafkaKafkaUserConfigFollowerFetchingArgs.builder()
            .enabled(false)
            .build())
        .ipFilterObjects(KafkaKafkaUserConfigIpFilterObjectArgs.builder()
            .network("string")
            .description("string")
            .build())
        .ipFilterStrings("string")
        .kafka(KafkaKafkaUserConfigKafkaArgs.builder()
            .autoCreateTopicsEnable(false)
            .compressionType("string")
            .connectionsMaxIdleMs(0)
            .defaultReplicationFactor(0)
            .groupInitialRebalanceDelayMs(0)
            .groupMaxSessionTimeoutMs(0)
            .groupMinSessionTimeoutMs(0)
            .logCleanerDeleteRetentionMs(0)
            .logCleanerMaxCompactionLagMs(0)
            .logCleanerMinCleanableRatio(0)
            .logCleanerMinCompactionLagMs(0)
            .logCleanupPolicy("string")
            .logFlushIntervalMessages(0)
            .logFlushIntervalMs(0)
            .logIndexIntervalBytes(0)
            .logIndexSizeMaxBytes(0)
            .logLocalRetentionBytes(0)
            .logLocalRetentionMs(0)
            .logMessageDownconversionEnable(false)
            .logMessageTimestampDifferenceMaxMs(0)
            .logMessageTimestampType("string")
            .logPreallocate(false)
            .logRetentionBytes(0)
            .logRetentionHours(0)
            .logRetentionMs(0)
            .logRollJitterMs(0)
            .logRollMs(0)
            .logSegmentBytes(0)
            .logSegmentDeleteDelayMs(0)
            .maxConnectionsPerIp(0)
            .maxIncrementalFetchSessionCacheSlots(0)
            .messageMaxBytes(0)
            .minInsyncReplicas(0)
            .numPartitions(0)
            .offsetsRetentionMinutes(0)
            .producerPurgatoryPurgeIntervalRequests(0)
            .replicaFetchMaxBytes(0)
            .replicaFetchResponseMaxBytes(0)
            .saslOauthbearerExpectedAudience("string")
            .saslOauthbearerExpectedIssuer("string")
            .saslOauthbearerJwksEndpointUrl("string")
            .saslOauthbearerSubClaimName("string")
            .socketRequestMaxBytes(0)
            .transactionPartitionVerificationEnable(false)
            .transactionRemoveExpiredTransactionCleanupIntervalMs(0)
            .transactionStateLogSegmentBytes(0)
            .build())
        .kafkaAuthenticationMethods(KafkaKafkaUserConfigKafkaAuthenticationMethodsArgs.builder()
            .certificate(false)
            .sasl(false)
            .build())
        .kafkaConnect(false)
        .kafkaConnectConfig(KafkaKafkaUserConfigKafkaConnectConfigArgs.builder()
            .connectorClientConfigOverridePolicy("string")
            .consumerAutoOffsetReset("string")
            .consumerFetchMaxBytes(0)
            .consumerIsolationLevel("string")
            .consumerMaxPartitionFetchBytes(0)
            .consumerMaxPollIntervalMs(0)
            .consumerMaxPollRecords(0)
            .offsetFlushIntervalMs(0)
            .offsetFlushTimeoutMs(0)
            .producerBatchSize(0)
            .producerBufferMemory(0)
            .producerCompressionType("string")
            .producerLingerMs(0)
            .producerMaxRequestSize(0)
            .scheduledRebalanceMaxDelayMs(0)
            .sessionTimeoutMs(0)
            .build())
        .kafkaConnectSecretProviders(KafkaKafkaUserConfigKafkaConnectSecretProviderArgs.builder()
            .name("string")
            .aws(KafkaKafkaUserConfigKafkaConnectSecretProviderAwsArgs.builder()
                .authMethod("string")
                .region("string")
                .accessKey("string")
                .secretKey("string")
                .build())
            .vault(KafkaKafkaUserConfigKafkaConnectSecretProviderVaultArgs.builder()
                .address("string")
                .authMethod("string")
                .engineVersion(0)
                .prefixPathDepth(0)
                .token("string")
                .build())
            .build())
        .kafkaRest(false)
        .kafkaRestAuthorization(false)
        .kafkaRestConfig(KafkaKafkaUserConfigKafkaRestConfigArgs.builder()
            .consumerEnableAutoCommit(false)
            .consumerIdleDisconnectTimeout(0)
            .consumerRequestMaxBytes(0)
            .consumerRequestTimeoutMs(0)
            .nameStrategy("string")
            .nameStrategyValidation(false)
            .producerAcks("string")
            .producerCompressionType("string")
            .producerLingerMs(0)
            .producerMaxRequestSize(0)
            .simpleconsumerPoolSizeMax(0)
            .build())
        .kafkaSaslMechanisms(KafkaKafkaUserConfigKafkaSaslMechanismsArgs.builder()
            .plain(false)
            .scramSha256(false)
            .scramSha512(false)
            .build())
        .kafkaVersion("string")
        .letsencryptSaslPrivatelink(false)
        .privateAccess(KafkaKafkaUserConfigPrivateAccessArgs.builder()
            .kafka(false)
            .kafkaConnect(false)
            .kafkaRest(false)
            .prometheus(false)
            .schemaRegistry(false)
            .build())
        .privatelinkAccess(KafkaKafkaUserConfigPrivatelinkAccessArgs.builder()
            .jolokia(false)
            .kafka(false)
            .kafkaConnect(false)
            .kafkaRest(false)
            .prometheus(false)
            .schemaRegistry(false)
            .build())
        .publicAccess(KafkaKafkaUserConfigPublicAccessArgs.builder()
            .kafka(false)
            .kafkaConnect(false)
            .kafkaRest(false)
            .prometheus(false)
            .schemaRegistry(false)
            .build())
        .schemaRegistry(false)
        .schemaRegistryConfig(KafkaKafkaUserConfigSchemaRegistryConfigArgs.builder()
            .leaderEligibility(false)
            .retriableErrorsSilenced(false)
            .schemaReaderStrictMode(false)
            .topicName("string")
            .build())
        .serviceLog(false)
        .singleZone(KafkaKafkaUserConfigSingleZoneArgs.builder()
            .enabled(false)
            .build())
        .staticIps(false)
        .tieredStorage(KafkaKafkaUserConfigTieredStorageArgs.builder()
            .enabled(false)
            .build())
        .build())
    .projectVpcId("string")
    .serviceIntegrations(KafkaServiceIntegrationArgs.builder()
        .integrationType("string")
        .sourceServiceName("string")
        .build())
    .cloudName("string")
    .staticIps("string")
    .tags(KafkaTagArgs.builder()
        .key("string")
        .value("string")
        .build())
    .techEmails(KafkaTechEmailArgs.builder()
        .email("string")
        .build())
    .terminationProtection(false)
    .build());
kafka_resource = aiven.Kafka("kafkaResource",
    plan="string",
    service_name="string",
    project="string",
    maintenance_window_time="string",
    default_acl=False,
    kafkas=[{
        "access_cert": "string",
        "access_key": "string",
        "connect_uri": "string",
        "rest_uri": "string",
        "schema_registry_uri": "string",
        "uris": ["string"],
    }],
    maintenance_window_dow="string",
    additional_disk_space="string",
    kafka_user_config={
        "aiven_kafka_topic_messages": False,
        "custom_domain": "string",
        "follower_fetching": {
            "enabled": False,
        },
        "ip_filter_objects": [{
            "network": "string",
            "description": "string",
        }],
        "ip_filter_strings": ["string"],
        "kafka": {
            "auto_create_topics_enable": False,
            "compression_type": "string",
            "connections_max_idle_ms": 0,
            "default_replication_factor": 0,
            "group_initial_rebalance_delay_ms": 0,
            "group_max_session_timeout_ms": 0,
            "group_min_session_timeout_ms": 0,
            "log_cleaner_delete_retention_ms": 0,
            "log_cleaner_max_compaction_lag_ms": 0,
            "log_cleaner_min_cleanable_ratio": 0,
            "log_cleaner_min_compaction_lag_ms": 0,
            "log_cleanup_policy": "string",
            "log_flush_interval_messages": 0,
            "log_flush_interval_ms": 0,
            "log_index_interval_bytes": 0,
            "log_index_size_max_bytes": 0,
            "log_local_retention_bytes": 0,
            "log_local_retention_ms": 0,
            "log_message_downconversion_enable": False,
            "log_message_timestamp_difference_max_ms": 0,
            "log_message_timestamp_type": "string",
            "log_preallocate": False,
            "log_retention_bytes": 0,
            "log_retention_hours": 0,
            "log_retention_ms": 0,
            "log_roll_jitter_ms": 0,
            "log_roll_ms": 0,
            "log_segment_bytes": 0,
            "log_segment_delete_delay_ms": 0,
            "max_connections_per_ip": 0,
            "max_incremental_fetch_session_cache_slots": 0,
            "message_max_bytes": 0,
            "min_insync_replicas": 0,
            "num_partitions": 0,
            "offsets_retention_minutes": 0,
            "producer_purgatory_purge_interval_requests": 0,
            "replica_fetch_max_bytes": 0,
            "replica_fetch_response_max_bytes": 0,
            "sasl_oauthbearer_expected_audience": "string",
            "sasl_oauthbearer_expected_issuer": "string",
            "sasl_oauthbearer_jwks_endpoint_url": "string",
            "sasl_oauthbearer_sub_claim_name": "string",
            "socket_request_max_bytes": 0,
            "transaction_partition_verification_enable": False,
            "transaction_remove_expired_transaction_cleanup_interval_ms": 0,
            "transaction_state_log_segment_bytes": 0,
        },
        "kafka_authentication_methods": {
            "certificate": False,
            "sasl": False,
        },
        "kafka_connect": False,
        "kafka_connect_config": {
            "connector_client_config_override_policy": "string",
            "consumer_auto_offset_reset": "string",
            "consumer_fetch_max_bytes": 0,
            "consumer_isolation_level": "string",
            "consumer_max_partition_fetch_bytes": 0,
            "consumer_max_poll_interval_ms": 0,
            "consumer_max_poll_records": 0,
            "offset_flush_interval_ms": 0,
            "offset_flush_timeout_ms": 0,
            "producer_batch_size": 0,
            "producer_buffer_memory": 0,
            "producer_compression_type": "string",
            "producer_linger_ms": 0,
            "producer_max_request_size": 0,
            "scheduled_rebalance_max_delay_ms": 0,
            "session_timeout_ms": 0,
        },
        "kafka_connect_secret_providers": [{
            "name": "string",
            "aws": {
                "auth_method": "string",
                "region": "string",
                "access_key": "string",
                "secret_key": "string",
            },
            "vault": {
                "address": "string",
                "auth_method": "string",
                "engine_version": 0,
                "prefix_path_depth": 0,
                "token": "string",
            },
        }],
        "kafka_rest": False,
        "kafka_rest_authorization": False,
        "kafka_rest_config": {
            "consumer_enable_auto_commit": False,
            "consumer_idle_disconnect_timeout": 0,
            "consumer_request_max_bytes": 0,
            "consumer_request_timeout_ms": 0,
            "name_strategy": "string",
            "name_strategy_validation": False,
            "producer_acks": "string",
            "producer_compression_type": "string",
            "producer_linger_ms": 0,
            "producer_max_request_size": 0,
            "simpleconsumer_pool_size_max": 0,
        },
        "kafka_sasl_mechanisms": {
            "plain": False,
            "scram_sha256": False,
            "scram_sha512": False,
        },
        "kafka_version": "string",
        "letsencrypt_sasl_privatelink": False,
        "private_access": {
            "kafka": False,
            "kafka_connect": False,
            "kafka_rest": False,
            "prometheus": False,
            "schema_registry": False,
        },
        "privatelink_access": {
            "jolokia": False,
            "kafka": False,
            "kafka_connect": False,
            "kafka_rest": False,
            "prometheus": False,
            "schema_registry": False,
        },
        "public_access": {
            "kafka": False,
            "kafka_connect": False,
            "kafka_rest": False,
            "prometheus": False,
            "schema_registry": False,
        },
        "schema_registry": False,
        "schema_registry_config": {
            "leader_eligibility": False,
            "retriable_errors_silenced": False,
            "schema_reader_strict_mode": False,
            "topic_name": "string",
        },
        "service_log": False,
        "single_zone": {
            "enabled": False,
        },
        "static_ips": False,
        "tiered_storage": {
            "enabled": False,
        },
    },
    project_vpc_id="string",
    service_integrations=[{
        "integration_type": "string",
        "source_service_name": "string",
    }],
    cloud_name="string",
    static_ips=["string"],
    tags=[{
        "key": "string",
        "value": "string",
    }],
    tech_emails=[{
        "email": "string",
    }],
    termination_protection=False)
const kafkaResource = new aiven.Kafka("kafkaResource", {
    plan: "string",
    serviceName: "string",
    project: "string",
    maintenanceWindowTime: "string",
    defaultAcl: false,
    kafkas: [{
        accessCert: "string",
        accessKey: "string",
        connectUri: "string",
        restUri: "string",
        schemaRegistryUri: "string",
        uris: ["string"],
    }],
    maintenanceWindowDow: "string",
    additionalDiskSpace: "string",
    kafkaUserConfig: {
        aivenKafkaTopicMessages: false,
        customDomain: "string",
        followerFetching: {
            enabled: false,
        },
        ipFilterObjects: [{
            network: "string",
            description: "string",
        }],
        ipFilterStrings: ["string"],
        kafka: {
            autoCreateTopicsEnable: false,
            compressionType: "string",
            connectionsMaxIdleMs: 0,
            defaultReplicationFactor: 0,
            groupInitialRebalanceDelayMs: 0,
            groupMaxSessionTimeoutMs: 0,
            groupMinSessionTimeoutMs: 0,
            logCleanerDeleteRetentionMs: 0,
            logCleanerMaxCompactionLagMs: 0,
            logCleanerMinCleanableRatio: 0,
            logCleanerMinCompactionLagMs: 0,
            logCleanupPolicy: "string",
            logFlushIntervalMessages: 0,
            logFlushIntervalMs: 0,
            logIndexIntervalBytes: 0,
            logIndexSizeMaxBytes: 0,
            logLocalRetentionBytes: 0,
            logLocalRetentionMs: 0,
            logMessageDownconversionEnable: false,
            logMessageTimestampDifferenceMaxMs: 0,
            logMessageTimestampType: "string",
            logPreallocate: false,
            logRetentionBytes: 0,
            logRetentionHours: 0,
            logRetentionMs: 0,
            logRollJitterMs: 0,
            logRollMs: 0,
            logSegmentBytes: 0,
            logSegmentDeleteDelayMs: 0,
            maxConnectionsPerIp: 0,
            maxIncrementalFetchSessionCacheSlots: 0,
            messageMaxBytes: 0,
            minInsyncReplicas: 0,
            numPartitions: 0,
            offsetsRetentionMinutes: 0,
            producerPurgatoryPurgeIntervalRequests: 0,
            replicaFetchMaxBytes: 0,
            replicaFetchResponseMaxBytes: 0,
            saslOauthbearerExpectedAudience: "string",
            saslOauthbearerExpectedIssuer: "string",
            saslOauthbearerJwksEndpointUrl: "string",
            saslOauthbearerSubClaimName: "string",
            socketRequestMaxBytes: 0,
            transactionPartitionVerificationEnable: false,
            transactionRemoveExpiredTransactionCleanupIntervalMs: 0,
            transactionStateLogSegmentBytes: 0,
        },
        kafkaAuthenticationMethods: {
            certificate: false,
            sasl: false,
        },
        kafkaConnect: false,
        kafkaConnectConfig: {
            connectorClientConfigOverridePolicy: "string",
            consumerAutoOffsetReset: "string",
            consumerFetchMaxBytes: 0,
            consumerIsolationLevel: "string",
            consumerMaxPartitionFetchBytes: 0,
            consumerMaxPollIntervalMs: 0,
            consumerMaxPollRecords: 0,
            offsetFlushIntervalMs: 0,
            offsetFlushTimeoutMs: 0,
            producerBatchSize: 0,
            producerBufferMemory: 0,
            producerCompressionType: "string",
            producerLingerMs: 0,
            producerMaxRequestSize: 0,
            scheduledRebalanceMaxDelayMs: 0,
            sessionTimeoutMs: 0,
        },
        kafkaConnectSecretProviders: [{
            name: "string",
            aws: {
                authMethod: "string",
                region: "string",
                accessKey: "string",
                secretKey: "string",
            },
            vault: {
                address: "string",
                authMethod: "string",
                engineVersion: 0,
                prefixPathDepth: 0,
                token: "string",
            },
        }],
        kafkaRest: false,
        kafkaRestAuthorization: false,
        kafkaRestConfig: {
            consumerEnableAutoCommit: false,
            consumerIdleDisconnectTimeout: 0,
            consumerRequestMaxBytes: 0,
            consumerRequestTimeoutMs: 0,
            nameStrategy: "string",
            nameStrategyValidation: false,
            producerAcks: "string",
            producerCompressionType: "string",
            producerLingerMs: 0,
            producerMaxRequestSize: 0,
            simpleconsumerPoolSizeMax: 0,
        },
        kafkaSaslMechanisms: {
            plain: false,
            scramSha256: false,
            scramSha512: false,
        },
        kafkaVersion: "string",
        letsencryptSaslPrivatelink: false,
        privateAccess: {
            kafka: false,
            kafkaConnect: false,
            kafkaRest: false,
            prometheus: false,
            schemaRegistry: false,
        },
        privatelinkAccess: {
            jolokia: false,
            kafka: false,
            kafkaConnect: false,
            kafkaRest: false,
            prometheus: false,
            schemaRegistry: false,
        },
        publicAccess: {
            kafka: false,
            kafkaConnect: false,
            kafkaRest: false,
            prometheus: false,
            schemaRegistry: false,
        },
        schemaRegistry: false,
        schemaRegistryConfig: {
            leaderEligibility: false,
            retriableErrorsSilenced: false,
            schemaReaderStrictMode: false,
            topicName: "string",
        },
        serviceLog: false,
        singleZone: {
            enabled: false,
        },
        staticIps: false,
        tieredStorage: {
            enabled: false,
        },
    },
    projectVpcId: "string",
    serviceIntegrations: [{
        integrationType: "string",
        sourceServiceName: "string",
    }],
    cloudName: "string",
    staticIps: ["string"],
    tags: [{
        key: "string",
        value: "string",
    }],
    techEmails: [{
        email: "string",
    }],
    terminationProtection: false,
});
type: aiven:Kafka
properties:
    additionalDiskSpace: string
    cloudName: string
    defaultAcl: false
    kafkaUserConfig:
        aivenKafkaTopicMessages: false
        customDomain: string
        followerFetching:
            enabled: false
        ipFilterObjects:
            - description: string
              network: string
        ipFilterStrings:
            - string
        kafka:
            autoCreateTopicsEnable: false
            compressionType: string
            connectionsMaxIdleMs: 0
            defaultReplicationFactor: 0
            groupInitialRebalanceDelayMs: 0
            groupMaxSessionTimeoutMs: 0
            groupMinSessionTimeoutMs: 0
            logCleanerDeleteRetentionMs: 0
            logCleanerMaxCompactionLagMs: 0
            logCleanerMinCleanableRatio: 0
            logCleanerMinCompactionLagMs: 0
            logCleanupPolicy: string
            logFlushIntervalMessages: 0
            logFlushIntervalMs: 0
            logIndexIntervalBytes: 0
            logIndexSizeMaxBytes: 0
            logLocalRetentionBytes: 0
            logLocalRetentionMs: 0
            logMessageDownconversionEnable: false
            logMessageTimestampDifferenceMaxMs: 0
            logMessageTimestampType: string
            logPreallocate: false
            logRetentionBytes: 0
            logRetentionHours: 0
            logRetentionMs: 0
            logRollJitterMs: 0
            logRollMs: 0
            logSegmentBytes: 0
            logSegmentDeleteDelayMs: 0
            maxConnectionsPerIp: 0
            maxIncrementalFetchSessionCacheSlots: 0
            messageMaxBytes: 0
            minInsyncReplicas: 0
            numPartitions: 0
            offsetsRetentionMinutes: 0
            producerPurgatoryPurgeIntervalRequests: 0
            replicaFetchMaxBytes: 0
            replicaFetchResponseMaxBytes: 0
            saslOauthbearerExpectedAudience: string
            saslOauthbearerExpectedIssuer: string
            saslOauthbearerJwksEndpointUrl: string
            saslOauthbearerSubClaimName: string
            socketRequestMaxBytes: 0
            transactionPartitionVerificationEnable: false
            transactionRemoveExpiredTransactionCleanupIntervalMs: 0
            transactionStateLogSegmentBytes: 0
        kafkaAuthenticationMethods:
            certificate: false
            sasl: false
        kafkaConnect: false
        kafkaConnectConfig:
            connectorClientConfigOverridePolicy: string
            consumerAutoOffsetReset: string
            consumerFetchMaxBytes: 0
            consumerIsolationLevel: string
            consumerMaxPartitionFetchBytes: 0
            consumerMaxPollIntervalMs: 0
            consumerMaxPollRecords: 0
            offsetFlushIntervalMs: 0
            offsetFlushTimeoutMs: 0
            producerBatchSize: 0
            producerBufferMemory: 0
            producerCompressionType: string
            producerLingerMs: 0
            producerMaxRequestSize: 0
            scheduledRebalanceMaxDelayMs: 0
            sessionTimeoutMs: 0
        kafkaConnectSecretProviders:
            - aws:
                accessKey: string
                authMethod: string
                region: string
                secretKey: string
              name: string
              vault:
                address: string
                authMethod: string
                engineVersion: 0
                prefixPathDepth: 0
                token: string
        kafkaRest: false
        kafkaRestAuthorization: false
        kafkaRestConfig:
            consumerEnableAutoCommit: false
            consumerIdleDisconnectTimeout: 0
            consumerRequestMaxBytes: 0
            consumerRequestTimeoutMs: 0
            nameStrategy: string
            nameStrategyValidation: false
            producerAcks: string
            producerCompressionType: string
            producerLingerMs: 0
            producerMaxRequestSize: 0
            simpleconsumerPoolSizeMax: 0
        kafkaSaslMechanisms:
            plain: false
            scramSha256: false
            scramSha512: false
        kafkaVersion: string
        letsencryptSaslPrivatelink: false
        privateAccess:
            kafka: false
            kafkaConnect: false
            kafkaRest: false
            prometheus: false
            schemaRegistry: false
        privatelinkAccess:
            jolokia: false
            kafka: false
            kafkaConnect: false
            kafkaRest: false
            prometheus: false
            schemaRegistry: false
        publicAccess:
            kafka: false
            kafkaConnect: false
            kafkaRest: false
            prometheus: false
            schemaRegistry: false
        schemaRegistry: false
        schemaRegistryConfig:
            leaderEligibility: false
            retriableErrorsSilenced: false
            schemaReaderStrictMode: false
            topicName: string
        serviceLog: false
        singleZone:
            enabled: false
        staticIps: false
        tieredStorage:
            enabled: false
    kafkas:
        - accessCert: string
          accessKey: string
          connectUri: string
          restUri: string
          schemaRegistryUri: string
          uris:
            - string
    maintenanceWindowDow: string
    maintenanceWindowTime: string
    plan: string
    project: string
    projectVpcId: string
    serviceIntegrations:
        - integrationType: string
          sourceServiceName: string
    serviceName: string
    staticIps:
        - string
    tags:
        - key: string
          value: string
    techEmails:
        - email: string
    terminationProtection: false

Kafka Resource Properties

To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.

Inputs

In Python, inputs that are objects can be passed either as argument classes or as dictionary literals.
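
For example, the nested kafka_user_config input can be written either way. A short illustrative sketch with placeholder values, using the generated argument classes (KafkaKafkaUserConfigArgs as in the constructor overload above, and its nested KafkaKafkaUserConfigKafkaArgs):

import pulumi_aiven as aiven

# Option 1: typed argument classes
user_config = aiven.KafkaKafkaUserConfigArgs(
    kafka_rest=True,
    kafka=aiven.KafkaKafkaUserConfigKafkaArgs(
        log_retention_bytes=1000000000,
    ),
)

# Option 2: equivalent dictionary literal with snake_case keys
user_config = {
    "kafka_rest": True,
    "kafka": {
        "log_retention_bytes": 1000000000,
    },
}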

The Kafka resource accepts the following input properties:

Plan This property is required. string
Defines what kind of computing resources are allocated for the service. It can be changed after creation, though there are some restrictions when switching to a smaller plan: the new plan must have enough disk space to store all current data, and switching to a plan with fewer nodes might not be supported. The basic plan names are hobbyist, startup-x, business-x and premium-x, where x is (roughly) the amount of memory on each node (other attributes such as the number of CPUs and the amount of disk space also vary, but the naming is based on memory). The available options can be seen on the Aiven pricing page.
Project
This property is required.
Changes to this property will trigger replacement.
string
The name of the project this resource belongs to. To set up proper dependencies, refer to this variable as a reference (see the sketch after this property list). Changing this property forces recreation of the resource.
ServiceName
This property is required.
Changes to this property will trigger replacement.
string
Specifies the actual name of the service. The name cannot be changed later without destroying and re-creating the service, so pick a name based on the intended use of the service rather than its current attributes.
AdditionalDiskSpace string
Add disk storage in increments of 30 GiB to scale your service. The maximum value depends on the service type and cloud provider. Removing additional storage causes the service nodes to go through a rolling restart, and there might be a short downtime for services without an autoscaler integration or high availability capabilities. The field can be safely removed when autoscaler is enabled without causing any changes.
CloudName string
The cloud provider and region the service is hosted in. The format is provider-region, for example: google-europe-west1. The available cloud regions can differ per project and service. Changing this value migrates the service to another cloud provider or region. The migration runs in the background and includes a DNS update to redirect traffic to the new region. Most services experience no downtime, but some databases may have a brief interruption during DNS propagation.
DefaultAcl Changes to this property will trigger replacement. bool
Create a default wildcard Kafka ACL.
DiskSpace string
Service disk space. Possible values depend on the service type, the cloud provider and the project. Reducing this value will result in the service rebalancing.

Deprecated: This will be removed in v5.0.0. Please use additional_disk_space to specify the space to be added to the default disk_space defined by the plan.

Kafkas List<KafkaKafka>
Kafka server connection details.
KafkaUserConfig KafkaKafkaUserConfig
Kafka user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later.
Karapace bool
Switch the service to use Karapace for schema registry and REST proxy.

Deprecated: Usage of this field is discouraged.

MaintenanceWindowDow string
Day of week when maintenance operations should be performed. One of monday, tuesday, wednesday, etc.
MaintenanceWindowTime string
Time of day when maintenance operations should be performed. UTC time in HH:mm:ss format.
ProjectVpcId string
Specifies the VPC the service should run in. If the value is not set, the service is not run inside a VPC. When set, the value should be given as a reference to set up dependencies correctly, and the VPC must be in the same cloud and region as the service itself. A service can be freely moved to and from a VPC after creation, but doing so triggers a migration to new servers, so the operation can take a significant amount of time to complete if the service has a lot of data.
ServiceIntegrations List<KafkaServiceIntegration>
Service integrations to specify when creating a service. Not applied after initial service creation
StaticIps List<string>
Static IPs that are going to be associated with this service. Please assign a value using the 'toset' function. Once a static IP resource is in the 'assigned' state, it cannot be unbound from the node again.
Tags List<KafkaTag>
Tags are key-value pairs that allow you to categorize services.
TechEmails List<KafkaTechEmail>
The email addresses for service contacts, who will receive important alerts and updates about this service. You can also set email contacts at the project level.
TerminationProtection bool
Prevents the service from being deleted. It is recommended to set this to true for all production services to prevent unintentional service deletion. This does not shield against deleting databases or topics, but for services with backups, much of the content can at least be restored from backup in case of accidental deletion.
Plan This property is required. string
Defines what kind of computing resources are allocated for the service. It can be changed after creation, though there are some restrictions when moving to a smaller plan: the new plan must have enough disk space to store all current data, and switching to a plan with fewer nodes might not be supported. The basic plan names are hobbyist, startup-x, business-x and premium-x, where x is (roughly) the amount of memory on each node (other attributes such as the number of CPUs and the amount of disk space also vary, but the naming is based on memory). The available options can be seen on the Aiven pricing page.
Project
This property is required.
Changes to this property will trigger replacement.
string
The name of the project this resource belongs to. To set up proper dependencies, pass this value as a reference rather than a plain string. Changing this property forces recreation of the resource.
ServiceName
This property is required.
Changes to this property will trigger replacement.
string
Specifies the actual name of the service. The name cannot be changed later without destroying and re-creating the service, so the name should be picked based on intended service usage rather than current attributes.
AdditionalDiskSpace string
Add disk storage in increments of 30 GiB to scale your service. The maximum value depends on the service type and cloud provider. Removing additional storage causes the service nodes to go through a rolling restart, and there might be a short downtime for services without an autoscaler integration or high availability capabilities. The field can be safely removed when autoscaler is enabled without causing any changes.
CloudName string
The cloud provider and region the service is hosted in. The format is provider-region, for example: google-europe-west1. The available cloud regions can differ per project and service. Changing this value migrates the service to another cloud provider or region. The migration runs in the background and includes a DNS update to redirect traffic to the new region. Most services experience no downtime, but some databases may have a brief interruption during DNS propagation.
DefaultAcl Changes to this property will trigger replacement. bool
Create a default wildcard Kafka ACL.
DiskSpace string
Service disk space. Possible values depend on the service type, the cloud provider and the project. Note that reducing the disk space will result in the service rebalancing.

Deprecated: This will be removed in v5.0.0. Please use additional_disk_space to specify the space to be added to the default disk_space defined by the plan.

KafkaUserConfig KafkaKafkaUserConfigArgs
Kafka user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
Kafkas []KafkaKafkaArgs
Kafka server connection details.
Karapace bool
Switch the service to use Karapace for schema registry and REST proxy.

Deprecated: Usage of this field is discouraged.

MaintenanceWindowDow string
Day of week when maintenance operations should be performed. One of monday, tuesday, wednesday, etc.
MaintenanceWindowTime string
Time of day when maintenance operations should be performed. UTC time in HH:mm:ss format.
ProjectVpcId string
Specifies the VPC the service should run in. If the value is not set, the service is not run inside a VPC. When set, the value should be given as a reference to set up dependencies correctly, and the VPC must be in the same cloud and region as the service itself. The service can be freely moved to and from the project VPC after creation, but doing so triggers a migration to new servers, so the operation can take a significant amount of time to complete if the service has a lot of data.
ServiceIntegrations []KafkaServiceIntegrationArgs
Service integrations to specify when creating a service. Not applied after initial service creation
StaticIps []string
Static IPs that are going to be associated with this service. Please assign a value using the 'toset' function. Once a static IP resource is in the 'assigned' state, it cannot be unbound from the node again.
Tags []KafkaTagArgs
Tags are key-value pairs that allow you to categorize services.
TechEmails []KafkaTechEmailArgs
The email addresses for service contacts, who will receive important alerts and updates about this service. You can also set email contacts at the project level.
TerminationProtection bool
Prevents the service from being deleted. It is recommended to set this to true for all production services to prevent unintentional service deletion. This does not shield against deleting databases or topics, but for services with backups, much of the content can at least be restored from backup in case of accidental deletion.
plan This property is required. String
Defines what kind of computing resources are allocated for the service. It can be changed after creation, though there are some restrictions when moving to a smaller plan: the new plan must have enough disk space to store all current data, and switching to a plan with fewer nodes might not be supported. The basic plan names are hobbyist, startup-x, business-x and premium-x, where x is (roughly) the amount of memory on each node (other attributes such as the number of CPUs and the amount of disk space also vary, but the naming is based on memory). The available options can be seen on the Aiven pricing page.
project
This property is required.
Changes to this property will trigger replacement.
String
The name of the project this resource belongs to. To set up proper dependencies, pass this value as a reference rather than a plain string. Changing this property forces recreation of the resource.
serviceName
This property is required.
Changes to this property will trigger replacement.
String
Specifies the actual name of the service. The name cannot be changed later without destroying and re-creating the service, so the name should be picked based on intended service usage rather than current attributes.
additionalDiskSpace String
Add disk storage in increments of 30 GiB to scale your service. The maximum value depends on the service type and cloud provider. Removing additional storage causes the service nodes to go through a rolling restart, and there might be a short downtime for services without an autoscaler integration or high availability capabilities. The field can be safely removed when autoscaler is enabled without causing any changes.
cloudName String
The cloud provider and region the service is hosted in. The format is provider-region, for example: google-europe-west1. The available cloud regions can differ per project and service. Changing this value migrates the service to another cloud provider or region. The migration runs in the background and includes a DNS update to redirect traffic to the new region. Most services experience no downtime, but some databases may have a brief interruption during DNS propagation.
defaultAcl Changes to this property will trigger replacement. Boolean
Create a default wildcard Kafka ACL.
diskSpace String
Service disk space. Possible values depend on the service type, the cloud provider and the project. Note that reducing the disk space will result in the service rebalancing.

Deprecated: This will be removed in v5.0.0. Please use additional_disk_space to specify the space to be added to the default disk_space defined by the plan.

kafkaUserConfig KafkaKafkaUserConfig
Kafka user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
kafkas List<KafkaKafka>
Kafka server connection details.
karapace Boolean
Switch the service to use Karapace for schema registry and REST proxy.

Deprecated: Usage of this field is discouraged.

maintenanceWindowDow String
Day of week when maintenance operations should be performed. One of monday, tuesday, wednesday, etc.
maintenanceWindowTime String
Time of day when maintenance operations should be performed. UTC time in HH:mm:ss format.
projectVpcId String
Specifies the VPC the service should run in. If the value is not set, the service is not run inside a VPC. When set, the value should be given as a reference to set up dependencies correctly, and the VPC must be in the same cloud and region as the service itself. The service can be freely moved to and from the project VPC after creation, but doing so triggers a migration to new servers, so the operation can take a significant amount of time to complete if the service has a lot of data.
serviceIntegrations List<KafkaServiceIntegration>
Service integrations to specify when creating a service. Not applied after initial service creation
staticIps List<String>
Static IPs that are going to be associated with this service. Please assign a value using the 'toset' function. Once a static IP resource is in the 'assigned' state, it cannot be unbound from the node again.
tags List<KafkaTag>
Tags are key-value pairs that allow you to categorize services.
techEmails List<KafkaTechEmail>
The email addresses for service contacts, who will receive important alerts and updates about this service. You can also set email contacts at the project level.
terminationProtection Boolean
Prevents the service from being deleted. It is recommended to set this to true for all production services to prevent unintentional service deletion. This does not shield against deleting databases or topics, but for services with backups, much of the content can at least be restored from backup in case of accidental deletion.
plan This property is required. string
Defines what kind of computing resources are allocated for the service. It can be changed after creation, though there are some restrictions when moving to a smaller plan: the new plan must have enough disk space to store all current data, and switching to a plan with fewer nodes might not be supported. The basic plan names are hobbyist, startup-x, business-x and premium-x, where x is (roughly) the amount of memory on each node (other attributes such as the number of CPUs and the amount of disk space also vary, but the naming is based on memory). The available options can be seen on the Aiven pricing page.
project
This property is required.
Changes to this property will trigger replacement.
string
The name of the project this resource belongs to. To set up proper dependencies, pass this value as a reference rather than a plain string. Changing this property forces recreation of the resource.
serviceName
This property is required.
Changes to this property will trigger replacement.
string
Specifies the actual name of the service. The name cannot be changed later without destroying and re-creating the service, so the name should be picked based on intended service usage rather than current attributes.
additionalDiskSpace string
Add disk storage in increments of 30 GiB to scale your service. The maximum value depends on the service type and cloud provider. Removing additional storage causes the service nodes to go through a rolling restart, and there might be a short downtime for services without an autoscaler integration or high availability capabilities. The field can be safely removed when autoscaler is enabled without causing any changes.
cloudName string
The cloud provider and region the service is hosted in. The format is provider-region, for example: google-europe-west1. The available cloud regions can differ per project and service. Changing this value migrates the service to another cloud provider or region. The migration runs in the background and includes a DNS update to redirect traffic to the new region. Most services experience no downtime, but some databases may have a brief interruption during DNS propagation.
defaultAcl Changes to this property will trigger replacement. boolean
Create a default wildcard Kafka ACL.
diskSpace string
Service disk space. Possible values depend on the service type, the cloud provider and the project. Note that reducing the disk space will result in the service rebalancing.

Deprecated: This will be removed in v5.0.0. Please use additional_disk_space to specify the space to be added to the default disk_space defined by the plan.

kafkaUserConfig KafkaKafkaUserConfig
Kafka user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
kafkas KafkaKafka[]
Kafka server connection details.
karapace boolean
Switch the service to use Karapace for schema registry and REST proxy.

Deprecated: Usage of this field is discouraged.

maintenanceWindowDow string
Day of week when maintenance operations should be performed. One of monday, tuesday, wednesday, etc.
maintenanceWindowTime string
Time of day when maintenance operations should be performed. UTC time in HH:mm:ss format.
projectVpcId string
Specifies the VPC the service should run in. If the value is not set, the service is not run inside a VPC. When set, the value should be given as a reference to set up dependencies correctly, and the VPC must be in the same cloud and region as the service itself. The service can be freely moved to and from the project VPC after creation, but doing so triggers a migration to new servers, so the operation can take a significant amount of time to complete if the service has a lot of data.
serviceIntegrations KafkaServiceIntegration[]
Service integrations to specify when creating a service. Not applied after initial service creation
staticIps string[]
Static IPs that are going to be associated with this service. Please assign a value using the 'toset' function. Once a static IP resource is in the 'assigned' state, it cannot be unbound from the node again.
tags KafkaTag[]
Tags are key-value pairs that allow you to categorize services.
techEmails KafkaTechEmail[]
The email addresses for service contacts, who will receive important alerts and updates about this service. You can also set email contacts at the project level.
terminationProtection boolean
Prevents the service from being deleted. It is recommended to set this to true for all production services to prevent unintentional service deletion. This does not shield against deleting databases or topics, but for services with backups, much of the content can at least be restored from backup in case of accidental deletion.
plan This property is required. str
Defines what kind of computing resources are allocated for the service. It can be changed after creation, though there are some restrictions when moving to a smaller plan: the new plan must have enough disk space to store all current data, and switching to a plan with fewer nodes might not be supported. The basic plan names are hobbyist, startup-x, business-x and premium-x, where x is (roughly) the amount of memory on each node (other attributes such as the number of CPUs and the amount of disk space also vary, but the naming is based on memory). The available options can be seen on the Aiven pricing page.
project
This property is required.
Changes to this property will trigger replacement.
str
The name of the project this resource belongs to. To set up proper dependencies, pass this value as a reference rather than a plain string. Changing this property forces recreation of the resource.
service_name
This property is required.
Changes to this property will trigger replacement.
str
Specifies the actual name of the service. The name cannot be changed later without destroying and re-creating the service, so the name should be picked based on intended service usage rather than current attributes.
additional_disk_space str
Add disk storage in increments of 30 GiB to scale your service. The maximum value depends on the service type and cloud provider. Removing additional storage causes the service nodes to go through a rolling restart, and there might be a short downtime for services without an autoscaler integration or high availability capabilities. The field can be safely removed when autoscaler is enabled without causing any changes.
cloud_name str
The cloud provider and region the service is hosted in. The format is provider-region, for example: google-europe-west1. The available cloud regions can differ per project and service. Changing this value migrates the service to another cloud provider or region. The migration runs in the background and includes a DNS update to redirect traffic to the new region. Most services experience no downtime, but some databases may have a brief interruption during DNS propagation.
default_acl Changes to this property will trigger replacement. bool
Create a default wildcard Kafka ACL.
disk_space str
Service disk space. Possible values depend on the service type, the cloud provider and the project. Note that reducing the disk space will result in the service rebalancing.

Deprecated: This will be removed in v5.0.0. Please use additional_disk_space to specify the space to be added to the default disk_space defined by the plan.

kafka_user_config KafkaKafkaUserConfigArgs
Kafka user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
kafkas Sequence[KafkaKafkaArgs]
Kafka server connection details.
karapace bool
Switch the service to use Karapace for schema registry and REST proxy.

Deprecated: Usage of this field is discouraged.

maintenance_window_dow str
Day of week when maintenance operations should be performed. One of monday, tuesday, wednesday, etc.
maintenance_window_time str
Time of day when maintenance operations should be performed. UTC time in HH:mm:ss format.
project_vpc_id str
Specifies the VPC the service should run in. If the value is not set, the service is not run inside a VPC. When set, the value should be given as a reference to set up dependencies correctly, and the VPC must be in the same cloud and region as the service itself. The service can be freely moved to and from the project VPC after creation, but doing so triggers a migration to new servers, so the operation can take a significant amount of time to complete if the service has a lot of data.
service_integrations Sequence[KafkaServiceIntegrationArgs]
Service integrations to specify when creating a service. Not applied after initial service creation
static_ips Sequence[str]
Static IPs that are going to be associated with this service. Please assign a value using the 'toset' function. Once a static IP resource is in the 'assigned' state, it cannot be unbound from the node again.
tags Sequence[KafkaTagArgs]
Tags are key-value pairs that allow you to categorize services.
tech_emails Sequence[KafkaTechEmailArgs]
The email addresses for service contacts, who will receive important alerts and updates about this service. You can also set email contacts at the project level.
termination_protection bool
Prevents the service from being deleted. It is recommended to set this to true for all production services to prevent unintentional service deletion. This does not shield against deleting databases or topics, but for services with backups, much of the content can at least be restored from backup in case of accidental deletion.
plan This property is required. String
Defines what kind of computing resources are allocated for the service. It can be changed after creation, though there are some restrictions when moving to a smaller plan: the new plan must have enough disk space to store all current data, and switching to a plan with fewer nodes might not be supported. The basic plan names are hobbyist, startup-x, business-x and premium-x, where x is (roughly) the amount of memory on each node (other attributes such as the number of CPUs and the amount of disk space also vary, but the naming is based on memory). The available options can be seen on the Aiven pricing page.
project
This property is required.
Changes to this property will trigger replacement.
String
The name of the project this resource belongs to. To set up proper dependencies, pass this value as a reference rather than a plain string. Changing this property forces recreation of the resource.
serviceName
This property is required.
Changes to this property will trigger replacement.
String
Specifies the actual name of the service. The name cannot be changed later without destroying and re-creating the service, so the name should be picked based on intended service usage rather than current attributes.
additionalDiskSpace String
Add disk storage in increments of 30 GiB to scale your service. The maximum value depends on the service type and cloud provider. Removing additional storage causes the service nodes to go through a rolling restart, and there might be a short downtime for services without an autoscaler integration or high availability capabilities. The field can be safely removed when autoscaler is enabled without causing any changes.
cloudName String
The cloud provider and region the service is hosted in. The format is provider-region, for example: google-europe-west1. The available cloud regions can differ per project and service. Changing this value migrates the service to another cloud provider or region. The migration runs in the background and includes a DNS update to redirect traffic to the new region. Most services experience no downtime, but some databases may have a brief interruption during DNS propagation.
defaultAcl Changes to this property will trigger replacement. Boolean
Create a default wildcard Kafka ACL.
diskSpace String
Service disk space. Possible values depend on the service type, the cloud provider and the project. Note that reducing the disk space will result in the service rebalancing.

Deprecated: This will be removed in v5.0.0. Please use additional_disk_space to specify the space to be added to the default disk_space defined by the plan.

kafkaUserConfig Property Map
Kafka user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
kafkas List<Property Map>
Kafka server connection details.
karapace Boolean
Switch the service to use Karapace for schema registry and REST proxy.

Deprecated: Usage of this field is discouraged.

maintenanceWindowDow String
Day of week when maintenance operations should be performed. One of monday, tuesday, wednesday, etc.
maintenanceWindowTime String
Time of day when maintenance operations should be performed. UTC time in HH:mm:ss format.
projectVpcId String
Specifies the VPC the service should run in. If the value is not set, the service is not run inside a VPC. When set, the value should be given as a reference to set up dependencies correctly, and the VPC must be in the same cloud and region as the service itself. The service can be freely moved to and from the project VPC after creation, but doing so triggers a migration to new servers, so the operation can take a significant amount of time to complete if the service has a lot of data.
serviceIntegrations List<Property Map>
Service integrations to specify when creating a service. Not applied after initial service creation
staticIps List<String>
Static IPs that are going to be associated with this service. Please assign a value using the 'toset' function. Once a static IP resource is in the 'assigned' state, it cannot be unbound from the node again.
tags List<Property Map>
Tags are key-value pairs that allow you to categorize services.
techEmails List<Property Map>
The email addresses for service contacts, who will receive important alerts and updates about this service. You can also set email contacts at the project level.
terminationProtection Boolean
Prevents the service from being deleted. It is recommended to set this to true for all production services to prevent unintentional service deletion. This does not shield against deleting databases or topics, but for services with backups, much of the content can at least be restored from backup in case of accidental deletion.
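To show how several of these optional inputs fit together, here is a minimal TypeScript sketch that extends the basic example with a maintenance window, tags, additional disk space and termination protection. The project name, service name, plan and the "30GiB" disk value are placeholders and assumptions, not defaults; adjust them for your own account.

import * as aiven from "@pulumi/aiven";

// Minimal sketch combining several optional inputs described above.
// "my-project", the plan and the disk value are placeholders, not defaults.
const tunedKafka = new aiven.Kafka("tuned_kafka", {
    project: "my-project",
    cloudName: "google-europe-west1",
    plan: "business-4",
    serviceName: "example-kafka-tuned",
    maintenanceWindowDow: "sunday",        // one of monday, tuesday, wednesday, etc.
    maintenanceWindowTime: "02:00:00",     // UTC time in HH:mm:ss format
    additionalDiskSpace: "30GiB",          // assumed format; added in 30 GiB increments
    terminationProtection: true,           // prevents accidental service deletion
    tags: [{ key: "env", value: "staging" }],
});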

Outputs

All input properties are implicitly available as output properties. Additionally, the Kafka resource produces the following output properties:

Components List<KafkaComponent>
Service component information objects
DiskSpaceCap string
The maximum disk space of the service. Possible values depend on the service type, the cloud provider and the project.
DiskSpaceDefault string
The default disk space of the service. Possible values depend on the service type, the cloud provider and the project. It's also the minimum value for disk_space.
DiskSpaceStep string
The default disk space step of the service. Possible values depend on the service type, the cloud provider and the project. disk_space needs to increase from disk_space_default in increments of this size.
DiskSpaceUsed string
Disk space that the service is currently using.

Deprecated: This will be removed in v5.0.0. Please use additional_disk_space to specify the space to be added to the default disk_space defined by the plan.

Id string
The provider-assigned unique ID for this managed resource.
ServiceHost string
The hostname of the service.
ServicePassword string
Password used for connecting to the service, if applicable
ServicePort int
The port of the service
ServiceType string
Aiven internal service type code
ServiceUri string
URI for connecting to the service. Service specific info is under "kafka", "pg", etc.
ServiceUsername string
Username used for connecting to the service, if applicable
State string
Components []KafkaComponent
Service component information objects
DiskSpaceCap string
The maximum disk space of the service. Possible values depend on the service type, the cloud provider and the project.
DiskSpaceDefault string
The default disk space of the service. Possible values depend on the service type, the cloud provider and the project. It's also the minimum value for disk_space.
DiskSpaceStep string
The default disk space step of the service. Possible values depend on the service type, the cloud provider and the project. disk_space needs to increase from disk_space_default in increments of this size.
DiskSpaceUsed string
Disk space that the service is currently using.

Deprecated: This will be removed in v5.0.0. Please use additional_disk_space to specify the space to be added to the default disk_space defined by the plan.

Id string
The provider-assigned unique ID for this managed resource.
ServiceHost string
The hostname of the service.
ServicePassword string
Password used for connecting to the service, if applicable
ServicePort int
The port of the service
ServiceType string
Aiven internal service type code
ServiceUri string
URI for connecting to the service. Service specific info is under "kafka", "pg", etc.
ServiceUsername string
Username used for connecting to the service, if applicable
State string
components List<KafkaComponent>
Service component information objects
diskSpaceCap String
The maximum disk space of the service. Possible values depend on the service type, the cloud provider and the project.
diskSpaceDefault String
The default disk space of the service. Possible values depend on the service type, the cloud provider and the project. It's also the minimum value for disk_space.
diskSpaceStep String
The default disk space step of the service. Possible values depend on the service type, the cloud provider and the project. disk_space needs to increase from disk_space_default in increments of this size.
diskSpaceUsed String
Disk space that the service is currently using.

Deprecated: This will be removed in v5.0.0. Please use additional_disk_space to specify the space to be added to the default disk_space defined by the plan.

id String
The provider-assigned unique ID for this managed resource.
serviceHost String
The hostname of the service.
servicePassword String
Password used for connecting to the service, if applicable
servicePort Integer
The port of the service
serviceType String
Aiven internal service type code
serviceUri String
URI for connecting to the service. Service specific info is under "kafka", "pg", etc.
serviceUsername String
Username used for connecting to the service, if applicable
state String
components KafkaComponent[]
Service component information objects
diskSpaceCap string
The maximum disk space of the service. Possible values depend on the service type, the cloud provider and the project.
diskSpaceDefault string
The default disk space of the service. Possible values depend on the service type, the cloud provider and the project. It's also the minimum value for disk_space.
diskSpaceStep string
The default disk space step of the service. Possible values depend on the service type, the cloud provider and the project. disk_space needs to increase from disk_space_default in increments of this size.
diskSpaceUsed string
Disk space that the service is currently using.

Deprecated: This will be removed in v5.0.0. Please use additional_disk_space to specify the space to be added to the default disk_space defined by the plan.

id string
The provider-assigned unique ID for this managed resource.
serviceHost string
The hostname of the service.
servicePassword string
Password used for connecting to the service, if applicable
servicePort number
The port of the service
serviceType string
Aiven internal service type code
serviceUri string
URI for connecting to the service. Service specific info is under "kafka", "pg", etc.
serviceUsername string
Username used for connecting to the service, if applicable
state string
components Sequence[KafkaComponent]
Service component information objects
disk_space_cap str
The maximum disk space of the service. Possible values depend on the service type, the cloud provider and the project.
disk_space_default str
The default disk space of the service. Possible values depend on the service type, the cloud provider and the project. It's also the minimum value for disk_space.
disk_space_step str
The default disk space step of the service. Possible values depend on the service type, the cloud provider and the project. disk_space needs to increase from disk_space_default in increments of this size.
disk_space_used str
Disk space that the service is currently using.

Deprecated: This will be removed in v5.0.0. Please use additional_disk_space to specify the space to be added to the default disk_space defined by the plan.

id str
The provider-assigned unique ID for this managed resource.
service_host str
The hostname of the service.
service_password str
Password used for connecting to the service, if applicable
service_port int
The port of the service
service_type str
Aiven internal service type code
service_uri str
URI for connecting to the service. Service specific info is under "kafka", "pg", etc.
service_username str
Username used for connecting to the service, if applicable
state str
components List<Property Map>
Service component information objects
diskSpaceCap String
The maximum disk space of the service. Possible values depend on the service type, the cloud provider and the project.
diskSpaceDefault String
The default disk space of the service. Possible values depend on the service type, the cloud provider and the project. It's also the minimum value for disk_space.
diskSpaceStep String
The default disk space step of the service. Possible values depend on the service type, the cloud provider and the project. disk_space needs to increase from disk_space_default in increments of this size.
diskSpaceUsed String
Disk space that the service is currently using.

Deprecated: This will be removed in v5.0.0. Please use additional_disk_space to specify the space to be added to the default disk_space defined by the plan.

id String
The provider-assigned unique ID for this managed resource.
serviceHost String
The hostname of the service.
servicePassword String
Password used for connecting to the service, if applicable
servicePort Number
The port of the service
serviceType String
Aiven internal service type code
serviceUri String
URI for connecting to the service. Service specific info is under "kafka", "pg", etc.
serviceUsername String
Username used for connecting to the service, if applicable
state String
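Since all inputs are also available as outputs, connection details can be read directly from the resource and exported from the stack. A brief sketch, reusing the exampleKafka resource declared earlier; the export names are arbitrary, and serviceUri contains credentials, so treat the corresponding stack output as sensitive.

// Expose connection details of the Kafka service declared earlier.
export const kafkaServiceUri = exampleKafka.serviceUri;   // contains credentials
export const kafkaHost = exampleKafka.serviceHost;
export const kafkaPort = exampleKafka.servicePort;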

Look up Existing Kafka Resource

Get an existing Kafka resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.

public static get(name: string, id: Input<ID>, state?: KafkaState, opts?: CustomResourceOptions): Kafka
@staticmethod
def get(resource_name: str,
        id: str,
        opts: Optional[ResourceOptions] = None,
        additional_disk_space: Optional[str] = None,
        cloud_name: Optional[str] = None,
        components: Optional[Sequence[KafkaComponentArgs]] = None,
        default_acl: Optional[bool] = None,
        disk_space: Optional[str] = None,
        disk_space_cap: Optional[str] = None,
        disk_space_default: Optional[str] = None,
        disk_space_step: Optional[str] = None,
        disk_space_used: Optional[str] = None,
        kafka_user_config: Optional[KafkaKafkaUserConfigArgs] = None,
        kafkas: Optional[Sequence[KafkaKafkaArgs]] = None,
        karapace: Optional[bool] = None,
        maintenance_window_dow: Optional[str] = None,
        maintenance_window_time: Optional[str] = None,
        plan: Optional[str] = None,
        project: Optional[str] = None,
        project_vpc_id: Optional[str] = None,
        service_host: Optional[str] = None,
        service_integrations: Optional[Sequence[KafkaServiceIntegrationArgs]] = None,
        service_name: Optional[str] = None,
        service_password: Optional[str] = None,
        service_port: Optional[int] = None,
        service_type: Optional[str] = None,
        service_uri: Optional[str] = None,
        service_username: Optional[str] = None,
        state: Optional[str] = None,
        static_ips: Optional[Sequence[str]] = None,
        tags: Optional[Sequence[KafkaTagArgs]] = None,
        tech_emails: Optional[Sequence[KafkaTechEmailArgs]] = None,
        termination_protection: Optional[bool] = None) -> Kafka
func GetKafka(ctx *Context, name string, id IDInput, state *KafkaState, opts ...ResourceOption) (*Kafka, error)
public static Kafka Get(string name, Input<string> id, KafkaState? state, CustomResourceOptions? opts = null)
public static Kafka get(String name, Output<String> id, KafkaState state, CustomResourceOptions options)
resources:
  _:
    type: aiven:Kafka
    get:
      id: ${id}
name This property is required.
The unique name of the resulting resource.
id This property is required.
The unique provider ID of the resource to lookup.
state
Any extra arguments used during the lookup.
opts
A bag of options that control this resource's behavior.
resource_name This property is required.
The unique name of the resulting resource.
id This property is required.
The unique provider ID of the resource to lookup.
name This property is required.
The unique name of the resulting resource.
id This property is required.
The unique provider ID of the resource to lookup.
state
Any extra arguments used during the lookup.
opts
A bag of options that control this resource's behavior.
name This property is required.
The unique name of the resulting resource.
id This property is required.
The unique provider ID of the resource to lookup.
state
Any extra arguments used during the lookup.
opts
A bag of options that control this resource's behavior.
name This property is required.
The unique name of the resulting resource.
id This property is required.
The unique provider ID of the resource to lookup.
state
Any extra arguments used during the lookup.
opts
A bag of options that control this resource's behavior.
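For instance, an existing service can be looked up from a TypeScript program with the static get function shown above. This sketch assumes the provider's project/service_name ID format; "my-project" and "example-kafka" are placeholders for a real project and service.

import * as aiven from "@pulumi/aiven";

// Look up an existing Kafka service by name and provider ID.
// The "project/service_name" ID format and the names below are assumptions.
const existing = aiven.Kafka.get("existing-kafka", "my-project/example-kafka");

// Outputs of the looked-up resource behave like any other resource outputs.
export const existingKafkaHost = existing.serviceHost;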
The following state arguments are supported:
AdditionalDiskSpace string
Add disk storage in increments of 30 GiB to scale your service. The maximum value depends on the service type and cloud provider. Removing additional storage causes the service nodes to go through a rolling restart, and there might be a short downtime for services without an autoscaler integration or high availability capabilities. The field can be safely removed when autoscaler is enabled without causing any changes.
CloudName string
The cloud provider and region the service is hosted in. The format is provider-region, for example: google-europe-west1. The available cloud regions can differ per project and service. Changing this value migrates the service to another cloud provider or region. The migration runs in the background and includes a DNS update to redirect traffic to the new region. Most services experience no downtime, but some databases may have a brief interruption during DNS propagation.
Components List<KafkaComponent>
Service component information objects
DefaultAcl Changes to this property will trigger replacement. bool
Create a default wildcard Kafka ACL.
DiskSpace string
Service disk space. Possible values depend on the service type, the cloud provider and the project. Note that reducing the disk space will result in the service rebalancing.

Deprecated: This will be removed in v5.0.0. Please use additional_disk_space to specify the space to be added to the default disk_space defined by the plan.

DiskSpaceCap string
The maximum disk space of the service. Possible values depend on the service type, the cloud provider and the project.
DiskSpaceDefault string
The default disk space of the service. Possible values depend on the service type, the cloud provider and the project. It's also the minimum value for disk_space.
DiskSpaceStep string
The default disk space step of the service. Possible values depend on the service type, the cloud provider and the project. disk_space needs to increase from disk_space_default in increments of this size.
DiskSpaceUsed string
Disk space that the service is currently using.

Deprecated: This will be removed in v5.0.0. Please use additional_disk_space to specify the space to be added to the default disk_space defined by the plan.

KafkaServer List<KafkaKafka>
Kafka server connection details.
KafkaUserConfig KafkaKafkaUserConfig
Kafka user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
Karapace bool
Switch the service to use Karapace for schema registry and REST proxy.

Deprecated: Usage of this field is discouraged.

MaintenanceWindowDow string
Day of week when maintenance operations should be performed. One of monday, tuesday, wednesday, etc.
MaintenanceWindowTime string
Time of day when maintenance operations should be performed. UTC time in HH:mm:ss format.
Plan string
Defines what kind of computing resources are allocated for the service. It can be changed after creation, though there are some restrictions when moving to a smaller plan: the new plan must have enough disk space to store all current data, and switching to a plan with fewer nodes might not be supported. The basic plan names are hobbyist, startup-x, business-x and premium-x, where x is (roughly) the amount of memory on each node (other attributes such as the number of CPUs and the amount of disk space also vary, but the naming is based on memory). The available options can be seen on the Aiven pricing page.
Project Changes to this property will trigger replacement. string
The name of the project this resource belongs to. To set up proper dependencies, pass this value as a reference rather than a plain string. Changing this property forces recreation of the resource.
ProjectVpcId string
Specifies the VPC the service should run in. If the value is not set, the service is not run inside a VPC. When set, the value should be given as a reference to set up dependencies correctly, and the VPC must be in the same cloud and region as the service itself. The service can be freely moved to and from the project VPC after creation, but doing so triggers a migration to new servers, so the operation can take a significant amount of time to complete if the service has a lot of data.
ServiceHost string
The hostname of the service.
ServiceIntegrations List<KafkaServiceIntegration>
Service integrations to specify when creating a service. Not applied after initial service creation
ServiceName Changes to this property will trigger replacement. string
Specifies the actual name of the service. The name cannot be changed later without destroying and re-creating the service, so the name should be picked based on intended service usage rather than current attributes.
ServicePassword string
Password used for connecting to the service, if applicable
ServicePort int
The port of the service
ServiceType string
Aiven internal service type code
ServiceUri string
URI for connecting to the service. Service specific info is under "kafka", "pg", etc.
ServiceUsername string
Username used for connecting to the service, if applicable
State string
StaticIps List<string>
Static IPs that are going to be associated with this service. Please assign a value using the 'toset' function. Once a static IP resource is in the 'assigned' state, it cannot be unbound from the node again.
Tags List<KafkaTag>
Tags are key-value pairs that allow you to categorize services.
TechEmails List<KafkaTechEmail>
The email addresses for service contacts, who will receive important alerts and updates about this service. You can also set email contacts at the project level.
TerminationProtection bool
Prevents the service from being deleted. It is recommended to set this to true for all production services to prevent unintentional service deletion. This does not shield against deleting databases or topics, but for services with backups, much of the content can at least be restored from backup in case of accidental deletion.
AdditionalDiskSpace string
Add disk storage in increments of 30 GiB to scale your service. The maximum value depends on the service type and cloud provider. Removing additional storage causes the service nodes to go through a rolling restart, and there might be a short downtime for services without an autoscaler integration or high availability capabilities. The field can be safely removed when autoscaler is enabled without causing any changes.
CloudName string
The cloud provider and region the service is hosted in. The format is provider-region, for example: google-europe-west1. The available cloud regions can differ per project and service. Changing this value migrates the service to another cloud provider or region. The migration runs in the background and includes a DNS update to redirect traffic to the new region. Most services experience no downtime, but some databases may have a brief interruption during DNS propagation.
Components []KafkaComponentArgs
Service component information objects
DefaultAcl Changes to this property will trigger replacement. bool
Create a default wildcard Kafka ACL.
DiskSpace string
Service disk space. Possible values depend on the service type, the cloud provider and the project. Note that reducing the disk space will result in the service rebalancing.

Deprecated: This will be removed in v5.0.0. Please use additional_disk_space to specify the space to be added to the default disk_space defined by the plan.

DiskSpaceCap string
The maximum disk space of the service. Possible values depend on the service type, the cloud provider and the project.
DiskSpaceDefault string
The default disk space of the service. Possible values depend on the service type, the cloud provider and the project. It's also the minimum value for disk_space.
DiskSpaceStep string
The default disk space step of the service. Possible values depend on the service type, the cloud provider and the project. disk_space needs to increase from disk_space_default in increments of this size.
DiskSpaceUsed string
Disk space that the service is currently using.

Deprecated: This will be removed in v5.0.0. Please use additional_disk_space to specify the space to be added to the default disk_space defined by the plan.

KafkaUserConfig KafkaKafkaUserConfigArgs
Kafka user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
Kafkas []KafkaKafkaArgs
Kafka server connection details.
Karapace bool
Switch the service to use Karapace for schema registry and REST proxy.

Deprecated: Usage of this field is discouraged.

MaintenanceWindowDow string
Day of week when maintenance operations should be performed. One of monday, tuesday, wednesday, etc.
MaintenanceWindowTime string
Time of day when maintenance operations should be performed. UTC time in HH:mm:ss format.
Plan string
Defines what kind of computing resources are allocated for the service. It can be changed after creation, though there are some restrictions when moving to a smaller plan: the new plan must have enough disk space to store all current data, and switching to a plan with fewer nodes might not be supported. The basic plan names are hobbyist, startup-x, business-x and premium-x, where x is (roughly) the amount of memory on each node (other attributes such as the number of CPUs and the amount of disk space also vary, but the naming is based on memory). The available options can be seen on the Aiven pricing page.
Project Changes to this property will trigger replacement. string
The name of the project this resource belongs to. To set up proper dependencies, pass this value as a reference rather than a plain string. Changing this property forces recreation of the resource.
ProjectVpcId string
Specifies the VPC the service should run in. If the value is not set, the service is not run inside a VPC. When set, the value should be given as a reference to set up dependencies correctly, and the VPC must be in the same cloud and region as the service itself. The service can be freely moved to and from the project VPC after creation, but doing so triggers a migration to new servers, so the operation can take a significant amount of time to complete if the service has a lot of data.
ServiceHost string
The hostname of the service.
ServiceIntegrations []KafkaServiceIntegrationArgs
Service integrations to specify when creating a service. Not applied after initial service creation
ServiceName Changes to this property will trigger replacement. string
Specifies the actual name of the service. The name cannot be changed later without destroying and re-creating the service, so the name should be picked based on intended service usage rather than current attributes.
ServicePassword string
Password used for connecting to the service, if applicable
ServicePort int
The port of the service
ServiceType string
Aiven internal service type code
ServiceUri string
URI for connecting to the service. Service specific info is under "kafka", "pg", etc.
ServiceUsername string
Username used for connecting to the service, if applicable
State string
StaticIps []string
Static IPs that are going to be associated with this service. Please assign a value using the 'toset' function. Once a static IP resource is in the 'assigned' state, it cannot be unbound from the node again.
Tags []KafkaTagArgs
Tags are key-value pairs that allow you to categorize services.
TechEmails []KafkaTechEmailArgs
The email addresses for service contacts, who will receive important alerts and updates about this service. You can also set email contacts at the project level.
TerminationProtection bool
Prevents the service from being deleted. It is recommended to set this to true for all production services to prevent unintentional service deletion. This does not shield against deleting databases or topics, but for services with backups, much of the content can at least be restored from backup in case of accidental deletion.
additionalDiskSpace String
Add disk storage in increments of 30 GiB to scale your service. The maximum value depends on the service type and cloud provider. Removing additional storage causes the service nodes to go through a rolling restart, and there might be a short downtime for services without an autoscaler integration or high availability capabilities. The field can be safely removed when autoscaler is enabled without causing any changes.
cloudName String
The cloud provider and region the service is hosted in. The format is provider-region, for example: google-europe-west1. The available cloud regions can differ per project and service. Changing this value migrates the service to another cloud provider or region. The migration runs in the background and includes a DNS update to redirect traffic to the new region. Most services experience no downtime, but some databases may have a brief interruption during DNS propagation.
components List<KafkaComponent>
Service component information objects
defaultAcl Changes to this property will trigger replacement. Boolean
Create a default wildcard Kafka ACL.
diskSpace String
Service disk space. Possible values depend on the service type, the cloud provider and the project. Note that reducing the disk space will result in the service rebalancing.

Deprecated: This will be removed in v5.0.0. Please use additional_disk_space to specify the space to be added to the default disk_space defined by the plan.

diskSpaceCap String
The maximum disk space of the service. Possible values depend on the service type, the cloud provider and the project.
diskSpaceDefault String
The default disk space of the service. Possible values depend on the service type, the cloud provider and the project. It's also the minimum value for disk_space.
diskSpaceStep String
The default disk space step of the service. Possible values depend on the service type, the cloud provider and the project. disk_space needs to increase from disk_space_default in increments of this size.
diskSpaceUsed String
Disk space that the service is currently using.

Deprecated: This will be removed in v5.0.0. Please use additional_disk_space to specify the space to be added to the default disk_space defined by the plan.

kafkaUserConfig KafkaKafkaUserConfig
Kafka user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
kafkas List<KafkaKafka>
Kafka server connection details.
karapace Boolean
Switch the service to use Karapace for schema registry and REST proxy.

Deprecated: Usage of this field is discouraged.

maintenanceWindowDow String
Day of week when maintenance operations should be performed. One of monday, tuesday, wednesday, etc.
maintenanceWindowTime String
Time of day when maintenance operations should be performed. UTC time in HH:mm:ss format.
plan String
Defines what kind of computing resources are allocated for the service. It can be changed after creation, though there are some restrictions when moving to a smaller plan: the new plan must have enough disk space to store all current data, and switching to a plan with fewer nodes might not be supported. The basic plan names are hobbyist, startup-x, business-x and premium-x, where x is (roughly) the amount of memory on each node (other attributes such as the number of CPUs and the amount of disk space also vary, but the naming is based on memory). The available options can be seen on the Aiven pricing page.
project Changes to this property will trigger replacement. String
The name of the project this resource belongs to. To set up proper dependencies, pass this value as a reference rather than a plain string. Changing this property forces recreation of the resource.
projectVpcId String
Specifies the VPC the service should run in. If the value is not set, the service is not run inside a VPC. When set, the value should be given as a reference to set up dependencies correctly, and the VPC must be in the same cloud and region as the service itself. The service can be freely moved to and from the project VPC after creation, but doing so triggers a migration to new servers, so the operation can take a significant amount of time to complete if the service has a lot of data.
serviceHost String
The hostname of the service.
serviceIntegrations List<KafkaServiceIntegration>
Service integrations to specify when creating a service. Not applied after initial service creation
serviceName Changes to this property will trigger replacement. String
Specifies the actual name of the service. The name cannot be changed later without destroying and re-creating the service, so the name should be picked based on intended service usage rather than current attributes.
servicePassword String
Password used for connecting to the service, if applicable
servicePort Integer
The port of the service
serviceType String
Aiven internal service type code
serviceUri String
URI for connecting to the service. Service specific info is under "kafka", "pg", etc.
serviceUsername String
Username used for connecting to the service, if applicable
state String
staticIps List<String>
Static IPs that are going to be associated with this service. Please assign a value using the 'toset' function. Once a static IP resource is in the 'assigned' state, it cannot be unbound from the node again.
tags List<KafkaTag>
Tags are key-value pairs that allow you to categorize services.
techEmails List<KafkaTechEmail>
The email addresses for service contacts, who will receive important alerts and updates about this service. You can also set email contacts at the project level.
terminationProtection Boolean
Prevents the service from being deleted. It is recommended to set this to true for all production services to prevent unintentional service deletion. This does not shield against deleting databases or topics, but for services with backups, much of the content can at least be restored from backup in case of accidental deletion.
additionalDiskSpace string
Add disk storage in increments of 30 GiB to scale your service. The maximum value depends on the service type and cloud provider. Removing additional storage causes the service nodes to go through a rolling restart, and there might be a short downtime for services without an autoscaler integration or high availability capabilities. The field can be safely removed when autoscaler is enabled without causing any changes.
cloudName string
The cloud provider and region the service is hosted in. The format is provider-region, for example: google-europe-west1. The available cloud regions can differ per project and service. Changing this value migrates the service to another cloud provider or region. The migration runs in the background and includes a DNS update to redirect traffic to the new region. Most services experience no downtime, but some databases may have a brief interruption during DNS propagation.
components KafkaComponent[]
Service component information objects
defaultAcl Changes to this property will trigger replacement. boolean
Create a default wildcard Kafka ACL.
diskSpace string
Service disk space. Possible values depend on the service type, the cloud provider and the project. Note that reducing the disk space will result in the service rebalancing.

Deprecated: This will be removed in v5.0.0. Please use additional_disk_space to specify the space to be added to the default disk_space defined by the plan.

diskSpaceCap string
The maximum disk space of the service. Possible values depend on the service type, the cloud provider and the project.
diskSpaceDefault string
The default disk space of the service. Possible values depend on the service type, the cloud provider and the project. It's also the minimum value for disk_space.
diskSpaceStep string
The default disk space step of the service. Possible values depend on the service type, the cloud provider and the project. disk_space needs to increase from disk_space_default in increments of this size.
diskSpaceUsed string
Disk space that the service is currently using.

Deprecated: This will be removed in v5.0.0. Please use additional_disk_space to specify the space to be added to the default disk_space defined by the plan.

kafkaUserConfig KafkaKafkaUserConfig
Kafka user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
kafkas KafkaKafka[]
Kafka server connection details.
karapace boolean
Switch the service to use Karapace for schema registry and REST proxy.

Deprecated: Usage of this field is discouraged.

maintenanceWindowDow string
Day of week when maintenance operations should be performed. One of monday, tuesday, wednesday, etc.
maintenanceWindowTime string
Time of day when maintenance operations should be performed. UTC time in HH:mm:ss format.
plan string
Defines what kind of computing resources are allocated for the service. It can be changed after creation, though there are some restrictions when going to a smaller plan: the new plan must have a sufficient amount of disk space to store all current data, and switching to a plan with fewer nodes might not be supported. The basic plan names are hobbyist, startup-x, business-x and premium-x, where x is (roughly) the amount of memory on each node (other attributes like the number of CPUs and the amount of disk space also vary, but the naming is based on memory). The available options can be seen on the Aiven pricing page.
project Changes to this property will trigger replacement. string
The name of the project this resource belongs to. To set up proper dependencies please refer to this variable as a reference. Changing this property forces recreation of the resource.
projectVpcId string
Specifies the VPC the service should run in. If the value is not set, the service is not run inside a VPC. When set, the value should be given as a reference to set up dependencies correctly, and the VPC must be in the same cloud and region as the service itself. The service can be freely moved to and from a VPC after creation, but doing so triggers a migration to new servers, so the operation can take a significant amount of time to complete if the service has a lot of data.
serviceHost string
The hostname of the service.
serviceIntegrations KafkaServiceIntegration[]
Service integrations to specify when creating a service. Not applied after initial service creation
serviceName Changes to this property will trigger replacement. string
Specifies the actual name of the service. The name cannot be changed later without destroying and re-creating the service, so the name should be picked based on the intended service usage rather than current attributes.
servicePassword string
Password used for connecting to the service, if applicable
servicePort number
The port of the service
serviceType string
Aiven internal service type code
serviceUri string
URI for connecting to the service. Service specific info is under "kafka", "pg", etc.
serviceUsername string
Username used for connecting to the service, if applicable
state string
staticIps string[]
Static IPs that are going to be associated with this service. Please assign a value using the 'toset' function. Once a static IP resource is in the 'assigned' state, it cannot be unbound from the node again.
tags KafkaTag[]
Tags are key-value pairs that allow you to categorize services.
techEmails KafkaTechEmail[]
The email addresses for service contacts, who will receive important alerts and updates about this service. You can also set email contacts at the project level.
terminationProtection boolean
Prevents the service from being deleted. It is recommended to set this to true for all production services to prevent unintentional service deletion. This does not shield against deleting databases or topics, but for services with backups much of the content can at least be restored from backup if it is accidentally deleted.
additional_disk_space str
Add disk storage in increments of 30 GiB to scale your service. The maximum value depends on the service type and cloud provider. Removing additional storage causes the service nodes to go through a rolling restart, and there might be a short downtime for services without an autoscaler integration or high availability capabilities. The field can be safely removed when autoscaler is enabled without causing any changes.
cloud_name str
The cloud provider and region the service is hosted in. The format is provider-region, for example: google-europe-west1. The available cloud regions can differ per project and service. Changing this value migrates the service to another cloud provider or region. The migration runs in the background and includes a DNS update to redirect traffic to the new region. Most services experience no downtime, but some databases may have a brief interruption during DNS propagation.
components Sequence[KafkaComponentArgs]
Service component information objects
default_acl Changes to this property will trigger replacement. bool
Create a default wildcard Kafka ACL.
disk_space str
Service disk space. Possible values depend on the service type, the cloud provider and the project. Reducing the disk space will result in the service rebalancing.

Deprecated: This will be removed in v5.0.0. Please use additional_disk_space to specify the space to be added to the default disk_space defined by the plan.

disk_space_cap str
The maximum disk space of the service. Possible values depend on the service type, the cloud provider and the project.
disk_space_default str
The default disk space of the service. Possible values depend on the service type, the cloud provider and the project. It's also the minimum value for disk_space.
disk_space_step str
The default disk space step of the service. Possible values depend on the service type, the cloud provider and the project. disk_space must be increased from disk_space_default in increments of this size.
disk_space_used str
Disk space that service is currently using

Deprecated: This will be removed in v5.0.0. Please use additional_disk_space to specify the space to be added to the default disk_space defined by the plan.

kafka_user_config KafkaKafkaUserConfigArgs
Kafka user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
kafkas Sequence[KafkaKafkaArgs]
Kafka server connection details.
karapace bool
Switch the service to use Karapace for schema registry and REST proxy.

Deprecated: Usage of this field is discouraged.

maintenance_window_dow str
Day of week when maintenance operations should be performed. One of monday, tuesday, wednesday, etc.
maintenance_window_time str
Time of day when maintenance operations should be performed. UTC time in HH:mm:ss format.
plan str
Defines what kind of computing resources are allocated for the service. It can be changed after creation, though there are some restrictions when going to a smaller plan: the new plan must have a sufficient amount of disk space to store all current data, and switching to a plan with fewer nodes might not be supported. The basic plan names are hobbyist, startup-x, business-x and premium-x, where x is (roughly) the amount of memory on each node (other attributes like the number of CPUs and the amount of disk space also vary, but the naming is based on memory). The available options can be seen on the Aiven pricing page.
project Changes to this property will trigger replacement. str
The name of the project this resource belongs to. To set up proper dependencies please refer to this variable as a reference. Changing this property forces recreation of the resource.
project_vpc_id str
Specifies the VPC the service should run in. If the value is not set, the service is not run inside a VPC. When set, the value should be given as a reference to set up dependencies correctly, and the VPC must be in the same cloud and region as the service itself. The service can be freely moved to and from a VPC after creation, but doing so triggers a migration to new servers, so the operation can take a significant amount of time to complete if the service has a lot of data.
service_host str
The hostname of the service.
service_integrations Sequence[KafkaServiceIntegrationArgs]
Service integrations to specify when creating a service. Not applied after initial service creation
service_name Changes to this property will trigger replacement. str
Specifies the actual name of the service. The name cannot be changed later without destroying and re-creating the service, so the name should be picked based on the intended service usage rather than current attributes.
service_password str
Password used for connecting to the service, if applicable
service_port int
The port of the service
service_type str
Aiven internal service type code
service_uri str
URI for connecting to the service. Service specific info is under "kafka", "pg", etc.
service_username str
Username used for connecting to the service, if applicable
state str
static_ips Sequence[str]
Static IPs that are going to be associated with this service. Please assign a value using the 'toset' function. Once a static IP resource is in the 'assigned' state, it cannot be unbound from the node again.
tags Sequence[KafkaTagArgs]
Tags are key-value pairs that allow you to categorize services.
tech_emails Sequence[KafkaTechEmailArgs]
The email addresses for service contacts, who will receive important alerts and updates about this service. You can also set email contacts at the project level.
termination_protection bool
Prevents the service from being deleted. It is recommended to set this to true for all production services to prevent unintentional service deletion. This does not shield against deleting databases or topics, but for services with backups much of the content can at least be restored from backup if it is accidentally deleted.
additionalDiskSpace String
Add disk storage in increments of 30 GiB to scale your service. The maximum value depends on the service type and cloud provider. Removing additional storage causes the service nodes to go through a rolling restart, and there might be a short downtime for services without an autoscaler integration or high availability capabilities. The field can be safely removed when autoscaler is enabled without causing any changes.
cloudName String
The cloud provider and region the service is hosted in. The format is provider-region, for example: google-europe-west1. The available cloud regions can differ per project and service. Changing this value migrates the service to another cloud provider or region. The migration runs in the background and includes a DNS update to redirect traffic to the new region. Most services experience no downtime, but some databases may have a brief interruption during DNS propagation.
components List<Property Map>
Service component information objects
defaultAcl Changes to this property will trigger replacement. Boolean
Create a default wildcard Kafka ACL.
diskSpace String
Service disk space. Possible values depend on the service type, the cloud provider and the project. Reducing the disk space will result in the service rebalancing.

Deprecated: This will be removed in v5.0.0. Please use additional_disk_space to specify the space to be added to the default disk_space defined by the plan.

diskSpaceCap String
The maximum disk space of the service. Possible values depend on the service type, the cloud provider and the project.
diskSpaceDefault String
The default disk space of the service. Possible values depend on the service type, the cloud provider and the project. It's also the minimum value for disk_space.
diskSpaceStep String
The default disk space step of the service. Possible values depend on the service type, the cloud provider and the project. disk_space must be increased from disk_space_default in increments of this size.
diskSpaceUsed String
Disk space that service is currently using

Deprecated: This will be removed in v5.0.0. Please use additional_disk_space to specify the space to be added to the default disk_space defined by the plan.

kafkaUserConfig Property Map
Kafka user configurable settings. Warning: There's no way to reset advanced configuration options to default. Options that you add cannot be removed later
kafkas List<Property Map>
Kafka server connection details.
karapace Boolean
Switch the service to use Karapace for schema registry and REST proxy.

Deprecated: Usage of this field is discouraged.

maintenanceWindowDow String
Day of week when maintenance operations should be performed. One of monday, tuesday, wednesday, etc.
maintenanceWindowTime String
Time of day when maintenance operations should be performed. UTC time in HH:mm:ss format.
plan String
Defines what kind of computing resources are allocated for the service. It can be changed after creation, though there are some restrictions when going to a smaller plan: the new plan must have a sufficient amount of disk space to store all current data, and switching to a plan with fewer nodes might not be supported. The basic plan names are hobbyist, startup-x, business-x and premium-x, where x is (roughly) the amount of memory on each node (other attributes like the number of CPUs and the amount of disk space also vary, but the naming is based on memory). The available options can be seen on the Aiven pricing page.
project Changes to this property will trigger replacement. String
The name of the project this resource belongs to. To set up proper dependencies please refer to this variable as a reference. Changing this property forces recreation of the resource.
projectVpcId String
Specifies the VPC the service should run in. If the value is not set, the service is not run inside a VPC. When set, the value should be given as a reference to set up dependencies correctly, and the VPC must be in the same cloud and region as the service itself. The service can be freely moved to and from a VPC after creation, but doing so triggers a migration to new servers, so the operation can take a significant amount of time to complete if the service has a lot of data.
serviceHost String
The hostname of the service.
serviceIntegrations List<Property Map>
Service integrations to specify when creating a service. Not applied after initial service creation
serviceName Changes to this property will trigger replacement. String
Specifies the actual name of the service. The name cannot be changed later without destroying and re-creating the service, so the name should be picked based on the intended service usage rather than current attributes.
servicePassword String
Password used for connecting to the service, if applicable
servicePort Number
The port of the service
serviceType String
Aiven internal service type code
serviceUri String
URI for connecting to the service. Service specific info is under "kafka", "pg", etc.
serviceUsername String
Username used for connecting to the service, if applicable
state String
staticIps List<String>
Static IPs that are going to be associated with this service. Please assign a value using the 'toset' function. Once a static IP resource is in the 'assigned' state, it cannot be unbound from the node again.
tags List<Property Map>
Tags are key-value pairs that allow you to categorize services.
techEmails List<Property Map>
The email addresses for service contacts, who will receive important alerts and updates about this service. You can also set email contacts at the project level.
terminationProtection Boolean
Prevents the service from being deleted. It is recommended to set this to true for all production services to prevent unintentional service deletion. This does not shield against deleting databases or topics, but for services with backups much of the content can at least be restored from backup if it is accidentally deleted.
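
As a quick reference for the connection-related outputs listed above, the sketch below reads serviceHost, servicePort and serviceUri from an existing resource. It assumes the exampleKafka resource shown in the examples at the top of this page; serviceUri can embed credentials, so it is exported as a secret.

import * as pulumi from "@pulumi/pulumi";

// Assumes `exampleKafka` is an existing aiven.Kafka resource.
export const kafkaHost = exampleKafka.serviceHost;
export const kafkaPort = exampleKafka.servicePort;
// The service URI may embed credentials, so wrap it as a secret before exporting.
export const kafkaUri = pulumi.secret(exampleKafka.serviceUri);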

Supporting Types

KafkaComponent
, KafkaComponentArgs

Component string
Service component name
ConnectionUri string
Connection info for connecting to the service component. This is a combination of host and port.
Host string
Host name for connecting to the service component
KafkaAuthenticationMethod string
Kafka authentication method. This is a value specific to the 'kafka' service component
KafkaSslCa string
Kafka certificate used. The possible values are letsencrypt and project_ca.
Port int
Port number for connecting to the service component
Route string
Network access route
Ssl bool
Whether the endpoint is encrypted or accepts plaintext. By default, endpoints are always encrypted and this property is only included for service components that may disable encryption.
Usage string
DNS usage name
Component string
Service component name
ConnectionUri string
Connection info for connecting to the service component. This is a combination of host and port.
Host string
Host name for connecting to the service component
KafkaAuthenticationMethod string
Kafka authentication method. This is a value specific to the 'kafka' service component
KafkaSslCa string
Kafka certificate used. The possible values are letsencrypt and project_ca.
Port int
Port number for connecting to the service component
Route string
Network access route
Ssl bool
Whether the endpoint is encrypted or accepts plaintext. By default, endpoints are always encrypted and this property is only included for service components that may disable encryption.
Usage string
DNS usage name
component String
Service component name
connectionUri String
Connection info for connecting to the service component. This is a combination of host and port.
host String
Host name for connecting to the service component
kafkaAuthenticationMethod String
Kafka authentication method. This is a value specific to the 'kafka' service component
kafkaSslCa String
Kafka certificate used. The possible values are letsencrypt and project_ca.
port Integer
Port number for connecting to the service component
route String
Network access route
ssl Boolean
Whether the endpoint is encrypted or accepts plaintext. By default, endpoints are always encrypted and this property is only included for service components that may disable encryption.
usage String
DNS usage name
component string
Service component name
connectionUri string
Connection info for connecting to the service component. This is a combination of host and port.
host string
Host name for connecting to the service component
kafkaAuthenticationMethod string
Kafka authentication method. This is a value specific to the 'kafka' service component
kafkaSslCa string
Kafka certificate used. The possible values are letsencrypt and project_ca.
port number
Port number for connecting to the service component
route string
Network access route
ssl boolean
Whether the endpoint is encrypted or accepts plaintext. By default, endpoints are always encrypted and this property is only included for service components that may disable encryption.
usage string
DNS usage name
component str
Service component name
connection_uri str
Connection info for connecting to the service component. This is a combination of host and port.
host str
Host name for connecting to the service component
kafka_authentication_method str
Kafka authentication method. This is a value specific to the 'kafka' service component
kafka_ssl_ca str
Kafka certificate used. The possible values are letsencrypt and project_ca.
port int
Port number for connecting to the service component
route str
Network access route
ssl bool
Whether the endpoint is encrypted or accepts plaintext. By default, endpoints are always encrypted and this property is only included for service components that may disable encryption.
usage str
DNS usage name
component String
Service component name
connectionUri String
Connection info for connecting to the service component. This is a combination of host and port.
host String
Host name for connecting to the service component
kafkaAuthenticationMethod String
Kafka authentication method. This is a value specific to the 'kafka' service component
kafkaSslCa String
Kafka certificate used. The possible values are letsencrypt and project_ca.
port Number
Port number for connecting to the service component
route String
Network access route
ssl Boolean
Whether the endpoint is encrypted or accepts plaintext. By default, endpoints are always encrypted and this property is only included for service components that may disable encryption.
usage String
DNS usage name
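
As a hedged sketch of how this component information can be consumed, the snippet below filters the components output for the kafka component and exports its connection URI. The exampleKafka resource name is an assumption carried over from the examples earlier on this page.

// Find the connection URI of the "kafka" service component from the components output.
export const kafkaBrokerUri = exampleKafka.components.apply(components => {
    const broker = components.find(c => c.component === "kafka");
    return broker ? broker.connectionUri : "";
});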

KafkaKafka
, KafkaKafkaArgs

AccessCert string
The Kafka client certificate.
AccessKey string
The Kafka client certificate key.
ConnectUri string
The Kafka Connect URI.
RestUri string
The Kafka REST URI.
SchemaRegistryUri string
The Schema Registry URI.
Uris List<string>
Kafka server URIs.
AccessCert string
The Kafka client certificate.
AccessKey string
The Kafka client certificate key.
ConnectUri string
The Kafka Connect URI.
RestUri string
The Kafka REST URI.
SchemaRegistryUri string
The Schema Registry URI.
Uris []string
Kafka server URIs.
accessCert String
The Kafka client certificate.
accessKey String
The Kafka client certificate key.
connectUri String
The Kafka Connect URI.
restUri String
The Kafka REST URI.
schemaRegistryUri String
The Schema Registry URI.
uris List<String>
Kafka server URIs.
accessCert string
The Kafka client certificate.
accessKey string
The Kafka client certificate key.
connectUri string
The Kafka Connect URI.
restUri string
The Kafka REST URI.
schemaRegistryUri string
The Schema Registry URI.
uris string[]
Kafka server URIs.
access_cert str
The Kafka client certificate.
access_key str
The Kafka client certificate key.
connect_uri str
The Kafka Connect URI.
rest_uri str
The Kafka REST URI.
schema_registry_uri str
The Schema Registry URI.
uris Sequence[str]
Kafka server URIs.
accessCert String
The Kafka client certificate.
accessKey String
The Kafka client certificate key.
connectUri String
The Kafka Connect URI.
restUri String
The Kafka REST URI.
schemaRegistryUri String
The Schema Registry URI.
uris List<String>
Kafka server URIs.
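
The kafkas output exposes client connection details such as the access certificate and key. A minimal sketch, assuming the exampleKafka resource from earlier on this page, that exports them as secrets:

import * as pulumi from "@pulumi/pulumi";

// The client certificate and key are sensitive, so mark them as secrets.
export const kafkaAccessCert = pulumi.secret(exampleKafka.kafkas.apply(k => k[0]?.accessCert ?? ""));
export const kafkaAccessKey = pulumi.secret(exampleKafka.kafkas.apply(k => k[0]?.accessKey ?? ""));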

KafkaKafkaUserConfig
, KafkaKafkaUserConfigArgs

AdditionalBackupRegions string
Additional Cloud Regions for Backup Replication.

Deprecated: This property is deprecated.

AivenKafkaTopicMessages bool
Allow access to read Kafka topic messages in the Aiven Console and REST API.
CustomDomain string
Serve the web frontend using a custom CNAME pointing to the Aiven DNS name. Example: grafana.example.org.
FollowerFetching KafkaKafkaUserConfigFollowerFetching
Enable follower fetching
IpFilterObjects List<KafkaKafkaUserConfigIpFilterObject>
Allow incoming connections from CIDR address block, e.g. 10.20.0.0/16
IpFilterStrings List<string>
Allow incoming connections from CIDR address block, e.g. 10.20.0.0/16.
IpFilters List<string>
Allow incoming connections from CIDR address block, e.g. 10.20.0.0/16.

Deprecated: Deprecated. Use ip_filter_string instead.

Kafka KafkaKafkaUserConfigKafka
Kafka broker configuration values
KafkaAuthenticationMethods KafkaKafkaUserConfigKafkaAuthenticationMethods
Kafka authentication methods
KafkaConnect bool
Enable Kafka Connect service. Default: false.
KafkaConnectConfig KafkaKafkaUserConfigKafkaConnectConfig
Kafka Connect configuration values
KafkaConnectSecretProviders List<KafkaKafkaUserConfigKafkaConnectSecretProvider>
KafkaRest bool
Enable Kafka-REST service. Default: false.
KafkaRestAuthorization bool
Enable authorization in Kafka-REST service.
KafkaRestConfig KafkaKafkaUserConfigKafkaRestConfig
Kafka REST configuration
KafkaSaslMechanisms KafkaKafkaUserConfigKafkaSaslMechanisms
Kafka SASL mechanisms
KafkaVersion string
Enum: 3.1, 3.2, 3.3, 3.4, 3.5, 3.6, 3.7, 3.8, 3.9, and newer. Kafka major version.
LetsencryptSaslPrivatelink bool
Use Letsencrypt CA for Kafka SASL via Privatelink.
PrivateAccess KafkaKafkaUserConfigPrivateAccess
Allow access to selected service ports from private networks
PrivatelinkAccess KafkaKafkaUserConfigPrivatelinkAccess
Allow access to selected service components through Privatelink
PublicAccess KafkaKafkaUserConfigPublicAccess
Allow access to selected service ports from the public Internet
SchemaRegistry bool
Enable Schema-Registry service. Default: false.
SchemaRegistryConfig KafkaKafkaUserConfigSchemaRegistryConfig
Schema Registry configuration
ServiceLog bool
Store logs for the service so that they are available in the HTTP API and console.
SingleZone KafkaKafkaUserConfigSingleZone
Single-zone configuration
StaticIps bool
Use static public IP addresses.
TieredStorage KafkaKafkaUserConfigTieredStorage
Tiered storage configuration
AdditionalBackupRegions string
Additional Cloud Regions for Backup Replication.

Deprecated: This property is deprecated.

AivenKafkaTopicMessages bool
Allow access to read Kafka topic messages in the Aiven Console and REST API.
CustomDomain string
Serve the web frontend using a custom CNAME pointing to the Aiven DNS name. Example: grafana.example.org.
FollowerFetching KafkaKafkaUserConfigFollowerFetching
Enable follower fetching
IpFilterObjects []KafkaKafkaUserConfigIpFilterObject
Allow incoming connections from CIDR address block, e.g. 10.20.0.0/16
IpFilterStrings []string
Allow incoming connections from CIDR address block, e.g. 10.20.0.0/16.
IpFilters []string
Allow incoming connections from CIDR address block, e.g. 10.20.0.0/16.

Deprecated: Deprecated. Use ip_filter_string instead.

Kafka KafkaKafkaUserConfigKafka
Kafka broker configuration values
KafkaAuthenticationMethods KafkaKafkaUserConfigKafkaAuthenticationMethods
Kafka authentication methods
KafkaConnect bool
Enable Kafka Connect service. Default: false.
KafkaConnectConfig KafkaKafkaUserConfigKafkaConnectConfig
Kafka Connect configuration values
KafkaConnectSecretProviders []KafkaKafkaUserConfigKafkaConnectSecretProvider
KafkaRest bool
Enable Kafka-REST service. Default: false.
KafkaRestAuthorization bool
Enable authorization in Kafka-REST service.
KafkaRestConfig KafkaKafkaUserConfigKafkaRestConfig
Kafka REST configuration
KafkaSaslMechanisms KafkaKafkaUserConfigKafkaSaslMechanisms
Kafka SASL mechanisms
KafkaVersion string
Enum: 3.1, 3.2, 3.3, 3.4, 3.5, 3.6, 3.7, 3.8, 3.9, and newer. Kafka major version.
LetsencryptSaslPrivatelink bool
Use Letsencrypt CA for Kafka SASL via Privatelink.
PrivateAccess KafkaKafkaUserConfigPrivateAccess
Allow access to selected service ports from private networks
PrivatelinkAccess KafkaKafkaUserConfigPrivatelinkAccess
Allow access to selected service components through Privatelink
PublicAccess KafkaKafkaUserConfigPublicAccess
Allow access to selected service ports from the public Internet
SchemaRegistry bool
Enable Schema-Registry service. Default: false.
SchemaRegistryConfig KafkaKafkaUserConfigSchemaRegistryConfig
Schema Registry configuration
ServiceLog bool
Store logs for the service so that they are available in the HTTP API and console.
SingleZone KafkaKafkaUserConfigSingleZone
Single-zone configuration
StaticIps bool
Use static public IP addresses.
TieredStorage KafkaKafkaUserConfigTieredStorage
Tiered storage configuration
additionalBackupRegions String
Additional Cloud Regions for Backup Replication.

Deprecated: This property is deprecated.

aivenKafkaTopicMessages Boolean
Allow access to read Kafka topic messages in the Aiven Console and REST API.
customDomain String
Serve the web frontend using a custom CNAME pointing to the Aiven DNS name. Example: grafana.example.org.
followerFetching KafkaKafkaUserConfigFollowerFetching
Enable follower fetching
ipFilterObjects List<KafkaKafkaUserConfigIpFilterObject>
Allow incoming connections from CIDR address block, e.g. 10.20.0.0/16
ipFilterStrings List<String>
Allow incoming connections from CIDR address block, e.g. 10.20.0.0/16.
ipFilters List<String>
Allow incoming connections from CIDR address block, e.g. 10.20.0.0/16.

Deprecated: Deprecated. Use ip_filter_string instead.

kafka KafkaKafkaUserConfigKafka
Kafka broker configuration values
kafkaAuthenticationMethods KafkaKafkaUserConfigKafkaAuthenticationMethods
Kafka authentication methods
kafkaConnect Boolean
Enable Kafka Connect service. Default: false.
kafkaConnectConfig KafkaKafkaUserConfigKafkaConnectConfig
Kafka Connect configuration values
kafkaConnectSecretProviders List<KafkaKafkaUserConfigKafkaConnectSecretProvider>
kafkaRest Boolean
Enable Kafka-REST service. Default: false.
kafkaRestAuthorization Boolean
Enable authorization in Kafka-REST service.
kafkaRestConfig KafkaKafkaUserConfigKafkaRestConfig
Kafka REST configuration
kafkaSaslMechanisms KafkaKafkaUserConfigKafkaSaslMechanisms
Kafka SASL mechanisms
kafkaVersion String
Enum: 3.1, 3.2, 3.3, 3.4, 3.5, 3.6, 3.7, 3.8, 3.9, and newer. Kafka major version.
letsencryptSaslPrivatelink Boolean
Use Letsencrypt CA for Kafka SASL via Privatelink.
privateAccess KafkaKafkaUserConfigPrivateAccess
Allow access to selected service ports from private networks
privatelinkAccess KafkaKafkaUserConfigPrivatelinkAccess
Allow access to selected service components through Privatelink
publicAccess KafkaKafkaUserConfigPublicAccess
Allow access to selected service ports from the public Internet
schemaRegistry Boolean
Enable Schema-Registry service. Default: false.
schemaRegistryConfig KafkaKafkaUserConfigSchemaRegistryConfig
Schema Registry configuration
serviceLog Boolean
Store logs for the service so that they are available in the HTTP API and console.
singleZone KafkaKafkaUserConfigSingleZone
Single-zone configuration
staticIps Boolean
Use static public IP addresses.
tieredStorage KafkaKafkaUserConfigTieredStorage
Tiered storage configuration
additionalBackupRegions string
Additional Cloud Regions for Backup Replication.

Deprecated: This property is deprecated.

aivenKafkaTopicMessages boolean
Allow access to read Kafka topic messages in the Aiven Console and REST API.
customDomain string
Serve the web frontend using a custom CNAME pointing to the Aiven DNS name. Example: grafana.example.org.
followerFetching KafkaKafkaUserConfigFollowerFetching
Enable follower fetching
ipFilterObjects KafkaKafkaUserConfigIpFilterObject[]
Allow incoming connections from CIDR address block, e.g. 10.20.0.0/16
ipFilterStrings string[]
Allow incoming connections from CIDR address block, e.g. 10.20.0.0/16.
ipFilters string[]
Allow incoming connections from CIDR address block, e.g. 10.20.0.0/16.

Deprecated: Deprecated. Use ip_filter_string instead.

kafka KafkaKafkaUserConfigKafka
Kafka broker configuration values
kafkaAuthenticationMethods KafkaKafkaUserConfigKafkaAuthenticationMethods
Kafka authentication methods
kafkaConnect boolean
Enable Kafka Connect service. Default: false.
kafkaConnectConfig KafkaKafkaUserConfigKafkaConnectConfig
Kafka Connect configuration values
kafkaConnectSecretProviders KafkaKafkaUserConfigKafkaConnectSecretProvider[]
kafkaRest boolean
Enable Kafka-REST service. Default: false.
kafkaRestAuthorization boolean
Enable authorization in Kafka-REST service.
kafkaRestConfig KafkaKafkaUserConfigKafkaRestConfig
Kafka REST configuration
kafkaSaslMechanisms KafkaKafkaUserConfigKafkaSaslMechanisms
Kafka SASL mechanisms
kafkaVersion string
Enum: 3.1, 3.2, 3.3, 3.4, 3.5, 3.6, 3.7, 3.8, 3.9, and newer. Kafka major version.
letsencryptSaslPrivatelink boolean
Use Letsencrypt CA for Kafka SASL via Privatelink.
privateAccess KafkaKafkaUserConfigPrivateAccess
Allow access to selected service ports from private networks
privatelinkAccess KafkaKafkaUserConfigPrivatelinkAccess
Allow access to selected service components through Privatelink
publicAccess KafkaKafkaUserConfigPublicAccess
Allow access to selected service ports from the public Internet
schemaRegistry boolean
Enable Schema-Registry service. Default: false.
schemaRegistryConfig KafkaKafkaUserConfigSchemaRegistryConfig
Schema Registry configuration
serviceLog boolean
Store logs for the service so that they are available in the HTTP API and console.
singleZone KafkaKafkaUserConfigSingleZone
Single-zone configuration
staticIps boolean
Use static public IP addresses.
tieredStorage KafkaKafkaUserConfigTieredStorage
Tiered storage configuration
additional_backup_regions str
Additional Cloud Regions for Backup Replication.

Deprecated: This property is deprecated.

aiven_kafka_topic_messages bool
Allow access to read Kafka topic messages in the Aiven Console and REST API.
custom_domain str
Serve the web frontend using a custom CNAME pointing to the Aiven DNS name. Example: grafana.example.org.
follower_fetching KafkaKafkaUserConfigFollowerFetching
Enable follower fetching
ip_filter_objects Sequence[KafkaKafkaUserConfigIpFilterObject]
Allow incoming connections from CIDR address block, e.g. 10.20.0.0/16
ip_filter_strings Sequence[str]
Allow incoming connections from CIDR address block, e.g. 10.20.0.0/16.
ip_filters Sequence[str]
Allow incoming connections from CIDR address block, e.g. 10.20.0.0/16.

Deprecated: Deprecated. Use ip_filter_string instead.

kafka KafkaKafkaUserConfigKafka
Kafka broker configuration values
kafka_authentication_methods KafkaKafkaUserConfigKafkaAuthenticationMethods
Kafka authentication methods
kafka_connect bool
Enable Kafka Connect service. Default: false.
kafka_connect_config KafkaKafkaUserConfigKafkaConnectConfig
Kafka Connect configuration values
kafka_connect_secret_providers Sequence[KafkaKafkaUserConfigKafkaConnectSecretProvider]
kafka_rest bool
Enable Kafka-REST service. Default: false.
kafka_rest_authorization bool
Enable authorization in Kafka-REST service.
kafka_rest_config KafkaKafkaUserConfigKafkaRestConfig
Kafka REST configuration
kafka_sasl_mechanisms KafkaKafkaUserConfigKafkaSaslMechanisms
Kafka SASL mechanisms
kafka_version str
Enum: 3.1, 3.2, 3.3, 3.4, 3.5, 3.6, 3.7, 3.8, 3.9, and newer. Kafka major version.
letsencrypt_sasl_privatelink bool
Use Letsencrypt CA for Kafka SASL via Privatelink.
private_access KafkaKafkaUserConfigPrivateAccess
Allow access to selected service ports from private networks
privatelink_access KafkaKafkaUserConfigPrivatelinkAccess
Allow access to selected service components through Privatelink
public_access KafkaKafkaUserConfigPublicAccess
Allow access to selected service ports from the public Internet
schema_registry bool
Enable Schema-Registry service. Default: false.
schema_registry_config KafkaKafkaUserConfigSchemaRegistryConfig
Schema Registry configuration
service_log bool
Store logs for the service so that they are available in the HTTP API and console.
single_zone KafkaKafkaUserConfigSingleZone
Single-zone configuration
static_ips bool
Use static public IP addresses.
tiered_storage KafkaKafkaUserConfigTieredStorage
Tiered storage configuration
additionalBackupRegions String
Additional Cloud Regions for Backup Replication.

Deprecated: This property is deprecated.

aivenKafkaTopicMessages Boolean
Allow access to read Kafka topic messages in the Aiven Console and REST API.
customDomain String
Serve the web frontend using a custom CNAME pointing to the Aiven DNS name. Example: grafana.example.org.
followerFetching Property Map
Enable follower fetching
ipFilterObjects List<Property Map>
Allow incoming connections from CIDR address block, e.g. 10.20.0.0/16
ipFilterStrings List<String>
Allow incoming connections from CIDR address block, e.g. 10.20.0.0/16.
ipFilters List<String>
Allow incoming connections from CIDR address block, e.g. 10.20.0.0/16.

Deprecated: Deprecated. Use ip_filter_string instead.

kafka Property Map
Kafka broker configuration values
kafkaAuthenticationMethods Property Map
Kafka authentication methods
kafkaConnect Boolean
Enable Kafka Connect service. Default: false.
kafkaConnectConfig Property Map
Kafka Connect configuration values
kafkaConnectSecretProviders List<Property Map>
kafkaRest Boolean
Enable Kafka-REST service. Default: false.
kafkaRestAuthorization Boolean
Enable authorization in Kafka-REST service.
kafkaRestConfig Property Map
Kafka REST configuration
kafkaSaslMechanisms Property Map
Kafka SASL mechanisms
kafkaVersion String
Enum: 3.1, 3.2, 3.3, 3.4, 3.5, 3.6, 3.7, 3.8, 3.9, and newer. Kafka major version.
letsencryptSaslPrivatelink Boolean
Use Letsencrypt CA for Kafka SASL via Privatelink.
privateAccess Property Map
Allow access to selected service ports from private networks
privatelinkAccess Property Map
Allow access to selected service components through Privatelink
publicAccess Property Map
Allow access to selected service ports from the public Internet
schemaRegistry Boolean
Enable Schema-Registry service. Default: false.
schemaRegistryConfig Property Map
Schema Registry configuration
serviceLog Boolean
Store logs for the service so that they are available in the HTTP API and console.
singleZone Property Map
Single-zone configuration
staticIps Boolean
Use static public IP addresses.
tieredStorage Property Map
Tiered storage configuration
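
A minimal sketch combining a few of the options above: follower fetching, service logs and topic message access in the Aiven Console. The project reference, plan and names are placeholder assumptions in the style of the examples at the top of this page.

import * as aiven from "@pulumi/aiven";

const loggedKafka = new aiven.Kafka("logged_kafka", {
    project: exampleProject.project,
    cloudName: "google-europe-west1",
    plan: "business-4",
    serviceName: "logged-kafka",
    kafkaUserConfig: {
        // Enable follower fetching so consumers can read from a nearby replica.
        followerFetching: {
            enabled: true,
        },
        // Keep service logs available in the HTTP API and console.
        serviceLog: true,
        // Allow reading topic messages in the Aiven Console and REST API.
        aivenKafkaTopicMessages: true,
    },
});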

KafkaKafkaUserConfigFollowerFetching
, KafkaKafkaUserConfigFollowerFetchingArgs

Enabled bool
Whether to enable the follower fetching functionality.
Enabled bool
Whether to enable the follower fetching functionality.
enabled Boolean
Whether to enable the follower fetching functionality.
enabled boolean
Whether to enable the follower fetching functionality.
enabled bool
Whether to enable the follower fetching functionality.
enabled Boolean
Whether to enable the follower fetching functionality.

KafkaKafkaUserConfigIpFilterObject
, KafkaKafkaUserConfigIpFilterObjectArgs

Network This property is required. string
CIDR address block. Example: 10.20.0.0/16.
Description string
Description for IP filter list entry. Example: Production service IP range.
Network This property is required. string
CIDR address block. Example: 10.20.0.0/16.
Description string
Description for IP filter list entry. Example: Production service IP range.
network This property is required. String
CIDR address block. Example: 10.20.0.0/16.
description String
Description for IP filter list entry. Example: Production service IP range.
network This property is required. string
CIDR address block. Example: 10.20.0.0/16.
description string
Description for IP filter list entry. Example: Production service IP range.
network This property is required. str
CIDR address block. Example: 10.20.0.0/16.
description str
Description for IP filter list entry. Example: Production service IP range.
network This property is required. String
CIDR address block. Example: 10.20.0.0/16.
description String
Description for IP filter list entry. Example: Production service IP range.
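
A minimal sketch of restricting access with ip_filter_object entries inside kafkaUserConfig; the CIDR blocks and descriptions are placeholders, and the other service arguments follow the examples earlier on this page.

import * as aiven from "@pulumi/aiven";

const restrictedKafka = new aiven.Kafka("restricted_kafka", {
    project: exampleProject.project,
    cloudName: "google-europe-west1",
    plan: "business-4",
    serviceName: "restricted-kafka",
    kafkaUserConfig: {
        // Only allow incoming connections from these CIDR ranges.
        ipFilterObjects: [
            { network: "10.20.0.0/16", description: "Production service IP range" },
            { network: "203.0.113.0/24", description: "Office egress range" },
        ],
    },
});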

KafkaKafkaUserConfigKafka
, KafkaKafkaUserConfigKafkaArgs

AutoCreateTopicsEnable bool
Enable auto-creation of topics. (Default: true).
CompressionType string
Enum: gzip, lz4, producer, snappy, uncompressed, zstd. Specify the final compression type for a given topic. This configuration accepts the standard compression codecs (gzip, snappy, lz4, zstd). It additionally accepts uncompressed which is equivalent to no compression; and producer which means retain the original compression codec set by the producer. (Default: producer).
ConnectionsMaxIdleMs int
Idle connections timeout: the server socket processor threads close the connections that idle for longer than this. (Default: 600000 ms (10 minutes)). Example: 540000.
DefaultReplicationFactor int
Replication factor for auto-created topics (Default: 3).
GroupInitialRebalanceDelayMs int
The amount of time, in milliseconds, the group coordinator will wait for more consumers to join a new group before performing the first rebalance. A longer delay means potentially fewer rebalances, but increases the time until processing begins. The default value for this is 3 seconds. During development and testing it might be desirable to set this to 0 in order to not delay test execution time. (Default: 3000 ms (3 seconds)). Example: 3000.
GroupMaxSessionTimeoutMs int
The maximum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. Default: 1800000 ms (30 minutes). Example: 1800000.
GroupMinSessionTimeoutMs int
The minimum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. (Default: 6000 ms (6 seconds)). Example: 6000.
LogCleanerDeleteRetentionMs int
How long are delete records retained? (Default: 86400000 (1 day)). Example: 86400000.
LogCleanerMaxCompactionLagMs int
The maximum amount of time a message will remain uncompacted. Only applicable for logs that are being compacted. (Default: 9223372036854775807 ms (Long.MAX_VALUE)).
LogCleanerMinCleanableRatio double
Controls log compactor frequency. Larger value means more frequent compactions but also more space wasted for logs. Consider setting log.cleaner.max.compaction.lag.ms to enforce compactions sooner, instead of setting a very high value for this option. (Default: 0.5). Example: 0.5.
LogCleanerMinCompactionLagMs int
The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted. (Default: 0 ms).
LogCleanupPolicy string
Enum: compact, compact,delete, delete. The default cleanup policy for segments beyond the retention window (Default: delete).
LogFlushIntervalMessages int
The number of messages accumulated on a log partition before messages are flushed to disk (Default: 9223372036854775807 (Long.MAX_VALUE)).
LogFlushIntervalMs int
The maximum time in ms that a message in any topic is kept in memory (page-cache) before being flushed to disk. If not set, the value in log.flush.scheduler.interval.ms is used (Default: null).
LogIndexIntervalBytes int
The interval with which Kafka adds an entry to the offset index (Default: 4096 bytes (4 kibibytes)). Example: 4096.
LogIndexSizeMaxBytes int
The maximum size in bytes of the offset index (Default: 10485760 (10 mebibytes)). Example: 10485760.
LogLocalRetentionBytes int
The maximum size of local log segments that can grow for a partition before it gets eligible for deletion. If set to -2, the value of log.retention.bytes is used. The effective value should always be less than or equal to log.retention.bytes value. (Default: -2).
LogLocalRetentionMs int
The number of milliseconds to keep the local log segments before it gets eligible for deletion. If set to -2, the value of log.retention.ms is used. The effective value should always be less than or equal to log.retention.ms value. (Default: -2).
LogMessageDownconversionEnable bool
This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests. (Default: true).
LogMessageTimestampDifferenceMaxMs int
The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message (Default: 9223372036854775807 (Long.MAX_VALUE)).
LogMessageTimestampType string
Enum: CreateTime, LogAppendTime. Define whether the timestamp in the message is message create time or log append time. (Default: CreateTime).
LogPreallocate bool
Should pre-allocate a file when creating a new segment? (Default: false).
LogRetentionBytes int
The maximum size of the log before deleting messages (Default: -1).
LogRetentionHours int
The number of hours to keep a log file before deleting it (Default: 168 hours (1 week)).
LogRetentionMs int
The number of milliseconds to keep a log file before deleting it. If not set, the value in log.retention.minutes is used. If set to -1, no time limit is applied. (Default: null, log.retention.hours applies).
LogRollJitterMs int
The maximum jitter to subtract from logRollTimeMillis (in milliseconds). If not set, the value in log.roll.jitter.hours is used (Default: null).
LogRollMs int
The maximum time before a new log segment is rolled out (in milliseconds). (Default: null, log.roll.hours applies (Default: 168, 7 days)).
LogSegmentBytes int
The maximum size of a single log file (Default: 1073741824 bytes (1 gibibyte)).
LogSegmentDeleteDelayMs int
The amount of time to wait before deleting a file from the filesystem (Default: 60000 ms (1 minute)). Example: 60000.
MaxConnectionsPerIp int
The maximum number of connections allowed from each ip address (Default: 2147483647).
MaxIncrementalFetchSessionCacheSlots int
The maximum number of incremental fetch sessions that the broker will maintain. (Default: 1000). Example: 1000.
MessageMaxBytes int
The maximum size of message that the server can receive. (Default: 1048588 bytes (1 mebibyte + 12 bytes)). Example: 1048588.
MinInsyncReplicas int
When a producer sets acks to all (or -1), min.insync.replicas specifies the minimum number of replicas that must acknowledge a write for the write to be considered successful. (Default: 1). Example: 1.
NumPartitions int
Number of partitions for auto-created topics (Default: 1).
OffsetsRetentionMinutes int
Log retention window in minutes for offsets topic (Default: 10080 minutes (7 days)). Example: 10080.
ProducerPurgatoryPurgeIntervalRequests int
The purge interval (in number of requests) of the producer request purgatory (Default: 1000).
ReplicaFetchMaxBytes int
The number of bytes of messages to attempt to fetch for each partition. This is not an absolute maximum; if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made. (Default: 1048576 bytes (1 mebibyte)).
ReplicaFetchResponseMaxBytes int
Maximum bytes expected for the entire fetch response. Records are fetched in batches, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made. As such, this is not an absolute maximum. (Default: 10485760 bytes (10 mebibytes)).
SaslOauthbearerExpectedAudience string
The (optional) comma-delimited setting for the broker to use to verify that the JWT was issued for one of the expected audiences. (Default: null).
SaslOauthbearerExpectedIssuer string
Optional setting for the broker to use to verify that the JWT was created by the expected issuer. (Default: null).
SaslOauthbearerJwksEndpointUrl string
OIDC JWKS endpoint URL. By setting this the SASL SSL OAuth2/OIDC authentication is enabled. See also other options for SASL OAuth2/OIDC. (Default: null).
SaslOauthbearerSubClaimName string
Name of the scope from which to extract the subject claim from the JWT. (Default: sub).
SocketRequestMaxBytes int
The maximum number of bytes in a socket request (Default: 104857600 bytes).
TransactionPartitionVerificationEnable bool
Enable verification that checks that the partition has been added to the transaction before writing transactional records to the partition. (Default: true).
TransactionRemoveExpiredTransactionCleanupIntervalMs int
The interval at which to remove transactions that have expired due to transactional.id.expiration.ms passing (Default: 3600000 ms (1 hour)). Example: 3600000.
TransactionStateLogSegmentBytes int
The transaction topic segment bytes should be kept relatively small in order to facilitate faster log compaction and cache loads (Default: 104857600 bytes (100 mebibytes)). Example: 104857600.
AutoCreateTopicsEnable bool
Enable auto-creation of topics. (Default: true).
CompressionType string
Enum: gzip, lz4, producer, snappy, uncompressed, zstd. Specify the final compression type for a given topic. This configuration accepts the standard compression codecs (gzip, snappy, lz4, zstd). It additionally accepts uncompressed which is equivalent to no compression; and producer which means retain the original compression codec set by the producer. (Default: producer).
ConnectionsMaxIdleMs int
Idle connections timeout: the server socket processor threads close the connections that idle for longer than this. (Default: 600000 ms (10 minutes)). Example: 540000.
DefaultReplicationFactor int
Replication factor for auto-created topics (Default: 3).
GroupInitialRebalanceDelayMs int
The amount of time, in milliseconds, the group coordinator will wait for more consumers to join a new group before performing the first rebalance. A longer delay means potentially fewer rebalances, but increases the time until processing begins. The default value for this is 3 seconds. During development and testing it might be desirable to set this to 0 in order to not delay test execution time. (Default: 3000 ms (3 seconds)). Example: 3000.
GroupMaxSessionTimeoutMs int
The maximum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. Default: 1800000 ms (30 minutes). Example: 1800000.
GroupMinSessionTimeoutMs int
The minimum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. (Default: 6000 ms (6 seconds)). Example: 6000.
LogCleanerDeleteRetentionMs int
How long are delete records retained? (Default: 86400000 (1 day)). Example: 86400000.
LogCleanerMaxCompactionLagMs int
The maximum amount of time a message will remain uncompacted. Only applicable for logs that are being compacted. (Default: 9223372036854775807 ms (Long.MAX_VALUE)).
LogCleanerMinCleanableRatio float64
Controls log compactor frequency. Larger value means more frequent compactions but also more space wasted for logs. Consider setting log.cleaner.max.compaction.lag.ms to enforce compactions sooner, instead of setting a very high value for this option. (Default: 0.5). Example: 0.5.
LogCleanerMinCompactionLagMs int
The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted. (Default: 0 ms).
LogCleanupPolicy string
Enum: compact, compact,delete, delete. The default cleanup policy for segments beyond the retention window (Default: delete).
LogFlushIntervalMessages int
The number of messages accumulated on a log partition before messages are flushed to disk (Default: 9223372036854775807 (Long.MAX_VALUE)).
LogFlushIntervalMs int
The maximum time in ms that a message in any topic is kept in memory (page-cache) before being flushed to disk. If not set, the value in log.flush.scheduler.interval.ms is used (Default: null).
LogIndexIntervalBytes int
The interval with which Kafka adds an entry to the offset index (Default: 4096 bytes (4 kibibytes)). Example: 4096.
LogIndexSizeMaxBytes int
The maximum size in bytes of the offset index (Default: 10485760 (10 mebibytes)). Example: 10485760.
LogLocalRetentionBytes int
The maximum size of local log segments that can grow for a partition before it gets eligible for deletion. If set to -2, the value of log.retention.bytes is used. The effective value should always be less than or equal to log.retention.bytes value. (Default: -2).
LogLocalRetentionMs int
The number of milliseconds to keep the local log segments before it gets eligible for deletion. If set to -2, the value of log.retention.ms is used. The effective value should always be less than or equal to log.retention.ms value. (Default: -2).
LogMessageDownconversionEnable bool
This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests. (Default: true).
LogMessageTimestampDifferenceMaxMs int
The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message (Default: 9223372036854775807 (Long.MAX_VALUE)).
LogMessageTimestampType string
Enum: CreateTime, LogAppendTime. Define whether the timestamp in the message is message create time or log append time. (Default: CreateTime).
LogPreallocate bool
Should pre-allocate a file when creating a new segment? (Default: false).
LogRetentionBytes int
The maximum size of the log before deleting messages (Default: -1).
LogRetentionHours int
The number of hours to keep a log file before deleting it (Default: 168 hours (1 week)).
LogRetentionMs int
The number of milliseconds to keep a log file before deleting it. If not set, the value in log.retention.minutes is used. If set to -1, no time limit is applied. (Default: null, log.retention.hours applies).
LogRollJitterMs int
The maximum jitter to subtract from logRollTimeMillis (in milliseconds). If not set, the value in log.roll.jitter.hours is used (Default: null).
LogRollMs int
The maximum time before a new log segment is rolled out (in milliseconds). (Default: null, log.roll.hours applies (Default: 168, 7 days)).
LogSegmentBytes int
The maximum size of a single log file (Default: 1073741824 bytes (1 gibibyte)).
LogSegmentDeleteDelayMs int
The amount of time to wait before deleting a file from the filesystem (Default: 60000 ms (1 minute)). Example: 60000.
MaxConnectionsPerIp int
The maximum number of connections allowed from each ip address (Default: 2147483647).
MaxIncrementalFetchSessionCacheSlots int
The maximum number of incremental fetch sessions that the broker will maintain. (Default: 1000). Example: 1000.
MessageMaxBytes int
The maximum size of message that the server can receive. (Default: 1048588 bytes (1 mebibyte + 12 bytes)). Example: 1048588.
MinInsyncReplicas int
When a producer sets acks to all (or -1), min.insync.replicas specifies the minimum number of replicas that must acknowledge a write for the write to be considered successful. (Default: 1). Example: 1.
NumPartitions int
Number of partitions for auto-created topics (Default: 1).
OffsetsRetentionMinutes int
Log retention window in minutes for offsets topic (Default: 10080 minutes (7 days)). Example: 10080.
ProducerPurgatoryPurgeIntervalRequests int
The purge interval (in number of requests) of the producer request purgatory (Default: 1000).
ReplicaFetchMaxBytes int
The number of bytes of messages to attempt to fetch for each partition. This is not an absolute maximum; if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made. (Default: 1048576 bytes (1 mebibyte)).
ReplicaFetchResponseMaxBytes int
Maximum bytes expected for the entire fetch response. Records are fetched in batches, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made. As such, this is not an absolute maximum. (Default: 10485760 bytes (10 mebibytes)).
SaslOauthbearerExpectedAudience string
The (optional) comma-delimited setting for the broker to use to verify that the JWT was issued for one of the expected audiences. (Default: null).
SaslOauthbearerExpectedIssuer string
Optional setting for the broker to use to verify that the JWT was created by the expected issuer. (Default: null).
SaslOauthbearerJwksEndpointUrl string
OIDC JWKS endpoint URL. By setting this the SASL SSL OAuth2/OIDC authentication is enabled. See also other options for SASL OAuth2/OIDC. (Default: null).
SaslOauthbearerSubClaimName string
Name of the scope from which to extract the subject claim from the JWT. (Default: sub).
SocketRequestMaxBytes int
The maximum number of bytes in a socket request (Default: 104857600 bytes).
TransactionPartitionVerificationEnable bool
Enable verification that checks that the partition has been added to the transaction before writing transactional records to the partition. (Default: true).
TransactionRemoveExpiredTransactionCleanupIntervalMs int
The interval at which to remove transactions that have expired due to transactional.id.expiration.ms passing (Default: 3600000 ms (1 hour)). Example: 3600000.
TransactionStateLogSegmentBytes int
The transaction topic segment bytes should be kept relatively small in order to facilitate faster log compaction and cache loads (Default: 104857600 bytes (100 mebibytes)). Example: 104857600.
autoCreateTopicsEnable Boolean
Enable auto-creation of topics. (Default: true).
compressionType String
Enum: gzip, lz4, producer, snappy, uncompressed, zstd. Specify the final compression type for a given topic. This configuration accepts the standard compression codecs (gzip, snappy, lz4, zstd). It additionally accepts uncompressed which is equivalent to no compression; and producer which means retain the original compression codec set by the producer. (Default: producer).
connectionsMaxIdleMs Integer
Idle connections timeout: the server socket processor threads close the connections that idle for longer than this. (Default: 600000 ms (10 minutes)). Example: 540000.
defaultReplicationFactor Integer
Replication factor for auto-created topics (Default: 3).
groupInitialRebalanceDelayMs Integer
The amount of time, in milliseconds, the group coordinator will wait for more consumers to join a new group before performing the first rebalance. A longer delay means potentially fewer rebalances, but increases the time until processing begins. The default value for this is 3 seconds. During development and testing it might be desirable to set this to 0 in order to not delay test execution time. (Default: 3000 ms (3 seconds)). Example: 3000.
groupMaxSessionTimeoutMs Integer
The maximum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. Default: 1800000 ms (30 minutes). Example: 1800000.
groupMinSessionTimeoutMs Integer
The minimum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. (Default: 6000 ms (6 seconds)). Example: 6000.
logCleanerDeleteRetentionMs Integer
How long are delete records retained? (Default: 86400000 (1 day)). Example: 86400000.
logCleanerMaxCompactionLagMs Integer
The maximum amount of time a message will remain uncompacted. Only applicable for logs that are being compacted. (Default: 9223372036854775807 ms (Long.MAX_VALUE)).
logCleanerMinCleanableRatio Double
Controls log compactor frequency. Larger value means more frequent compactions but also more space wasted for logs. Consider setting log.cleaner.max.compaction.lag.ms to enforce compactions sooner, instead of setting a very high value for this option. (Default: 0.5). Example: 0.5.
logCleanerMinCompactionLagMs Integer
The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted. (Default: 0 ms).
logCleanupPolicy String
Enum: compact, compact,delete, delete. The default cleanup policy for segments beyond the retention window (Default: delete).
logFlushIntervalMessages Integer
The number of messages accumulated on a log partition before messages are flushed to disk (Default: 9223372036854775807 (Long.MAX_VALUE)).
logFlushIntervalMs Integer
The maximum time in ms that a message in any topic is kept in memory (page-cache) before being flushed to disk. If not set, the value in log.flush.scheduler.interval.ms is used (Default: null).
logIndexIntervalBytes Integer
The interval with which Kafka adds an entry to the offset index (Default: 4096 bytes (4 kibibytes)). Example: 4096.
logIndexSizeMaxBytes Integer
The maximum size in bytes of the offset index (Default: 10485760 (10 mebibytes)). Example: 10485760.
logLocalRetentionBytes Integer
The maximum size of local log segments that can grow for a partition before it gets eligible for deletion. If set to -2, the value of log.retention.bytes is used. The effective value should always be less than or equal to log.retention.bytes value. (Default: -2).
logLocalRetentionMs Integer
The number of milliseconds to keep the local log segments before it gets eligible for deletion. If set to -2, the value of log.retention.ms is used. The effective value should always be less than or equal to log.retention.ms value. (Default: -2).
logMessageDownconversionEnable Boolean
This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests. (Default: true).
logMessageTimestampDifferenceMaxMs Integer
The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message (Default: 9223372036854775807 (Long.MAX_VALUE)).
logMessageTimestampType String
Enum: CreateTime, LogAppendTime. Define whether the timestamp in the message is message create time or log append time. (Default: CreateTime).
logPreallocate Boolean
Whether to preallocate the file when creating a new segment. (Default: false).
logRetentionBytes Integer
The maximum size of the log before deleting messages (Default: -1).
logRetentionHours Integer
The number of hours to keep a log file before deleting it (Default: 168 hours (1 week)).
logRetentionMs Integer
The number of milliseconds to keep a log file before deleting it. If not set, the value in log.retention.minutes is used. If set to -1, no time limit is applied. (Default: null, log.retention.hours applies).
logRollJitterMs Integer
The maximum jitter to subtract from logRollTimeMillis (in milliseconds). If not set, the value in log.roll.jitter.hours is used (Default: null).
logRollMs Integer
The maximum time before a new log segment is rolled out (in milliseconds). (Default: null, log.roll.hours applies (Default: 168, 7 days)).
logSegmentBytes Integer
The maximum size of a single log file (Default: 1073741824 bytes (1 gibibyte)).
logSegmentDeleteDelayMs Integer
The amount of time to wait before deleting a file from the filesystem (Default: 60000 ms (1 minute)). Example: 60000.
maxConnectionsPerIp Integer
The maximum number of connections allowed from each IP address (Default: 2147483647).
maxIncrementalFetchSessionCacheSlots Integer
The maximum number of incremental fetch sessions that the broker will maintain. (Default: 1000). Example: 1000.
messageMaxBytes Integer
The maximum size of message that the server can receive. (Default: 1048588 bytes (1 mebibyte + 12 bytes)). Example: 1048588.
minInsyncReplicas Integer
When a producer sets acks to all (or -1), min.insync.replicas specifies the minimum number of replicas that must acknowledge a write for the write to be considered successful. (Default: 1). Example: 1.
numPartitions Integer
Number of partitions for auto-created topics (Default: 1).
offsetsRetentionMinutes Integer
Log retention window in minutes for offsets topic (Default: 10080 minutes (7 days)). Example: 10080.
producerPurgatoryPurgeIntervalRequests Integer
The purge interval (in number of requests) of the producer request purgatory (Default: 1000).
replicaFetchMaxBytes Integer
The number of bytes of messages to attempt to fetch for each partition. This is not an absolute maximum; if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made. (Default: 1048576 bytes (1 mebibyte)).
replicaFetchResponseMaxBytes Integer
Maximum bytes expected for the entire fetch response. Records are fetched in batches, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made. As such, this is not an absolute maximum. (Default: 10485760 bytes (10 mebibytes)).
saslOauthbearerExpectedAudience String
The (optional) comma-delimited setting for the broker to use to verify that the JWT was issued for one of the expected audiences. (Default: null).
saslOauthbearerExpectedIssuer String
Optional setting for the broker to use to verify that the JWT was created by the expected issuer. (Default: null).
saslOauthbearerJwksEndpointUrl String
OIDC JWKS endpoint URL. By setting this, SASL SSL OAuth2/OIDC authentication is enabled. See also other options for SASL OAuth2/OIDC. (Default: null).
saslOauthbearerSubClaimName String
Name of the scope from which to extract the subject claim from the JWT. (Default: sub).
socketRequestMaxBytes Integer
The maximum number of bytes in a socket request (Default: 104857600 bytes).
transactionPartitionVerificationEnable Boolean
Enable verification that checks that the partition has been added to the transaction before writing transactional records to the partition. (Default: true).
transactionRemoveExpiredTransactionCleanupIntervalMs Integer
The interval at which to remove transactions that have expired due to transactional.id.expiration.ms passing (Default: 3600000 ms (1 hour)). Example: 3600000.
transactionStateLogSegmentBytes Integer
The transaction topic segment bytes should be kept relatively small in order to facilitate faster log compaction and cache loads (Default: 104857600 bytes (100 mebibytes)). Example: 104857600.
autoCreateTopicsEnable boolean
Enable auto-creation of topics. (Default: true).
compressionType string
Enum: gzip, lz4, producer, snappy, uncompressed, zstd. Specify the final compression type for a given topic. This configuration accepts the standard compression codecs (gzip, snappy, lz4, zstd). It additionally accepts uncompressed which is equivalent to no compression; and producer which means retain the original compression codec set by the producer. (Default: producer).
connectionsMaxIdleMs number
Idle connections timeout: the server socket processor threads close the connections that idle for longer than this. (Default: 600000 ms (10 minutes)). Example: 540000.
defaultReplicationFactor number
Replication factor for auto-created topics (Default: 3).
groupInitialRebalanceDelayMs number
The amount of time, in milliseconds, the group coordinator will wait for more consumers to join a new group before performing the first rebalance. A longer delay means potentially fewer rebalances, but increases the time until processing begins. The default value for this is 3 seconds. During development and testing it might be desirable to set this to 0 in order to not delay test execution time. (Default: 3000 ms (3 seconds)). Example: 3000.
groupMaxSessionTimeoutMs number
The maximum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. Default: 1800000 ms (30 minutes). Example: 1800000.
groupMinSessionTimeoutMs number
The minimum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. (Default: 6000 ms (6 seconds)). Example: 6000.
logCleanerDeleteRetentionMs number
How long are delete records retained? (Default: 86400000 (1 day)). Example: 86400000.
logCleanerMaxCompactionLagMs number
The maximum amount of time a message will remain uncompacted. Only applicable for logs that are being compacted. (Default: 9223372036854775807 ms (Long.MAX_VALUE)).
logCleanerMinCleanableRatio number
Controls log compactor frequency. Larger value means more frequent compactions but also more space wasted for logs. Consider setting log.cleaner.max.compaction.lag.ms to enforce compactions sooner, instead of setting a very high value for this option. (Default: 0.5). Example: 0.5.
logCleanerMinCompactionLagMs number
The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted. (Default: 0 ms).
logCleanupPolicy string
Enum: compact, compact,delete, delete. The default cleanup policy for segments beyond the retention window (Default: delete).
logFlushIntervalMessages number
The number of messages accumulated on a log partition before messages are flushed to disk (Default: 9223372036854775807 (Long.MAX_VALUE)).
logFlushIntervalMs number
The maximum time in ms that a message in any topic is kept in memory (page-cache) before being flushed to disk. If not set, the value in log.flush.scheduler.interval.ms is used (Default: null).
logIndexIntervalBytes number
The interval with which Kafka adds an entry to the offset index (Default: 4096 bytes (4 kibibytes)). Example: 4096.
logIndexSizeMaxBytes number
The maximum size in bytes of the offset index (Default: 10485760 (10 mebibytes)). Example: 10485760.
logLocalRetentionBytes number
The maximum size of local log segments that can grow for a partition before it gets eligible for deletion. If set to -2, the value of log.retention.bytes is used. The effective value should always be less than or equal to log.retention.bytes value. (Default: -2).
logLocalRetentionMs number
The number of milliseconds to keep the local log segments before it gets eligible for deletion. If set to -2, the value of log.retention.ms is used. The effective value should always be less than or equal to log.retention.ms value. (Default: -2).
logMessageDownconversionEnable boolean
This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests. (Default: true).
logMessageTimestampDifferenceMaxMs number
The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message (Default: 9223372036854775807 (Long.MAX_VALUE)).
logMessageTimestampType string
Enum: CreateTime, LogAppendTime. Define whether the timestamp in the message is message create time or log append time. (Default: CreateTime).
logPreallocate boolean
Whether to preallocate the file when creating a new segment. (Default: false).
logRetentionBytes number
The maximum size of the log before deleting messages (Default: -1).
logRetentionHours number
The number of hours to keep a log file before deleting it (Default: 168 hours (1 week)).
logRetentionMs number
The number of milliseconds to keep a log file before deleting it. If not set, the value in log.retention.minutes is used. If set to -1, no time limit is applied. (Default: null, log.retention.hours applies).
logRollJitterMs number
The maximum jitter to subtract from logRollTimeMillis (in milliseconds). If not set, the value in log.roll.jitter.hours is used (Default: null).
logRollMs number
The maximum time before a new log segment is rolled out (in milliseconds). (Default: null, log.roll.hours applies (Default: 168, 7 days)).
logSegmentBytes number
The maximum size of a single log file (Default: 1073741824 bytes (1 gibibyte)).
logSegmentDeleteDelayMs number
The amount of time to wait before deleting a file from the filesystem (Default: 60000 ms (1 minute)). Example: 60000.
maxConnectionsPerIp number
The maximum number of connections allowed from each IP address (Default: 2147483647).
maxIncrementalFetchSessionCacheSlots number
The maximum number of incremental fetch sessions that the broker will maintain. (Default: 1000). Example: 1000.
messageMaxBytes number
The maximum size of message that the server can receive. (Default: 1048588 bytes (1 mebibyte + 12 bytes)). Example: 1048588.
minInsyncReplicas number
When a producer sets acks to all (or -1), min.insync.replicas specifies the minimum number of replicas that must acknowledge a write for the write to be considered successful. (Default: 1). Example: 1.
numPartitions number
Number of partitions for auto-created topics (Default: 1).
offsetsRetentionMinutes number
Log retention window in minutes for offsets topic (Default: 10080 minutes (7 days)). Example: 10080.
producerPurgatoryPurgeIntervalRequests number
The purge interval (in number of requests) of the producer request purgatory (Default: 1000).
replicaFetchMaxBytes number
The number of bytes of messages to attempt to fetch for each partition. This is not an absolute maximum; if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made. (Default: 1048576 bytes (1 mebibyte)).
replicaFetchResponseMaxBytes number
Maximum bytes expected for the entire fetch response. Records are fetched in batches, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made. As such, this is not an absolute maximum. (Default: 10485760 bytes (10 mebibytes)).
saslOauthbearerExpectedAudience string
The (optional) comma-delimited setting for the broker to use to verify that the JWT was issued for one of the expected audiences. (Default: null).
saslOauthbearerExpectedIssuer string
Optional setting for the broker to use to verify that the JWT was created by the expected issuer. (Default: null).
saslOauthbearerJwksEndpointUrl string
OIDC JWKS endpoint URL. By setting this, SASL SSL OAuth2/OIDC authentication is enabled. See also other options for SASL OAuth2/OIDC. (Default: null).
saslOauthbearerSubClaimName string
Name of the scope from which to extract the subject claim from the JWT. (Default: sub).
socketRequestMaxBytes number
The maximum number of bytes in a socket request (Default: 104857600 bytes).
transactionPartitionVerificationEnable boolean
Enable verification that checks that the partition has been added to the transaction before writing transactional records to the partition. (Default: true).
transactionRemoveExpiredTransactionCleanupIntervalMs number
The interval at which to remove transactions that have expired due to transactional.id.expiration.ms passing (Default: 3600000 ms (1 hour)). Example: 3600000.
transactionStateLogSegmentBytes number
The transaction topic segment bytes should be kept relatively small in order to facilitate faster log compaction and cache loads (Default: 104857600 bytes (100 mebibytes)). Example: 104857600.
auto_create_topics_enable bool
Enable auto-creation of topics. (Default: true).
compression_type str
Enum: gzip, lz4, producer, snappy, uncompressed, zstd. Specify the final compression type for a given topic. This configuration accepts the standard compression codecs (gzip, snappy, lz4, zstd). It additionally accepts uncompressed which is equivalent to no compression; and producer which means retain the original compression codec set by the producer. (Default: producer).
connections_max_idle_ms int
Idle connections timeout: the server socket processor threads close the connections that idle for longer than this. (Default: 600000 ms (10 minutes)). Example: 540000.
default_replication_factor int
Replication factor for auto-created topics (Default: 3).
group_initial_rebalance_delay_ms int
The amount of time, in milliseconds, the group coordinator will wait for more consumers to join a new group before performing the first rebalance. A longer delay means potentially fewer rebalances, but increases the time until processing begins. The default value for this is 3 seconds. During development and testing it might be desirable to set this to 0 in order to not delay test execution time. (Default: 3000 ms (3 seconds)). Example: 3000.
group_max_session_timeout_ms int
The maximum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. Default: 1800000 ms (30 minutes). Example: 1800000.
group_min_session_timeout_ms int
The minimum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. (Default: 6000 ms (6 seconds)). Example: 6000.
log_cleaner_delete_retention_ms int
How long are delete records retained? (Default: 86400000 (1 day)). Example: 86400000.
log_cleaner_max_compaction_lag_ms int
The maximum amount of time a message will remain uncompacted. Only applicable for logs that are being compacted. (Default: 9223372036854775807 ms (Long.MAX_VALUE)).
log_cleaner_min_cleanable_ratio float
Controls log compactor frequency. Larger value means more frequent compactions but also more space wasted for logs. Consider setting log.cleaner.max.compaction.lag.ms to enforce compactions sooner, instead of setting a very high value for this option. (Default: 0.5). Example: 0.5.
log_cleaner_min_compaction_lag_ms int
The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted. (Default: 0 ms).
log_cleanup_policy str
Enum: compact, compact,delete, delete. The default cleanup policy for segments beyond the retention window (Default: delete).
log_flush_interval_messages int
The number of messages accumulated on a log partition before messages are flushed to disk (Default: 9223372036854775807 (Long.MAX_VALUE)).
log_flush_interval_ms int
The maximum time in ms that a message in any topic is kept in memory (page-cache) before being flushed to disk. If not set, the value in log.flush.scheduler.interval.ms is used (Default: null).
log_index_interval_bytes int
The interval with which Kafka adds an entry to the offset index (Default: 4096 bytes (4 kibibytes)). Example: 4096.
log_index_size_max_bytes int
The maximum size in bytes of the offset index (Default: 10485760 (10 mebibytes)). Example: 10485760.
log_local_retention_bytes int
The maximum size of local log segments that can grow for a partition before it gets eligible for deletion. If set to -2, the value of log.retention.bytes is used. The effective value should always be less than or equal to log.retention.bytes value. (Default: -2).
log_local_retention_ms int
The number of milliseconds to keep the local log segments before it gets eligible for deletion. If set to -2, the value of log.retention.ms is used. The effective value should always be less than or equal to log.retention.ms value. (Default: -2).
log_message_downconversion_enable bool
This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests. (Default: true).
log_message_timestamp_difference_max_ms int
The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message (Default: 9223372036854775807 (Long.MAX_VALUE)).
log_message_timestamp_type str
Enum: CreateTime, LogAppendTime. Define whether the timestamp in the message is message create time or log append time. (Default: CreateTime).
log_preallocate bool
Whether to preallocate the file when creating a new segment. (Default: false).
log_retention_bytes int
The maximum size of the log before deleting messages (Default: -1).
log_retention_hours int
The number of hours to keep a log file before deleting it (Default: 168 hours (1 week)).
log_retention_ms int
The number of milliseconds to keep a log file before deleting it. If not set, the value in log.retention.minutes is used. If set to -1, no time limit is applied. (Default: null, log.retention.hours applies).
log_roll_jitter_ms int
The maximum jitter to subtract from logRollTimeMillis (in milliseconds). If not set, the value in log.roll.jitter.hours is used (Default: null).
log_roll_ms int
The maximum time before a new log segment is rolled out (in milliseconds). (Default: null, log.roll.hours applies (Default: 168, 7 days)).
log_segment_bytes int
The maximum size of a single log file (Default: 1073741824 bytes (1 gibibyte)).
log_segment_delete_delay_ms int
The amount of time to wait before deleting a file from the filesystem (Default: 60000 ms (1 minute)). Example: 60000.
max_connections_per_ip int
The maximum number of connections allowed from each IP address (Default: 2147483647).
max_incremental_fetch_session_cache_slots int
The maximum number of incremental fetch sessions that the broker will maintain. (Default: 1000). Example: 1000.
message_max_bytes int
The maximum size of message that the server can receive. (Default: 1048588 bytes (1 mebibyte + 12 bytes)). Example: 1048588.
min_insync_replicas int
When a producer sets acks to all (or -1), min.insync.replicas specifies the minimum number of replicas that must acknowledge a write for the write to be considered successful. (Default: 1). Example: 1.
num_partitions int
Number of partitions for auto-created topics (Default: 1).
offsets_retention_minutes int
Log retention window in minutes for offsets topic (Default: 10080 minutes (7 days)). Example: 10080.
producer_purgatory_purge_interval_requests int
The purge interval (in number of requests) of the producer request purgatory (Default: 1000).
replica_fetch_max_bytes int
The number of bytes of messages to attempt to fetch for each partition. This is not an absolute maximum; if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made. (Default: 1048576 bytes (1 mebibyte)).
replica_fetch_response_max_bytes int
Maximum bytes expected for the entire fetch response. Records are fetched in batches, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made. As such, this is not an absolute maximum. (Default: 10485760 bytes (10 mebibytes)).
sasl_oauthbearer_expected_audience str
The (optional) comma-delimited setting for the broker to use to verify that the JWT was issued for one of the expected audiences. (Default: null).
sasl_oauthbearer_expected_issuer str
Optional setting for the broker to use to verify that the JWT was created by the expected issuer. (Default: null).
sasl_oauthbearer_jwks_endpoint_url str
OIDC JWKS endpoint URL. By setting this, SASL SSL OAuth2/OIDC authentication is enabled. See also other options for SASL OAuth2/OIDC. (Default: null).
sasl_oauthbearer_sub_claim_name str
Name of the scope from which to extract the subject claim from the JWT. (Default: sub).
socket_request_max_bytes int
The maximum number of bytes in a socket request (Default: 104857600 bytes).
transaction_partition_verification_enable bool
Enable verification that checks that the partition has been added to the transaction before writing transactional records to the partition. (Default: true).
transaction_remove_expired_transaction_cleanup_interval_ms int
The interval at which to remove transactions that have expired due to transactional.id.expiration.ms passing (Default: 3600000 ms (1 hour)). Example: 3600000.
transaction_state_log_segment_bytes int
The transaction topic segment bytes should be kept relatively small in order to facilitate faster log compaction and cache loads (Default: 104857600 bytes (100 mebibytes)). Example: 104857600.
autoCreateTopicsEnable Boolean
Enable auto-creation of topics. (Default: true).
compressionType String
Enum: gzip, lz4, producer, snappy, uncompressed, zstd. Specify the final compression type for a given topic. This configuration accepts the standard compression codecs (gzip, snappy, lz4, zstd). It additionally accepts uncompressed which is equivalent to no compression; and producer which means retain the original compression codec set by the producer. (Default: producer).
connectionsMaxIdleMs Number
Idle connections timeout: the server socket processor threads close the connections that idle for longer than this. (Default: 600000 ms (10 minutes)). Example: 540000.
defaultReplicationFactor Number
Replication factor for auto-created topics (Default: 3).
groupInitialRebalanceDelayMs Number
The amount of time, in milliseconds, the group coordinator will wait for more consumers to join a new group before performing the first rebalance. A longer delay means potentially fewer rebalances, but increases the time until processing begins. The default value for this is 3 seconds. During development and testing it might be desirable to set this to 0 in order to not delay test execution time. (Default: 3000 ms (3 seconds)). Example: 3000.
groupMaxSessionTimeoutMs Number
The maximum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. Default: 1800000 ms (30 minutes). Example: 1800000.
groupMinSessionTimeoutMs Number
The minimum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures. (Default: 6000 ms (6 seconds)). Example: 6000.
logCleanerDeleteRetentionMs Number
How long are delete records retained? (Default: 86400000 (1 day)). Example: 86400000.
logCleanerMaxCompactionLagMs Number
The maximum amount of time a message will remain uncompacted. Only applicable for logs that are being compacted. (Default: 9223372036854775807 ms (Long.MAX_VALUE)).
logCleanerMinCleanableRatio Number
Controls log compactor frequency. Larger value means more frequent compactions but also more space wasted for logs. Consider setting log.cleaner.max.compaction.lag.ms to enforce compactions sooner, instead of setting a very high value for this option. (Default: 0.5). Example: 0.5.
logCleanerMinCompactionLagMs Number
The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted. (Default: 0 ms).
logCleanupPolicy String
Enum: compact, compact,delete, delete. The default cleanup policy for segments beyond the retention window (Default: delete).
logFlushIntervalMessages Number
The number of messages accumulated on a log partition before messages are flushed to disk (Default: 9223372036854775807 (Long.MAX_VALUE)).
logFlushIntervalMs Number
The maximum time in ms that a message in any topic is kept in memory (page-cache) before being flushed to disk. If not set, the value in log.flush.scheduler.interval.ms is used (Default: null).
logIndexIntervalBytes Number
The interval with which Kafka adds an entry to the offset index (Default: 4096 bytes (4 kibibytes)). Example: 4096.
logIndexSizeMaxBytes Number
The maximum size in bytes of the offset index (Default: 10485760 (10 mebibytes)). Example: 10485760.
logLocalRetentionBytes Number
The maximum size of local log segments that can grow for a partition before it gets eligible for deletion. If set to -2, the value of log.retention.bytes is used. The effective value should always be less than or equal to log.retention.bytes value. (Default: -2).
logLocalRetentionMs Number
The number of milliseconds to keep the local log segments before it gets eligible for deletion. If set to -2, the value of log.retention.ms is used. The effective value should always be less than or equal to log.retention.ms value. (Default: -2).
logMessageDownconversionEnable Boolean
This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests. (Default: true).
logMessageTimestampDifferenceMaxMs Number
The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message (Default: 9223372036854775807 (Long.MAX_VALUE)).
logMessageTimestampType String
Enum: CreateTime, LogAppendTime. Define whether the timestamp in the message is message create time or log append time. (Default: CreateTime).
logPreallocate Boolean
Whether to preallocate the file when creating a new segment. (Default: false).
logRetentionBytes Number
The maximum size of the log before deleting messages (Default: -1).
logRetentionHours Number
The number of hours to keep a log file before deleting it (Default: 168 hours (1 week)).
logRetentionMs Number
The number of milliseconds to keep a log file before deleting it. If not set, the value in log.retention.minutes is used. If set to -1, no time limit is applied. (Default: null, log.retention.hours applies).
logRollJitterMs Number
The maximum jitter to subtract from logRollTimeMillis (in milliseconds). If not set, the value in log.roll.jitter.hours is used (Default: null).
logRollMs Number
The maximum time before a new log segment is rolled out (in milliseconds). (Default: null, log.roll.hours applies (Default: 168, 7 days)).
logSegmentBytes Number
The maximum size of a single log file (Default: 1073741824 bytes (1 gibibyte)).
logSegmentDeleteDelayMs Number
The amount of time to wait before deleting a file from the filesystem (Default: 60000 ms (1 minute)). Example: 60000.
maxConnectionsPerIp Number
The maximum number of connections allowed from each IP address (Default: 2147483647).
maxIncrementalFetchSessionCacheSlots Number
The maximum number of incremental fetch sessions that the broker will maintain. (Default: 1000). Example: 1000.
messageMaxBytes Number
The maximum size of message that the server can receive. (Default: 1048588 bytes (1 mebibyte + 12 bytes)). Example: 1048588.
minInsyncReplicas Number
When a producer sets acks to all (or -1), min.insync.replicas specifies the minimum number of replicas that must acknowledge a write for the write to be considered successful. (Default: 1). Example: 1.
numPartitions Number
Number of partitions for auto-created topics (Default: 1).
offsetsRetentionMinutes Number
Log retention window in minutes for offsets topic (Default: 10080 minutes (7 days)). Example: 10080.
producerPurgatoryPurgeIntervalRequests Number
The purge interval (in number of requests) of the producer request purgatory (Default: 1000).
replicaFetchMaxBytes Number
The number of bytes of messages to attempt to fetch for each partition. This is not an absolute maximum; if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made. (Default: 1048576 bytes (1 mebibyte)).
replicaFetchResponseMaxBytes Number
Maximum bytes expected for the entire fetch response. Records are fetched in batches, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made. As such, this is not an absolute maximum. (Default: 10485760 bytes (10 mebibytes)).
saslOauthbearerExpectedAudience String
The (optional) comma-delimited setting for the broker to use to verify that the JWT was issued for one of the expected audiences. (Default: null).
saslOauthbearerExpectedIssuer String
Optional setting for the broker to use to verify that the JWT was created by the expected issuer. (Default: null).
saslOauthbearerJwksEndpointUrl String
OIDC JWKS endpoint URL. By setting this, SASL SSL OAuth2/OIDC authentication is enabled. See also other options for SASL OAuth2/OIDC. (Default: null).
saslOauthbearerSubClaimName String
Name of the scope from which to extract the subject claim from the JWT. (Default: sub).
socketRequestMaxBytes Number
The maximum number of bytes in a socket request (Default: 104857600 bytes).
transactionPartitionVerificationEnable Boolean
Enable verification that checks that the partition has been added to the transaction before writing transactional records to the partition. (Default: true).
transactionRemoveExpiredTransactionCleanupIntervalMs Number
The interval at which to remove transactions that have expired due to transactional.id.expiration.ms passing (Default: 3600000 ms (1 hour)). Example: 3600000.
transactionStateLogSegmentBytes Number
The transaction topic segment bytes should be kept relatively small in order to facilitate faster log compaction and cache loads (Default: 104857600 bytes (100 mebibytes)). Example: 104857600.
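
Many of the broker options listed above are set together under the kafkaUserConfig.kafka block shown earlier. A minimal TypeScript sketch follows; the project name, service name, and chosen values are illustrative placeholders, not recommendations:

import * as pulumi from "@pulumi/pulumi";
import * as aiven from "@pulumi/aiven";

// Illustrative only: tune a handful of the kafka.* broker options documented above.
const tunedKafka = new aiven.Kafka("tuned-kafka", {
    project: "my-project",              // placeholder project name
    cloudName: "google-europe-west1",
    plan: "business-4",
    serviceName: "tuned-kafka",
    kafkaUserConfig: {
        kafka: {
            defaultReplicationFactor: 3,   // replication factor for auto-created topics
            minInsyncReplicas: 2,          // replicas that must ack writes when acks=all
            logCleanupPolicy: "delete",    // cleanup policy for segments past retention
            logRetentionMs: 604800000,     // keep log data for 7 days
            messageMaxBytes: 1048588,      // largest message the broker accepts
        },
    },
});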

KafkaKafkaUserConfigKafkaAuthenticationMethods, KafkaKafkaUserConfigKafkaAuthenticationMethodsArgs

Certificate bool
Enable certificate/SSL authentication. Default: true.
Sasl bool
Enable SASL authentication. Default: false.
Certificate bool
Enable certificate/SSL authentication. Default: true.
Sasl bool
Enable SASL authentication. Default: false.
certificate Boolean
Enable certificate/SSL authentication. Default: true.
sasl Boolean
Enable SASL authentication. Default: false.
certificate boolean
Enable certificate/SSL authentication. Default: true.
sasl boolean
Enable SASL authentication. Default: false.
certificate bool
Enable certificate/SSL authentication. Default: true.
sasl bool
Enable SASL authentication. Default: false.
certificate Boolean
Enable certificate/SSL authentication. Default: true.
sasl Boolean
Enable SASL authentication. Default: false.
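
For example, SASL can be enabled alongside the default certificate authentication. A TypeScript sketch, assuming this block is exposed on kafkaUserConfig as kafkaAuthenticationMethods (other names are placeholders):

import * as aiven from "@pulumi/aiven";

// Illustrative only: allow both certificate/SSL and SASL authentication.
const kafkaWithSasl = new aiven.Kafka("kafka-with-sasl", {
    project: "my-project",              // placeholder project name
    cloudName: "google-europe-west1",
    plan: "business-4",
    serviceName: "kafka-with-sasl",
    kafkaUserConfig: {
        kafkaAuthenticationMethods: {
            certificate: true,  // keep certificate/SSL auth enabled (default: true)
            sasl: true,         // additionally allow SASL authentication (default: false)
        },
    },
});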

KafkaKafkaUserConfigKafkaConnectConfig, KafkaKafkaUserConfigKafkaConnectConfigArgs

ConnectorClientConfigOverridePolicy string
Enum: All, None. Defines what client configurations can be overridden by the connector. Default is None.
ConsumerAutoOffsetReset string
Enum: earliest, latest. What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server. Default is earliest.
ConsumerFetchMaxBytes int
Records are fetched in batches by the consumer, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that the consumer can make progress. As such, this is not an absolute maximum. Example: 52428800.
ConsumerIsolationLevel string
Enum: read_committed, read_uncommitted. Transaction read isolation level. read_uncommitted is the default, but read_committed can be used if consume-exactly-once behavior is desired.
ConsumerMaxPartitionFetchBytes int
Records are fetched in batches by the consumer. If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress. Example: 1048576.
ConsumerMaxPollIntervalMs int
The maximum delay in milliseconds between invocations of poll() when using consumer group management (defaults to 300000).
ConsumerMaxPollRecords int
The maximum number of records returned in a single call to poll() (defaults to 500).
OffsetFlushIntervalMs int
The interval at which to try committing offsets for tasks (defaults to 60000).
OffsetFlushTimeoutMs int
Maximum number of milliseconds to wait for records to flush and partition offset data to be committed to offset storage before cancelling the process and restoring the offset data to be committed in a future attempt (defaults to 5000).
ProducerBatchSize int
This setting gives the upper bound of the batch size to be sent. If there are fewer than this many bytes accumulated for this partition, the producer will linger for the linger.ms time waiting for more records to show up. A batch size of zero will disable batching entirely (defaults to 16384).
ProducerBufferMemory int
The total bytes of memory the producer can use to buffer records waiting to be sent to the broker (defaults to 33554432).
ProducerCompressionType string
Enum: gzip, lz4, none, snappy, zstd. Specify the default compression type for producers. This configuration accepts the standard compression codecs (gzip, snappy, lz4, zstd). It additionally accepts none which is the default and equivalent to no compression.
ProducerLingerMs int
This setting gives the upper bound on the delay for batching: once there is batch.size worth of records for a partition it will be sent immediately regardless of this setting; however, if there are fewer than this many bytes accumulated for this partition, the producer will linger for the specified time waiting for more records to show up. Defaults to 0.
ProducerMaxRequestSize int
This setting will limit the number of record batches the producer will send in a single request to avoid sending huge requests. Example: 1048576.
ScheduledRebalanceMaxDelayMs int
The maximum delay that is scheduled in order to wait for the return of one or more departed workers before rebalancing and reassigning their connectors and tasks to the group. During this period the connectors and tasks of the departed workers remain unassigned. Defaults to 5 minutes.
SessionTimeoutMs int
The timeout in milliseconds used to detect failures when using Kafka’s group management facilities (defaults to 10000).
ConnectorClientConfigOverridePolicy string
Enum: All, None. Defines what client configurations can be overridden by the connector. Default is None.
ConsumerAutoOffsetReset string
Enum: earliest, latest. What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server. Default is earliest.
ConsumerFetchMaxBytes int
Records are fetched in batches by the consumer, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that the consumer can make progress. As such, this is not an absolute maximum. Example: 52428800.
ConsumerIsolationLevel string
Enum: read_committed, read_uncommitted. Transaction read isolation level. read_uncommitted is the default, but read_committed can be used if consume-exactly-once behavior is desired.
ConsumerMaxPartitionFetchBytes int
Records are fetched in batches by the consumer. If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress. Example: 1048576.
ConsumerMaxPollIntervalMs int
The maximum delay in milliseconds between invocations of poll() when using consumer group management (defaults to 300000).
ConsumerMaxPollRecords int
The maximum number of records returned in a single call to poll() (defaults to 500).
OffsetFlushIntervalMs int
The interval at which to try committing offsets for tasks (defaults to 60000).
OffsetFlushTimeoutMs int
Maximum number of milliseconds to wait for records to flush and partition offset data to be committed to offset storage before cancelling the process and restoring the offset data to be committed in a future attempt (defaults to 5000).
ProducerBatchSize int
This setting gives the upper bound of the batch size to be sent. If there are fewer than this many bytes accumulated for this partition, the producer will linger for the linger.ms time waiting for more records to show up. A batch size of zero will disable batching entirely (defaults to 16384).
ProducerBufferMemory int
The total bytes of memory the producer can use to buffer records waiting to be sent to the broker (defaults to 33554432).
ProducerCompressionType string
Enum: gzip, lz4, none, snappy, zstd. Specify the default compression type for producers. This configuration accepts the standard compression codecs (gzip, snappy, lz4, zstd). It additionally accepts none which is the default and equivalent to no compression.
ProducerLingerMs int
This setting gives the upper bound on the delay for batching: once there is batch.size worth of records for a partition it will be sent immediately regardless of this setting; however, if there are fewer than this many bytes accumulated for this partition, the producer will linger for the specified time waiting for more records to show up. Defaults to 0.
ProducerMaxRequestSize int
This setting will limit the number of record batches the producer will send in a single request to avoid sending huge requests. Example: 1048576.
ScheduledRebalanceMaxDelayMs int
The maximum delay that is scheduled in order to wait for the return of one or more departed workers before rebalancing and reassigning their connectors and tasks to the group. During this period the connectors and tasks of the departed workers remain unassigned. Defaults to 5 minutes.
SessionTimeoutMs int
The timeout in milliseconds used to detect failures when using Kafka’s group management facilities (defaults to 10000).
connectorClientConfigOverridePolicy String
Enum: All, None. Defines what client configurations can be overridden by the connector. Default is None.
consumerAutoOffsetReset String
Enum: earliest, latest. What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server. Default is earliest.
consumerFetchMaxBytes Integer
Records are fetched in batches by the consumer, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that the consumer can make progress. As such, this is not an absolute maximum. Example: 52428800.
consumerIsolationLevel String
Enum: read_committed, read_uncommitted. Transaction read isolation level. read_uncommitted is the default, but read_committed can be used if consume-exactly-once behavior is desired.
consumerMaxPartitionFetchBytes Integer
Records are fetched in batches by the consumer. If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress. Example: 1048576.
consumerMaxPollIntervalMs Integer
The maximum delay in milliseconds between invocations of poll() when using consumer group management (defaults to 300000).
consumerMaxPollRecords Integer
The maximum number of records returned in a single call to poll() (defaults to 500).
offsetFlushIntervalMs Integer
The interval at which to try committing offsets for tasks (defaults to 60000).
offsetFlushTimeoutMs Integer
Maximum number of milliseconds to wait for records to flush and partition offset data to be committed to offset storage before cancelling the process and restoring the offset data to be committed in a future attempt (defaults to 5000).
producerBatchSize Integer
This setting gives the upper bound of the batch size to be sent. If there are fewer than this many bytes accumulated for this partition, the producer will linger for the linger.ms time waiting for more records to show up. A batch size of zero will disable batching entirely (defaults to 16384).
producerBufferMemory Integer
The total bytes of memory the producer can use to buffer records waiting to be sent to the broker (defaults to 33554432).
producerCompressionType String
Enum: gzip, lz4, none, snappy, zstd. Specify the default compression type for producers. This configuration accepts the standard compression codecs (gzip, snappy, lz4, zstd). It additionally accepts none which is the default and equivalent to no compression.
producerLingerMs Integer
This setting gives the upper bound on the delay for batching: once there is batch.size worth of records for a partition it will be sent immediately regardless of this setting; however, if there are fewer than this many bytes accumulated for this partition, the producer will linger for the specified time waiting for more records to show up. Defaults to 0.
producerMaxRequestSize Integer
This setting will limit the number of record batches the producer will send in a single request to avoid sending huge requests. Example: 1048576.
scheduledRebalanceMaxDelayMs Integer
The maximum delay that is scheduled in order to wait for the return of one or more departed workers before rebalancing and reassigning their connectors and tasks to the group. During this period the connectors and tasks of the departed workers remain unassigned. Defaults to 5 minutes.
sessionTimeoutMs Integer
The timeout in milliseconds used to detect failures when using Kafka’s group management facilities (defaults to 10000).
connectorClientConfigOverridePolicy string
Enum: All, None. Defines what client configurations can be overridden by the connector. Default is None.
consumerAutoOffsetReset string
Enum: earliest, latest. What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server. Default is earliest.
consumerFetchMaxBytes number
Records are fetched in batches by the consumer, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that the consumer can make progress. As such, this is not an absolute maximum. Example: 52428800.
consumerIsolationLevel string
Enum: read_committed, read_uncommitted. Transaction read isolation level. read_uncommitted is the default, but read_committed can be used if consume-exactly-once behavior is desired.
consumerMaxPartitionFetchBytes number
Records are fetched in batches by the consumer. If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress. Example: 1048576.
consumerMaxPollIntervalMs number
The maximum delay in milliseconds between invocations of poll() when using consumer group management (defaults to 300000).
consumerMaxPollRecords number
The maximum number of records returned in a single call to poll() (defaults to 500).
offsetFlushIntervalMs number
The interval at which to try committing offsets for tasks (defaults to 60000).
offsetFlushTimeoutMs number
Maximum number of milliseconds to wait for records to flush and partition offset data to be committed to offset storage before cancelling the process and restoring the offset data to be committed in a future attempt (defaults to 5000).
producerBatchSize number
This setting gives the upper bound of the batch size to be sent. If there are fewer than this many bytes accumulated for this partition, the producer will linger for the linger.ms time waiting for more records to show up. A batch size of zero will disable batching entirely (defaults to 16384).
producerBufferMemory number
The total bytes of memory the producer can use to buffer records waiting to be sent to the broker (defaults to 33554432).
producerCompressionType string
Enum: gzip, lz4, none, snappy, zstd. Specify the default compression type for producers. This configuration accepts the standard compression codecs (gzip, snappy, lz4, zstd). It additionally accepts none which is the default and equivalent to no compression.
producerLingerMs number
This setting gives the upper bound on the delay for batching: once there is batch.size worth of records for a partition it will be sent immediately regardless of this setting; however, if there are fewer than this many bytes accumulated for this partition, the producer will linger for the specified time waiting for more records to show up. Defaults to 0.
producerMaxRequestSize number
This setting will limit the number of record batches the producer will send in a single request to avoid sending huge requests. Example: 1048576.
scheduledRebalanceMaxDelayMs number
The maximum delay that is scheduled in order to wait for the return of one or more departed workers before rebalancing and reassigning their connectors and tasks to the group. During this period the connectors and tasks of the departed workers remain unassigned. Defaults to 5 minutes.
sessionTimeoutMs number
The timeout in milliseconds used to detect failures when using Kafka’s group management facilities (defaults to 10000).
connector_client_config_override_policy str
Enum: All, None. Defines what client configurations can be overridden by the connector. Default is None.
consumer_auto_offset_reset str
Enum: earliest, latest. What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server. Default is earliest.
consumer_fetch_max_bytes int
Records are fetched in batches by the consumer, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that the consumer can make progress. As such, this is not an absolute maximum. Example: 52428800.
consumer_isolation_level str
Enum: read_committed, read_uncommitted. Transaction read isolation level. read_uncommitted is the default, but read_committed can be used if consume-exactly-once behavior is desired.
consumer_max_partition_fetch_bytes int
Records are fetched in batches by the consumer. If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress. Example: 1048576.
consumer_max_poll_interval_ms int
The maximum delay in milliseconds between invocations of poll() when using consumer group management (defaults to 300000).
consumer_max_poll_records int
The maximum number of records returned in a single call to poll() (defaults to 500).
offset_flush_interval_ms int
The interval at which to try committing offsets for tasks (defaults to 60000).
offset_flush_timeout_ms int
Maximum number of milliseconds to wait for records to flush and partition offset data to be committed to offset storage before cancelling the process and restoring the offset data to be committed in a future attempt (defaults to 5000).
producer_batch_size int
This setting gives the upper bound of the batch size to be sent. If there are fewer than this many bytes accumulated for this partition, the producer will linger for the linger.ms time waiting for more records to show up. A batch size of zero will disable batching entirely (defaults to 16384).
producer_buffer_memory int
The total bytes of memory the producer can use to buffer records waiting to be sent to the broker (defaults to 33554432).
producer_compression_type str
Enum: gzip, lz4, none, snappy, zstd. Specify the default compression type for producers. This configuration accepts the standard compression codecs (gzip, snappy, lz4, zstd). It additionally accepts none which is the default and equivalent to no compression.
producer_linger_ms int
This setting gives the upper bound on the delay for batching: once there is batch.size worth of records for a partition it will be sent immediately regardless of this setting; however, if there are fewer than this many bytes accumulated for this partition, the producer will linger for the specified time waiting for more records to show up. Defaults to 0.
producer_max_request_size int
This setting will limit the number of record batches the producer will send in a single request to avoid sending huge requests. Example: 1048576.
scheduled_rebalance_max_delay_ms int
The maximum delay that is scheduled in order to wait for the return of one or more departed workers before rebalancing and reassigning their connectors and tasks to the group. During this period the connectors and tasks of the departed workers remain unassigned. Defaults to 5 minutes.
session_timeout_ms int
The timeout in milliseconds used to detect failures when using Kafka’s group management facilities (defaults to 10000).
connectorClientConfigOverridePolicy String
Enum: All, None. Defines what client configurations can be overridden by the connector. Default is None.
consumerAutoOffsetReset String
Enum: earliest, latest. What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server. Default is earliest.
consumerFetchMaxBytes Number
Records are fetched in batches by the consumer, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that the consumer can make progress. As such, this is not an absolute maximum. Example: 52428800.
consumerIsolationLevel String
Enum: read_committed, read_uncommitted. Transaction read isolation level. read_uncommitted is the default, but read_committed can be used if consume-exactly-once behavior is desired.
consumerMaxPartitionFetchBytes Number
Records are fetched in batches by the consumer. If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress. Example: 1048576.
consumerMaxPollIntervalMs Number
The maximum delay in milliseconds between invocations of poll() when using consumer group management (defaults to 300000).
consumerMaxPollRecords Number
The maximum number of records returned in a single call to poll() (defaults to 500).
offsetFlushIntervalMs Number
The interval at which to try committing offsets for tasks (defaults to 60000).
offsetFlushTimeoutMs Number
Maximum number of milliseconds to wait for records to flush and partition offset data to be committed to offset storage before cancelling the process and restoring the offset data to be committed in a future attempt (defaults to 5000).
producerBatchSize Number
This setting gives the upper bound of the batch size to be sent. If there are fewer than this many bytes accumulated for this partition, the producer will linger for the linger.ms time waiting for more records to show up. A batch size of zero will disable batching entirely (defaults to 16384).
producerBufferMemory Number
The total bytes of memory the producer can use to buffer records waiting to be sent to the broker (defaults to 33554432).
producerCompressionType String
Enum: gzip, lz4, none, snappy, zstd. Specify the default compression type for producers. This configuration accepts the standard compression codecs (gzip, snappy, lz4, zstd). It additionally accepts none which is the default and equivalent to no compression.
producerLingerMs Number
This setting gives the upper bound on the delay for batching: once there is batch.size worth of records for a partition it will be sent immediately regardless of this setting; however, if there are fewer than this many bytes accumulated for this partition, the producer will linger for the specified time waiting for more records to show up. Defaults to 0.
producerMaxRequestSize Number
This setting will limit the number of record batches the producer will send in a single request to avoid sending huge requests. Example: 1048576.
scheduledRebalanceMaxDelayMs Number
The maximum delay that is scheduled in order to wait for the return of one or more departed workers before rebalancing and reassigning their connectors and tasks to the group. During this period the connectors and tasks of the departed workers remain unassigned. Defaults to 5 minutes.
sessionTimeoutMs Number
The timeout in milliseconds used to detect failures when using Kafka’s group management facilities (defaults to 10000).
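
As an illustration, the Kafka Connect worker settings above sit in their own block of the user config. A TypeScript sketch, assuming the block is exposed on kafkaUserConfig as kafkaConnectConfig (names and values are placeholders):

import * as aiven from "@pulumi/aiven";

// Illustrative only: run Kafka Connect and override a few worker defaults.
const kafkaWithConnect = new aiven.Kafka("kafka-with-connect", {
    project: "my-project",              // placeholder project name
    cloudName: "google-europe-west1",
    plan: "business-4",
    serviceName: "kafka-with-connect",
    kafkaUserConfig: {
        kafkaConnect: true,             // enable Kafka Connect on the service
        kafkaConnectConfig: {
            consumerIsolationLevel: "read_committed",  // only read committed records
            consumerMaxPollRecords: 1000,              // records returned per poll() call
            offsetFlushIntervalMs: 30000,              // commit task offsets every 30 s
            producerCompressionType: "lz4",            // compress connector producer batches
        },
    },
});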

KafkaKafkaUserConfigKafkaConnectSecretProvider, KafkaKafkaUserConfigKafkaConnectSecretProviderArgs

Name This property is required. string
Name of the secret provider. Used to reference secrets in connector config.
Aws KafkaKafkaUserConfigKafkaConnectSecretProviderAws
AWS secret provider configuration
Vault KafkaKafkaUserConfigKafkaConnectSecretProviderVault
Vault secret provider configuration
Name This property is required. string
Name of the secret provider. Used to reference secrets in connector config.
Aws KafkaKafkaUserConfigKafkaConnectSecretProviderAws
AWS secret provider configuration
Vault KafkaKafkaUserConfigKafkaConnectSecretProviderVault
Vault secret provider configuration
name This property is required. String
Name of the secret provider. Used to reference secrets in connector config.
aws KafkaKafkaUserConfigKafkaConnectSecretProviderAws
AWS secret provider configuration
vault KafkaKafkaUserConfigKafkaConnectSecretProviderVault
Vault secret provider configuration
name This property is required. string
Name of the secret provider. Used to reference secrets in connector config.
aws KafkaKafkaUserConfigKafkaConnectSecretProviderAws
AWS secret provider configuration
vault KafkaKafkaUserConfigKafkaConnectSecretProviderVault
Vault secret provider configuration
name This property is required. str
Name of the secret provider. Used to reference secrets in connector config.
aws KafkaKafkaUserConfigKafkaConnectSecretProviderAws
AWS secret provider configuration
vault KafkaKafkaUserConfigKafkaConnectSecretProviderVault
Vault secret provider configuration
name This property is required. String
Name of the secret provider. Used to reference secrets in connector config.
aws Property Map
AWS secret provider configuration
vault Property Map
Vault secret provider configuration

KafkaKafkaUserConfigKafkaConnectSecretProviderAws
, KafkaKafkaUserConfigKafkaConnectSecretProviderAwsArgs

AuthMethod This property is required. string
Enum: credentials. Auth method of the AWS secret provider.
Region This property is required. string
Region used to look up secrets with AWS Secrets Manager.
AccessKey string
Access key used to authenticate with AWS.
SecretKey string
Secret key used to authenticate with AWS.
AuthMethod This property is required. string
Enum: credentials. Auth method of the AWS secret provider.
Region This property is required. string
Region used to look up secrets with AWS Secrets Manager.
AccessKey string
Access key used to authenticate with AWS.
SecretKey string
Secret key used to authenticate with AWS.
authMethod This property is required. String
Enum: credentials. Auth method of the AWS secret provider.
region This property is required. String
Region used to look up secrets with AWS Secrets Manager.
accessKey String
Access key used to authenticate with AWS.
secretKey String
Secret key used to authenticate with AWS.
authMethod This property is required. string
Enum: credentials. Auth method of the AWS secret provider.
region This property is required. string
Region used to look up secrets with AWS Secrets Manager.
accessKey string
Access key used to authenticate with AWS.
secretKey string
Secret key used to authenticate with AWS.
auth_method This property is required. str
Enum: credentials. Auth method of the AWS secret provider.
region This property is required. str
Region used to look up secrets with AWS Secrets Manager.
access_key str
Access key used to authenticate with AWS.
secret_key str
Secret key used to authenticate with AWS.
authMethod This property is required. String
Enum: credentials. Auth method of the AWS secret provider.
region This property is required. String
Region used to look up secrets with AWS Secrets Manager.
accessKey String
Access key used to authenticate with AWS.
secretKey String
Secret key used to authenticate with AWS.

KafkaKafkaUserConfigKafkaConnectSecretProviderVault
, KafkaKafkaUserConfigKafkaConnectSecretProviderVaultArgs

Address This property is required. string
Address of the Vault server.
AuthMethod This property is required. string
Enum: token. Auth method of the vault secret provider.
EngineVersion int
Enum: 1, 2, and newer. KV Secrets Engine version of the Vault server instance.
PrefixPathDepth int
Prefix path depth of the secrets engine. Default is 1. If the secrets engine path has more than one segment, this must be increased to the number of segments.
Token string
Token used to authenticate with Vault when using the token auth method.
Address This property is required. string
Address of the Vault server.
AuthMethod This property is required. string
Enum: token. Auth method of the vault secret provider.
EngineVersion int
Enum: 1, 2, and newer. KV Secrets Engine version of the Vault server instance.
PrefixPathDepth int
Prefix path depth of the secrets engine. Default is 1. If the secrets engine path has more than one segment, this must be increased to the number of segments.
Token string
Token used to authenticate with Vault when using the token auth method.
address This property is required. String
Address of the Vault server.
authMethod This property is required. String
Enum: token. Auth method of the vault secret provider.
engineVersion Integer
Enum: 1, 2, and newer. KV Secrets Engine version of the Vault server instance.
prefixPathDepth Integer
Prefix path depth of the secrets engine. Default is 1. If the secrets engine path has more than one segment, this must be increased to the number of segments.
token String
Token used to authenticate with Vault when using the token auth method.
address This property is required. string
Address of the Vault server.
authMethod This property is required. string
Enum: token. Auth method of the vault secret provider.
engineVersion number
Enum: 1, 2, and newer. KV Secrets Engine version of the Vault server instance.
prefixPathDepth number
Prefix path depth of the secrets engine. Default is 1. If the secrets engine path has more than one segment, this must be increased to the number of segments.
token string
Token used to authenticate with Vault when using the token auth method.
address This property is required. str
Address of the Vault server.
auth_method This property is required. str
Enum: token. Auth method of the vault secret provider.
engine_version int
Enum: 1, 2, and newer. KV Secrets Engine version of the Vault server instance.
prefix_path_depth int
Prefix path depth of the secrets engine. Default is 1. If the secrets engine path has more than one segment, this must be increased to the number of segments.
token str
Token used to authenticate with Vault when using the token auth method.
address This property is required. String
Address of the Vault server.
authMethod This property is required. String
Enum: token. Auth method of the vault secret provider.
engineVersion Number
Enum: 1, 2, and newer. KV Secrets Engine version of the Vault server instance.
prefixPathDepth Number
Prefix path depth of the secrets engine. Default is 1. If the secrets engine path has more than one segment, this must be increased to the number of segments.
token String
Token used to authenticate with Vault when using the token auth method.
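A hedged TypeScript sketch of wiring these secret provider blocks together, assuming the list is exposed on kafkaUserConfig as kafkaConnectSecretProviders; the provider names, region, Vault address, and all credentials below are placeholders:

import * as aiven from "@pulumi/aiven";

const kafkaSecretProviders = new aiven.Kafka("kafka_secret_providers", {
    project: "my-project",
    cloudName: "google-europe-west1",
    plan: "business-4",
    serviceName: "kafka-secret-providers",
    kafkaUserConfig: {
        kafkaConnect: true,
        // Assumed field name for the secret provider list documented above.
        kafkaConnectSecretProviders: [
            {
                name: "aws-secrets",
                aws: {
                    authMethod: "credentials",
                    region: "eu-west-1",
                    accessKey: "PLACEHOLDER_ACCESS_KEY", // placeholder credential
                    secretKey: "PLACEHOLDER_SECRET_KEY", // placeholder credential
                },
            },
            {
                name: "vault-secrets",
                vault: {
                    address: "https://vault.example.com:8200",
                    authMethod: "token",
                    token: "PLACEHOLDER_TOKEN", // placeholder credential
                    engineVersion: 2,
                },
            },
        ],
    },
});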

KafkaKafkaUserConfigKafkaRestConfig
, KafkaKafkaUserConfigKafkaRestConfigArgs

ConsumerEnableAutoCommit bool
If true the consumer's offset will be periodically committed to Kafka in the background. Default: true.
ConsumerIdleDisconnectTimeout int
Specifies the maximum duration (in seconds) a client can remain idle before it is deleted. If a consumer is inactive, it will exit the consumer group, and its state will be discarded. A value of 0 (default) indicates that the consumer will not be disconnected automatically due to inactivity. Default: 0.
ConsumerRequestMaxBytes int
Maximum number of bytes in unencoded message keys and values returned by a single request. Default: 67108864.
ConsumerRequestTimeoutMs int
Enum: 1000, 15000, 30000. The maximum total time to wait for messages for a request if the maximum number of messages has not yet been reached. Default: 1000.
NameStrategy string
Enum: record_name, topic_name, topic_record_name. Name strategy to use when selecting subject for storing schemas. Default: topic_name.
NameStrategyValidation bool
If true, validate that given schema is registered under expected subject name by the used name strategy when producing messages. Default: true.
ProducerAcks string
Enum: -1, 0, 1, all. The number of acknowledgments the producer requires the leader to have received before considering a request complete. If set to all or -1, the leader will wait for the full set of in-sync replicas to acknowledge the record. Default: 1.
ProducerCompressionType string
Enum: gzip, lz4, none, snappy, zstd. Specify the default compression type for producers. This configuration accepts the standard compression codecs (gzip, snappy, lz4, zstd). It additionally accepts none which is the default and equivalent to no compression.
ProducerLingerMs int
Wait for up to the given delay to allow batching records together. Default: 0.
ProducerMaxRequestSize int
The maximum size of a request in bytes. Note that Kafka broker can also cap the record batch size. Default: 1048576.
SimpleconsumerPoolSizeMax int
Maximum number of SimpleConsumers that can be instantiated per broker. Default: 25.
ConsumerEnableAutoCommit bool
If true the consumer's offset will be periodically committed to Kafka in the background. Default: true.
ConsumerIdleDisconnectTimeout int
Specifies the maximum duration (in seconds) a client can remain idle before it is deleted. If a consumer is inactive, it will exit the consumer group, and its state will be discarded. A value of 0 (default) indicates that the consumer will not be disconnected automatically due to inactivity. Default: 0.
ConsumerRequestMaxBytes int
Maximum number of bytes in unencoded message keys and values returned by a single request. Default: 67108864.
ConsumerRequestTimeoutMs int
Enum: 1000, 15000, 30000. The maximum total time to wait for messages for a request if the maximum number of messages has not yet been reached. Default: 1000.
NameStrategy string
Enum: record_name, topic_name, topic_record_name. Name strategy to use when selecting subject for storing schemas. Default: topic_name.
NameStrategyValidation bool
If true, validate that given schema is registered under expected subject name by the used name strategy when producing messages. Default: true.
ProducerAcks string
Enum: -1, 0, 1, all. The number of acknowledgments the producer requires the leader to have received before considering a request complete. If set to all or -1, the leader will wait for the full set of in-sync replicas to acknowledge the record. Default: 1.
ProducerCompressionType string
Enum: gzip, lz4, none, snappy, zstd. Specify the default compression type for producers. This configuration accepts the standard compression codecs (gzip, snappy, lz4, zstd). It additionally accepts none which is the default and equivalent to no compression.
ProducerLingerMs int
Wait for up to the given delay to allow batching records together. Default: 0.
ProducerMaxRequestSize int
The maximum size of a request in bytes. Note that Kafka broker can also cap the record batch size. Default: 1048576.
SimpleconsumerPoolSizeMax int
Maximum number of SimpleConsumers that can be instantiated per broker. Default: 25.
consumerEnableAutoCommit Boolean
If true the consumer's offset will be periodically committed to Kafka in the background. Default: true.
consumerIdleDisconnectTimeout Integer
Specifies the maximum duration (in seconds) a client can remain idle before it is deleted. If a consumer is inactive, it will exit the consumer group, and its state will be discarded. A value of 0 (default) indicates that the consumer will not be disconnected automatically due to inactivity. Default: 0.
consumerRequestMaxBytes Integer
Maximum number of bytes in unencoded message keys and values returned by a single request. Default: 67108864.
consumerRequestTimeoutMs Integer
Enum: 1000, 15000, 30000. The maximum total time to wait for messages for a request if the maximum number of messages has not yet been reached. Default: 1000.
nameStrategy String
Enum: record_name, topic_name, topic_record_name. Name strategy to use when selecting subject for storing schemas. Default: topic_name.
nameStrategyValidation Boolean
If true, validate that given schema is registered under expected subject name by the used name strategy when producing messages. Default: true.
producerAcks String
Enum: -1, 0, 1, all. The number of acknowledgments the producer requires the leader to have received before considering a request complete. If set to all or -1, the leader will wait for the full set of in-sync replicas to acknowledge the record. Default: 1.
producerCompressionType String
Enum: gzip, lz4, none, snappy, zstd. Specify the default compression type for producers. This configuration accepts the standard compression codecs (gzip, snappy, lz4, zstd). It additionally accepts none which is the default and equivalent to no compression.
producerLingerMs Integer
Wait for up to the given delay to allow batching records together. Default: 0.
producerMaxRequestSize Integer
The maximum size of a request in bytes. Note that Kafka broker can also cap the record batch size. Default: 1048576.
simpleconsumerPoolSizeMax Integer
Maximum number of SimpleConsumers that can be instantiated per broker. Default: 25.
consumerEnableAutoCommit boolean
If true the consumer's offset will be periodically committed to Kafka in the background. Default: true.
consumerIdleDisconnectTimeout number
Specifies the maximum duration (in seconds) a client can remain idle before it is deleted. If a consumer is inactive, it will exit the consumer group, and its state will be discarded. A value of 0 (default) indicates that the consumer will not be disconnected automatically due to inactivity. Default: 0.
consumerRequestMaxBytes number
Maximum number of bytes in unencoded message keys and values returned by a single request. Default: 67108864.
consumerRequestTimeoutMs number
Enum: 1000, 15000, 30000. The maximum total time to wait for messages for a request if the maximum number of messages has not yet been reached. Default: 1000.
nameStrategy string
Enum: record_name, topic_name, topic_record_name. Name strategy to use when selecting subject for storing schemas. Default: topic_name.
nameStrategyValidation boolean
If true, validate that given schema is registered under expected subject name by the used name strategy when producing messages. Default: true.
producerAcks string
Enum: -1, 0, 1, all. The number of acknowledgments the producer requires the leader to have received before considering a request complete. If set to all or -1, the leader will wait for the full set of in-sync replicas to acknowledge the record. Default: 1.
producerCompressionType string
Enum: gzip, lz4, none, snappy, zstd. Specify the default compression type for producers. This configuration accepts the standard compression codecs (gzip, snappy, lz4, zstd). It additionally accepts none which is the default and equivalent to no compression.
producerLingerMs number
Wait for up to the given delay to allow batching records together. Default: 0.
producerMaxRequestSize number
The maximum size of a request in bytes. Note that Kafka broker can also cap the record batch size. Default: 1048576.
simpleconsumerPoolSizeMax number
Maximum number of SimpleConsumers that can be instantiated per broker. Default: 25.
consumer_enable_auto_commit bool
If true the consumer's offset will be periodically committed to Kafka in the background. Default: true.
consumer_idle_disconnect_timeout int
Specifies the maximum duration (in seconds) a client can remain idle before it is deleted. If a consumer is inactive, it will exit the consumer group, and its state will be discarded. A value of 0 (default) indicates that the consumer will not be disconnected automatically due to inactivity. Default: 0.
consumer_request_max_bytes int
Maximum number of bytes in unencoded message keys and values returned by a single request. Default: 67108864.
consumer_request_timeout_ms int
Enum: 1000, 15000, 30000. The maximum total time to wait for messages for a request if the maximum number of messages has not yet been reached. Default: 1000.
name_strategy str
Enum: record_name, topic_name, topic_record_name. Name strategy to use when selecting subject for storing schemas. Default: topic_name.
name_strategy_validation bool
If true, validate that given schema is registered under expected subject name by the used name strategy when producing messages. Default: true.
producer_acks str
Enum: -1, 0, 1, all. The number of acknowledgments the producer requires the leader to have received before considering a request complete. If set to all or -1, the leader will wait for the full set of in-sync replicas to acknowledge the record. Default: 1.
producer_compression_type str
Enum: gzip, lz4, none, snappy, zstd. Specify the default compression type for producers. This configuration accepts the standard compression codecs (gzip, snappy, lz4, zstd). It additionally accepts none which is the default and equivalent to no compression.
producer_linger_ms int
Wait for up to the given delay to allow batching records together. Default: 0.
producer_max_request_size int
The maximum size of a request in bytes. Note that Kafka broker can also cap the record batch size. Default: 1048576.
simpleconsumer_pool_size_max int
Maximum number of SimpleConsumers that can be instantiated per broker. Default: 25.
consumerEnableAutoCommit Boolean
If true the consumer's offset will be periodically committed to Kafka in the background. Default: true.
consumerIdleDisconnectTimeout Number
Specifies the maximum duration (in seconds) a client can remain idle before it is deleted. If a consumer is inactive, it will exit the consumer group, and its state will be discarded. A value of 0 (default) indicates that the consumer will not be disconnected automatically due to inactivity. Default: 0.
consumerRequestMaxBytes Number
Maximum number of bytes in unencoded message keys and values returned by a single request. Default: 67108864.
consumerRequestTimeoutMs Number
Enum: 1000, 15000, 30000. The maximum total time to wait for messages for a request if the maximum number of messages has not yet been reached. Default: 1000.
nameStrategy String
Enum: record_name, topic_name, topic_record_name. Name strategy to use when selecting subject for storing schemas. Default: topic_name.
nameStrategyValidation Boolean
If true, validate that given schema is registered under expected subject name by the used name strategy when producing messages. Default: true.
producerAcks String
Enum: -1, 0, 1, all. The number of acknowledgments the producer requires the leader to have received before considering a request complete. If set to all or -1, the leader will wait for the full set of in-sync replicas to acknowledge the record. Default: 1.
producerCompressionType String
Enum: gzip, lz4, none, snappy, zstd. Specify the default compression type for producers. This configuration accepts the standard compression codecs (gzip, snappy, lz4, zstd). It additionally accepts none which is the default and equivalent to no compression.
producerLingerMs Number
Wait for up to the given delay to allow batching records together. Default: 0.
producerMaxRequestSize Number
The maximum size of a request in bytes. Note that Kafka broker can also cap the record batch size. Default: 1048576.
simpleconsumerPoolSizeMax Number
Maximum number of SimpleConsumers that can be instantiated per broker. Default: 25.
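A minimal TypeScript sketch of adjusting a few of these Kafka REST settings, assuming the block is exposed on kafkaUserConfig as kafkaRestConfig; the project and service names are placeholders:

import * as aiven from "@pulumi/aiven";

const kafkaRestTuning = new aiven.Kafka("kafka_rest_tuning", {
    project: "my-project",
    cloudName: "google-europe-west1",
    plan: "business-4",
    serviceName: "kafka-rest-tuning",
    kafkaUserConfig: {
        kafkaRest: true,
        // Assumed field name for the REST proxy settings documented above.
        kafkaRestConfig: {
            consumerEnableAutoCommit: true,
            consumerRequestTimeoutMs: 30000,
            producerAcks: "all",
            producerCompressionType: "snappy",
        },
    },
});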

KafkaKafkaUserConfigKafkaSaslMechanisms
, KafkaKafkaUserConfigKafkaSaslMechanismsArgs

Plain bool
Enable PLAIN mechanism. Default: true.
ScramSha256 bool
Enable SCRAM-SHA-256 mechanism. Default: true.
ScramSha512 bool
Enable SCRAM-SHA-512 mechanism. Default: true.
Plain bool
Enable PLAIN mechanism. Default: true.
ScramSha256 bool
Enable SCRAM-SHA-256 mechanism. Default: true.
ScramSha512 bool
Enable SCRAM-SHA-512 mechanism. Default: true.
plain Boolean
Enable PLAIN mechanism. Default: true.
scramSha256 Boolean
Enable SCRAM-SHA-256 mechanism. Default: true.
scramSha512 Boolean
Enable SCRAM-SHA-512 mechanism. Default: true.
plain boolean
Enable PLAIN mechanism. Default: true.
scramSha256 boolean
Enable SCRAM-SHA-256 mechanism. Default: true.
scramSha512 boolean
Enable SCRAM-SHA-512 mechanism. Default: true.
plain bool
Enable PLAIN mechanism. Default: true.
scram_sha256 bool
Enable SCRAM-SHA-256 mechanism. Default: true.
scram_sha512 bool
Enable SCRAM-SHA-512 mechanism. Default: true.
plain Boolean
Enable PLAIN mechanism. Default: true.
scramSha256 Boolean
Enable SCRAM-SHA-256 mechanism. Default: true.
scramSha512 Boolean
Enable SCRAM-SHA-512 mechanism. Default: true.
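A hedged TypeScript sketch of restricting the enabled SASL mechanisms, assuming the block is exposed on kafkaUserConfig as kafkaSaslMechanisms; project and service names are placeholders:

import * as aiven from "@pulumi/aiven";

const kafkaScramOnly = new aiven.Kafka("kafka_scram_only", {
    project: "my-project",
    cloudName: "google-europe-west1",
    plan: "business-4",
    serviceName: "kafka-scram-only",
    kafkaUserConfig: {
        // Assumed field name for the SASL mechanism toggles documented above.
        kafkaSaslMechanisms: {
            plain: false,
            scramSha256: true,
            scramSha512: true,
        },
    },
});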

KafkaKafkaUserConfigPrivateAccess
, KafkaKafkaUserConfigPrivateAccessArgs

Kafka bool
Allow clients to connect to kafka with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
KafkaConnect bool
Allow clients to connect to kafka_connect with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
KafkaRest bool
Allow clients to connect to kafka_rest with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
Prometheus bool
Allow clients to connect to prometheus with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
SchemaRegistry bool
Allow clients to connect to schema_registry with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
Kafka bool
Allow clients to connect to kafka with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
KafkaConnect bool
Allow clients to connect to kafka_connect with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
KafkaRest bool
Allow clients to connect to kafka_rest with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
Prometheus bool
Allow clients to connect to prometheus with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
SchemaRegistry bool
Allow clients to connect to schema_registry with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
kafka Boolean
Allow clients to connect to kafka with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
kafkaConnect Boolean
Allow clients to connect to kafka_connect with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
kafkaRest Boolean
Allow clients to connect to kafka_rest with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
prometheus Boolean
Allow clients to connect to prometheus with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
schemaRegistry Boolean
Allow clients to connect to schema_registry with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
kafka boolean
Allow clients to connect to kafka with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
kafkaConnect boolean
Allow clients to connect to kafka_connect with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
kafkaRest boolean
Allow clients to connect to kafka_rest with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
prometheus boolean
Allow clients to connect to prometheus with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
schemaRegistry boolean
Allow clients to connect to schema_registry with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
kafka bool
Allow clients to connect to kafka with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
kafka_connect bool
Allow clients to connect to kafka_connect with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
kafka_rest bool
Allow clients to connect to kafka_rest with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
prometheus bool
Allow clients to connect to prometheus with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
schema_registry bool
Allow clients to connect to schema_registry with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
kafka Boolean
Allow clients to connect to kafka with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
kafkaConnect Boolean
Allow clients to connect to kafka_connect with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
kafkaRest Boolean
Allow clients to connect to kafka_rest with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
prometheus Boolean
Allow clients to connect to prometheus with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.
schemaRegistry Boolean
Allow clients to connect to schema_registry with a DNS name that always resolves to the service's private IP addresses. Only available in certain network locations.

KafkaKafkaUserConfigPrivatelinkAccess
, KafkaKafkaUserConfigPrivatelinkAccessArgs

Jolokia bool
Enable jolokia.
Kafka bool
Enable kafka.
KafkaConnect bool
Enable kafka_connect.
KafkaRest bool
Enable kafka_rest.
Prometheus bool
Enable prometheus.
SchemaRegistry bool
Enable schema_registry.
Jolokia bool
Enable jolokia.
Kafka bool
Enable kafka.
KafkaConnect bool
Enable kafka_connect.
KafkaRest bool
Enable kafka_rest.
Prometheus bool
Enable prometheus.
SchemaRegistry bool
Enable schema_registry.
jolokia Boolean
Enable jolokia.
kafka Boolean
Enable kafka.
kafkaConnect Boolean
Enable kafka_connect.
kafkaRest Boolean
Enable kafka_rest.
prometheus Boolean
Enable prometheus.
schemaRegistry Boolean
Enable schema_registry.
jolokia boolean
Enable jolokia.
kafka boolean
Enable kafka.
kafkaConnect boolean
Enable kafka_connect.
kafkaRest boolean
Enable kafka_rest.
prometheus boolean
Enable prometheus.
schemaRegistry boolean
Enable schema_registry.
jolokia bool
Enable jolokia.
kafka bool
Enable kafka.
kafka_connect bool
Enable kafka_connect.
kafka_rest bool
Enable kafka_rest.
prometheus bool
Enable prometheus.
schema_registry bool
Enable schema_registry.
jolokia Boolean
Enable jolokia.
kafka Boolean
Enable kafka.
kafkaConnect Boolean
Enable kafka_connect.
kafkaRest Boolean
Enable kafka_rest.
prometheus Boolean
Enable prometheus.
schemaRegistry Boolean
Enable schema_registry.
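A minimal TypeScript sketch combining the private access and privatelink access blocks above; it assumes the service already runs in a project VPC (for private access) and has a privatelink connection set up (for privatelink access), and all names are placeholders:

import * as aiven from "@pulumi/aiven";

const kafkaPrivate = new aiven.Kafka("kafka_private", {
    project: "my-project",
    cloudName: "aws-eu-west-1",
    plan: "business-4",
    serviceName: "kafka-private",
    kafkaUserConfig: {
        privateAccess: {
            kafka: true,
            prometheus: true,
        },
        privatelinkAccess: {
            kafka: true,
            schemaRegistry: true,
        },
    },
});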

KafkaKafkaUserConfigPublicAccess
, KafkaKafkaUserConfigPublicAccessArgs

Kafka bool
Allow clients to connect to kafka from the public internet for service nodes that are in a project VPC or another type of private network.
KafkaConnect bool
Allow clients to connect to kafka_connect from the public internet for service nodes that are in a project VPC or another type of private network.
KafkaRest bool
Allow clients to connect to kafka_rest from the public internet for service nodes that are in a project VPC or another type of private network.
Prometheus bool
Allow clients to connect to prometheus from the public internet for service nodes that are in a project VPC or another type of private network.
SchemaRegistry bool
Allow clients to connect to schema_registry from the public internet for service nodes that are in a project VPC or another type of private network.
Kafka bool
Allow clients to connect to kafka from the public internet for service nodes that are in a project VPC or another type of private network.
KafkaConnect bool
Allow clients to connect to kafka_connect from the public internet for service nodes that are in a project VPC or another type of private network.
KafkaRest bool
Allow clients to connect to kafka_rest from the public internet for service nodes that are in a project VPC or another type of private network.
Prometheus bool
Allow clients to connect to prometheus from the public internet for service nodes that are in a project VPC or another type of private network.
SchemaRegistry bool
Allow clients to connect to schema_registry from the public internet for service nodes that are in a project VPC or another type of private network.
kafka Boolean
Allow clients to connect to kafka from the public internet for service nodes that are in a project VPC or another type of private network.
kafkaConnect Boolean
Allow clients to connect to kafka_connect from the public internet for service nodes that are in a project VPC or another type of private network.
kafkaRest Boolean
Allow clients to connect to kafka_rest from the public internet for service nodes that are in a project VPC or another type of private network.
prometheus Boolean
Allow clients to connect to prometheus from the public internet for service nodes that are in a project VPC or another type of private network.
schemaRegistry Boolean
Allow clients to connect to schema_registry from the public internet for service nodes that are in a project VPC or another type of private network.
kafka boolean
Allow clients to connect to kafka from the public internet for service nodes that are in a project VPC or another type of private network.
kafkaConnect boolean
Allow clients to connect to kafka_connect from the public internet for service nodes that are in a project VPC or another type of private network.
kafkaRest boolean
Allow clients to connect to kafka_rest from the public internet for service nodes that are in a project VPC or another type of private network.
prometheus boolean
Allow clients to connect to prometheus from the public internet for service nodes that are in a project VPC or another type of private network.
schemaRegistry boolean
Allow clients to connect to schema_registry from the public internet for service nodes that are in a project VPC or another type of private network.
kafka bool
Allow clients to connect to kafka from the public internet for service nodes that are in a project VPC or another type of private network.
kafka_connect bool
Allow clients to connect to kafka_connect from the public internet for service nodes that are in a project VPC or another type of private network.
kafka_rest bool
Allow clients to connect to kafka_rest from the public internet for service nodes that are in a project VPC or another type of private network.
prometheus bool
Allow clients to connect to prometheus from the public internet for service nodes that are in a project VPC or another type of private network.
schema_registry bool
Allow clients to connect to schema_registry from the public internet for service nodes that are in a project VPC or another type of private network.
kafka Boolean
Allow clients to connect to kafka from the public internet for service nodes that are in a project VPC or another type of private network.
kafkaConnect Boolean
Allow clients to connect to kafka_connect from the public internet for service nodes that are in a project VPC or another type of private network.
kafkaRest Boolean
Allow clients to connect to kafka_rest from the public internet for service nodes that are in a project VPC or another type of private network.
prometheus Boolean
Allow clients to connect to prometheus from the public internet for service nodes that are in a project VPC or another type of private network.
schemaRegistry Boolean
Allow clients to connect to schema_registry from the public internet for service nodes that are in a project VPC or another type of private network.

KafkaKafkaUserConfigSchemaRegistryConfig
, KafkaKafkaUserConfigSchemaRegistryConfigArgs

LeaderEligibility bool
If true, Karapace / Schema Registry on the service nodes can participate in leader election. It might be needed to disable this when the schemas topic is replicated to a secondary cluster and Karapace / Schema Registry there must not participate in leader election. Defaults to true.
RetriableErrorsSilenced bool
If enabled, Kafka errors which can be retried or custom errors specified for the service will not be raised; instead, a warning is logged. This reduces noise in issue tracking systems, e.g. Sentry. Defaults to true.
SchemaReaderStrictMode bool
If enabled, causes the Karapace schema-registry service to shut down when there are invalid schema records in the _schemas topic. Defaults to false.
TopicName string
The durable single partition topic that acts as the durable log for the data. This topic must be compacted to avoid losing data due to retention policy. Please note that changing this configuration in an existing Schema Registry / Karapace setup leads to previous schemas being inaccessible, data encoded with them potentially unreadable and schema ID sequence put out of order. It's only possible to do the switch while Schema Registry / Karapace is disabled. Defaults to _schemas.
LeaderEligibility bool
If true, Karapace / Schema Registry on the service nodes can participate in leader election. It might be needed to disable this when the schemas topic is replicated to a secondary cluster and Karapace / Schema Registry there must not participate in leader election. Defaults to true.
RetriableErrorsSilenced bool
If enabled, Kafka errors which can be retried or custom errors specified for the service will not be raised; instead, a warning is logged. This reduces noise in issue tracking systems, e.g. Sentry. Defaults to true.
SchemaReaderStrictMode bool
If enabled, causes the Karapace schema-registry service to shut down when there are invalid schema records in the _schemas topic. Defaults to false.
TopicName string
The durable single partition topic that acts as the durable log for the data. This topic must be compacted to avoid losing data due to retention policy. Please note that changing this configuration in an existing Schema Registry / Karapace setup leads to previous schemas being inaccessible, data encoded with them potentially unreadable and schema ID sequence put out of order. It's only possible to do the switch while Schema Registry / Karapace is disabled. Defaults to _schemas.
leaderEligibility Boolean
If true, Karapace / Schema Registry on the service nodes can participate in leader election. It might be needed to disable this when the schemas topic is replicated to a secondary cluster and Karapace / Schema Registry there must not participate in leader election. Defaults to true.
retriableErrorsSilenced Boolean
If enabled, Kafka errors which can be retried or custom errors specified for the service will not be raised; instead, a warning is logged. This reduces noise in issue tracking systems, e.g. Sentry. Defaults to true.
schemaReaderStrictMode Boolean
If enabled, causes the Karapace schema-registry service to shut down when there are invalid schema records in the _schemas topic. Defaults to false.
topicName String
The durable single partition topic that acts as the durable log for the data. This topic must be compacted to avoid losing data due to retention policy. Please note that changing this configuration in an existing Schema Registry / Karapace setup leads to previous schemas being inaccessible, data encoded with them potentially unreadable and schema ID sequence put out of order. It's only possible to do the switch while Schema Registry / Karapace is disabled. Defaults to _schemas.
leaderEligibility boolean
If true, Karapace / Schema Registry on the service nodes can participate in leader election. It might be needed to disable this when the schemas topic is replicated to a secondary cluster and Karapace / Schema Registry there must not participate in leader election. Defaults to true.
retriableErrorsSilenced boolean
If enabled, Kafka errors which can be retried or custom errors specified for the service will not be raised; instead, a warning is logged. This reduces noise in issue tracking systems, e.g. Sentry. Defaults to true.
schemaReaderStrictMode boolean
If enabled, causes the Karapace schema-registry service to shut down when there are invalid schema records in the _schemas topic. Defaults to false.
topicName string
The durable single partition topic that acts as the durable log for the data. This topic must be compacted to avoid losing data due to retention policy. Please note that changing this configuration in an existing Schema Registry / Karapace setup leads to previous schemas being inaccessible, data encoded with them potentially unreadable and schema ID sequence put out of order. It's only possible to do the switch while Schema Registry / Karapace is disabled. Defaults to _schemas.
leader_eligibility bool
If true, Karapace / Schema Registry on the service nodes can participate in leader election. It might be needed to disable this when the schemas topic is replicated to a secondary cluster and Karapace / Schema Registry there must not participate in leader election. Defaults to true.
retriable_errors_silenced bool
If enabled, Kafka errors which can be retried or custom errors specified for the service will not be raised; instead, a warning is logged. This reduces noise in issue tracking systems, e.g. Sentry. Defaults to true.
schema_reader_strict_mode bool
If enabled, causes the Karapace schema-registry service to shut down when there are invalid schema records in the _schemas topic. Defaults to false.
topic_name str
The durable single partition topic that acts as the durable log for the data. This topic must be compacted to avoid losing data due to retention policy. Please note that changing this configuration in an existing Schema Registry / Karapace setup leads to previous schemas being inaccessible, data encoded with them potentially unreadable and schema ID sequence put out of order. It's only possible to do the switch while Schema Registry / Karapace is disabled. Defaults to _schemas.
leaderEligibility Boolean
If true, Karapace / Schema Registry on the service nodes can participate in leader election. It might be needed to disable this when the schemas topic is replicated to a secondary cluster and Karapace / Schema Registry there must not participate in leader election. Defaults to true.
retriableErrorsSilenced Boolean
If enabled, Kafka errors which can be retried or custom errors specified for the service will not be raised; instead, a warning is logged. This reduces noise in issue tracking systems, e.g. Sentry. Defaults to true.
schemaReaderStrictMode Boolean
If enabled, causes the Karapace schema-registry service to shut down when there are invalid schema records in the _schemas topic. Defaults to false.
topicName String
The durable single partition topic that acts as the durable log for the data. This topic must be compacted to avoid losing data due to retention policy. Please note that changing this configuration in an existing Schema Registry / Karapace setup leads to previous schemas being inaccessible, data encoded with them potentially unreadable and schema ID sequence put out of order. It's only possible to do the switch while Schema Registry / Karapace is disabled. Defaults to _schemas.
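A minimal TypeScript sketch of setting these Schema Registry / Karapace options, assuming the block is exposed on kafkaUserConfig as schemaRegistryConfig; project and service names are placeholders:

import * as aiven from "@pulumi/aiven";

const kafkaWithRegistry = new aiven.Kafka("kafka_with_registry", {
    project: "my-project",
    cloudName: "google-europe-west1",
    plan: "business-4",
    serviceName: "kafka-with-registry",
    kafkaUserConfig: {
        schemaRegistry: true,
        schemaRegistryConfig: {
            leaderEligibility: true,
            topicName: "_schemas",
        },
    },
});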

KafkaKafkaUserConfigSingleZone
, KafkaKafkaUserConfigSingleZoneArgs

Enabled bool
Whether to allocate nodes in the same availability zone or spread them across available zones. By default, service nodes are spread across different AZs. Single-AZ support is best-effort and nodes may temporarily be allocated in different AZs, e.g. in case of capacity limitations in one AZ.
Enabled bool
Whether to allocate nodes in the same availability zone or spread them across available zones. By default, service nodes are spread across different AZs. Single-AZ support is best-effort and nodes may temporarily be allocated in different AZs, e.g. in case of capacity limitations in one AZ.
enabled Boolean
Whether to allocate nodes in the same availability zone or spread them across available zones. By default, service nodes are spread across different AZs. Single-AZ support is best-effort and nodes may temporarily be allocated in different AZs, e.g. in case of capacity limitations in one AZ.
enabled boolean
Whether to allocate nodes in the same availability zone or spread them across available zones. By default, service nodes are spread across different AZs. Single-AZ support is best-effort and nodes may temporarily be allocated in different AZs, e.g. in case of capacity limitations in one AZ.
enabled bool
Whether to allocate nodes in the same availability zone or spread them across available zones. By default, service nodes are spread across different AZs. Single-AZ support is best-effort and nodes may temporarily be allocated in different AZs, e.g. in case of capacity limitations in one AZ.
enabled Boolean
Whether to allocate nodes in the same availability zone or spread them across available zones. By default, service nodes are spread across different AZs. Single-AZ support is best-effort and nodes may temporarily be allocated in different AZs, e.g. in case of capacity limitations in one AZ.
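A minimal TypeScript sketch of requesting single-AZ placement, assuming the block is exposed on kafkaUserConfig as singleZone; project and service names are placeholders:

import * as aiven from "@pulumi/aiven";

const kafkaSingleZone = new aiven.Kafka("kafka_single_zone", {
    project: "my-project",
    cloudName: "google-europe-west1",
    plan: "business-4",
    serviceName: "kafka-single-zone",
    kafkaUserConfig: {
        // Best-effort placement in a single availability zone.
        singleZone: {
            enabled: true,
        },
    },
});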

KafkaKafkaUserConfigTieredStorage
, KafkaKafkaUserConfigTieredStorageArgs

Enabled bool
Whether to enable the tiered storage functionality.
LocalCache KafkaKafkaUserConfigTieredStorageLocalCache
Local cache configuration

Deprecated: This property is deprecated.

Enabled bool
Whether to enable the tiered storage functionality.
LocalCache KafkaKafkaUserConfigTieredStorageLocalCache
Local cache configuration

Deprecated: This property is deprecated.

enabled Boolean
Whether to enable the tiered storage functionality.
localCache KafkaKafkaUserConfigTieredStorageLocalCache
Local cache configuration

Deprecated: This property is deprecated.

enabled boolean
Whether to enable the tiered storage functionality.
localCache KafkaKafkaUserConfigTieredStorageLocalCache
Local cache configuration

Deprecated: This property is deprecated.

enabled bool
Whether to enable the tiered storage functionality.
local_cache KafkaKafkaUserConfigTieredStorageLocalCache
Local cache configuration

Deprecated: This property is deprecated.

enabled Boolean
Whether to enable the tiered storage functionality.
localCache Property Map
Local cache configuration

Deprecated: This property is deprecated.

KafkaKafkaUserConfigTieredStorageLocalCache
, KafkaKafkaUserConfigTieredStorageLocalCacheArgs

Size int
Local cache size in bytes. Example: 1073741824.

Deprecated: This property is deprecated.

Size int
Local cache size in bytes. Example: 1073741824.

Deprecated: This property is deprecated.

size Integer
Local cache size in bytes. Example: 1073741824.

Deprecated: This property is deprecated.

size number
Local cache size in bytes. Example: 1073741824.

Deprecated: This property is deprecated.

size int
Local cache size in bytes. Example: 1073741824.

Deprecated: This property is deprecated.

size Number
Local cache size in bytes. Example: 1073741824.

Deprecated: This property is deprecated.
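A minimal TypeScript sketch of enabling tiered storage, assuming the block is exposed on kafkaUserConfig as tieredStorage; the deprecated localCache block is intentionally omitted, and project and service names are placeholders:

import * as aiven from "@pulumi/aiven";

const kafkaTiered = new aiven.Kafka("kafka_tiered", {
    project: "my-project",
    cloudName: "google-europe-west1",
    plan: "business-4",
    serviceName: "kafka-tiered",
    kafkaUserConfig: {
        tieredStorage: {
            enabled: true,
            // localCache is deprecated (see above) and therefore not set.
        },
    },
});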

KafkaServiceIntegration
, KafkaServiceIntegrationArgs

IntegrationType This property is required. string
Type of the service integration
SourceServiceName This property is required. string
Name of the source service
IntegrationType This property is required. string
Type of the service integration
SourceServiceName This property is required. string
Name of the source service
integrationType This property is required. String
Type of the service integration
sourceServiceName This property is required. String
Name of the source service
integrationType This property is required. string
Type of the service integration
sourceServiceName This property is required. string
Name of the source service
integration_type This property is required. str
Type of the service integration
source_service_name This property is required. str
Name of the source service
integrationType This property is required. String
Type of the service integration
sourceServiceName This property is required. String
Name of the source service
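A hedged TypeScript sketch of attaching a service integration at creation time; both values are placeholders, and you would substitute an integration type supported for Kafka and the name of an existing source service:

import * as aiven from "@pulumi/aiven";

const kafkaIntegrated = new aiven.Kafka("kafka_integrated", {
    project: "my-project",
    cloudName: "google-europe-west1",
    plan: "business-4",
    serviceName: "kafka-integrated",
    serviceIntegrations: [{
        // Placeholder values; replace with a supported integration type
        // and the name of an existing source service in the same project.
        integrationType: "example_integration_type",
        sourceServiceName: "example-source-service",
    }],
});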

KafkaTag
, KafkaTagArgs

Key This property is required. string
Service tag key
Value This property is required. string
Service tag value
Key This property is required. string
Service tag key
Value This property is required. string
Service tag value
key This property is required. String
Service tag key
value This property is required. String
Service tag value
key This property is required. string
Service tag key
value This property is required. string
Service tag value
key This property is required. str
Service tag key
value This property is required. str
Service tag value
key This property is required. String
Service tag key
value This property is required. String
Service tag value

KafkaTechEmail
, KafkaTechEmailArgs

Email This property is required. string
An email address to contact for technical issues
Email This property is required. string
An email address to contact for technical issues
email This property is required. String
An email address to contact for technical issues
email This property is required. string
An email address to contact for technical issues
email This property is required. str
An email address to contact for technical issues
email This property is required. String
An email address to contact for technical issues
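A minimal TypeScript sketch combining service tags and a technical contact; the tag keys, values, and email address are placeholders:

import * as aiven from "@pulumi/aiven";

const kafkaTagged = new aiven.Kafka("kafka_tagged", {
    project: "my-project",
    cloudName: "google-europe-west1",
    plan: "business-4",
    serviceName: "kafka-tagged",
    tags: [
        { key: "environment", value: "staging" },
        { key: "team", value: "data-platform" },
    ],
    techEmails: [
        { email: "kafka-oncall@example.com" },
    ],
});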

Import

$ pulumi import aiven:index/kafka:Kafka example_kafka PROJECT/SERVICE_NAME

To learn more about importing existing cloud resources, see Importing resources.

Package Details

Repository
Aiven pulumi/pulumi-aiven
License
Apache-2.0
Notes
This Pulumi package is based on the aiven Terraform Provider.