Packages

object Consumer

Linear Supertypes
Ordering
  1. Alphabetic
  2. By Inheritance
Inherited
  1. Consumer
  2. AnyRef
  3. Any
  1. Hide All
  2. Show All
Visibility
  1. Public
  2. Protected

Type Members

  1. sealed trait AutoOffsetStrategy extends AnyRef
  2. sealed trait OffsetRetrieval extends AnyRef

Value Members

  1. final def !=(arg0: Any): Boolean
    Definition Classes
    AnyRef → Any
  2. final def ##: Int
    Definition Classes
    AnyRef → Any
  3. final def ==(arg0: Any): Boolean
    Definition Classes
    AnyRef → Any
  4. final def asInstanceOf[T0]: T0
    Definition Classes
    Any
  5. def assignment: RIO[Consumer, Set[TopicPartition]]

    Accessor method

  6. def beginningOffsets(partitions: Set[TopicPartition], timeout: zio.Duration = Duration.Infinity): RIO[Consumer, Map[TopicPartition, Long]]

    Accessor method

  7. def clone(): AnyRef
    Attributes
    protected[lang]
    Definition Classes
    AnyRef
    Annotations
    @throws(classOf[java.lang.CloneNotSupportedException]) @native()
  8. def committed(partitions: Set[TopicPartition], timeout: zio.Duration = Duration.Infinity): RIO[Consumer, Map[TopicPartition, Option[OffsetAndMetadata]]]

    Accessor method

  9. def consumeWith[R, R1, K, V](settings: ConsumerSettings, subscription: Subscription, keyDeserializer: Deserializer[R, K], valueDeserializer: Deserializer[R, V], commitRetryPolicy: Schedule[Any, Any, Any] = Schedule.exponential(1.second) && Schedule.recurs(3))(f: (ConsumerRecord[K, V]) => URIO[R1, Unit])(implicit arg0: zio.EnvironmentTag[R], arg1: zio.EnvironmentTag[R1]): RIO[zio.&[R, R1], Unit]

    Execute an effect for each record and commit the offset after processing

    Execute an effect for each record and commit the offset after processing

    This method is the easiest way of processing messages on a Kafka topic.

    Messages on a single partition are processed sequentially, while the processing of multiple partitions happens in parallel.

    Offsets are committed after execution of the effect. They are batched when a commit action is in progress to avoid backpressuring the stream. When commits fail due to a org.apache.kafka.clients.consumer.RetriableCommitFailedException they are retried according to commitRetryPolicy.

    The effect should absorb any failures. Failures should be handled by retries or ignoring the error, which will result in the Kafka message being skipped.

    Messages are processed with 'at least once' consistency: it is not guaranteed that every message that is processed by the effect has a corresponding offset commit before stream termination.

    Usage example:

    val settings: ConsumerSettings = ???
    val subscription = Subscription.Topics(Set("my-kafka-topic"))
    
    val consumerIO = Consumer.consumeWith(settings, subscription, Serdes.string, Serdes.string) { record =>
      // Process the received record here
      putStrLn(s"Received record: ${record.key()}: ${record.value()}")
    }
    R

    Environment for the deserializers

    R1

    Environment for the consuming effect

    K

    Type of keys (an implicit Deserializer should be in scope)

    V

    Type of values (an implicit Deserializer should be in scope)

    settings

    Settings for creating a Consumer

    subscription

    Topic subscription parameters

    keyDeserializer

    Deserializer for the key of the messages

    valueDeserializer

    Deserializer for the value of the messages

    commitRetryPolicy

    Retry commits that failed due to a RetriableCommitFailedException according to this schedule

    f

    Function that returns the effect to execute for each message. It is passed the org.apache.kafka.clients.consumer.ConsumerRecord.

    returns

    Effect that completes with a unit value only when interrupted. May fail when the Consumer fails.

  10. def endOffsets(partitions: Set[TopicPartition], timeout: zio.Duration = Duration.Infinity): RIO[Consumer, Map[TopicPartition, Long]]

    Accessor method

  11. final def eq(arg0: AnyRef): Boolean
    Definition Classes
    AnyRef
  12. def equals(arg0: AnyRef): Boolean
    Definition Classes
    AnyRef → Any
  13. def finalize(): Unit
    Attributes
    protected[lang]
    Definition Classes
    AnyRef
    Annotations
    @throws(classOf[java.lang.Throwable])
  14. def fromJavaConsumer(javaConsumer: KafkaConsumer[Array[Byte], Array[Byte]], settings: ConsumerSettings, diagnostics: Diagnostics = Diagnostics.NoOp): ZIO[Scope, Throwable, Consumer]

    Create a zio-kafka Consumer from an org.apache.kafka KafkaConsumer

    Create a zio-kafka Consumer from an org.apache.kafka KafkaConsumer

    You are responsible for creating and closing the KafkaConsumer

  15. final def getClass(): Class[_ <: AnyRef]
    Definition Classes
    AnyRef → Any
    Annotations
    @native()
  16. def hashCode(): Int
    Definition Classes
    AnyRef → Any
    Annotations
    @native()
  17. final def isInstanceOf[T0]: Boolean
    Definition Classes
    Any
  18. def listTopics(timeout: zio.Duration = Duration.Infinity): RIO[Consumer, Map[String, List[PartitionInfo]]]

    Accessor method

  19. def live: RLayer[zio.&[ConsumerSettings, Diagnostics], Consumer]
  20. def make(settings: ConsumerSettings, diagnostics: Diagnostics = Diagnostics.NoOp): ZIO[Scope, Throwable, Consumer]
  21. def metrics: RIO[Consumer, Map[MetricName, Metric]]

    Accessor method

  22. final def ne(arg0: AnyRef): Boolean
    Definition Classes
    AnyRef
  23. final def notify(): Unit
    Definition Classes
    AnyRef
    Annotations
    @native()
  24. final def notifyAll(): Unit
    Definition Classes
    AnyRef
    Annotations
    @native()
  25. val offsetBatches: ZSink[Any, Nothing, Offset, Nothing, OffsetBatch]
  26. def offsetsForTimes(timestamps: Map[TopicPartition, Long], timeout: zio.Duration = Duration.Infinity): RIO[Consumer, Map[TopicPartition, OffsetAndTimestamp]]

    Accessor method

  27. def partitionedAssignmentStream[R, K, V](subscription: Subscription, keyDeserializer: Deserializer[R, K], valueDeserializer: Deserializer[R, V]): ZStream[Consumer, Throwable, Chunk[(TopicPartition, ZStream[R, Throwable, CommittableRecord[K, V]])]]

    Accessor method

  28. def partitionedStream[R, K, V](subscription: Subscription, keyDeserializer: Deserializer[R, K], valueDeserializer: Deserializer[R, V]): ZStream[Consumer, Throwable, (TopicPartition, ZStream[R, Throwable, CommittableRecord[K, V]])]

    Accessor method

  29. def partitionsFor(topic: String, timeout: zio.Duration = Duration.Infinity): RIO[Consumer, List[PartitionInfo]]

    Accessor method

  30. def plainStream[R, K, V](subscription: Subscription, keyDeserializer: Deserializer[R, K], valueDeserializer: Deserializer[R, V], bufferSize: Int = 4): ZStream[zio.&[R, Consumer], Throwable, CommittableRecord[K, V]]

    Accessor method

  31. def position(partition: TopicPartition, timeout: zio.Duration = Duration.Infinity): RIO[Consumer, Long]

    Accessor method

  32. def stopConsumption: RIO[Consumer, Unit]

    Accessor method

  33. def subscription: RIO[Consumer, Set[String]]

    Accessor method

  34. final def synchronized[T0](arg0: => T0): T0
    Definition Classes
    AnyRef
  35. def toString(): String
    Definition Classes
    AnyRef → Any
  36. final def wait(): Unit
    Definition Classes
    AnyRef
    Annotations
    @throws(classOf[java.lang.InterruptedException])
  37. final def wait(arg0: Long, arg1: Int): Unit
    Definition Classes
    AnyRef
    Annotations
    @throws(classOf[java.lang.InterruptedException])
  38. final def wait(arg0: Long): Unit
    Definition Classes
    AnyRef
    Annotations
    @throws(classOf[java.lang.InterruptedException]) @native()
  39. object AutoOffsetStrategy
  40. case object CommitTimeout extends RuntimeException with NoStackTrace with Product with Serializable
  41. object OffsetRetrieval
  42. case object RunloopTimeout extends RuntimeException with NoStackTrace with Product with Serializable

Inherited from AnyRef

Inherited from Any

Ungrouped