produce_set.go

package sarama

import (
	"encoding/binary"
	"errors"
	"time"
)
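
// partitionSet holds the messages buffered for a single topic/partition,
// together with the wire form they will be sent as (a RecordBatch or a
// legacy MessageSet) and a running total of their encoded size in bytes.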
type partitionSet struct {
	msgs          []*ProducerMessage
	recordsToSend Records
	bufferBytes   int
}
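
// produceSet accumulates messages for a single produce request, grouped by
// topic and partition, tracking the total buffered byte and message counts
// so the producer can decide when the set is full or ready to flush.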
type produceSet struct {
	parent        *asyncProducer
	msgs          map[string]map[int32]*partitionSet
	producerID    int64
	producerEpoch int16
	bufferBytes   int
	bufferCount   int
}
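
// newProduceSet returns an empty produceSet bound to the given producer,
// capturing the current producer ID and epoch from its transaction manager.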
func newProduceSet(parent *asyncProducer) *produceSet {
	pid, epoch := parent.txnmgr.getProducerID()
	return &produceSet{
		msgs:          make(map[string]map[int32]*partitionSet),
		parent:        parent,
		producerID:    pid,
		producerEpoch: epoch,
	}
}
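
// add encodes the message's key and value and appends it to the set for its
// topic/partition, creating a new RecordBatch (Kafka >= 0.11) or legacy
// MessageSet as needed, and updates the buffered-size accounting.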
func (ps *produceSet) add(msg *ProducerMessage) error {
	var err error
	var key, val []byte

	if msg.Key != nil {
		if key, err = msg.Key.Encode(); err != nil {
			return err
		}
	}

	if msg.Value != nil {
		if val, err = msg.Value.Encode(); err != nil {
			return err
		}
	}

	timestamp := msg.Timestamp
	if timestamp.IsZero() {
		timestamp = time.Now()
	}
	timestamp = timestamp.Truncate(time.Millisecond)

	partitions := ps.msgs[msg.Topic]
	if partitions == nil {
		partitions = make(map[int32]*partitionSet)
		ps.msgs[msg.Topic] = partitions
	}

	var size int

	set := partitions[msg.Partition]
	if set == nil {
		if ps.parent.conf.Version.IsAtLeast(V0_11_0_0) {
			batch := &RecordBatch{
				FirstTimestamp:   timestamp,
				Version:          2,
				Codec:            ps.parent.conf.Producer.Compression,
				CompressionLevel: ps.parent.conf.Producer.CompressionLevel,
				ProducerID:       ps.producerID,
				ProducerEpoch:    ps.producerEpoch,
			}
			if ps.parent.conf.Producer.Idempotent {
				batch.FirstSequence = msg.sequenceNumber
			}
			set = &partitionSet{recordsToSend: newDefaultRecords(batch)}
			size = recordBatchOverhead
		} else {
			set = &partitionSet{recordsToSend: newLegacyRecords(new(MessageSet))}
		}
		partitions[msg.Partition] = set
	}

	if ps.parent.conf.Version.IsAtLeast(V0_11_0_0) {
		if ps.parent.conf.Producer.Idempotent && msg.sequenceNumber < set.recordsToSend.RecordBatch.FirstSequence {
			return errors.New("assertion failed: message out of sequence added to a batch")
		}
	}

	// Past this point we can't return an error, because we've already added the message to the set.
	set.msgs = append(set.msgs, msg)

	if ps.parent.conf.Version.IsAtLeast(V0_11_0_0) {
		// We are being conservative here to avoid having to pre-encode the record
		size += maximumRecordOverhead
		rec := &Record{
			Key:            key,
			Value:          val,
			TimestampDelta: timestamp.Sub(set.recordsToSend.RecordBatch.FirstTimestamp),
		}
		size += len(key) + len(val)
		if len(msg.Headers) > 0 {
			rec.Headers = make([]*RecordHeader, len(msg.Headers))
			for i := range msg.Headers {
				rec.Headers[i] = &msg.Headers[i]
				size += len(rec.Headers[i].Key) + len(rec.Headers[i].Value) + 2*binary.MaxVarintLen32
			}
		}
		set.recordsToSend.RecordBatch.addRecord(rec)
	} else {
		msgToSend := &Message{Codec: CompressionNone, Key: key, Value: val}
		if ps.parent.conf.Version.IsAtLeast(V0_10_0_0) {
			msgToSend.Timestamp = timestamp
			msgToSend.Version = 1
		}
		set.recordsToSend.MsgSet.addMessage(msgToSend)
		size = producerMessageOverhead + len(key) + len(val)
	}

	set.bufferBytes += size
	ps.bufferBytes += size
	ps.bufferCount++
	return nil
}
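
// buildRequest assembles a ProduceRequest from everything buffered in the
// set, choosing the request version from the configured Kafka version and
// compressing legacy message sets when a compression codec is configured.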
func (ps *produceSet) buildRequest() *ProduceRequest {
	req := &ProduceRequest{
		RequiredAcks: ps.parent.conf.Producer.RequiredAcks,
		Timeout:      int32(ps.parent.conf.Producer.Timeout / time.Millisecond),
	}
	if ps.parent.conf.Version.IsAtLeast(V0_10_0_0) {
		req.Version = 2
	}
	if ps.parent.conf.Version.IsAtLeast(V0_11_0_0) {
		req.Version = 3
		if ps.parent.IsTransactional() {
			req.TransactionalID = &ps.parent.conf.Producer.Transaction.ID
		}
	}
	if ps.parent.conf.Version.IsAtLeast(V1_0_0_0) {
		req.Version = 5
	}
	if ps.parent.conf.Version.IsAtLeast(V2_0_0_0) {
		req.Version = 6
	}
	if ps.parent.conf.Version.IsAtLeast(V2_1_0_0) {
		req.Version = 7
	}

	for topic, partitionSets := range ps.msgs {
		for partition, set := range partitionSets {
			if req.Version >= 3 {
				// If the API version we're hitting is 3 or greater, we need to calculate
				// offsets for each record in the batch relative to FirstOffset.
				// Additionally, we must set LastOffsetDelta to the value of the last offset
				// in the batch. Since the OffsetDelta of the first record is 0, we know that the
				// final record of any batch will have an offset of (# of records in batch) - 1.
				// (See https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-Messagesets
				// under the RecordBatch section for details.)
				rb := set.recordsToSend.RecordBatch
				if len(rb.Records) > 0 {
					rb.LastOffsetDelta = int32(len(rb.Records) - 1)
					for i, record := range rb.Records {
						record.OffsetDelta = int64(i)
					}
				}

				// Set the batch as transactional when a transactionalID is set
				rb.IsTransactional = ps.parent.IsTransactional()

				req.AddBatch(topic, partition, rb)
				continue
			}
			if ps.parent.conf.Producer.Compression == CompressionNone {
				req.AddSet(topic, partition, set.recordsToSend.MsgSet)
			} else {
				// When compression is enabled, the entire set for each partition is compressed
				// and sent as the payload of a single fake "message" with the appropriate codec
				// set and no key. When the server sees a message with a compression codec, it
				// decompresses the payload and treats the result as its message set.

				if ps.parent.conf.Version.IsAtLeast(V0_10_0_0) {
					// If our version is 0.10 or later, assign relative offsets
					// to the inner messages. This lets the broker avoid
					// recompressing the message set.
					// (See https://cwiki.apache.org/confluence/display/KAFKA/KIP-31+-+Move+to+relative+offsets+in+compressed+message+sets
					// for details on relative offsets.)
					for i, msg := range set.recordsToSend.MsgSet.Messages {
						msg.Offset = int64(i)
					}
				}

				payload, err := encode(set.recordsToSend.MsgSet, ps.parent.metricsRegistry)
				if err != nil {
					Logger.Println(err) // if this happens, it's basically our fault.
					panic(err)
				}
				compMsg := &Message{
					Codec:            ps.parent.conf.Producer.Compression,
					CompressionLevel: ps.parent.conf.Producer.CompressionLevel,
					Key:              nil,
					Value:            payload,
					Set:              set.recordsToSend.MsgSet, // provide the underlying message set for accurate metrics
				}
				if ps.parent.conf.Version.IsAtLeast(V0_10_0_0) {
					compMsg.Version = 1
					compMsg.Timestamp = set.recordsToSend.MsgSet.Messages[0].Msg.Timestamp
				}
				req.AddMessage(topic, partition, compMsg)
			}
		}
	}

	return req
}
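
// eachPartition invokes cb once for every topic/partition that currently
// has buffered messages.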
func (ps *produceSet) eachPartition(cb func(topic string, partition int32, pSet *partitionSet)) {
	for topic, partitionSets := range ps.msgs {
		for partition, set := range partitionSets {
			cb(topic, partition, set)
		}
	}
}
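
// dropPartition removes all buffered messages for the given topic/partition
// from the set, adjusts the byte and message counters accordingly, and
// returns the dropped messages to the caller.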
func (ps *produceSet) dropPartition(topic string, partition int32) []*ProducerMessage {
	if ps.msgs[topic] == nil {
		return nil
	}
	set := ps.msgs[topic][partition]
	if set == nil {
		return nil
	}
	ps.bufferBytes -= set.bufferBytes
	ps.bufferCount -= len(set.msgs)
	delete(ps.msgs[topic], partition)
	return set.msgs
}
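
// wouldOverflow reports whether adding msg would exceed the maximum request
// size on the wire, the per-partition batch size limit, or the configured
// maximum number of buffered messages.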
func (ps *produceSet) wouldOverflow(msg *ProducerMessage) bool {
	version := 1
	if ps.parent.conf.Version.IsAtLeast(V0_11_0_0) {
		version = 2
	}

	switch {
	// Would we overflow our maximum possible size-on-the-wire? 10KiB is arbitrary overhead for safety.
	case ps.bufferBytes+msg.ByteSize(version) >= int(MaxRequestSize-(10*1024)):
		return true
	// Would we overflow the size-limit of a message-batch for this partition?
	case ps.msgs[msg.Topic] != nil && ps.msgs[msg.Topic][msg.Partition] != nil &&
		ps.msgs[msg.Topic][msg.Partition].bufferBytes+msg.ByteSize(version) >= ps.parent.conf.Producer.MaxMessageBytes:
		return true
	// Would we overflow simply in number of messages?
	case ps.parent.conf.Producer.Flush.MaxMessages > 0 && ps.bufferCount >= ps.parent.conf.Producer.Flush.MaxMessages:
		return true
	default:
		return false
	}
}
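
// readyToFlush reports whether the set should be sent now: either no flush
// thresholds are configured at all, or the buffered message count or byte
// count has reached its configured trigger point.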
func (ps *produceSet) readyToFlush() bool {
	switch {
	// If we don't have any messages, nothing else matters
	case ps.empty():
		return false
	// If all three config values are 0, we always flush as-fast-as-possible
	case ps.parent.conf.Producer.Flush.Frequency == 0 && ps.parent.conf.Producer.Flush.Bytes == 0 && ps.parent.conf.Producer.Flush.Messages == 0:
		return true
	// If we've passed the message trigger-point
	case ps.parent.conf.Producer.Flush.Messages > 0 && ps.bufferCount >= ps.parent.conf.Producer.Flush.Messages:
		return true
	// If we've passed the byte trigger-point
	case ps.parent.conf.Producer.Flush.Bytes > 0 && ps.bufferBytes >= ps.parent.conf.Producer.Flush.Bytes:
		return true
	default:
		return false
	}
}
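
// empty reports whether the set contains no buffered messages.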
func (ps *produceSet) empty() bool {
	return ps.bufferCount == 0
}