fetch_request.go

package sarama

import "fmt"

type fetchRequestBlock struct {
	Version int16
	// currentLeaderEpoch contains the current leader epoch of the partition.
	currentLeaderEpoch int32
	// fetchOffset contains the message offset.
	fetchOffset int64
	// logStartOffset contains the earliest available offset of the follower
	// replica. The field is only used when the request is sent by the
	// follower.
	logStartOffset int64
	// maxBytes contains the maximum bytes to fetch from this partition. See
	// KIP-74 for cases where this limit may not be honored.
	maxBytes int32
}

func (b *fetchRequestBlock) encode(pe packetEncoder, version int16) error {
	b.Version = version
	if b.Version >= 9 {
		pe.putInt32(b.currentLeaderEpoch)
	}
	pe.putInt64(b.fetchOffset)
	if b.Version >= 5 {
		pe.putInt64(b.logStartOffset)
	}
	pe.putInt32(b.maxBytes)
	return nil
}

func (b *fetchRequestBlock) decode(pd packetDecoder, version int16) (err error) {
	b.Version = version
	if b.Version >= 9 {
		if b.currentLeaderEpoch, err = pd.getInt32(); err != nil {
			return err
		}
	}
	if b.fetchOffset, err = pd.getInt64(); err != nil {
		return err
	}
	if b.Version >= 5 {
		if b.logStartOffset, err = pd.getInt64(); err != nil {
			return err
		}
	}
	if b.maxBytes, err = pd.getInt32(); err != nil {
		return err
	}
	return nil
}

// FetchRequest (API key 1) will fetch Kafka messages. Version 3 introduced the MaxBytes field. See
// https://issues.apache.org/jira/browse/KAFKA-2063 for a discussion of the issues leading up to that. The KIP is at
// https://cwiki.apache.org/confluence/display/KAFKA/KIP-74%3A+Add+Fetch+Response+Size+Limit+in+Bytes
type FetchRequest struct {
	// Version defines the protocol version to use for encode and decode
	Version int16
	// ReplicaID contains the broker ID of the follower, or -1 if this request
	// is from a consumer.
	// ReplicaID int32
	// MaxWaitTime contains the maximum time in milliseconds to wait for the response.
	MaxWaitTime int32
	// MinBytes contains the minimum bytes to accumulate in the response.
	MinBytes int32
	// MaxBytes contains the maximum bytes to fetch. See KIP-74 for cases
	// where this limit may not be honored.
	MaxBytes int32
	// Isolation controls the visibility of transactional records. Using
	// READ_UNCOMMITTED (isolation_level = 0) makes all records visible. With
	// READ_COMMITTED (isolation_level = 1), non-transactional and COMMITTED
	// transactional records are visible. To be more concrete, READ_COMMITTED
	// returns all data from offsets smaller than the current LSO (last stable
	// offset), and enables the inclusion of the list of aborted transactions
	// in the result, which allows consumers to discard ABORTED transactional
	// records (see the illustrative sketch after the IsolationLevel constants
	// below).
	Isolation IsolationLevel
	// SessionID contains the fetch session ID.
	SessionID int32
	// SessionEpoch contains the epoch of the partition leader as known to the
	// follower replica or a consumer.
	SessionEpoch int32
	// blocks contains the topics to fetch.
	blocks map[string]map[int32]*fetchRequestBlock
	// forgotten contains, for an incremental fetch request, the partitions to
	// remove from the fetch session.
	forgotten map[string][]int32
	// RackID contains the rack ID of the consumer making this request.
	RackID string
}

type IsolationLevel int8

const (
	ReadUncommitted IsolationLevel = iota
	ReadCommitted
)
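
// newReadCommittedFetchRequest is an illustrative sketch, not part of the
// upstream file: it shows how the Isolation field interacts with Version
// (isolation_level is only encoded on v4 and above). The concrete field
// values are hypothetical.
func newReadCommittedFetchRequest() *FetchRequest {
	return &FetchRequest{
		Version:     4,             // isolation_level is only sent on v4+
		MaxWaitTime: 500,           // wait at most 500ms for data to accumulate
		MinBytes:    1,             // respond as soon as any data is available
		Isolation:   ReadCommitted, // hide records from uncommitted/aborted transactions
	}
}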

func (r *FetchRequest) encode(pe packetEncoder) (err error) {
	metricRegistry := pe.metricRegistry()
	pe.putInt32(-1) // ReplicaID is always -1 for clients
	pe.putInt32(r.MaxWaitTime)
	pe.putInt32(r.MinBytes)
	if r.Version >= 3 {
		pe.putInt32(r.MaxBytes)
	}
	if r.Version >= 4 {
		pe.putInt8(int8(r.Isolation))
	}
	if r.Version >= 7 {
		pe.putInt32(r.SessionID)
		pe.putInt32(r.SessionEpoch)
	}
	err = pe.putArrayLength(len(r.blocks))
	if err != nil {
		return err
	}
	for topic, blocks := range r.blocks {
		err = pe.putString(topic)
		if err != nil {
			return err
		}
		err = pe.putArrayLength(len(blocks))
		if err != nil {
			return err
		}
		for partition, block := range blocks {
			pe.putInt32(partition)
			err = block.encode(pe, r.Version)
			if err != nil {
				return err
			}
		}
		getOrRegisterTopicMeter("consumer-fetch-rate", topic, metricRegistry).Mark(1)
	}
	if r.Version >= 7 {
		err = pe.putArrayLength(len(r.forgotten))
		if err != nil {
			return err
		}
		for topic, partitions := range r.forgotten {
			err = pe.putString(topic)
			if err != nil {
				return err
			}
			err = pe.putArrayLength(len(partitions))
			if err != nil {
				return err
			}
			for _, partition := range partitions {
				pe.putInt32(partition)
			}
		}
	}
	if r.Version >= 11 {
		err = pe.putString(r.RackID)
		if err != nil {
			return err
		}
	}
	return nil
}

func (r *FetchRequest) decode(pd packetDecoder, version int16) (err error) {
	r.Version = version
	if _, err = pd.getInt32(); err != nil {
		return err
	}
	if r.MaxWaitTime, err = pd.getInt32(); err != nil {
		return err
	}
	if r.MinBytes, err = pd.getInt32(); err != nil {
		return err
	}
	if r.Version >= 3 {
		if r.MaxBytes, err = pd.getInt32(); err != nil {
			return err
		}
	}
	if r.Version >= 4 {
		isolation, err := pd.getInt8()
		if err != nil {
			return err
		}
		r.Isolation = IsolationLevel(isolation)
	}
	if r.Version >= 7 {
		r.SessionID, err = pd.getInt32()
		if err != nil {
			return err
		}
		r.SessionEpoch, err = pd.getInt32()
		if err != nil {
			return err
		}
	}
	topicCount, err := pd.getArrayLength()
	if err != nil {
		return err
	}
	if topicCount == 0 {
		return nil
	}
	r.blocks = make(map[string]map[int32]*fetchRequestBlock)
	for i := 0; i < topicCount; i++ {
		topic, err := pd.getString()
		if err != nil {
			return err
		}
		partitionCount, err := pd.getArrayLength()
		if err != nil {
			return err
		}
		r.blocks[topic] = make(map[int32]*fetchRequestBlock)
		for j := 0; j < partitionCount; j++ {
			partition, err := pd.getInt32()
			if err != nil {
				return err
			}
			fetchBlock := &fetchRequestBlock{}
			if err = fetchBlock.decode(pd, r.Version); err != nil {
				return err
			}
			r.blocks[topic][partition] = fetchBlock
		}
	}
	if r.Version >= 7 {
		forgottenCount, err := pd.getArrayLength()
		if err != nil {
			return err
		}
		r.forgotten = make(map[string][]int32)
		for i := 0; i < forgottenCount; i++ {
			topic, err := pd.getString()
			if err != nil {
				return err
			}
			partitionCount, err := pd.getArrayLength()
			if err != nil {
				return err
			}
			if partitionCount < 0 {
				return fmt.Errorf("partitionCount %d is invalid", partitionCount)
			}
			r.forgotten[topic] = make([]int32, partitionCount)
			for j := 0; j < partitionCount; j++ {
				partition, err := pd.getInt32()
				if err != nil {
					return err
				}
				r.forgotten[topic][j] = partition
			}
		}
	}
	if r.Version >= 11 {
		r.RackID, err = pd.getString()
		if err != nil {
			return err
		}
	}
	return nil
}

func (r *FetchRequest) key() int16 {
	return 1
}

func (r *FetchRequest) version() int16 {
	return r.Version
}

func (r *FetchRequest) headerVersion() int16 {
	return 1
}

func (r *FetchRequest) isValidVersion() bool {
	return r.Version >= 0 && r.Version <= 11
}

func (r *FetchRequest) requiredVersion() KafkaVersion {
	switch r.Version {
	case 11:
		return V2_3_0_0
	case 9, 10:
		return V2_1_0_0
	case 8:
		return V2_0_0_0
	case 7:
		return V1_1_0_0
	case 6:
		return V1_0_0_0
	case 4, 5:
		return V0_11_0_0
	case 3:
		return V0_10_1_0
	case 2:
		return V0_10_0_0
	case 1:
		return V0_9_0_0
	case 0:
		return V0_8_2_0
	default:
		return V2_3_0_0
	}
}

func (r *FetchRequest) AddBlock(topic string, partitionID int32, fetchOffset int64, maxBytes int32, leaderEpoch int32) {
	if r.blocks == nil {
		r.blocks = make(map[string]map[int32]*fetchRequestBlock)
	}
	if r.Version >= 7 && r.forgotten == nil {
		r.forgotten = make(map[string][]int32)
	}
	if r.blocks[topic] == nil {
		r.blocks[topic] = make(map[int32]*fetchRequestBlock)
	}
	tmp := new(fetchRequestBlock)
	tmp.Version = r.Version
	tmp.maxBytes = maxBytes
	tmp.fetchOffset = fetchOffset
	if r.Version >= 9 {
		tmp.currentLeaderEpoch = leaderEpoch
	}
	r.blocks[topic][partitionID] = tmp
}
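
// buildExampleFetchRequest is an illustrative sketch, not part of the upstream
// file: it shows how AddBlock registers per-partition fetch state on a request.
// The topic, partition, offset, size, and epoch values are hypothetical, and a
// real caller would typically hand the request to a broker (e.g. Sarama's
// Broker.Fetch), which is assumed here rather than shown.
func buildExampleFetchRequest() *FetchRequest {
	req := &FetchRequest{
		Version:     11,
		MaxWaitTime: 500,
		MinBytes:    1,
		MaxBytes:    32 * 1024 * 1024, // only encoded on v3+
		Isolation:   ReadUncommitted,
		RackID:      "rack-1", // only encoded on v11+
	}
	// Fetch partition 0 of "my-topic" starting at offset 42, capped at 1 MiB
	// for this partition; the leader epoch (3) is only encoded on v9 and above.
	req.AddBlock("my-topic", 0, 42, 1024*1024, 3)
	return req
}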