blockenc.go

// Copyright 2019+ Klaus Post. All rights reserved.
// License information can be found in the LICENSE file.
// Based on work by Yann Collet, released under BSD License.

package zstd

import (
	"errors"
	"fmt"
	"math"
	"math/bits"

	"github.com/klauspost/compress/huff0"
)

type blockEnc struct {
	size              int
	literals          []byte
	sequences         []seq
	coders            seqCoders
	litEnc            *huff0.Scratch
	dictLitEnc        *huff0.Scratch
	wr                bitWriter
	extraLits         int
	output            []byte
	recentOffsets     [3]uint32
	prevRecentOffsets [3]uint32
	last              bool
	lowMem            bool
}

// init should be used once the block has been created.
// If called more than once, the effect is the same as calling reset.
func (b *blockEnc) init() {
	if b.lowMem {
		// 1K literals
		if cap(b.literals) < 1<<10 {
			b.literals = make([]byte, 0, 1<<10)
		}
		const defSeqs = 20
		if cap(b.sequences) < defSeqs {
			b.sequences = make([]seq, 0, defSeqs)
		}
		// 1K
		if cap(b.output) < 1<<10 {
			b.output = make([]byte, 0, 1<<10)
		}
	} else {
		if cap(b.literals) < maxCompressedBlockSize {
			b.literals = make([]byte, 0, maxCompressedBlockSize)
		}
		const defSeqs = 2000
		if cap(b.sequences) < defSeqs {
			b.sequences = make([]seq, 0, defSeqs)
		}
		if cap(b.output) < maxCompressedBlockSize {
			b.output = make([]byte, 0, maxCompressedBlockSize)
		}
	}

	if b.coders.mlEnc == nil {
		b.coders.mlEnc = &fseEncoder{}
		b.coders.mlPrev = &fseEncoder{}
		b.coders.ofEnc = &fseEncoder{}
		b.coders.ofPrev = &fseEncoder{}
		b.coders.llEnc = &fseEncoder{}
		b.coders.llPrev = &fseEncoder{}
	}
	b.litEnc = &huff0.Scratch{WantLogLess: 4}
	b.reset(nil)
}

// initNewEncode can be used to reset offsets and encoders to the initial state.
func (b *blockEnc) initNewEncode() {
	b.recentOffsets = [3]uint32{1, 4, 8}
	b.litEnc.Reuse = huff0.ReusePolicyNone
	b.coders.setPrev(nil, nil, nil)
}

// reset will reset the block for a new encode, but in the same stream,
// meaning that state will be carried over, but the block content is reset.
// If a previous block is provided, the recent offsets are carried over.
func (b *blockEnc) reset(prev *blockEnc) {
	b.extraLits = 0
	b.literals = b.literals[:0]
	b.size = 0
	b.sequences = b.sequences[:0]
	b.output = b.output[:0]
	b.last = false
	if prev != nil {
		b.recentOffsets = prev.prevRecentOffsets
	}
	b.dictLitEnc = nil
}

// swapEncoders swaps the sequence coders and literal encoder with those of
// the previous block, so their state can be reused for repeat codes.
func (b *blockEnc) swapEncoders(prev *blockEnc) {
	b.coders.swap(&prev.coders)
	b.litEnc, prev.litEnc = prev.litEnc, b.litEnc
}

// blockHeader contains the information for a block header.
type blockHeader uint32

// setLast sets the 'last' indicator on a block.
func (h *blockHeader) setLast(b bool) {
	if b {
		*h = *h | 1
	} else {
		const mask = (1 << 24) - 2
		*h = *h & mask
	}
}

// setSize will store the compressed size of a block.
func (h *blockHeader) setSize(v uint32) {
	const mask = 7
	*h = (*h)&mask | blockHeader(v<<3)
}

// setType sets the block type.
func (h *blockHeader) setType(t blockType) {
	const mask = 1 | (((1 << 24) - 1) ^ 7)
	*h = (*h & mask) | blockHeader(t<<1)
}

// appendTo will append the block header to a slice.
func (h blockHeader) appendTo(b []byte) []byte {
	return append(b, uint8(h), uint8(h>>8), uint8(h>>16))
}

// String returns a string representation of the block.
func (h blockHeader) String() string {
	return fmt.Sprintf("Type: %d, Size: %d, Last:%t", (h>>1)&3, h>>3, h&1 == 1)
}
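
// exampleBlockHeader is an illustrative sketch (a hypothetical helper, not
// part of the encoder) of how the 24-bit block header packs its fields:
// bit 0 is the last-block flag, bits 1-2 the block type, and bits 3-23 the
// size, emitted little-endian by appendTo.
func exampleBlockHeader() []byte {
	var bh blockHeader
	bh.setLast(true)         // bit 0:    1
	bh.setType(blockTypeRaw) // bits 1-2: 0 (raw)
	bh.setSize(100)          // bits 3+:  100<<3 = 800
	// Header value is 801 = 0x000321, emitted as [0x21, 0x03, 0x00].
	return bh.appendTo(nil)
}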

// literalsHeader contains literals header information.
type literalsHeader uint64

// setType can be used to set the type of literal block.
func (h *literalsHeader) setType(t literalsBlockType) {
	const mask = math.MaxUint64 - 3
	*h = (*h & mask) | literalsHeader(t)
}

// setSize can be used to set a single size, for uncompressed and RLE content.
func (h *literalsHeader) setSize(regenLen int) {
	inBits := bits.Len32(uint32(regenLen))
	// Only retain 2 bits
	const mask = 3
	lh := uint64(*h & mask)
	switch {
	case inBits < 5:
		lh |= (uint64(regenLen) << 3) | (1 << 60)
		if debugEncoder {
			got := int(lh>>3) & 0xff
			if got != regenLen {
				panic(fmt.Sprint("litRegenSize = ", regenLen, "(want) != ", got, "(got)"))
			}
		}
	case inBits < 12:
		lh |= (1 << 2) | (uint64(regenLen) << 4) | (2 << 60)
	case inBits < 20:
		lh |= (3 << 2) | (uint64(regenLen) << 4) | (3 << 60)
	default:
		panic(fmt.Errorf("internal error: block too big (%d)", regenLen))
	}
	*h = literalsHeader(lh)
}

// setSizes will set the size of a compressed literals section and the input length.
func (h *literalsHeader) setSizes(compLen, inLen int, single bool) {
	compBits, inBits := bits.Len32(uint32(compLen)), bits.Len32(uint32(inLen))
	// Only retain 2 bits
	const mask = 3
	lh := uint64(*h & mask)
	switch {
	case compBits <= 10 && inBits <= 10:
		if !single {
			lh |= 1 << 2
		}
		lh |= (uint64(inLen) << 4) | (uint64(compLen) << (10 + 4)) | (3 << 60)
		if debugEncoder {
			const mmask = (1 << 24) - 1
			n := (lh >> 4) & mmask
			if int(n&1023) != inLen {
				panic(fmt.Sprint("regensize:", int(n&1023), "!=", inLen, inBits))
			}
			if int(n>>10) != compLen {
				panic(fmt.Sprint("compsize:", int(n>>10), "!=", compLen, compBits))
			}
		}
	case compBits <= 14 && inBits <= 14:
		lh |= (2 << 2) | (uint64(inLen) << 4) | (uint64(compLen) << (14 + 4)) | (4 << 60)
		if single {
			panic("single stream used with more than 10 bits length.")
		}
	case compBits <= 18 && inBits <= 18:
		lh |= (3 << 2) | (uint64(inLen) << 4) | (uint64(compLen) << (18 + 4)) | (5 << 60)
		if single {
			panic("single stream used with more than 10 bits length.")
		}
	default:
		panic("internal error: block too big")
	}
	*h = literalsHeader(lh)
}

// appendTo will append the literals header to a byte slice.
func (h literalsHeader) appendTo(b []byte) []byte {
	size := uint8(h >> 60)
	switch size {
	case 1:
		b = append(b, uint8(h))
	case 2:
		b = append(b, uint8(h), uint8(h>>8))
	case 3:
		b = append(b, uint8(h), uint8(h>>8), uint8(h>>16))
	case 4:
		b = append(b, uint8(h), uint8(h>>8), uint8(h>>16), uint8(h>>24))
	case 5:
		b = append(b, uint8(h), uint8(h>>8), uint8(h>>16), uint8(h>>24), uint8(h>>32))
	default:
		panic(fmt.Errorf("internal error: literalsHeader has invalid size (%d)", size))
	}
	return b
}

// size returns the output size with currently set values.
func (h literalsHeader) size() int {
	return int(h >> 60)
}

func (h literalsHeader) String() string {
	return fmt.Sprintf("Type: %d, SizeFormat: %d, Size: 0x%x, Bytes:%d", literalsBlockType(h&3), (h>>2)&3, h&((1<<60)-1)>>4, h>>60)
}
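
// exampleLiteralsHeader is an illustrative sketch (a hypothetical helper,
// not part of the encoder). For a compressed literals section where both
// lengths fit in 10 bits, setSizes packs the regenerated size into bits
// 4-13 and the compressed size into bits 14-23; bits 60-63 record how many
// header bytes appendTo should emit.
func exampleLiteralsHeader() []byte {
	var lh literalsHeader
	lh.setType(literalsBlockCompressed)
	lh.setSizes(200, 500, false) // compLen=200, inLen=500, 4 streams
	return lh.appendTo(nil)      // lh.size() == 3 bytes here
}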

// pushOffsets will push the recent offsets to the backup store.
func (b *blockEnc) pushOffsets() {
	b.prevRecentOffsets = b.recentOffsets
}

// popOffsets will restore the recent offsets from the backup store.
func (b *blockEnc) popOffsets() {
	b.recentOffsets = b.prevRecentOffsets
}

// matchOffset will adjust recent offsets and return the adjusted one,
// if it matches a previous offset.
func (b *blockEnc) matchOffset(offset, lits uint32) uint32 {
	// Check if offset is one of the recent offsets.
	// Adjusts the output offset accordingly.
	// Gives a tiny bit of compression, typically around 1%.
	if lits > 0 {
		switch offset {
		case b.recentOffsets[0]:
			offset = 1
		case b.recentOffsets[1]:
			b.recentOffsets[1] = b.recentOffsets[0]
			b.recentOffsets[0] = offset
			offset = 2
		case b.recentOffsets[2]:
			b.recentOffsets[2] = b.recentOffsets[1]
			b.recentOffsets[1] = b.recentOffsets[0]
			b.recentOffsets[0] = offset
			offset = 3
		default:
			b.recentOffsets[2] = b.recentOffsets[1]
			b.recentOffsets[1] = b.recentOffsets[0]
			b.recentOffsets[0] = offset
			offset += 3
		}
	} else {
		// With zero literals the repeat codes are shifted by one:
		// code 1 means offset_2, and code 3 means offset_1 - 1.
		switch offset {
		case b.recentOffsets[1]:
			b.recentOffsets[1] = b.recentOffsets[0]
			b.recentOffsets[0] = offset
			offset = 1
		case b.recentOffsets[2]:
			b.recentOffsets[2] = b.recentOffsets[1]
			b.recentOffsets[1] = b.recentOffsets[0]
			b.recentOffsets[0] = offset
			offset = 2
		case b.recentOffsets[0] - 1:
			b.recentOffsets[2] = b.recentOffsets[1]
			b.recentOffsets[1] = b.recentOffsets[0]
			b.recentOffsets[0] = offset
			offset = 3
		default:
			b.recentOffsets[2] = b.recentOffsets[1]
			b.recentOffsets[1] = b.recentOffsets[0]
			b.recentOffsets[0] = offset
			offset += 3
		}
	}
	return offset
}
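
// exampleMatchOffset is an illustrative sketch (a hypothetical helper, not
// part of the encoder). With the initial recent offsets {1, 4, 8},
// re-emitting offset 4 after some literals becomes repeat code 2, and the
// history is rotated so 4 is now the most recent offset.
func exampleMatchOffset() (code uint32, recent [3]uint32) {
	var b blockEnc
	b.init()
	b.initNewEncode()            // recentOffsets = {1, 4, 8}
	code = b.matchOffset(4, 5)   // 5 literals precede the match
	return code, b.recentOffsets // code == 2, recent == {4, 1, 8}
}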

// encodeRaw can be used to set the output to a raw representation of supplied bytes.
func (b *blockEnc) encodeRaw(a []byte) {
	var bh blockHeader
	bh.setLast(b.last)
	bh.setSize(uint32(len(a)))
	bh.setType(blockTypeRaw)
	b.output = bh.appendTo(b.output[:0])
	b.output = append(b.output, a...)
	if debugEncoder {
		println("Adding RAW block, length", len(a), "last:", b.last)
	}
}

// encodeRawTo can be used to append a raw representation of the supplied bytes to dst.
func (b *blockEnc) encodeRawTo(dst, src []byte) []byte {
	var bh blockHeader
	bh.setLast(b.last)
	bh.setSize(uint32(len(src)))
	bh.setType(blockTypeRaw)
	dst = bh.appendTo(dst)
	dst = append(dst, src...)
	if debugEncoder {
		println("Adding RAW block, length", len(src), "last:", b.last)
	}
	return dst
}
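
// exampleRawBlock is an illustrative sketch (a hypothetical helper, not
// part of the encoder): a final raw block holding "abc" is the 3-byte
// header 0x19 0x00 0x00 (last=1, type=raw, size=3) followed by the bytes
// themselves.
func exampleRawBlock() []byte {
	var b blockEnc
	b.last = true
	// Result: [0x19, 0x00, 0x00, 'a', 'b', 'c'].
	return b.encodeRawTo(nil, []byte("abc"))
}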

// encodeLits can be used if the block is only litLen.
func (b *blockEnc) encodeLits(lits []byte, raw bool) error {
	var bh blockHeader
	bh.setLast(b.last)
	bh.setSize(uint32(len(lits)))

	// Don't compress extremely small blocks
	if len(lits) < 8 || (len(lits) < 32 && b.dictLitEnc == nil) || raw {
		if debugEncoder {
			println("Adding RAW block, length", len(lits), "last:", b.last)
		}
		bh.setType(blockTypeRaw)
		b.output = bh.appendTo(b.output)
		b.output = append(b.output, lits...)
		return nil
	}

	var (
		out            []byte
		reUsed, single bool
		err            error
	)
	if b.dictLitEnc != nil {
		b.litEnc.TransferCTable(b.dictLitEnc)
		b.litEnc.Reuse = huff0.ReusePolicyAllow
		b.dictLitEnc = nil
	}
	if len(lits) >= 1024 {
		// Use 4 Streams.
		out, reUsed, err = huff0.Compress4X(lits, b.litEnc)
	} else if len(lits) > 16 {
		// Use 1 stream
		single = true
		out, reUsed, err = huff0.Compress1X(lits, b.litEnc)
	} else {
		err = huff0.ErrIncompressible
	}
	if err == nil && len(out)+5 > len(lits) {
		// If we are close, we may still be worse or equal to raw.
		var lh literalsHeader
		lh.setSizes(len(out), len(lits), single)
		if len(out)+lh.size() >= len(lits) {
			err = huff0.ErrIncompressible
		}
	}
	switch err {
	case huff0.ErrIncompressible:
		if debugEncoder {
			println("Adding RAW block, length", len(lits), "last:", b.last)
		}
		bh.setType(blockTypeRaw)
		b.output = bh.appendTo(b.output)
		b.output = append(b.output, lits...)
		return nil
	case huff0.ErrUseRLE:
		if debugEncoder {
			println("Adding RLE block, length", len(lits))
		}
		bh.setType(blockTypeRLE)
		b.output = bh.appendTo(b.output)
		b.output = append(b.output, lits[0])
		return nil
	case nil:
	default:
		return err
	}
	// Compressed...
	// Now, allow reuse
	b.litEnc.Reuse = huff0.ReusePolicyAllow
	bh.setType(blockTypeCompressed)
	var lh literalsHeader
	if reUsed {
		if debugEncoder {
			println("Reused tree, compressed to", len(out))
		}
		lh.setType(literalsBlockTreeless)
	} else {
		if debugEncoder {
			println("New tree, compressed to", len(out), "tree size:", len(b.litEnc.OutTable))
		}
		lh.setType(literalsBlockCompressed)
	}
	// Set sizes
	lh.setSizes(len(out), len(lits), single)
	bh.setSize(uint32(len(out) + lh.size() + 1))

	// Write block headers.
	b.output = bh.appendTo(b.output)
	b.output = lh.appendTo(b.output)
	// Add compressed data.
	b.output = append(b.output, out...)
	// No sequences.
	b.output = append(b.output, 0)
	return nil
}
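
// exampleEncodeLits is an illustrative sketch (a hypothetical helper, not
// part of the encoder) of the fallback logic above: short or incompressible
// literals are stored raw, a single repeated byte becomes an RLE block, and
// anything else is huff0-compressed.
func exampleEncodeLits() error {
	var b blockEnc
	b.init()
	b.last = true
	lits := make([]byte, 64) // all zero: huff0 reports ErrUseRLE
	return b.encodeLits(lits, false)
}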

// encodeRLE will encode an RLE block.
func (b *blockEnc) encodeRLE(val byte, length uint32) {
	var bh blockHeader
	bh.setLast(b.last)
	bh.setSize(length)
	bh.setType(blockTypeRLE)
	b.output = bh.appendTo(b.output)
	b.output = append(b.output, val)
}

// fuzzFseEncoder can be used to fuzz the FSE encoder.
func fuzzFseEncoder(data []byte) int {
	if len(data) > maxSequences || len(data) < 2 {
		return 0
	}
	enc := fseEncoder{}
	hist := enc.Histogram()
	maxSym := uint8(0)
	for i, v := range data {
		v = v & 63
		data[i] = v
		hist[v]++
		if v > maxSym {
			maxSym = v
		}
	}
	if maxSym == 0 {
		// All 0
		return 0
	}
	maxCount := func(a []uint32) int {
		var max uint32
		for _, v := range a {
			if v > max {
				max = v
			}
		}
		return int(max)
	}
	cnt := maxCount(hist[:maxSym])
	if cnt == len(data) {
		// RLE
		return 0
	}
	enc.HistogramFinished(maxSym, cnt)
	err := enc.normalizeCount(len(data))
	if err != nil {
		return 0
	}
	_, err = enc.writeCount(nil)
	if err != nil {
		panic(err)
	}
	return 1
}

// encode will encode the block and append the output in b.output.
// Previous offset codes must be pushed if more blocks are expected.
func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error {
	if len(b.sequences) == 0 {
		return b.encodeLits(b.literals, rawAllLits)
	}
	if len(b.sequences) == 1 && len(org) > 0 && len(b.literals) <= 1 {
		// Check common RLE cases.
		seq := b.sequences[0]
		if seq.litLen == uint32(len(b.literals)) && seq.offset-3 == 1 {
			// Offset == 1 and 0 or 1 literals.
			b.encodeRLE(org[0], b.sequences[0].matchLen+zstdMinMatch+seq.litLen)
			return nil
		}
	}

	// We want some difference to at least account for the headers.
	saved := b.size - len(b.literals) - (b.size >> 6)
	if saved < 16 {
		if org == nil {
			return errIncompressible
		}
		b.popOffsets()
		return b.encodeLits(org, rawAllLits)
	}

	var bh blockHeader
	var lh literalsHeader
	bh.setLast(b.last)
	bh.setType(blockTypeCompressed)
	// Store offset of the block header. Needed when we know the size.
	bhOffset := len(b.output)
	b.output = bh.appendTo(b.output)

	var (
		out            []byte
		reUsed, single bool
		err            error
	)
	if b.dictLitEnc != nil {
		b.litEnc.TransferCTable(b.dictLitEnc)
		b.litEnc.Reuse = huff0.ReusePolicyAllow
		b.dictLitEnc = nil
	}
	if len(b.literals) >= 1024 && !raw {
		// Use 4 Streams.
		out, reUsed, err = huff0.Compress4X(b.literals, b.litEnc)
	} else if len(b.literals) > 16 && !raw {
		// Use 1 stream
		single = true
		out, reUsed, err = huff0.Compress1X(b.literals, b.litEnc)
	} else {
		err = huff0.ErrIncompressible
	}
	if err == nil && len(out)+5 > len(b.literals) {
		// If we are close, we may still be worse or equal to raw.
		var lh literalsHeader
		lh.setSize(len(b.literals))
		szRaw := lh.size()
		lh.setSizes(len(out), len(b.literals), single)
		szComp := lh.size()
		if len(out)+szComp >= len(b.literals)+szRaw {
			err = huff0.ErrIncompressible
		}
	}
	switch err {
	case huff0.ErrIncompressible:
		lh.setType(literalsBlockRaw)
		lh.setSize(len(b.literals))
		b.output = lh.appendTo(b.output)
		b.output = append(b.output, b.literals...)
		if debugEncoder {
			println("Adding literals RAW, length", len(b.literals))
		}
	case huff0.ErrUseRLE:
		lh.setType(literalsBlockRLE)
		lh.setSize(len(b.literals))
		b.output = lh.appendTo(b.output)
		b.output = append(b.output, b.literals[0])
		if debugEncoder {
			println("Adding literals RLE")
		}
	case nil:
		// Compressed litLen...
		if reUsed {
			if debugEncoder {
				println("reused tree")
			}
			lh.setType(literalsBlockTreeless)
		} else {
			if debugEncoder {
				println("new tree, size:", len(b.litEnc.OutTable))
			}
			lh.setType(literalsBlockCompressed)
			if debugEncoder {
				_, _, err := huff0.ReadTable(out, nil)
				if err != nil {
					panic(err)
				}
			}
		}
		lh.setSizes(len(out), len(b.literals), single)
		if debugEncoder {
			printf("Compressed %d literals to %d bytes", len(b.literals), len(out))
			println("Adding literal header:", lh)
		}
		b.output = lh.appendTo(b.output)
		b.output = append(b.output, out...)
		b.litEnc.Reuse = huff0.ReusePolicyAllow
		if debugEncoder {
			println("Adding literals compressed")
		}
	default:
		if debugEncoder {
			println("Adding literals ERROR:", err)
		}
		return err
	}
	// Sequence compression

	// Write the number of sequences, following the RFC 8878 sequences
	// section header: one byte for up to 127 sequences, two bytes with the
	// high bit set below 0x7f00, and a 0xff marker followed by the
	// little-endian remainder above that.
	switch {
	case len(b.sequences) < 128:
		b.output = append(b.output, uint8(len(b.sequences)))
	case len(b.sequences) < 0x7f00:
		n := len(b.sequences)
		b.output = append(b.output, 128+uint8(n>>8), uint8(n))
	default:
		n := len(b.sequences) - 0x7f00
		b.output = append(b.output, 255, uint8(n), uint8(n>>8))
	}
	if debugEncoder {
		println("Encoding", len(b.sequences), "sequences")
	}
	b.genCodes()
	llEnc := b.coders.llEnc
	ofEnc := b.coders.ofEnc
	mlEnc := b.coders.mlEnc
	err = llEnc.normalizeCount(len(b.sequences))
	if err != nil {
		return err
	}
	err = ofEnc.normalizeCount(len(b.sequences))
	if err != nil {
		return err
	}
	err = mlEnc.normalizeCount(len(b.sequences))
	if err != nil {
		return err
	}

	// Choose the best compression mode for each type.
	// Will evaluate the new vs predefined and previous.
	chooseComp := func(cur, prev, preDef *fseEncoder) (*fseEncoder, seqCompMode) {
		// See if predefined/previous is better
		hist := cur.count[:cur.symbolLen]
		nSize := cur.approxSize(hist) + cur.maxHeaderSize()
		predefSize := preDef.approxSize(hist)
		prevSize := prev.approxSize(hist)

		// Add a small penalty for new encoders.
		// Don't bother with extremely small (<2 byte gains).
		nSize = nSize + (nSize+2*8*16)>>4
		switch {
		case predefSize <= prevSize && predefSize <= nSize || forcePreDef:
			if debugEncoder {
				println("Using predefined", predefSize>>3, "<=", nSize>>3)
			}
			return preDef, compModePredefined
		case prevSize <= nSize:
			if debugEncoder {
				println("Using previous", prevSize>>3, "<=", nSize>>3)
			}
			return prev, compModeRepeat
		default:
			if debugEncoder {
				println("Using new, predef", predefSize>>3, ". previous:", prevSize>>3, ">", nSize>>3, "header max:", cur.maxHeaderSize()>>3, "bytes")
				println("tl:", cur.actualTableLog, "symbolLen:", cur.symbolLen, "norm:", cur.norm[:cur.symbolLen], "hist", cur.count[:cur.symbolLen])
			}
			return cur, compModeFSE
		}
	}

	// Write compression mode
	var mode uint8
	if llEnc.useRLE {
		mode |= uint8(compModeRLE) << 6
		llEnc.setRLE(b.sequences[0].llCode)
		if debugEncoder {
			println("llEnc.useRLE")
		}
	} else {
		var m seqCompMode
		llEnc, m = chooseComp(llEnc, b.coders.llPrev, &fsePredefEnc[tableLiteralLengths])
		mode |= uint8(m) << 6
	}
	if ofEnc.useRLE {
		mode |= uint8(compModeRLE) << 4
		ofEnc.setRLE(b.sequences[0].ofCode)
		if debugEncoder {
			println("ofEnc.useRLE")
		}
	} else {
		var m seqCompMode
		ofEnc, m = chooseComp(ofEnc, b.coders.ofPrev, &fsePredefEnc[tableOffsets])
		mode |= uint8(m) << 4
	}
	if mlEnc.useRLE {
		mode |= uint8(compModeRLE) << 2
		mlEnc.setRLE(b.sequences[0].mlCode)
		if debugEncoder {
			println("mlEnc.useRLE, code: ", b.sequences[0].mlCode, "value", b.sequences[0].matchLen)
		}
	} else {
		var m seqCompMode
		mlEnc, m = chooseComp(mlEnc, b.coders.mlPrev, &fsePredefEnc[tableMatchLengths])
		mode |= uint8(m) << 2
	}
	b.output = append(b.output, mode)
	if debugEncoder {
		printf("Compression modes: 0b%b", mode)
	}
	b.output, err = llEnc.writeCount(b.output)
	if err != nil {
		return err
	}
	start := len(b.output)
	b.output, err = ofEnc.writeCount(b.output)
	if err != nil {
		return err
	}
	if false {
		println("block:", b.output[start:], "tablelog", ofEnc.actualTableLog, "maxcount:", ofEnc.maxCount)
		fmt.Printf("selected TableLog: %d, Symbol length: %d\n", ofEnc.actualTableLog, ofEnc.symbolLen)
		for i, v := range ofEnc.norm[:ofEnc.symbolLen] {
			fmt.Printf("%3d: %5d -> %4d \n", i, ofEnc.count[i], v)
		}
	}
	b.output, err = mlEnc.writeCount(b.output)
	if err != nil {
		return err
	}

	// Maybe in block?
	wr := &b.wr
	wr.reset(b.output)

	var ll, of, ml cState

	// Current sequence
	seq := len(b.sequences) - 1
	s := b.sequences[seq]
	llEnc.setBits(llBitsTable[:])
	mlEnc.setBits(mlBitsTable[:])
	ofEnc.setBits(nil)

	llTT, ofTT, mlTT := llEnc.ct.symbolTT[:256], ofEnc.ct.symbolTT[:256], mlEnc.ct.symbolTT[:256]

	// We have 3 bounds checks here (and in the loop).
	// Since we are iterating backwards it is kinda hard to avoid.
	llB, ofB, mlB := llTT[s.llCode], ofTT[s.ofCode], mlTT[s.mlCode]
	ll.init(wr, &llEnc.ct, llB)
	of.init(wr, &ofEnc.ct, ofB)
	wr.flush32()
	ml.init(wr, &mlEnc.ct, mlB)

	// Each of these lookups also generates a bounds check.
	wr.addBits32NC(s.litLen, llB.outBits)
	wr.addBits32NC(s.matchLen, mlB.outBits)
	wr.flush32()
	wr.addBits32NC(s.offset, ofB.outBits)
	if debugSequences {
		println("Encoded seq", seq, s, "codes:", s.llCode, s.mlCode, s.ofCode, "states:", ll.state, ml.state, of.state, "bits:", llB, mlB, ofB)
	}
	seq--
	// Store sequences in reverse...
	for seq >= 0 {
		s = b.sequences[seq]

		ofB := ofTT[s.ofCode]
		wr.flush32() // tablelog max is below 8 for each, so it will fill max 24 bits.
		//of.encode(ofB)
		nbBitsOut := (uint32(of.state) + ofB.deltaNbBits) >> 16
		dstState := int32(of.state>>(nbBitsOut&15)) + int32(ofB.deltaFindState)
		wr.addBits16NC(of.state, uint8(nbBitsOut))
		of.state = of.stateTable[dstState]

		// Accumulate extra bits.
		outBits := ofB.outBits & 31
		extraBits := uint64(s.offset & bitMask32[outBits])
		extraBitsN := outBits

		mlB := mlTT[s.mlCode]
		//ml.encode(mlB)
		nbBitsOut = (uint32(ml.state) + mlB.deltaNbBits) >> 16
		dstState = int32(ml.state>>(nbBitsOut&15)) + int32(mlB.deltaFindState)
		wr.addBits16NC(ml.state, uint8(nbBitsOut))
		ml.state = ml.stateTable[dstState]

		outBits = mlB.outBits & 31
		extraBits = extraBits<<outBits | uint64(s.matchLen&bitMask32[outBits])
		extraBitsN += outBits

		llB := llTT[s.llCode]
		//ll.encode(llB)
		nbBitsOut = (uint32(ll.state) + llB.deltaNbBits) >> 16
		dstState = int32(ll.state>>(nbBitsOut&15)) + int32(llB.deltaFindState)
		wr.addBits16NC(ll.state, uint8(nbBitsOut))
		ll.state = ll.stateTable[dstState]

		outBits = llB.outBits & 31
		extraBits = extraBits<<outBits | uint64(s.litLen&bitMask32[outBits])
		extraBitsN += outBits

		wr.flush32()
		wr.addBits64NC(extraBits, extraBitsN)

		if debugSequences {
			println("Encoded seq", seq, s)
		}

		seq--
	}
	ml.flush(mlEnc.actualTableLog)
	of.flush(ofEnc.actualTableLog)
	ll.flush(llEnc.actualTableLog)
	wr.close()
	b.output = wr.out

	// Maybe even add a bigger margin.
	if len(b.output)-3-bhOffset >= b.size {
		// Discard and encode as raw block.
		b.output = b.encodeRawTo(b.output[:bhOffset], org)
		b.popOffsets()
		b.litEnc.Reuse = huff0.ReusePolicyNone
		return nil
	}

	// Size is output minus block header.
	bh.setSize(uint32(len(b.output)-bhOffset) - 3)
	if debugEncoder {
		println("Rewriting block header", bh)
	}
	_ = bh.appendTo(b.output[bhOffset:bhOffset])
	b.coders.setPrev(llEnc, mlEnc, ofEnc)
	return nil
}

var errIncompressible = errors.New("incompressible")

func (b *blockEnc) genCodes() {
	if len(b.sequences) == 0 {
		// nothing to do
		return
	}
	if len(b.sequences) > math.MaxUint16 {
		panic("can only encode up to 64K sequences")
	}
	// No bounds checks after here:
	llH := b.coders.llEnc.Histogram()
	ofH := b.coders.ofEnc.Histogram()
	mlH := b.coders.mlEnc.Histogram()
	for i := range llH {
		llH[i] = 0
	}
	for i := range ofH {
		ofH[i] = 0
	}
	for i := range mlH {
		mlH[i] = 0
	}

	var llMax, ofMax, mlMax uint8
	for i := range b.sequences {
		seq := &b.sequences[i]
		v := llCode(seq.litLen)
		seq.llCode = v
		llH[v]++
		if v > llMax {
			llMax = v
		}

		v = ofCode(seq.offset)
		seq.ofCode = v
		ofH[v]++
		if v > ofMax {
			ofMax = v
		}

		v = mlCode(seq.matchLen)
		seq.mlCode = v
		mlH[v]++
		if v > mlMax {
			mlMax = v
			if debugAsserts && mlMax > maxMatchLengthSymbol {
				panic(fmt.Errorf("mlMax > maxMatchLengthSymbol (%d), matchlen: %d", mlMax, seq.matchLen))
			}
		}
	}
	maxCount := func(a []uint32) int {
		var max uint32
		for _, v := range a {
			if v > max {
				max = v
			}
		}
		return int(max)
	}
	if debugAsserts && mlMax > maxMatchLengthSymbol {
		panic(fmt.Errorf("mlMax > maxMatchLengthSymbol (%d)", mlMax))
	}
	if debugAsserts && ofMax > maxOffsetBits {
		panic(fmt.Errorf("ofMax > maxOffsetBits (%d)", ofMax))
	}
	if debugAsserts && llMax > maxLiteralLengthSymbol {
		panic(fmt.Errorf("llMax > maxLiteralLengthSymbol (%d)", llMax))
	}

	b.coders.mlEnc.HistogramFinished(mlMax, maxCount(mlH[:mlMax+1]))
	b.coders.ofEnc.HistogramFinished(ofMax, maxCount(ofH[:ofMax+1]))
	b.coders.llEnc.HistogramFinished(llMax, maxCount(llH[:llMax+1]))
}
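
// exampleBlockFlow is an illustrative sketch (a hypothetical helper, not
// part of the encoder) of how the pieces above fit together for a
// literals-only block: init the encoder, stage the literals, mark the block
// final and encode. With no sequences staged, encode() defers to
// encodeLits().
func exampleBlockFlow(src []byte) ([]byte, error) {
	var b blockEnc
	b.init()
	b.initNewEncode()
	b.literals = append(b.literals[:0], src...)
	b.size = len(src)
	b.last = true
	if err := b.encode(src, false, false); err != nil {
		return nil, err
	}
	return b.output, nil
}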