seqdec_generic.go

//go:build !amd64 || appengine || !gc || noasm
// +build !amd64 appengine !gc noasm
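
// This file is only built when the amd64 assembly implementations are not
// used (see the build constraints above); it provides pure Go versions of
// the sequence decoders.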

package zstd

import (
	"fmt"
	"io"
)

// decode sequences from the stream with the provided history but without a dictionary.
func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) {
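	// The generic build has no assembly fast path, so report false and let
	// the caller use the regular sequence decoding path instead.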
	return false, nil
}

// decode sequences from the stream without the provided history.
func (s *sequenceDecs) decode(seqs []seqVals) error {
	br := s.br

	// Grab full-size tables to avoid bounds checks.
	llTable, mlTable, ofTable := s.litLengths.fse.dt[:maxTablesize], s.matchLengths.fse.dt[:maxTablesize], s.offsets.fse.dt[:maxTablesize]
	llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state
	s.seqSize = 0
	litRemain := len(s.literals)

	maxBlockSize := maxCompressedBlockSize
	if s.windowSize < maxBlockSize {
		maxBlockSize = s.windowSize
	}
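
	// Decode each sequence: read the literal length, offset and match length
	// from the three FSE states, resolve repeat offsets, validate the values
	// and store them in seqs[i].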
	for i := range seqs {
		var ll, mo, ml int
		// Fast path when enough input remains that reading the extra bits
		// for all three codes cannot run out of data.
		if len(br.in) > 4+((maxOffsetBits+16+16)>>3) {
			// inlined function:
			// ll, mo, ml = s.nextFast(br, llState, mlState, ofState)

			// Final will not read from stream.
			var llB, mlB, moB uint8
			ll, llB = llState.final()
			ml, mlB = mlState.final()
			mo, moB = ofState.final()

			// extra bits are stored in reverse order.
			br.fillFast()
			mo += br.getBits(moB)
			if s.maxBits > 32 {
				br.fillFast()
			}
			ml += br.getBits(mlB)
			ll += br.getBits(llB)
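
			// An offset code that needed more than one extra bit is stored
			// directly as the most recent offset; smaller values select one
			// of the three previous offsets (the inlined adjustOffset below).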
			if moB > 1 {
				s.prevOffset[2] = s.prevOffset[1]
				s.prevOffset[1] = s.prevOffset[0]
				s.prevOffset[0] = mo
			} else {
				// mo = s.adjustOffset(mo, ll, moB)
				// Inlined for a significant speedup.
				if ll == 0 {
					// There is an exception though, when current sequence's literals_length = 0.
					// In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2,
					// an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte.
					mo++
				}

				if mo == 0 {
					mo = s.prevOffset[0]
				} else {
					var temp int
					if mo == 3 {
						temp = s.prevOffset[0] - 1
					} else {
						temp = s.prevOffset[mo]
					}

					if temp == 0 {
						// 0 is not valid; input is corrupted; force offset to 1.
						println("WARNING: temp was 0")
						temp = 1
					}

					if mo != 1 {
						s.prevOffset[2] = s.prevOffset[1]
					}
					s.prevOffset[1] = s.prevOffset[0]
					s.prevOffset[0] = temp
					mo = temp
				}
			}
			br.fillFast()
		} else {
			// Not enough input remains for the fast path above.
			if br.overread() {
				if debugDecoder {
					printf("reading sequence %d, exceeded available data\n", i)
				}
				return io.ErrUnexpectedEOF
			}
			ll, mo, ml = s.next(br, llState, mlState, ofState)
			br.fill()
		}

		if debugSequences {
			println("Seq", i, "Litlen:", ll, "mo:", mo, "(abs) ml:", ml)
		}
		// Evaluate.
		// We might be doing this async, so do it early.
		if mo == 0 && ml > 0 {
			return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml)
		}
		if ml > maxMatchLen {
			return fmt.Errorf("match len (%d) bigger than max allowed length", ml)
		}
		s.seqSize += ll + ml
		if s.seqSize > maxBlockSize {
			return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
		}
		litRemain -= ll
		if litRemain < 0 {
			return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, litRemain+ll)
		}
		seqs[i] = seqVals{
			ll: ll,
			ml: ml,
			mo: mo,
		}

		if i == len(seqs)-1 {
			// This is the last sequence, so we shouldn't update state.
			break
		}
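
		// Advance all three FSE states with a single bit-buffer read: the
		// lowest bits update the offset state, the middle bits the match
		// length state, and the highest bits the literal length state.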
		// Manually inlined; updating all three states at once is roughly
		// 5-20% faster than updating them one by one.
		nBits := llState.nbBits() + mlState.nbBits() + ofState.nbBits()
		if nBits == 0 {
			llState = llTable[llState.newState()&maxTableMask]
			mlState = mlTable[mlState.newState()&maxTableMask]
			ofState = ofTable[ofState.newState()&maxTableMask]
		} else {
			bits := br.get32BitsFast(nBits)
			lowBits := uint16(bits >> ((ofState.nbBits() + mlState.nbBits()) & 31))
			llState = llTable[(llState.newState()+lowBits)&maxTableMask]

			lowBits = uint16(bits >> (ofState.nbBits() & 31))
			lowBits &= bitMask[mlState.nbBits()&15]
			mlState = mlTable[(mlState.newState()+lowBits)&maxTableMask]

			lowBits = uint16(bits) & bitMask[ofState.nbBits()&15]
			ofState = ofTable[(ofState.newState()+lowBits)&maxTableMask]
		}
	}
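
	// Literals remaining after the last sequence are appended to the block,
	// so they count toward the decoded output size as well.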
	s.seqSize += litRemain
	if s.seqSize > maxBlockSize {
		return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
	}
	err := br.close()
	if err != nil {
		printf("Closing sequences: %v, %+v\n", err, *br)
	}
	return err
}

// executeSimple handles cases when a dictionary is not used.
func (s *sequenceDecs) executeSimple(seqs []seqVals, hist []byte) error {
	// Ensure we have enough output space. The append/reslice below grows
	// the capacity of s.out without changing its length.
	if len(s.out)+s.seqSize > cap(s.out) {
		addBytes := s.seqSize + len(s.out)
		s.out = append(s.out, make([]byte, addBytes)...)
		s.out = s.out[:len(s.out)-addBytes]
	}

	if debugDecoder {
		printf("Execute %d seqs with literals: %d into %d bytes\n", len(seqs), len(s.literals), s.seqSize)
	}

	var t = len(s.out)
	out := s.out[:t+s.seqSize]

	for _, seq := range seqs {
		// Add literals
		copy(out[t:], s.literals[:seq.ll])
		t += seq.ll
		s.literals = s.literals[seq.ll:]

		// Malformed input
		if seq.mo > t+len(hist) || seq.mo > s.windowSize {
			return fmt.Errorf("match offset (%d) bigger than current history (%d)", seq.mo, t+len(hist))
		}

		// Copy from history when the match offset reaches back before the
		// start of the current block.
		if v := seq.mo - t; v > 0 {
			// v is the start position in history, counted from the end.
			start := len(hist) - v
			if seq.ml > v {
				// Some of the match extends into the current block.
				// Copy the remainder of the history first.
				copy(out[t:], hist[start:])
				t += v
				seq.ml -= v
			} else {
				copy(out[t:], hist[start:start+seq.ml])
				t += seq.ml
				continue
			}
		}

		// We must be in the current buffer now.
		if seq.ml > 0 {
			start := t - seq.mo
			if seq.ml <= t-start {
				// No overlap.
				copy(out[t:], out[start:start+seq.ml])
				t += seq.ml
			} else {
				// Overlapping copy: the source range extends into the bytes
				// being written, so copy one byte at a time so that earlier
				// output can be re-read and the pattern repeats correctly.
				src := out[start : start+seq.ml]
				dst := out[t:]
				dst = dst[:len(src)]
				t += len(src)
				// Destination is the space we just added.
				for i := range src {
					dst[i] = src[i]
				}
			}
		}
	}

	// Add final literals
	copy(out[t:], s.literals)
	if debugDecoder {
		t += len(s.literals)
		if t != len(out) {
			panic(fmt.Errorf("length mismatch, want %d, got %d, ss: %d", len(out), t, s.seqSize))
		}
	}
	s.out = out

	return nil
}