// Copyright 2009 The Go Authors. All rights reserved.
// Copyright (c) 2015 Klaus Post
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package flate
import (
"encoding/binary"
"errors"
"fmt"
"io"
"math"
)
const (
	// Compression levels accepted by NewWriter, mirroring compress/flate.
	NoCompression      = 0
	BestSpeed          = 1
	BestCompression    = 9
	DefaultCompression = -1

	// HuffmanOnly disables Lempel-Ziv match searching and only performs Huffman
	// entropy encoding. This mode is useful in compressing data that has
	// already been compressed with an LZ style algorithm (e.g. Snappy or LZ4)
	// that lacks an entropy encoder. Compression gains are achieved when
	// certain bytes in the input stream occur more frequently than others.
	//
	// Note that HuffmanOnly produces a compressed output that is
	// RFC 1951 compliant. That is, any valid DEFLATE decompressor will
	// continue to be able to decompress this output.
	HuffmanOnly         = -2
	ConstantCompression = HuffmanOnly // compatibility alias.

	logWindowSize    = 15
	windowSize       = 1 << logWindowSize
	windowMask       = windowSize - 1
	logMaxOffsetSize = 15  // Standard DEFLATE
	minMatchLength   = 4   // The smallest match that the compressor looks for
	maxMatchLength   = 258 // The longest match for the compressor
	minOffsetSize    = 1   // The shortest offset that makes any sense

	// The maximum number of tokens we will encode at the time.
	// Smaller sizes usually creates less optimal blocks.
	// Bigger can make context switching slow.
	// We use this for levels 7-9, so we make it big.
	maxFlateBlockTokens = 1 << 15
	maxStoreBlockSize   = 65535
	hashBits            = 17 // After 17 performance degrades
	hashSize            = 1 << hashBits
	hashMask            = (1 << hashBits) - 1
	hashShift           = (hashBits + minMatchLength - 1) / minMatchLength
	maxHashOffset       = 1 << 28

	// skipNever disables skip-ahead; used as fastSkipHashing for levels 7-9.
	skipNever = math.MaxInt32

	// debugDeflate enables the extra sanity checks in deflateLazy.
	debugDeflate = false
)
// compressionLevel holds the search-tuning parameters for one compression level.
// From their usage in this file: nice caps the match length we keep searching
// beyond, chain bounds how many hash-chain entries findMatch inspects, lazy
// bounds when lazy matching keeps looking for a better match, and level is the
// public compression level this entry corresponds to. good and fastSkipHashing
// follow zlib conventions (fastSkipHashing is skipNever for levels 7-9) —
// their exact use is outside this view.
type compressionLevel struct {
	good, lazy, nice, chain, fastSkipHashing, level int
}
// Compression levels have been rebalanced from zlib deflate defaults
// to give a bigger spread in speed and compression.
// See https://blog.klauspost.com/rebalancing-deflate-compression-levels/
//
// Field order is: good, lazy, nice, chain, fastSkipHashing, level.
var levels = []compressionLevel{
	{}, // 0
	// Level 1-6 uses specialized algorithm - values not used
	{0, 0, 0, 0, 0, 1},
	{0, 0, 0, 0, 0, 2},
	{0, 0, 0, 0, 0, 3},
	{0, 0, 0, 0, 0, 4},
	{0, 0, 0, 0, 0, 5},
	{0, 0, 0, 0, 0, 6},
	// Levels 7-9 use increasingly more lazy matching
	// and increasingly stringent conditions for "good enough".
	{8, 12, 16, 24, skipNever, 7},
	{16, 30, 40, 64, skipNever, 8},
	{32, 258, 258, 1024, skipNever, 9},
}
// advancedState contains state for the advanced levels, with bigger hash tables, etc.
type advancedState struct {
	// deflate state
	length         int // length of the match found at the previous position (lazy matching)
	offset         int // offset of that match
	maxInsertIndex int // last index at which a full 4-byte hash can be computed
	chainHead      int // head of the hash chain for the current position (hashOffset-adjusted)
	hashOffset     int // bias added to stored chain indices; grows as the window slides
	ii             uint16 // position of last match, intended to overflow to reset.

	// input window: unprocessed data is window[index:windowEnd]
	index int
	// hashMatch is scratch space for bulkHash4 when inserting runs of hashes.
	hashMatch [maxMatchLength + minMatchLength]uint32

	// Input hash chains
	// hashHead[hashValue] contains the largest inputIndex with the specified hash value
	// If hashHead[hashValue] is within the current window, then
	// hashPrev[hashHead[hashValue] & windowMask] contains the previous index
	// with the same hash value.
	hashHead [hashSize]uint32
	hashPrev [windowSize]uint32
}
// compressor drives a single deflate stream: it buffers input in window,
// turns it into tokens via the configured step function, and emits DEFLATE
// blocks through w.
type compressor struct {
	compressionLevel

	h *huffmanEncoder    // literal frequency coder used by findMatch gain estimation
	w *huffmanBitWriter  // destination for encoded blocks

	// compression algorithm
	fill func(*compressor, []byte) int // copy data to window
	step func(*compressor)             // process window

	window     []byte
	windowEnd  int
	blockStart int // window index where current tokens start
	err        error

	// queued output tokens
	tokens tokens
	fast   fastEnc        // specialized encoder for levels 1-6 and custom windows
	state  *advancedState // only set for levels 7-9

	sync          bool // requesting flush
	byteAvailable bool // if true, still need to process window[index-1].
}
// fillDeflate copies b into the input window for the advanced (7-9) levels,
// sliding the window by windowSize when it is nearly full and rebasing all
// hash-chain entries so they stay valid. Returns the number of bytes consumed.
func (d *compressor) fillDeflate(b []byte) int {
	s := d.state
	if s.index >= 2*windowSize-(minMatchLength+maxMatchLength) {
		// shift the window by windowSize
		//copy(d.window[:], d.window[windowSize:2*windowSize])
		*(*[windowSize]byte)(d.window) = *(*[windowSize]byte)(d.window[windowSize:])
		s.index -= windowSize
		d.windowEnd -= windowSize
		if d.blockStart >= windowSize {
			d.blockStart -= windowSize
		} else {
			// blockStart would go negative; park it far out of range so
			// writeBlock's d.blockStart <= index check fails.
			d.blockStart = math.MaxInt32
		}
		s.hashOffset += windowSize
		if s.hashOffset > maxHashOffset {
			// hashOffset is about to grow without bound; collapse it back to 1
			// and rebase every stored chain entry by the same delta.
			delta := s.hashOffset - 1
			s.hashOffset -= delta
			s.chainHead -= delta

			// Iterate over slices instead of arrays to avoid copying
			// the entire table onto the stack (Issue #18625).
			for i, v := range s.hashPrev[:] {
				if int(v) > delta {
					s.hashPrev[i] = uint32(int(v) - delta)
				} else {
					s.hashPrev[i] = 0
				}
			}
			for i, v := range s.hashHead[:] {
				if int(v) > delta {
					s.hashHead[i] = uint32(int(v) - delta)
				} else {
					s.hashHead[i] = 0
				}
			}
		}
	}
	n := copy(d.window[d.windowEnd:], b)
	d.windowEnd += n
	return n
}
// writeBlock emits the queued tokens as a dynamic-huffman DEFLATE block,
// together with the raw window bytes they cover (needed by the bit writer
// to decide between stored/dynamic encoding). A block is only written when
// there is content (index > 0) or the stream is ending (eof).
func (d *compressor) writeBlock(tok *tokens, index int, eof bool) error {
	if index <= 0 && !eof {
		// Nothing to flush yet.
		return nil
	}
	var window []byte
	if d.blockStart <= index {
		window = d.window[d.blockStart:index]
	}
	d.blockStart = index
	d.w.writeBlockDynamic(tok, eof, window, d.sync)
	return d.w.err
}
// writeBlockSkip writes the current block and uses the number of tokens
// to determine if the block should be stored on no matches, or
// only huffman encoded.
func (d *compressor) writeBlockSkip(tok *tokens, index int, eof bool) error {
	if index <= 0 && !eof {
		return nil
	}
	if d.blockStart > index {
		// No raw window bytes available for this block.
		d.w.writeBlock(tok, eof, nil)
		d.blockStart = index
		return d.w.err
	}
	window := d.window[d.blockStart:index]
	if int(tok.n) > len(window)-int(tok.n>>6) {
		// Matching removed less than a 64th of all literals:
		// huffman-compress the raw window instead.
		d.w.writeBlockHuff(eof, window, d.sync)
	} else {
		// Write a dynamic huffman block.
		d.w.writeBlockDynamic(tok, eof, window, d.sync)
	}
	d.blockStart = index
	return d.w.err
}
// fillWindow will fill the current window with the supplied
// dictionary and calculate all hashes.
// This is much faster than doing a full encode.
// Should only be used after a start/reset.
func (d *compressor) fillWindow(b []byte) {
	// Do not fill window if we are in store-only or huffman mode.
	// (Levels 0 and -2 do no matching; custom-window levels are <= -MinCustomWindowSize
	// and fall through to the fast-encoder path below.)
	if d.level <= 0 && d.level > -MinCustomWindowSize {
		return
	}
	if d.fast != nil {
		// encode the last data, but discard the result
		if len(b) > maxMatchOffset {
			b = b[len(b)-maxMatchOffset:]
		}
		d.fast.Encode(&d.tokens, b)
		d.tokens.Reset()
		return
	}
	s := d.state
	// If we are given too much, cut it.
	if len(b) > windowSize {
		b = b[len(b)-windowSize:]
	}
	// Add all to window.
	n := copy(d.window[d.windowEnd:], b)

	// Calculate 256 hashes at the time (more L1 cache hits)
	loops := (n + 256 - minMatchLength) / 256
	for j := 0; j < loops; j++ {
		startindex := j * 256
		// Overlap chunks by minMatchLength-1 so every 4-byte window is hashed.
		end := startindex + 256 + minMatchLength - 1
		if end > n {
			end = n
		}
		tocheck := d.window[startindex:end]
		dstSize := len(tocheck) - minMatchLength + 1

		if dstSize <= 0 {
			continue
		}

		dst := s.hashMatch[:dstSize]
		bulkHash4(tocheck, dst)
		var newH uint32
		for i, val := range dst {
			di := i + startindex
			newH = val & hashMask
			// Get previous value with the same hash.
			// Our chain should point to the previous value.
			s.hashPrev[di&windowMask] = s.hashHead[newH]
			// Set the head of the hash chain to us.
			s.hashHead[newH] = uint32(di + s.hashOffset)
		}
	}
	// Update window information.
	d.windowEnd += n
	s.index = n
}
// Try to find a match starting at index whose length is greater than prevSize.
// We only look at chainCount possibilities before giving up.
// pos = s.index, prevHead = s.chainHead-s.hashOffset, prevLength=minMatchLength-1, lookahead
//
// Two strategies are used depending on d.chain:
//   - chain < 100: greedy — keep the longest match found while walking the chain.
//   - otherwise:   gain-based — a candidate must beat the estimated bit cost of
//     emitting its bytes as literals (using d.h, built in deflateLazy).
func (d *compressor) findMatch(pos int, prevHead int, lookahead int) (length, offset int, ok bool) {
	minMatchLook := maxMatchLength
	if lookahead < minMatchLook {
		minMatchLook = lookahead
	}

	win := d.window[0 : pos+minMatchLook]

	// We quit when we get a match that's at least nice long
	nice := len(win) - pos
	if d.nice < nice {
		nice = d.nice
	}

	// If we've got a match that's good enough, only look in 1/4 the chain.
	tries := d.chain
	length = minMatchLength - 1

	// wEnd is the byte one past the current best match; a candidate can only
	// be longer if it matches at this byte, so it is a cheap pre-filter.
	wEnd := win[pos+length]
	wPos := win[pos:]
	minIndex := pos - windowSize
	if minIndex < 0 {
		minIndex = 0
	}
	offset = 0

	if d.chain < 100 {
		for i := prevHead; tries > 0; tries-- {
			if wEnd == win[i+length] {
				n := matchLen(win[i:i+minMatchLook], wPos)
				if n > length {
					length = n
					offset = pos - i
					ok = true
					if n >= nice {
						// The match is good enough that we don't try to find a better one.
						break
					}
					wEnd = win[pos+n]
				}
			}
			if i <= minIndex {
				// hashPrev[i & windowMask] has already been overwritten, so stop now.
				break
			}
			// Follow the chain to the previous position with the same hash.
			i = int(d.state.hashPrev[i&windowMask]) - d.state.hashOffset
			if i < minIndex {
				break
			}
		}
		return
	}

	// Minimum gain to accept a match.
	cGain := 4

	// Some like it higher (CSV), some like it lower (JSON)
	const baseCost = 3
	// Base is 4 bytes at with an additional cost.
	// Matches must be better than this.

	for i := prevHead; tries > 0; tries-- {
		if wEnd == win[i+length] {
			n := matchLen(win[i:i+minMatchLook], wPos)
			if n > length {
				// Calculate gain. Estimate: literal bit cost saved minus the
				// extra bits needed to encode the offset/length pair.
				newGain := d.h.bitLengthRaw(wPos[:n]) - int(offsetExtraBits[offsetCode(uint32(pos-i))]) - baseCost - int(lengthExtraBits[lengthCodes[(n-3)&255]])

				//fmt.Println("gain:", newGain, "prev:", cGain, "raw:", d.h.bitLengthRaw(wPos[:n]), "this-len:", n, "prev-len:", length)
				if newGain > cGain {
					length = n
					offset = pos - i
					cGain = newGain
					ok = true
					if n >= nice {
						// The match is good enough that we don't try to find a better one.
						break
					}
					wEnd = win[pos+n]
				}
			}
		}
		if i <= minIndex {
			// hashPrev[i & windowMask] has already been overwritten, so stop now.
			break
		}
		i = int(d.state.hashPrev[i&windowMask]) - d.state.hashOffset
		if i < minIndex {
			break
		}
	}
	return
}
// writeStoredBlock emits buf as a single uncompressed (stored) DEFLATE block.
func (d *compressor) writeStoredBlock(buf []byte) error {
	d.w.writeStoredHeader(len(buf), false)
	if d.w.err != nil {
		return d.w.err
	}
	d.w.writeBytes(buf)
	return d.w.err
}
// hash4 returns a hash representation of the first 4 bytes
// of the supplied slice.
// The caller must ensure that len(b) >= 4.
func hash4(b []byte) uint32 {
	u := binary.LittleEndian.Uint32(b)
	return hash4u(u, hashBits)
}
// hash4u returns the hash of u to fit in a hash table with h bits.
// Preferably h should be a constant and should always be <32.
func hash4u(u uint32, h uint8) uint32 {
	shift := 32 - h
	return (u * prime4bytes) >> shift
}
// bulkHash4 will compute hashes using the same
// algorithm as hash4. dst[i] receives the hash of b[i:i+4];
// the caller must size dst to len(b)-3.
func bulkHash4(b []byte, dst []uint32) {
	if len(b) < 4 {
		return
	}
	// Maintain a rolling 4-byte little-endian value instead of re-reading
	// the window at every position.
	h := binary.LittleEndian.Uint32(b)
	dst[0] = hash4u(h, hashBits)
	n := len(b) - 3
	for i := 1; i < n; i++ {
		h = h>>8 | uint32(b[i+3])<<24
		dst[i] = hash4u(h, hashBits)
	}
}
// initDeflate allocates the double-length sliding window and resets the
// advanced matching state for levels 7-9.
func (d *compressor) initDeflate() {
	d.window = make([]byte, 2*windowSize)
	d.byteAvailable = false
	d.err = nil
	s := d.state
	if s == nil {
		return
	}
	s.index = 0
	s.hashOffset = 1
	s.length = minMatchLength - 1
	s.offset = 0
	s.chainHead = -1
}
// deflateLazy is the same as deflate, but with d.fastSkipHashing == skipNever,
// meaning it always has lazy matching on.
func (d *compressor) deflateLazy() {
	s := d.state
	// Sanity enables additional runtime tests.
	// It's intended to be used during development
	// to supplement the currently ad-hoc unit tests.
	const sanity = debugDeflate

	// Wait for enough lookahead unless we are flushing.
	if d.windowEnd-s.index < minMatchLength+maxMatchLength && !d.sync {
		return
	}
	// Build a literal-frequency Huffman coder over the pending input;
	// findMatch uses it (bitLengthRaw) for gain estimation when chain > 100.
	if d.windowEnd != s.index && d.chain > 100 {
		// Get literal huffman coder.
		if d.h == nil {
			d.h = newHuffmanEncoder(maxFlateBlockTokens)
		}
		var tmp [256]uint16
		for _, v := range d.window[s.index:d.windowEnd] {
			tmp[v]++
		}
		d.h.generate(tmp[:], 15)
	}

	// Last position where a full 4-byte hash can still be read.
	s.maxInsertIndex = d.windowEnd - (minMatchLength - 1)

	for {
		if sanity && s.index > d.windowEnd {
			panic("index > windowEnd")
		}
		lookahead := d.windowEnd - s.index
		if lookahead < minMatchLength+maxMatchLength {
			if !d.sync {
				return
			}
			if sanity && s.index > d.windowEnd {
				panic("index > windowEnd")
			}
			if lookahead == 0 {
				// Flush current output block if any.
				if d.byteAvailable {
					// There is still one pending token that needs to be flushed
					d.tokens.AddLiteral(d.window[s.index-1])
					d.byteAvailable = false
				}
				if d.tokens.n > 0 {
					if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil {
						return
					}
					d.tokens.Reset()
				}
				return
			}
		}
		if s.index < s.maxInsertIndex {
			// Update the hash
			hash := hash4(d.window[s.index:])
			ch := s.hashHead[hash]
			s.chainHead = int(ch)
			s.hashPrev[s.index&windowMask] = ch
			s.hashHead[hash] = uint32(s.index + s.hashOffset)
		}
		// Remember the match found at the previous position (lazy matching).
		prevLength := s.length
		prevOffset := s.offset
		s.length = minMatchLength - 1
		s.offset = 0
		minIndex := s.index - windowSize
		if minIndex < 0 {
			minIndex = 0
		}

		// Search at the current position only if the chain head is inside the
		// window and the previous match is short enough to be worth beating.
		if s.chainHead-s.hashOffset >= minIndex && lookahead > prevLength && prevLength < d.lazy {
			if newLength, newOffset, ok := d.findMatch(s.index, s.chainHead-s.hashOffset, lookahead); ok {
				s.length = newLength
				s.offset = newOffset
			}
		}

		// The previous match is at least as good as the current one: emit it.
		if prevLength >= minMatchLength && s.length <= prevLength {
			// No better match, but check for better match at end...
			//
			// Skip forward a number of bytes.
			// Offset of 2 seems to yield best results. 3 is sometimes better.
			const checkOff = 2

			// Check all, except full length
			if prevLength < maxMatchLength-checkOff {
				prevIndex := s.index - 1
				if prevIndex+prevLength < s.maxInsertIndex {
					end := lookahead
					if lookahead > maxMatchLength+checkOff {
						end = maxMatchLength + checkOff
					}
					end += prevIndex

					// Hash at match end.
					h := hash4(d.window[prevIndex+prevLength:])
					ch2 := int(s.hashHead[h]) - s.hashOffset - prevLength
					if prevIndex-ch2 != prevOffset && ch2 > minIndex+checkOff {
						length := matchLen(d.window[prevIndex+checkOff:end], d.window[ch2+checkOff:])
						// It seems like a pure length metric is best.
						if length > prevLength {
							prevLength = length
							prevOffset = prevIndex - ch2

							// Extend back...
							for i := checkOff - 1; i >= 0; i-- {
								if prevLength >= maxMatchLength || d.window[prevIndex+i] != d.window[ch2+i] {
									// Emit tokens we "owe"
									for j := 0; j <= i; j++ {
										d.tokens.AddLiteral(d.window[prevIndex+j])
										if d.tokens.n == maxFlateBlockTokens {
											// The block includes the current character
											if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil {
												return
											}
											d.tokens.Reset()
										}
										s.index++
										if s.index < s.maxInsertIndex {
											h := hash4(d.window[s.index:])
											ch := s.hashHead[h]
											s.chainHead = int(ch)
											s.hashPrev[s.index&windowMask] = ch
											s.hashHead[h] = uint32(s.index + s.hashOffset)
										}
									}
									break
								} else {
									prevLength++
								}
							}
						} else if false {
							// Check one further ahead.
							// Only rarely better, disabled for now.
							prevIndex++
							h := hash4(d.window[prevIndex+prevLength:])
							ch2 := int(s.hashHead[h]) - s.hashOffset - prevLength
							if prevIndex-ch2 != prevOffset && ch2 > minIndex+checkOff {
								length := matchLen(d.window[prevIndex+checkOff:end], d.window[ch2+checkOff:])
								// It seems like a pure length metric is best.
								if length > prevLength+checkOff {
									prevLength = length
									prevOffset = prevIndex - ch2
									prevIndex--

									// Extend back...
									for i := checkOff; i >= 0; i-- {
										if prevLength >= maxMatchLength || d.window[prevIndex+i] != d.window[ch2+i-1] {
											// Emit tokens we "owe"
											for j := 0; j <= i; j++ {
												d.tokens.AddLiteral(d.window[prevIndex+j])
												if d.tokens.n == maxFlateBlockTokens {
													// The block includes the current character
													if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil {
														return
													}
													d.tokens.Reset()
												}
												s.index++
												if s.index < s.maxInsertIndex {
													h := hash4(d.window[s.index:])
													ch := s.hashHead[h]
													s.chainHead = int(ch)
													s.hashPrev[s.index&windowMask] = ch
													s.hashHead[h] = uint32(s.index + s.hashOffset)
												}
											}
											break
										} else {
											prevLength++
										}
									}
								}
							}
						}
					}
				}
			}
			// There was a match at the previous step, and the current match is
			// not better. Output the previous match.
			d.tokens.AddMatch(uint32(prevLength-3), uint32(prevOffset-minOffsetSize))

			// Insert in the hash table all strings up to the end of the match.
			// index and index-1 are already inserted. If there is not enough
			// lookahead, the last two strings are not inserted into the hash
			// table.
			newIndex := s.index + prevLength - 1
			// Calculate missing hashes
			end := newIndex
			if end > s.maxInsertIndex {
				end = s.maxInsertIndex
			}
			end += minMatchLength - 1
			startindex := s.index + 1
			if startindex > s.maxInsertIndex {
				startindex = s.maxInsertIndex
			}
			tocheck := d.window[startindex:end]
			dstSize := len(tocheck) - minMatchLength + 1
			if dstSize > 0 {
				dst := s.hashMatch[:dstSize]
				bulkHash4(tocheck, dst)
				var newH uint32
				for i, val := range dst {
					di := i + startindex
					newH = val & hashMask
					// Get previous value with the same hash.
					// Our chain should point to the previous value.
					s.hashPrev[di&windowMask] = s.hashHead[newH]
					// Set the head of the hash chain to us.
					s.hashHead[newH] = uint32(di + s.hashOffset)
				}
			}

			s.index = newIndex
			d.byteAvailable = false
			s.length = minMatchLength - 1
			if d.tokens.n == maxFlateBlockTokens {
				// The block includes the current character
				if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil {
					return
				}
				d.tokens.Reset()
			}
			s.ii = 0
		} else {
			// Reset, if we got a match this run.
			if s.length >= minMatchLength {
				s.ii = 0
			}
			// We have a byte waiting. Emit it.
			if d.byteAvailable {
				s.ii++
				d.tokens.AddLiteral(d.window[s.index-1])
				if d.tokens.n == maxFlateBlockTokens {
					if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil {
						return
					}
					d.tokens.Reset()
				}
				s.index++

				// If we have a long run of no matches, skip additional bytes
				// Resets when s.ii overflows after 64KB.
				if n := int(s.ii) - d.chain; n > 0 {
					n = 1 + int(n>>6)
					for j := 0; j < n; j++ {
						if s.index >= d.windowEnd-1 {
							break
						}
						d.tokens.AddLiteral(d.window[s.index-1])
						if d.tokens.n == maxFlateBlockTokens {
							if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil {
								return
							}
							d.tokens.Reset()
						}
						// Index...
						if s.index < s.maxInsertIndex {
							h := hash4(d.window[s.index:])
							ch := s.hashHead[h]
							s.chainHead = int(ch)
							s.hashPrev[s.index&windowMask] = ch
							s.hashHead[h] = uint32(s.index + s.hashOffset)
						}
						s.index++
					}
					// Flush last byte
					d.tokens.AddLiteral(d.window[s.index-1])
					d.byteAvailable = false
					// s.length = minMatchLength - 1 // not needed, since s.ii is reset above, so it should never be > minMatchLength
					if d.tokens.n == maxFlateBlockTokens {
						if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil {
							return
						}
						d.tokens.Reset()
					}
				}
			} else {
				// Defer emitting this byte: the next iteration may find a
				// match starting here (lazy matching).
				s.index++
				d.byteAvailable = true
			}
		}
	}
}
// store is the step function for NoCompression: it flushes the window as a
// stored block once it is completely full or a flush/close is requested.
func (d *compressor) store() {
	if d.windowEnd == 0 {
		return
	}
	if d.windowEnd == maxStoreBlockSize || d.sync {
		d.err = d.writeStoredBlock(d.window[:d.windowEnd])
		d.windowEnd = 0
	}
}
// fillBlock copies as much of b as fits into the window buffer.
// The number of bytes copied is returned.
func (d *compressor) fillBlock(b []byte) int {
	copied := copy(d.window[d.windowEnd:], b)
	d.windowEnd += copied
	return copied
}
// storeHuff will compress and store the currently added data,
// if enough has been accumulated or we at the end of the stream.
// Any error that occurred will be in d.err
func (d *compressor) storeHuff() {
	if d.windowEnd == 0 {
		return
	}
	if d.windowEnd < len(d.window) && !d.sync {
		// Window not full and no flush requested: keep accumulating.
		return
	}
	d.w.writeBlockHuff(false, d.window[:d.windowEnd], d.sync)
	d.err = d.w.err
	d.windowEnd = 0
}
// storeFast will compress and store the currently added data,
// if enough has been accumulated or we at the end of the stream.
// Any error that occurred will be in d.err
func (d *compressor) storeFast() {
	// We only compress if we have maxStoreBlockSize.
	if d.windowEnd < len(d.window) {
		if !d.sync {
			return
		}
		// Handle extremely small sizes.
		if d.windowEnd < 128 {
			if d.windowEnd == 0 {
				return
			}
			if d.windowEnd <= 32 {
				// Too small to benefit from entropy coding; store raw.
				d.err = d.writeStoredBlock(d.window[:d.windowEnd])
			} else {
				d.w.writeBlockHuff(false, d.window[:d.windowEnd], true)
				d.err = d.w.err
			}
			d.tokens.Reset()
			d.windowEnd = 0
			d.fast.Reset()
			return
		}
	}

	d.fast.Encode(&d.tokens, d.window[:d.windowEnd])
	// If we made zero matches, store the block as is.
	if d.tokens.n == 0 {
		d.err = d.writeStoredBlock(d.window[:d.windowEnd])
		// If we removed less than 1/16th, huffman compress the block.
	} else if int(d.tokens.n) > d.windowEnd-(d.windowEnd>>4) {
		d.w.writeBlockHuff(false, d.window[:d.windowEnd], d.sync)
		d.err = d.w.err
	} else {
		d.w.writeBlockDynamic(&d.tokens, false, d.window[:d.windowEnd], d.sync)
		d.err = d.w.err
	}
	d.tokens.Reset()
	d.windowEnd = 0
}
// write will add input byte to the stream.
// Unless an error occurs all bytes will be consumed.
func (d *compressor) write(b []byte) (n int, err error) {
	if d.err != nil {
		return 0, d.err
	}
	n = len(b)
	for len(b) > 0 {
		// Process the window when it is full, or immediately when flushing.
		if d.windowEnd == len(d.window) || d.sync {
			d.step(d)
		}
		b = b[d.fill(d, b):]
		if d.err != nil {
			// NOTE(review): reports 0 consumed even though some bytes may
			// already have been copied into the window — callers only see
			// this after the stream is broken anyway.
			return 0, d.err
		}
	}
	return n, d.err
}
// syncFlush processes all pending input and emits an empty stored block
// (the 4-byte sync marker), flushing the bit writer to a byte boundary.
func (d *compressor) syncFlush() error {
	d.sync = true
	if d.err != nil {
		// d.sync deliberately stays set here; the compressor is unusable
		// once d.err is non-nil.
		return d.err
	}
	d.step(d)
	if d.err == nil {
		// Empty stored block acts as the Z_SYNC_FLUSH marker.
		d.w.writeStoredHeader(0, false)
		d.w.flush()
		d.err = d.w.err
	}
	d.sync = false
	return d.err
}
// init configures the compressor for the given level and output writer.
// Negative levels below -2 select a custom window size (see NewWriterWindow);
// level -1 maps to 5; levels 1-6 use the specialized fast encoders; 7-9 use
// the advanced lazy matcher.
func (d *compressor) init(w io.Writer, level int) (err error) {
	d.w = newHuffmanBitWriter(w)

	switch {
	case level == NoCompression:
		d.window = make([]byte, maxStoreBlockSize)
		d.fill = (*compressor).fillBlock
		d.step = (*compressor).store
	case level == ConstantCompression:
		d.w.logNewTablePenalty = 10
		d.window = make([]byte, 32<<10)
		d.fill = (*compressor).fillBlock
		d.step = (*compressor).storeHuff
	case level == DefaultCompression:
		level = 5
		fallthrough
	case level >= 1 && level <= 6:
		d.w.logNewTablePenalty = 7
		d.fast = newFastEnc(level)
		d.window = make([]byte, maxStoreBlockSize)
		d.fill = (*compressor).fillBlock
		d.step = (*compressor).storeFast
	case 7 <= level && level <= 9:
		d.w.logNewTablePenalty = 8
		d.state = &advancedState{}
		d.compressionLevel = levels[level]
		d.initDeflate()
		d.fill = (*compressor).fillDeflate
		d.step = (*compressor).deflateLazy
	case -level >= MinCustomWindowSize && -level <= MaxCustomWindowSize:
		// Custom window: -level encodes the window size.
		d.w.logNewTablePenalty = 7
		d.fast = &fastEncL5Window{maxOffset: int32(-level), cur: maxStoreBlockSize}
		d.window = make([]byte, maxStoreBlockSize)
		d.fill = (*compressor).fillBlock
		d.step = (*compressor).storeFast
	default:
		return fmt.Errorf("flate: invalid compression level %d: want value in range [-2, 9]", level)
	}
	d.level = level
	return nil
}
// reset the state of the compressor.
func (d *compressor) reset(w io.Writer) {
	d.w.reset(w)
	d.sync = false
	d.err = nil
	// We only need to reset a few things for Snappy.
	if d.fast != nil {
		d.fast.Reset()
		d.windowEnd = 0
		d.tokens.Reset()
		return
	}
	switch d.compressionLevel.chain {
	case 0:
		// level was NoCompression or ConstantCompression.
		d.windowEnd = 0
	default:
		// Advanced levels: clear all hash chains and matching state.
		s := d.state
		s.chainHead = -1
		for i := range s.hashHead {
			s.hashHead[i] = 0
		}
		for i := range s.hashPrev {
			s.hashPrev[i] = 0
		}
		s.hashOffset = 1
		s.index, d.windowEnd = 0, 0
		d.blockStart, d.byteAvailable = 0, false
		d.tokens.Reset()
		s.length = minMatchLength - 1
		s.offset = 0
		s.ii = 0
		s.maxInsertIndex = 0
	}
}
// close flushes all remaining input, writes the final (eof) stored header
// and releases the bit writer. Any earlier error is returned unchanged.
func (d *compressor) close() error {
	if d.err != nil {
		return d.err
	}
	d.sync = true
	d.step(d)
	if d.err != nil {
		return d.err
	}
	// Terminate the stream with an empty final block.
	d.w.writeStoredHeader(0, true)
	if d.w.err != nil {
		return d.w.err
	}
	d.w.flush()
	d.w.reset(nil)
	return d.w.err
}
// NewWriter returns a new Writer compressing data at the given level.
// Following zlib, levels range from 1 (BestSpeed) to 9 (BestCompression);
// higher levels typically run slower but compress more.
// Level 0 (NoCompression) does not attempt any compression; it only adds the
// necessary DEFLATE framing.
// Level -1 (DefaultCompression) uses the default compression level.
// Level -2 (ConstantCompression) will use Huffman compression only, giving
// a very fast compression for all types of input, but sacrificing considerable
// compression efficiency.
//
// If level is in the range [-2, 9] then the error returned will be nil.
// Otherwise the error returned will be non-nil.
func NewWriter(w io.Writer, level int) (*Writer, error) {
	zw := new(Writer)
	if err := zw.d.init(w, level); err != nil {
		return nil, err
	}
	return zw, nil
}
// NewWriterDict is like NewWriter but initializes the new
// Writer with a preset dictionary. The returned Writer behaves
// as if the dictionary had been written to it without producing
// any compressed output. The compressed data written to w
// can only be decompressed by a Reader initialized with the
// same dictionary.
func NewWriterDict(w io.Writer, level int, dict []byte) (*Writer, error) {
	zw, err := NewWriter(w, level)
	if err != nil {
		return nil, err
	}
	// Prime the window, and keep a private copy so Reset can replay it.
	zw.d.fillWindow(dict)
	zw.dict = append(zw.dict, dict...)
	return zw, nil
}
// MinCustomWindowSize is the minimum window size that can be sent to NewWriterWindow.
const MinCustomWindowSize = 32

// MaxCustomWindowSize is the maximum custom window that can be sent to NewWriterWindow.
const MaxCustomWindowSize = windowSize
// NewWriterWindow returns a new Writer compressing data with a custom window size.
// windowSize must be from MinCustomWindowSize to MaxCustomWindowSize.
func NewWriterWindow(w io.Writer, windowSize int) (*Writer, error) {
	if windowSize < MinCustomWindowSize {
		// Name the actual constant being violated (was "MinWindowSize",
		// which does not exist).
		return nil, errors.New("flate: requested window size less than MinCustomWindowSize")
	}
	if windowSize > MaxCustomWindowSize {
		return nil, errors.New("flate: requested window size bigger than MaxCustomWindowSize")
	}
	var dw Writer
	// Negative levels below -2 encode a custom window size; see compressor.init.
	if err := dw.d.init(w, -windowSize); err != nil {
		return nil, err
	}
	return &dw, nil
}
// A Writer takes data written to it and writes the compressed
// form of that data to an underlying writer (see NewWriter).
type Writer struct {
	d    compressor
	dict []byte // preset dictionary, replayed by Reset (see NewWriterDict)
}
// Write writes data to w, which will eventually write the
// compressed form of data to its underlying writer.
func (w *Writer) Write(data []byte) (n int, err error) {
	return w.d.write(data)
}
// Flush flushes any pending data to the underlying writer.
// It is useful mainly in compressed network protocols, to ensure that
// a remote reader has enough data to reconstruct a packet.
// Flush does not return until the data has been written.
// Calling Flush when there is no pending data still causes the Writer
// to emit a sync marker of at least 4 bytes.
// If the underlying writer returns an error, Flush returns that error.
//
// In the terminology of the zlib library, Flush is equivalent to Z_SYNC_FLUSH.
func (w *Writer) Flush() error {
	// For more about flushing:
	// http://www.bolet.org/~pornin/deflate-flush.html
	return w.d.syncFlush()
}
// Close flushes and closes the writer.
func (w *Writer) Close() error {
	return w.d.close()
}
// Reset discards the writer's state and makes it equivalent to
// the result of NewWriter or NewWriterDict called with dst
// and w's level and dictionary.
func (w *Writer) Reset(dst io.Writer) {
	w.d.reset(dst)
	// Replay the preset dictionary, if the Writer was created with one
	// and we have a real destination.
	if len(w.dict) > 0 && dst != nil {
		w.d.fillWindow(w.dict)
	}
}
// ResetDict discards the writer's state and makes it equivalent to
// the result of NewWriter or NewWriterDict called with dst
// and w's level, but sets a specific dictionary.
func (w *Writer) ResetDict(dst io.Writer, dict []byte) {
	// Keep a private copy rather than aliasing the caller's slice, so a
	// later mutation by the caller cannot corrupt subsequent Reset calls.
	// This matches NewWriterDict, which duplicates the dictionary for the
	// same reason.
	w.dict = append(w.dict[:0], dict...)
	w.d.reset(dst)
	w.d.fillWindow(w.dict)
}
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package flate
// dictDecoder implements the LZ77 sliding dictionary as used in decompression.
// LZ77 decompresses data through sequences of two forms of commands:
//
// - Literal insertions: Runs of one or more symbols are inserted into the data
// stream as is. This is accomplished through the writeByte method for a
// single symbol, or combinations of writeSlice/writeMark for multiple symbols.
// Any valid stream must start with a literal insertion if no preset dictionary
// is used.
//
// - Backward copies: Runs of one or more symbols are copied from previously
// emitted data. Backward copies come as the tuple (dist, length) where dist
// determines how far back in the stream to copy from and length determines how
// many bytes to copy. Note that it is valid for the length to be greater than
// the distance. Since LZ77 uses forward copies, that situation is used to
// perform a form of run-length encoding on repeated runs of symbols.
// The writeCopy and tryWriteCopy are used to implement this command.
//
// For performance reasons, this implementation performs little to no sanity
// checks about the arguments. As such, the invariants documented for each
// method call must be respected.
type dictDecoder struct {
hist []byte // Sliding window history
// Invariant: 0 <= rdPos <= wrPos <= len(hist)
wrPos int // Current output position in buffer
rdPos int // Have emitted hist[:rdPos] already
full bool // Has a full window length been written yet?
}
// init initializes dictDecoder to have a sliding window dictionary of the given
// size. If a preset dict is provided, it will initialize the dictionary with
// the contents of dict.
func (dd *dictDecoder) init(size int, dict []byte) {
*dd = dictDecoder{hist: dd.hist}
if cap(dd.hist) < size {
dd.hist = make([]byte, size)
}
dd.hist = dd.hist[:size]
if len(dict) > len(dd.hist) {
dict = dict[len(dict)-len(dd.hist):]
}
dd.wrPos = copy(dd.hist, dict)
if dd.wrPos == len(dd.hist) {
dd.wrPos = 0
dd.full = true
}
dd.rdPos = dd.wrPos
}
// histSize reports the total amount of historical data in the dictionary.
func (dd *dictDecoder) histSize() int {
if dd.full {
return len(dd.hist)
}
return dd.wrPos
}
// availRead reports the number of bytes that can be flushed by readFlush.
func (dd *dictDecoder) availRead() int {
return dd.wrPos - dd.rdPos
}
// availWrite reports the available amount of output buffer space.
func (dd *dictDecoder) availWrite() int {
return len(dd.hist) - dd.wrPos
}
// writeSlice returns a slice of the available buffer to write data to.
//
// This invariant will be kept: len(s) <= availWrite()
func (dd *dictDecoder) writeSlice() []byte {
return dd.hist[dd.wrPos:]
}
// writeMark advances the writer pointer by cnt.
//
// This invariant must be kept: 0 <= cnt <= availWrite()
func (dd *dictDecoder) writeMark(cnt int) {
dd.wrPos += cnt
}
// writeByte writes a single byte to the dictionary.
//
// This invariant must be kept: 0 < availWrite()
func (dd *dictDecoder) writeByte(c byte) {
dd.hist[dd.wrPos] = c
dd.wrPos++
}
// writeCopy copies a string at a given (dist, length) to the output.
// This returns the number of bytes copied and may be less than the requested
// length if the available space in the output buffer is too small.
//
// This invariant must be kept: 0 < dist <= histSize()
func (dd *dictDecoder) writeCopy(dist, length int) int {
dstBase := dd.wrPos
dstPos := dstBase
srcPos := dstPos - dist
endPos := dstPos + length
if endPos > len(dd.hist) {
endPos = len(dd.hist)
}
// Copy non-overlapping section after destination position.
//
// This section is non-overlapping in that the copy length for this section
// is always less than or equal to the backwards distance. This can occur
// if a distance refers to data that wraps-around in the buffer.
// Thus, a backwards copy is performed here; that is, the exact bytes in
// the source prior to the copy is placed in the destination.
if srcPos < 0 {
srcPos += len(dd.hist)
dstPos += copy(dd.hist[dstPos:endPos], dd.hist[srcPos:])
srcPos = 0
}
// Copy possibly overlapping section before destination position.
//
// This section can overlap if the copy length for this section is larger
// than the backwards distance. This is allowed by LZ77 so that repeated
// strings can be succinctly represented using (dist, length) pairs.
// Thus, a forwards copy is performed here; that is, the bytes copied is
// possibly dependent on the resulting bytes in the destination as the copy
// progresses along. This is functionally equivalent to the following:
//
// for i := 0; i < endPos-dstPos; i++ {
// dd.hist[dstPos+i] = dd.hist[srcPos+i]
// }
// dstPos = endPos
//
for dstPos < endPos {
dstPos += copy(dd.hist[dstPos:endPos], dd.hist[srcPos:dstPos])
}
dd.wrPos = dstPos
return dstPos - dstBase
}
// tryWriteCopy tries to copy a string at a given (distance, length) to the
// output. This specialized version is optimized for short distances.
//
// It returns 0 (copying nothing) when the source would wrap around or the
// copy would not fit before the buffer end; the caller then falls back to
// the general writeCopy.
//
// This method is designed to be inlined for performance reasons.
//
// This invariant must be kept: 0 < dist <= histSize()
func (dd *dictDecoder) tryWriteCopy(dist, length int) int {
	dstPos := dd.wrPos
	endPos := dstPos + length
	if dstPos < dist || endPos > len(dd.hist) {
		return 0
	}
	dstBase := dstPos
	srcPos := dstPos - dist
	// Copy possibly overlapping section before destination position.
loop:
	dstPos += copy(dd.hist[dstPos:endPos], dd.hist[srcPos:dstPos])
	if dstPos < endPos {
		goto loop // Avoid for-loop so that this function can be inlined
	}
	dd.wrPos = dstPos
	return dstPos - dstBase
}
// readFlush returns a slice of the historical buffer that is ready to be
// emitted to the user. The data returned by readFlush must be fully consumed
// before calling any other dictDecoder methods.
func (dd *dictDecoder) readFlush() []byte {
	out := dd.hist[dd.rdPos:dd.wrPos]
	dd.rdPos = dd.wrPos
	if dd.wrPos == len(dd.hist) {
		// Buffer is exhausted: wrap to the start and mark the history full.
		dd.wrPos = 0
		dd.rdPos = 0
		dd.full = true
	}
	return out
}
// Copyright 2011 The Snappy-Go Authors. All rights reserved.
// Modified for deflate by Klaus Post (c) 2015.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package flate
import (
"fmt"
"math/bits"
"github.com/klauspost/compress/internal/le"
)
// fastEnc is the interface implemented by the level 1-6 fast encoders.
// Encode appends tokens for src to dst; Reset clears history between streams.
type fastEnc interface {
	Encode(dst *tokens, src []byte)
	Reset()
}
// newFastEnc returns the fast encoder for compression levels 1 through 6.
// It panics on any other level.
func newFastEnc(level int) fastEnc {
	// All encoders start with the same base state.
	base := fastGen{cur: maxStoreBlockSize}
	switch level {
	case 1:
		return &fastEncL1{fastGen: base}
	case 2:
		return &fastEncL2{fastGen: base}
	case 3:
		return &fastEncL3{fastGen: base}
	case 4:
		return &fastEncL4{fastGen: base}
	case 5:
		return &fastEncL5{fastGen: base}
	case 6:
		return &fastEncL6{fastGen: base}
	default:
		panic("invalid level specified")
	}
}
const (
	tableBits       = 15                             // Bits used in the table
	tableSize       = 1 << tableBits                 // Size of the table
	tableShift      = 32 - tableBits                 // Right-shift to get the tableBits most significant bits of a uint32.
	baseMatchOffset = 1                              // The smallest match offset
	baseMatchLength = 3                              // The smallest match length per the RFC section 3.2.5
	maxMatchOffset  = 1 << 15                        // The largest match offset
	bTableBits      = 17                             // Bits used in the big tables
	bTableSize      = 1 << bTableBits                // Size of the table
	allocHistory    = maxStoreBlockSize * 5          // Size to preallocate for history.
	// Reset the buffer offset when reaching this, so cur never overflows int32.
	bufferReset = (1 << 31) - allocHistory - maxStoreBlockSize - 1
)
// Primes used for multiplicative hashing of the lowest N bytes of a value
// (see hashLen and hash7 below).
const (
	prime3bytes = 506832829
	prime4bytes = 2654435761
	prime5bytes = 889523592379
	prime6bytes = 227718039650203
	prime7bytes = 58295818150454627
	prime8bytes = 0xcf1bbcdcb7a56463
)
// load3232 loads 4 bytes from b at offset i as a uint32
// (little-endian, via the le helper package).
func load3232(b []byte, i int32) uint32 {
	return le.Load32(b, i)
}
// load6432 loads 8 bytes from b at offset i as a uint64
// (little-endian, via the le helper package).
func load6432(b []byte, i int32) uint64 {
	return le.Load64(b, i)
}
// tableEntry is a single hash-table slot holding an absolute offset
// (relative to fastGen.cur) of a previously seen position.
type tableEntry struct {
	offset int32
}
// fastGen maintains the table for matches,
// and the previous byte block for level 2.
// This is the generic implementation.
type fastGen struct {
	hist []byte // concatenated history of recent input blocks
	cur  int32  // offset of hist[0] in the conceptual infinite stream
}
// addBlock appends src to the history buffer and returns the offset at which
// src now starts within e.hist. When the buffer would overflow its capacity,
// the most recent maxMatchOffset bytes are slid down to the front and e.cur
// is advanced so previously stored table offsets remain valid.
func (e *fastGen) addBlock(src []byte) int32 {
	// check if we have space already
	if len(e.hist)+len(src) > cap(e.hist) {
		if cap(e.hist) == 0 {
			e.hist = make([]byte, 0, allocHistory)
		} else {
			if cap(e.hist) < maxMatchOffset*2 {
				panic("unexpected buffer size")
			}
			// Move down
			offset := int32(len(e.hist)) - maxMatchOffset
			// copy(e.hist[0:maxMatchOffset], e.hist[offset:])
			// Array-pointer copy compiles to a single fixed-size move.
			*(*[maxMatchOffset]byte)(e.hist) = *(*[maxMatchOffset]byte)(e.hist[offset:])
			e.cur += offset
			e.hist = e.hist[:maxMatchOffset]
		}
	}
	s := int32(len(e.hist))
	e.hist = append(e.hist, src...)
	return s
}
// tableEntryPrev is a two-deep hash-table slot: the current entry and the
// entry it displaced, allowing two match candidates per hash.
type tableEntryPrev struct {
	Cur  tableEntry
	Prev tableEntry
}
// hash7 returns the hash of the lowest 7 bytes of u to fit in a hash table with h bits.
// Preferably h should be a constant and should always be <64.
// The "& reg8SizeMask64" keeps the variable shift in range so the compiler
// can omit the shift-amount check.
func hash7(u uint64, h uint8) uint32 {
	return uint32(((u << (64 - 56)) * prime7bytes) >> ((64 - h) & reg8SizeMask64))
}
// hashLen returns a hash of the lowest mls bytes of with length output bits.
// mls must be >=3 and <=8. Any other value will return hash for 4 bytes.
// length should always be < 32.
// Preferably length and mls should be a constant for inlining.
func hashLen(u uint64, length, mls uint8) uint32 {
switch mls {
case 3:
return (uint32(u<<8) * prime3bytes) >> (32 - length)
case 5:
return uint32(((u << (64 - 40)) * prime5bytes) >> (64 - length))
case 6:
return uint32(((u << (64 - 48)) * prime6bytes) >> (64 - length))
case 7:
return uint32(((u << (64 - 56)) * prime7bytes) >> (64 - length))
case 8:
return uint32((u * prime8bytes) >> (64 - length))
default:
return (uint32(u) * prime4bytes) >> (32 - length)
}
}
// matchlen will return the match length between offsets s and t in src.
// The maximum length returned is maxMatchLength - 4.
// It is assumed that s > t, that t >=0 and s < len(src).
func (e *fastGen) matchlen(s, t int, src []byte) int32 {
	if debugDeflate {
		if t >= s {
			panic(fmt.Sprint("t >=s:", t, s))
		}
		if int(s) >= len(src) {
			panic(fmt.Sprint("s >= len(src):", s, len(src)))
		}
		if t < 0 {
			panic(fmt.Sprint("t < 0:", t))
		}
		if s-t > maxMatchOffset {
			panic(fmt.Sprint(s, "-", t, "(", s-t, ") > maxMatchLength (", maxMatchOffset, ")"))
		}
	}
	// Limit the comparison window to the shorter of the remaining input
	// and the maximum representable match length.
	s1 := min(s+maxMatchLength-4, len(src))
	left := s1 - s
	n := int32(0)
	// Compare 8 bytes at a time; the first differing byte is found from the
	// trailing zero count of the XOR (little-endian loads).
	for left >= 8 {
		diff := le.Load64(src, s) ^ le.Load64(src, t)
		if diff != 0 {
			return n + int32(bits.TrailingZeros64(diff)>>3)
		}
		s += 8
		t += 8
		n += 8
		left -= 8
	}
	// Byte-wise tail (fewer than 8 bytes remain).
	a := src[s:s1]
	b := src[t:]
	for i := range a {
		if a[i] != b[i] {
			break
		}
		n++
	}
	return n
}
// matchlenLong will return the match length between offsets s and t in src.
// Unlike matchlen it is not capped at maxMatchLength-4 and extends to the
// end of src.
// It is assumed that s > t, that t >=0 and s < len(src).
func (e *fastGen) matchlenLong(s, t int, src []byte) int32 {
	if debugDeflate {
		if t >= s {
			panic(fmt.Sprint("t >=s:", t, s))
		}
		if int(s) >= len(src) {
			panic(fmt.Sprint("s >= len(src):", s, len(src)))
		}
		if t < 0 {
			panic(fmt.Sprint("t < 0:", t))
		}
		if s-t > maxMatchOffset {
			panic(fmt.Sprint(s, "-", t, "(", s-t, ") > maxMatchLength (", maxMatchOffset, ")"))
		}
	}
	// Extend the match to be as long as possible.
	left := len(src) - s
	n := int32(0)
	// Compare 8 bytes at a time; first mismatch located via trailing zeros.
	for left >= 8 {
		diff := le.Load64(src, s) ^ le.Load64(src, t)
		if diff != 0 {
			return n + int32(bits.TrailingZeros64(diff)>>3)
		}
		s += 8
		t += 8
		n += 8
		left -= 8
	}
	// Byte-wise tail.
	a := src[s:]
	b := src[t:]
	for i := range a {
		if a[i] != b[i] {
			break
		}
		n++
	}
	return n
}
// Reset the encoding table.
// The history buffer is emptied and e.cur is advanced past maxMatchOffset so
// that every stale table entry becomes unreachable without clearing tables.
func (e *fastGen) Reset() {
	if cap(e.hist) < allocHistory {
		e.hist = make([]byte, 0, allocHistory)
	}
	// We offset current position so everything will be out of reach.
	// If we are above the buffer reset it will be cleared anyway since len(hist) == 0.
	if e.cur <= bufferReset {
		e.cur += maxMatchOffset + int32(len(e.hist))
	}
	e.hist = e.hist[:0]
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package flate
import (
"fmt"
"io"
"math"
"github.com/klauspost/compress/internal/le"
)
const (
	// The largest offset code.
	offsetCodeCount = 30
	// The special code used to mark the end of a block.
	endBlockMarker = 256
	// The first length code.
	lengthCodesStart = 257
	// The number of codegen codes.
	codegenCodeCount = 19
	badCode          = 255
	// maxPredefinedTokens is the maximum number of tokens
	// where we check if fixed size is smaller.
	maxPredefinedTokens = 250
	// bufferFlushSize indicates the buffer size
	// after which bytes are flushed to the writer.
	// Should preferably be a multiple of 6, since
	// we accumulate 6 bytes between writes to the buffer.
	bufferFlushSize = 246
)

// Minimum length code that emits bits.
const lengthExtraBitsMinCode = 8

// The number of extra bits needed by length code X - LENGTH_CODES_START.
var lengthExtraBits = [32]uint8{
	/* 257 */ 0, 0, 0,
	/* 260 */ 0, 0, 0, 0, 0, 1, 1, 1, 1, 2,
	/* 270 */ 2, 2, 2, 3, 3, 3, 3, 4, 4, 4,
	/* 280 */ 4, 5, 5, 5, 5, 0,
}

// The length indicated by length code X - LENGTH_CODES_START.
var lengthBase = [32]uint8{
	0, 1, 2, 3, 4, 5, 6, 7, 8, 10,
	12, 14, 16, 20, 24, 28, 32, 40, 48, 56,
	64, 80, 96, 112, 128, 160, 192, 224, 255,
}

// Minimum offset code that emits bits.
const offsetExtraBitsMinCode = 4

// offset code word extra bits.
var offsetExtraBits = [32]int8{
	0, 0, 0, 0, 1, 1, 2, 2, 3, 3,
	4, 4, 5, 5, 6, 6, 7, 7, 8, 8,
	9, 9, 10, 10, 11, 11, 12, 12, 13, 13,
	/* extended window */
	14, 14,
}

var offsetCombined = [32]uint32{}
// init packs each offset code's extra-bit count (low 8 bits) and base offset
// (upper bits) into offsetCombined, so writeTokens can fetch both with one load.
func init() {
	var offsetBase = [32]uint32{
		/* normal deflate */
		0x000000, 0x000001, 0x000002, 0x000003, 0x000004,
		0x000006, 0x000008, 0x00000c, 0x000010, 0x000018,
		0x000020, 0x000030, 0x000040, 0x000060, 0x000080,
		0x0000c0, 0x000100, 0x000180, 0x000200, 0x000300,
		0x000400, 0x000600, 0x000800, 0x000c00, 0x001000,
		0x001800, 0x002000, 0x003000, 0x004000, 0x006000,
		/* extended window */
		0x008000, 0x00c000,
	}
	for i := range offsetCombined[:] {
		// Don't use extended window values...
		if offsetExtraBits[i] == 0 || offsetBase[i] > 0x006000 {
			continue
		}
		offsetCombined[i] = uint32(offsetExtraBits[i]) | (offsetBase[i] << 8)
	}
}
// The odd order in which the codegen code sizes are written (RFC 1951, 3.2.7).
var codegenOrder = []uint32{16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15}
// huffmanBitWriter writes DEFLATE blocks, accumulating bits in a small
// buffer before flushing whole bytes to the underlying writer.
type huffmanBitWriter struct {
	// writer is the underlying writer.
	// Do not use it directly; use the write method, which ensures
	// that Write errors are sticky.
	writer io.Writer

	// Data waiting to be written is bytes[0:nbytes]
	// and then the low nbits of bits.
	bits            uint64
	nbits           uint8
	nbytes          uint8
	lastHuffMan     bool
	literalEncoding *huffmanEncoder
	tmpLitEncoding  *huffmanEncoder
	offsetEncoding  *huffmanEncoder
	codegenEncoding *huffmanEncoder
	err             error
	lastHeader      int
	// logNewTablePenalty is the penalty applied to the estimated size of a
	// new table, expressed as a right-shift of that estimate added on top.
	// 0 means a reused block can be up to 2x the optimal size.
	logNewTablePenalty uint
	bytes              [256 + 8]byte
	literalFreq        [lengthCodesStart + 32]uint16
	offsetFreq         [32]uint16
	codegenFreq        [codegenCodeCount]uint16
	// codegen must have an extra space for the final symbol.
	codegen [literalCount + offsetCodeCount + 1]uint8
}
// Huffman reuse.
//
// The huffmanBitWriter supports reusing huffman tables and thereby combining block sections.
//
// This is controlled by several variables:
//
// If lastHeader is non-zero the Huffman table can be reused.
// This also indicates that a Huffman table has been generated that can output all
// possible symbols.
// It also indicates that an EOB has not yet been emitted, so if a new table is generated
// an EOB with the previous table must be written.
//
// If lastHuffMan is set, a table for outputting literals has been generated and offsets are invalid.
//
// An incoming block estimates the output size of a 'fresh' table by calculating the
// optimal size and adding a penalty in 'logNewTablePenalty'.
// A Huffman table is not optimal, which is why we add a penalty, and generating a new table
// is slower both for compression and decompression.
// newHuffmanBitWriter returns a writer targeting w with freshly allocated
// encoders for literals, offsets and the codegen alphabet.
func newHuffmanBitWriter(w io.Writer) *huffmanBitWriter {
	return &huffmanBitWriter{
		writer:          w,
		literalEncoding: newHuffmanEncoder(literalCount),
		tmpLitEncoding:  newHuffmanEncoder(literalCount),
		codegenEncoding: newHuffmanEncoder(codegenCodeCount),
		offsetEncoding:  newHuffmanEncoder(offsetCodeCount),
	}
}
// reset points w at a new underlying writer and clears all buffered bits,
// the sticky error, and any pending table-reuse state.
func (w *huffmanBitWriter) reset(writer io.Writer) {
	w.writer = writer
	w.bits = 0
	w.nbits = 0
	w.nbytes = 0
	w.err = nil
	w.lastHeader = 0
	w.lastHuffMan = false
}
// canReuse reports whether the current encodings can represent every symbol
// that occurs in t: a symbol with a non-zero frequency must have a non-zero
// code in the corresponding table.
func (w *huffmanBitWriter) canReuse(t *tokens) (ok bool) {
	// covered reports whether every used symbol in freq has a code.
	covered := func(freq []uint16, codes []hcode) bool {
		for i, v := range freq {
			if v != 0 && codes[i].zero() {
				return false
			}
		}
		return true
	}
	return covered(t.offHist[:offsetCodeCount], w.offsetEncoding.codes[:offsetCodeCount]) &&
		covered(t.extraHist[:literalCount-256], w.literalEncoding.codes[256:literalCount]) &&
		covered(t.litHist[:256], w.literalEncoding.codes[:256])
}
// flush writes any pending EOB and all buffered bits (padding the final
// partial byte with zeros) to the underlying writer.
func (w *huffmanBitWriter) flush() {
	if w.err != nil {
		w.nbits = 0
		return
	}
	if w.lastHeader > 0 {
		// We owe an EOB
		w.writeCode(w.literalEncoding.codes[endBlockMarker])
		w.lastHeader = 0
	}
	n := w.nbytes
	for w.nbits != 0 {
		w.bytes[n] = byte(w.bits)
		w.bits >>= 8
		if w.nbits > 8 { // Avoid underflow
			w.nbits -= 8
		} else {
			// Final partial byte: remaining high bits are zero padding.
			w.nbits = 0
		}
		n++
	}
	w.bits = 0
	w.write(w.bytes[:n])
	w.nbytes = 0
}
// write forwards b to the underlying writer, making any Write error sticky.
func (w *huffmanBitWriter) write(b []byte) {
	if w.err == nil {
		_, w.err = w.writer.Write(b)
	}
}
// writeBits appends the low nb bits of b to the bit buffer,
// flushing to the byte buffer once at least 48 bits are pending.
func (w *huffmanBitWriter) writeBits(b int32, nb uint8) {
	// "& 63" keeps the shift in range so the compiler drops the bounds check.
	w.bits |= uint64(b) << (w.nbits & 63)
	w.nbits += nb
	if w.nbits >= 48 {
		w.writeOutBits()
	}
}
// writeBytes drains the (byte-aligned) bit buffer and then writes bytes
// directly to the underlying writer. It is an internal error to call this
// while a partial byte of bits is pending.
func (w *huffmanBitWriter) writeBytes(bytes []byte) {
	if w.err != nil {
		return
	}
	n := w.nbytes
	if w.nbits&7 != 0 {
		w.err = InternalError("writeBytes with unfinished bits")
		return
	}
	for w.nbits != 0 {
		w.bytes[n] = byte(w.bits)
		w.bits >>= 8
		w.nbits -= 8
		n++
	}
	if n != 0 {
		w.write(w.bytes[:n])
	}
	w.nbytes = 0
	w.write(bytes)
}
// RFC 1951 3.2.7 specifies a special run-length encoding for specifying
// the literal and offset lengths arrays (which are concatenated into a single
// array). This method generates that run-length encoding.
//
// The result is written into the codegen array, and the frequencies
// of each code is written into the codegenFreq array.
// Codes 0-15 are single byte codes. Codes 16-18 are followed by additional
// information. Code badCode is an end marker
//
//	numLiterals    The number of literals in literalEncoding
//	numOffsets     The number of offsets in offsetEncoding
//	litEnc, offEnc The literal and offset encoder to use
func (w *huffmanBitWriter) generateCodegen(numLiterals int, numOffsets int, litEnc, offEnc *huffmanEncoder) {
	for i := range w.codegenFreq {
		w.codegenFreq[i] = 0
	}
	// Note that we are using codegen both as a temporary variable for holding
	// a copy of the frequencies, and as the place where we put the result.
	// This is fine because the output is always shorter than the input used
	// so far.
	codegen := w.codegen[:] // cache
	// Copy the concatenated code sizes to codegen. Put a marker at the end.
	cgnl := codegen[:numLiterals]
	for i := range cgnl {
		cgnl[i] = litEnc.codes[i].len()
	}
	cgnl = codegen[numLiterals : numLiterals+numOffsets]
	for i := range cgnl {
		cgnl[i] = offEnc.codes[i].len()
	}
	codegen[numLiterals+numOffsets] = badCode
	size := codegen[0]
	count := 1
	outIndex := 0
	for inIndex := 1; size != badCode; inIndex++ {
		// INVARIANT: We have seen "count" copies of size that have not yet
		// had output generated for them.
		nextSize := codegen[inIndex]
		if nextSize == size {
			count++
			continue
		}
		// We need to generate codegen indicating "count" of size.
		if size != 0 {
			// Emit the size once literally, then runs of code 16
			// ("repeat previous") for the remainder.
			codegen[outIndex] = size
			outIndex++
			w.codegenFreq[size]++
			count--
			for count >= 3 {
				n := 6 // code 16 can repeat 3-6 times
				if n > count {
					n = count
				}
				codegen[outIndex] = 16
				outIndex++
				codegen[outIndex] = uint8(n - 3)
				outIndex++
				w.codegenFreq[16]++
				count -= n
			}
		} else {
			// Zero lengths use code 18 (11-138 zeros) then code 17 (3-10).
			for count >= 11 {
				n := 138
				if n > count {
					n = count
				}
				codegen[outIndex] = 18
				outIndex++
				codegen[outIndex] = uint8(n - 11)
				outIndex++
				w.codegenFreq[18]++
				count -= n
			}
			if count >= 3 {
				// count >= 3 && count <= 10
				codegen[outIndex] = 17
				outIndex++
				codegen[outIndex] = uint8(count - 3)
				outIndex++
				w.codegenFreq[17]++
				count = 0
			}
		}
		count--
		// Any leftovers (runs too short to encode) are emitted literally.
		for ; count >= 0; count-- {
			codegen[outIndex] = size
			outIndex++
			w.codegenFreq[size]++
		}
		// Set up invariant for next time through the loop.
		size = nextSize
		count = 1
	}
	// Marker indicating the end of the codegen.
	codegen[outIndex] = badCode
}
// codegens returns how many codegen code lengths must be transmitted:
// trailing zero-frequency codes (in codegenOrder) are trimmed, but at
// least 4 are always kept.
func (w *huffmanBitWriter) codegens() int {
	n := len(w.codegenFreq)
	for n > 4 && w.codegenFreq[codegenOrder[n-1]] == 0 {
		n--
	}
	return n
}
// headerSize returns the size, in bits, of the dynamic-table header
// (RFC 1951, 3.2.7) together with the number of codegen codes used,
// without writing anything.
//
// The trailing-zero trimming previously duplicated here is delegated to
// codegens(), keeping the two call sites consistent.
func (w *huffmanBitWriter) headerSize() (size, numCodegens int) {
	numCodegens = w.codegens()
	// 3 block-type bits + 5 HLIT + 5 HDIST + 4 HCLEN + 3 bits per codegen
	// length, plus the RLE-coded table itself and its extra bits.
	return 3 + 5 + 5 + 4 + (3 * numCodegens) +
		w.codegenEncoding.bitLength(w.codegenFreq[:]) +
		int(w.codegenFreq[16])*2 +
		int(w.codegenFreq[17])*3 +
		int(w.codegenFreq[18])*7, numCodegens
}
// dynamicReuseSize returns the size, in bits, of the data encoded with the
// supplied encoders, excluding any header cost (used when reusing tables).
func (w *huffmanBitWriter) dynamicReuseSize(litEnc, offEnc *huffmanEncoder) (size int) {
	return litEnc.bitLength(w.literalFreq[:]) + offEnc.bitLength(w.offsetFreq[:])
}
// dynamicSize returns the total size, in bits, of the dynamically encoded
// block: header, Huffman-coded data and extra bits.
func (w *huffmanBitWriter) dynamicSize(litEnc, offEnc *huffmanEncoder, extraBits int) (size, numCodegens int) {
	header, numCodegens := w.headerSize()
	size = header +
		litEnc.bitLength(w.literalFreq[:]) +
		offEnc.bitLength(w.offsetFreq[:]) +
		extraBits
	return size, numCodegens
}
// extraBitSize will return the number of bits that will be written
// as "extra" bits on matches, summed over all length and offset codes
// with their current frequencies.
func (w *huffmanBitWriter) extraBitSize() int {
	sum := 0
	for code, freq := range w.literalFreq[257:literalCount] {
		sum += int(freq) * int(lengthExtraBits[code&31])
	}
	for code, freq := range w.offsetFreq[:offsetCodeCount] {
		sum += int(freq) * int(offsetExtraBits[code&31])
	}
	return sum
}
// fixedSize returns the size, in bits, of the block when encoded with the
// fixed (predefined) Huffman tables, including the 3 header bits.
func (w *huffmanBitWriter) fixedSize(extraBits int) int {
	return 3 +
		fixedLiteralEncoding.bitLength(w.literalFreq[:]) +
		fixedOffsetEncoding.bitLength(w.offsetFreq[:]) +
		extraBits
}
// storedSize calculates the stored size, including header.
// The function returns the size in bits and whether the block
// fits inside a single stored block. A nil input reports (0, false).
func (w *huffmanBitWriter) storedSize(in []byte) (int, bool) {
	if in != nil && len(in) <= maxStoreBlockSize {
		// 5 header bytes plus the raw data, in bits.
		return (len(in) + 5) * 8, true
	}
	return 0, false
}
// writeCode appends the Huffman code c to the bit buffer, flushing once at
// least 48 bits are pending.
func (w *huffmanBitWriter) writeCode(c hcode) {
	// The function does not get inlined if we "& 63" the shift.
	w.bits |= c.code64() << (w.nbits & 63)
	w.nbits += c.len()
	if w.nbits >= 48 {
		w.writeOutBits()
	}
}
// writeOutBits will write bits to the buffer.
// It moves 48 bits (6 bytes) from the bit accumulator into w.bytes and
// flushes w.bytes to the writer when it nears capacity.
func (w *huffmanBitWriter) writeOutBits() {
	bits := w.bits
	w.bits >>= 48
	w.nbits -= 48
	n := w.nbytes
	// We over-write, but faster...
	// (stores 8 bytes; only the low 6 are accounted for, the rest is
	// overwritten by the next store — w.bytes has 8 spare bytes for this)
	le.Store64(w.bytes[n:], bits)
	n += 6
	if n >= bufferFlushSize {
		if w.err != nil {
			// Error is sticky; drop the buffered data.
			n = 0
			return
		}
		w.write(w.bytes[:n])
		n = 0
	}
	w.nbytes = n
}
// Write the header of a dynamic Huffman block to the output stream.
//
//	numLiterals  The number of literals specified in codegen
//	numOffsets   The number of offsets specified in codegen
//	numCodegens  The number of codegens used in codegen
func (w *huffmanBitWriter) writeDynamicHeader(numLiterals int, numOffsets int, numCodegens int, isEof bool) {
	if w.err != nil {
		return
	}
	// First 3 bits: BFINAL flag (bit 0) plus block type 10 (dynamic).
	var firstBits int32 = 4
	if isEof {
		firstBits = 5
	}
	w.writeBits(firstBits, 3)
	// HLIT, HDIST, HCLEN fields per RFC 1951 3.2.7.
	w.writeBits(int32(numLiterals-257), 5)
	w.writeBits(int32(numOffsets-1), 5)
	w.writeBits(int32(numCodegens-4), 4)
	// Codegen code lengths, 3 bits each, in the special codegenOrder.
	for i := 0; i < numCodegens; i++ {
		value := uint(w.codegenEncoding.codes[codegenOrder[i]].len())
		w.writeBits(int32(value), 3)
	}
	// The RLE-coded table itself; codes 16/17/18 carry extra repeat bits.
	i := 0
	for {
		var codeWord = uint32(w.codegen[i])
		i++
		if codeWord == badCode {
			break
		}
		w.writeCode(w.codegenEncoding.codes[codeWord])
		switch codeWord {
		case 16:
			w.writeBits(int32(w.codegen[i]), 2)
			i++
		case 17:
			w.writeBits(int32(w.codegen[i]), 3)
			i++
		case 18:
			w.writeBits(int32(w.codegen[i]), 7)
			i++
		}
	}
}
// writeStoredHeader will write a stored header.
// If the stored block is only used for EOF,
// it is replaced with a fixed huffman block.
func (w *huffmanBitWriter) writeStoredHeader(length int, isEof bool) {
	if w.err != nil {
		return
	}
	if w.lastHeader > 0 {
		// We owe an EOB
		w.writeCode(w.literalEncoding.codes[endBlockMarker])
		w.lastHeader = 0
	}
	// To write EOF, use a fixed encoding block. 10 bits instead of 5 bytes.
	if length == 0 && isEof {
		w.writeFixedHeader(isEof)
		// EOB: 7 bits, value: 0
		w.writeBits(0, 7)
		w.flush()
		return
	}
	// 3 header bits: BFINAL plus block type 00 (stored).
	var flag int32
	if isEof {
		flag = 1
	}
	w.writeBits(flag, 3)
	// Stored blocks are byte-aligned: flush, then LEN and ~LEN (RFC 1951 3.2.4).
	w.flush()
	w.writeBits(int32(length), 16)
	w.writeBits(int32(^uint16(length)), 16)
}
// writeFixedHeader writes the 3-bit header of a fixed Huffman block,
// emitting any owed EOB for a previous reused table first.
func (w *huffmanBitWriter) writeFixedHeader(isEof bool) {
	if w.err != nil {
		return
	}
	if w.lastHeader > 0 {
		// We owe an EOB
		w.writeCode(w.literalEncoding.codes[endBlockMarker])
		w.lastHeader = 0
	}
	// Indicate that we are a fixed Huffman block
	var value int32 = 2
	if isEof {
		value = 3 // BFINAL set
	}
	w.writeBits(value, 3)
}
// writeBlock will write a block of tokens with the smallest encoding.
// The original input can be supplied, and if the huffman encoded data
// is larger than the original bytes, the data will be written as a
// stored block.
// If the input is nil, the tokens will always be Huffman encoded.
func (w *huffmanBitWriter) writeBlock(tokens *tokens, eof bool, input []byte) {
	if w.err != nil {
		return
	}
	tokens.AddEOB()
	if w.lastHeader > 0 {
		// We owe an EOB
		w.writeCode(w.literalEncoding.codes[endBlockMarker])
		w.lastHeader = 0
	}
	numLiterals, numOffsets := w.indexTokens(tokens, false)
	w.generate()
	var extraBits int
	storedSize, storable := w.storedSize(input)
	if storable {
		extraBits = w.extraBitSize()
	}
	// Figure out smallest code.
	// Fixed Huffman baseline.
	var literalEncoding = fixedLiteralEncoding
	var offsetEncoding = fixedOffsetEncoding
	var size = math.MaxInt32
	if tokens.n < maxPredefinedTokens {
		size = w.fixedSize(extraBits)
	}
	// Dynamic Huffman?
	var numCodegens int
	// Generate codegen and codegenFrequencies, which indicates how to encode
	// the literalEncoding and the offsetEncoding.
	w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, w.offsetEncoding)
	w.codegenEncoding.generate(w.codegenFreq[:], 7)
	dynamicSize, numCodegens := w.dynamicSize(w.literalEncoding, w.offsetEncoding, extraBits)
	if dynamicSize < size {
		size = dynamicSize
		literalEncoding = w.literalEncoding
		offsetEncoding = w.offsetEncoding
	}
	// Stored bytes?
	if storable && storedSize <= size {
		w.writeStoredHeader(len(input), eof)
		w.writeBytes(input)
		return
	}
	// Huffman.
	if literalEncoding == fixedLiteralEncoding {
		w.writeFixedHeader(eof)
	} else {
		w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof)
	}
	// Write the tokens.
	w.writeTokens(tokens.Slice(), literalEncoding.codes, offsetEncoding.codes)
}
// writeBlockDynamic encodes a block using a dynamic Huffman table.
// This should be used if the symbols used have a disproportionate
// histogram distribution.
// If input is supplied and the compression savings are below 1/16th of the
// input size the block is stored.
//
// When sync is false the table may be kept open (lastHeader > 0) so the next
// block can reuse it without writing a new header.
func (w *huffmanBitWriter) writeBlockDynamic(tokens *tokens, eof bool, input []byte, sync bool) {
	if w.err != nil {
		return
	}
	sync = sync || eof
	if sync {
		tokens.AddEOB()
	}
	// We cannot reuse pure huffman table, and must mark as EOF.
	if (w.lastHuffMan || eof) && w.lastHeader > 0 {
		// We will not try to reuse.
		w.writeCode(w.literalEncoding.codes[endBlockMarker])
		w.lastHeader = 0
		w.lastHuffMan = false
	}
	// fillReuse enables filling of empty values.
	// This will make encodings always reusable without testing.
	// However, this does not appear to benefit on most cases.
	const fillReuse = false
	// Check if we can reuse...
	if !fillReuse && w.lastHeader > 0 && !w.canReuse(tokens) {
		// Close the previous block; a new table is required.
		w.writeCode(w.literalEncoding.codes[endBlockMarker])
		w.lastHeader = 0
	}
	numLiterals, numOffsets := w.indexTokens(tokens, !sync)
	extraBits := 0
	ssize, storable := w.storedSize(input)
	const usePrefs = true
	if storable || w.lastHeader > 0 {
		extraBits = w.extraBitSize()
	}
	var size int
	// Check if we should reuse.
	if w.lastHeader > 0 {
		// Estimate size for using a new table.
		// Use the previous header size as the best estimate.
		newSize := w.lastHeader + tokens.EstimatedBits()
		newSize += int(w.literalEncoding.codes[endBlockMarker].len()) + newSize>>w.logNewTablePenalty
		// The estimated size is calculated as an optimal table.
		// We add a penalty to make it more realistic and re-use a bit more.
		reuseSize := w.dynamicReuseSize(w.literalEncoding, w.offsetEncoding) + extraBits
		// Check if a new table is better.
		if newSize < reuseSize {
			// Write the EOB we owe.
			w.writeCode(w.literalEncoding.codes[endBlockMarker])
			size = newSize
			w.lastHeader = 0
		} else {
			size = reuseSize
		}
		// For small blocks, also consider the predefined (fixed) tables.
		if tokens.n < maxPredefinedTokens {
			if preSize := w.fixedSize(extraBits) + 7; usePrefs && preSize < size {
				// Check if we get a reasonable size decrease.
				if storable && ssize <= size {
					w.writeStoredHeader(len(input), eof)
					w.writeBytes(input)
					return
				}
				w.writeFixedHeader(eof)
				if !sync {
					tokens.AddEOB()
				}
				w.writeTokens(tokens.Slice(), fixedLiteralEncoding.codes, fixedOffsetEncoding.codes)
				return
			}
		}
		// Check if we get a reasonable size decrease.
		if storable && ssize <= size {
			w.writeStoredHeader(len(input), eof)
			w.writeBytes(input)
			return
		}
	}
	// We want a new block/table
	if w.lastHeader == 0 {
		if fillReuse && !sync {
			w.fillTokens()
			numLiterals, numOffsets = maxNumLit, maxNumDist
		} else {
			// Make sure EOB is representable in the new table.
			w.literalFreq[endBlockMarker] = 1
		}
		w.generate()
		// Generate codegen and codegenFrequencies, which indicates how to encode
		// the literalEncoding and the offsetEncoding.
		w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, w.offsetEncoding)
		w.codegenEncoding.generate(w.codegenFreq[:], 7)
		var numCodegens int
		if fillReuse && !sync {
			// Reindex for accurate size...
			w.indexTokens(tokens, true)
		}
		size, numCodegens = w.dynamicSize(w.literalEncoding, w.offsetEncoding, extraBits)
		// Store predefined, if we don't get a reasonable improvement.
		if tokens.n < maxPredefinedTokens {
			if preSize := w.fixedSize(extraBits); usePrefs && preSize <= size {
				// Store bytes, if we don't get an improvement.
				if storable && ssize <= preSize {
					w.writeStoredHeader(len(input), eof)
					w.writeBytes(input)
					return
				}
				w.writeFixedHeader(eof)
				if !sync {
					tokens.AddEOB()
				}
				w.writeTokens(tokens.Slice(), fixedLiteralEncoding.codes, fixedOffsetEncoding.codes)
				return
			}
		}
		if storable && ssize <= size {
			// Store bytes, if we don't get an improvement.
			w.writeStoredHeader(len(input), eof)
			w.writeBytes(input)
			return
		}
		// Write Huffman table.
		w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof)
		if !sync {
			// Keep the table open for reuse by the next block.
			w.lastHeader, _ = w.headerSize()
		}
		w.lastHuffMan = false
	}
	if sync {
		w.lastHeader = 0
	}
	// Write the tokens.
	w.writeTokens(tokens.Slice(), w.literalEncoding.codes, w.offsetEncoding.codes)
}
// fillTokens bumps every zero literal and offset frequency to 1 so that the
// generated tables can encode any symbol (making them always reusable).
func (w *huffmanBitWriter) fillTokens() {
	for i := range w.literalFreq[:literalCount] {
		if w.literalFreq[i] == 0 {
			w.literalFreq[i] = 1
		}
	}
	for i := range w.offsetFreq[:offsetCodeCount] {
		if w.offsetFreq[i] == 0 {
			w.offsetFreq[i] = 1
		}
	}
}
// indexTokens indexes a slice of tokens, and updates
// literalFreq and offsetFreq, and generates literalEncoding
// and offsetEncoding.
// The number of literal and offset tokens is returned.
func (w *huffmanBitWriter) indexTokens(t *tokens, filled bool) (numLiterals, numOffsets int) {
	//copy(w.literalFreq[:], t.litHist[:])
	// Array-pointer assignments compile to fixed-size moves.
	*(*[256]uint16)(w.literalFreq[:]) = t.litHist
	//copy(w.literalFreq[256:], t.extraHist[:])
	*(*[32]uint16)(w.literalFreq[256:]) = t.extraHist
	w.offsetFreq = t.offHist
	if t.n == 0 {
		return
	}
	if filled {
		// Histogram was filled (fillTokens); all symbols are in use.
		return maxNumLit, maxNumDist
	}
	// get the number of literals
	numLiterals = len(w.literalFreq)
	for w.literalFreq[numLiterals-1] == 0 {
		numLiterals--
	}
	// get the number of offsets
	numOffsets = len(w.offsetFreq)
	for numOffsets > 0 && w.offsetFreq[numOffsets-1] == 0 {
		numOffsets--
	}
	if numOffsets == 0 {
		// We haven't found a single match. If we want to go with the dynamic encoding,
		// we should count at least one offset to be sure that the offset huffman tree could be encoded.
		w.offsetFreq[0] = 1
		numOffsets = 1
	}
	return
}
// generate builds the literal and offset Huffman tables from the current
// frequency counts, limiting code lengths to 15 bits.
func (w *huffmanBitWriter) generate() {
	w.literalEncoding.generate(w.literalFreq[:literalCount], 15)
	w.offsetEncoding.generate(w.offsetFreq[:offsetCodeCount], 15)
}
// writeTokens writes a slice of tokens to the output.
// codes for literal and offset encoding must be supplied.
// The bit-writing logic of writeCode/writeBits is manually inlined here;
// this is a hot path.
func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode) {
	if w.err != nil {
		return
	}
	if len(tokens) == 0 {
		return
	}
	// Only last token should be endBlockMarker.
	var deferEOB bool
	if tokens[len(tokens)-1] == endBlockMarker {
		tokens = tokens[:len(tokens)-1]
		deferEOB = true
	}
	// Create slices up to the next power of two to avoid bounds checks.
	lits := leCodes[:256]
	offs := oeCodes[:32]
	lengths := leCodes[lengthCodesStart:]
	lengths = lengths[:32]
	// Go 1.16 LOVES having these on stack.
	bits, nbits, nbytes := w.bits, w.nbits, w.nbytes
	for _, t := range tokens {
		if t < 256 {
			// Plain literal.
			//w.writeCode(lits[t.literal()])
			c := lits[t]
			bits |= c.code64() << (nbits & 63)
			nbits += c.len()
			if nbits >= 48 {
				le.Store64(w.bytes[nbytes:], bits)
				//*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits
				bits >>= 48
				nbits -= 48
				nbytes += 6
				if nbytes >= bufferFlushSize {
					if w.err != nil {
						nbytes = 0
						return
					}
					_, w.err = w.writer.Write(w.bytes[:nbytes])
					nbytes = 0
				}
			}
			continue
		}
		// Write the length
		length := t.length()
		lengthCode := lengthCode(length) & 31
		if false {
			w.writeCode(lengths[lengthCode])
		} else {
			// inlined
			c := lengths[lengthCode]
			bits |= c.code64() << (nbits & 63)
			nbits += c.len()
			if nbits >= 48 {
				le.Store64(w.bytes[nbytes:], bits)
				//*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits
				bits >>= 48
				nbits -= 48
				nbytes += 6
				if nbytes >= bufferFlushSize {
					if w.err != nil {
						nbytes = 0
						return
					}
					_, w.err = w.writer.Write(w.bytes[:nbytes])
					nbytes = 0
				}
			}
		}
		if lengthCode >= lengthExtraBitsMinCode {
			extraLengthBits := lengthExtraBits[lengthCode]
			//w.writeBits(extraLength, extraLengthBits)
			extraLength := int32(length - lengthBase[lengthCode])
			bits |= uint64(extraLength) << (nbits & 63)
			nbits += extraLengthBits
			if nbits >= 48 {
				le.Store64(w.bytes[nbytes:], bits)
				//*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits
				bits >>= 48
				nbits -= 48
				nbytes += 6
				if nbytes >= bufferFlushSize {
					if w.err != nil {
						nbytes = 0
						return
					}
					_, w.err = w.writer.Write(w.bytes[:nbytes])
					nbytes = 0
				}
			}
		}
		// Write the offset
		offset := t.offset()
		offsetCode := (offset >> 16) & 31
		if false {
			w.writeCode(offs[offsetCode])
		} else {
			// inlined
			c := offs[offsetCode]
			bits |= c.code64() << (nbits & 63)
			nbits += c.len()
			if nbits >= 48 {
				le.Store64(w.bytes[nbytes:], bits)
				//*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits
				bits >>= 48
				nbits -= 48
				nbytes += 6
				if nbytes >= bufferFlushSize {
					if w.err != nil {
						nbytes = 0
						return
					}
					_, w.err = w.writer.Write(w.bytes[:nbytes])
					nbytes = 0
				}
			}
		}
		if offsetCode >= offsetExtraBitsMinCode {
			// offsetCombined packs extra-bit count (low byte) and base (upper bits).
			offsetComb := offsetCombined[offsetCode]
			//w.writeBits(extraOffset, extraOffsetBits)
			bits |= uint64((offset-(offsetComb>>8))&matchOffsetOnlyMask) << (nbits & 63)
			nbits += uint8(offsetComb)
			if nbits >= 48 {
				le.Store64(w.bytes[nbytes:], bits)
				//*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits
				bits >>= 48
				nbits -= 48
				nbytes += 6
				if nbytes >= bufferFlushSize {
					if w.err != nil {
						nbytes = 0
						return
					}
					_, w.err = w.writer.Write(w.bytes[:nbytes])
					nbytes = 0
				}
			}
		}
	}
	// Restore...
	w.bits, w.nbits, w.nbytes = bits, nbits, nbytes
	if deferEOB {
		w.writeCode(leCodes[endBlockMarker])
	}
}
// huffOffset is a static offset encoder used for huffman only encoding.
// It can be reused since we will not be encoding offset values.
var huffOffset *huffmanEncoder

// init builds huffOffset from a histogram with a single offset symbol.
func init() {
	w := newHuffmanBitWriter(nil)
	w.offsetFreq[0] = 1
	huffOffset = newHuffmanEncoder(offsetCodeCount)
	huffOffset.generate(w.offsetFreq[:offsetCodeCount], 15)
}
// writeBlockHuff encodes a block of bytes as either
// Huffman encoded literals or uncompressed bytes if the
// results only gains very little from compression.
//
// eof marks the final block; sync forces the block to be terminated so the
// output can be flushed. input is encoded purely as literal bytes (no
// matches/offsets). The previously emitted table may be reused across calls
// while w.lastHuffMan/w.lastHeader indicate an open huffman-only block.
func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) {
	if w.err != nil {
		return
	}

	// Clear histogram
	for i := range w.literalFreq[:] {
		w.literalFreq[i] = 0
	}
	if !w.lastHuffMan {
		for i := range w.offsetFreq[:] {
			w.offsetFreq[i] = 0
		}
	}

	const numLiterals = endBlockMarker + 1
	const numOffsets = 1

	// Add everything as literals
	// We have to estimate the header size.
	// Assume header is around 70 bytes:
	// https://stackoverflow.com/a/25454430
	const guessHeaderSizeBits = 70 * 8

	histogram(input, w.literalFreq[:numLiterals])
	ssize, storable := w.storedSize(input)
	if storable && len(input) > 1024 {
		// Quick check for incompressible content.
		// Accumulate the squared deviation of the byte histogram from a
		// perfectly flat distribution (avg = len/256); if it stays below
		// 2*len(input), the data is close to uniform and Huffman coding
		// cannot gain anything, so store it raw.
		abs := float64(0)
		avg := float64(len(input)) / 256
		max := float64(len(input) * 2)
		for _, v := range w.literalFreq[:256] {
			diff := float64(v) - avg
			abs += diff * diff
			if abs > max {
				break
			}
		}
		if abs < max {
			if debugDeflate {
				fmt.Println("stored", abs, "<", max)
			}
			// No chance we can compress this...
			w.writeStoredHeader(len(input), eof)
			w.writeBytes(input)
			return
		}
	}
	w.literalFreq[endBlockMarker] = 1
	w.tmpLitEncoding.generate(w.literalFreq[:numLiterals], 15)
	estBits := w.tmpLitEncoding.canReuseBits(w.literalFreq[:numLiterals])
	if estBits < math.MaxInt32 {
		// Charge any still-owed header bits, estimate a fresh header if
		// none is open, and add a penalty for switching to a new table.
		estBits += w.lastHeader
		if w.lastHeader == 0 {
			estBits += guessHeaderSizeBits
		}
		estBits += estBits >> w.logNewTablePenalty
	}

	// Store bytes, if we don't get a reasonable improvement.
	if storable && ssize <= estBits {
		if debugDeflate {
			fmt.Println("stored,", ssize, "<=", estBits)
		}
		w.writeStoredHeader(len(input), eof)
		w.writeBytes(input)
		return
	}

	if w.lastHeader > 0 {
		// A table from the previous block is still open; keep it only if
		// reusing its codes is no worse than the estimated cost of a new one.
		reuseSize := w.literalEncoding.canReuseBits(w.literalFreq[:256])

		if estBits < reuseSize {
			if debugDeflate {
				fmt.Println("NOT reusing, reuse:", reuseSize/8, "> new:", estBits/8, "header est:", w.lastHeader/8, "bytes")
			}
			// We owe an EOB
			w.writeCode(w.literalEncoding.codes[endBlockMarker])
			w.lastHeader = 0
		} else if debugDeflate {
			fmt.Println("reusing, reuse:", reuseSize/8, "> new:", estBits/8, "- header est:", w.lastHeader/8)
		}
	}

	count := 0
	if w.lastHeader == 0 {
		// Use the temp encoding, so swap.
		w.literalEncoding, w.tmpLitEncoding = w.tmpLitEncoding, w.literalEncoding
		// Generate codegen and codegenFrequencies, which indicates how to encode
		// the literalEncoding and the offsetEncoding.
		w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, huffOffset)
		w.codegenEncoding.generate(w.codegenFreq[:], 7)
		numCodegens := w.codegens()

		// Huffman.
		w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof)
		w.lastHuffMan = true
		w.lastHeader, _ = w.headerSize()
		if debugDeflate {
			count += w.lastHeader
			fmt.Println("header:", count/8)
		}
	}

	encoding := w.literalEncoding.codes[:256]

	// Go 1.16 LOVES having these on stack. At least 1.5x the speed.
	bits, nbits, nbytes := w.bits, w.nbits, w.nbytes
	if debugDeflate {
		count -= int(nbytes)*8 + int(nbits)
	}

	// Unroll, write 3 codes/loop.
	// Fastest number of unrolls.
	for len(input) > 3 {
		// We must have at least 48 bits free.
		if nbits >= 8 {
			// Flush whole bytes out of the bit buffer first.
			n := nbits >> 3
			le.Store64(w.bytes[nbytes:], bits)
			bits >>= (n * 8) & 63
			nbits -= n * 8
			nbytes += n
		}
		if nbytes >= bufferFlushSize {
			if w.err != nil {
				nbytes = 0
				return
			}
			if debugDeflate {
				count += int(nbytes) * 8
			}
			_, w.err = w.writer.Write(w.bytes[:nbytes])
			nbytes = 0
		}
		// Emit three literal codes back to back into the bit buffer.
		a, b := encoding[input[0]], encoding[input[1]]
		bits |= a.code64() << (nbits & 63)
		bits |= b.code64() << ((nbits + a.len()) & 63)
		c := encoding[input[2]]
		nbits += b.len() + a.len()
		bits |= c.code64() << (nbits & 63)
		nbits += c.len()
		input = input[3:]
	}

	// Remaining...
	for _, t := range input {
		if nbits >= 48 {
			le.Store64(w.bytes[nbytes:], bits)
			//*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits
			bits >>= 48
			nbits -= 48
			nbytes += 6
			if nbytes >= bufferFlushSize {
				if w.err != nil {
					nbytes = 0
					return
				}
				if debugDeflate {
					count += int(nbytes) * 8
				}
				_, w.err = w.writer.Write(w.bytes[:nbytes])
				nbytes = 0
			}
		}
		// Bitwriting inlined, ~30% speedup
		c := encoding[t]
		bits |= c.code64() << (nbits & 63)
		nbits += c.len()
		if debugDeflate {
			count += int(c.len())
		}
	}
	// Restore...
	w.bits, w.nbits, w.nbytes = bits, nbits, nbytes

	if debugDeflate {
		nb := count + int(nbytes)*8 + int(nbits)
		fmt.Println("wrote", nb, "bits,", nb/8, "bytes.")
	}
	// Flush if needed to have space.
	if w.nbits >= 48 {
		w.writeOutBits()
	}

	if eof || sync {
		// Terminate the block: emit EOB and forget the open table.
		w.writeCode(w.literalEncoding.codes[endBlockMarker])
		w.lastHeader = 0
		w.lastHuffMan = false
	}
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package flate
import (
"math"
"math/bits"
)
const (
	// maxBitsLimit is an exclusive upper bound on Huffman code bit lengths
	// handled by bitCounts; DEFLATE codes are at most 15 bits.
	maxBitsLimit = 16
	// literalCount is the number of valid literal/length symbols.
	literalCount = 286
)
// hcode packs a Huffman code together with its bit length into one uint32:
// the low 8 bits hold the length, the upper bits hold the code value.
type hcode uint32

// len returns the bit length of the code.
func (h hcode) len() uint8 {
	return uint8(h & 0xff)
}

// code64 returns the code value widened to 64 bits for bit-buffer math.
func (h hcode) code64() uint64 {
	return uint64(h) >> 8
}

// zero reports whether h holds no code at all.
func (h hcode) zero() bool {
	return h == hcode(0)
}
// huffmanEncoder holds the symbol-to-code mapping for one Huffman table.
type huffmanEncoder struct {
	// codes is indexed by symbol value.
	codes []hcode
	// bitCount is scratch space reused by bitCounts.
	bitCount [17]int32

	// Allocate a reusable buffer with the longest possible frequency table.
	// Possible lengths are codegenCodeCount, offsetCodeCount and literalCount.
	// The largest of these is literalCount, so we allocate for that case.
	freqcache [literalCount + 1]literalNode
}
// literalNode pairs a symbol value with its observed frequency.
// It is the working element while constructing a Huffman code.
type literalNode struct {
	literal uint16
	freq    uint16
}
// A levelInfo describes the state of the constructed tree for a given depth.
type levelInfo struct {
	// level is our depth in the tree; kept for better printing.
	level int32

	// The frequency of the last node at this level
	lastFreq int32

	// The frequency of the next character to add to this level
	nextCharFreq int32

	// The frequency of the next pair (from level below) to add to this level.
	// Only valid if the "needed" value of the next lower level is 0.
	nextPairFreq int32

	// The number of chains remaining to generate for this level before moving
	// up to the next level
	needed int32
}
// newhcode packs code and length into an hcode value.
func newhcode(code uint16, length uint8) hcode {
	return hcode(code)<<8 | hcode(length)
}

// set stores the given code and bit length into h.
func (h *hcode) set(code uint16, length uint8) {
	*h = newhcode(code, length)
}
// reverseBits returns the low bitLength bits of number in reversed bit order,
// as required for LSB-first DEFLATE code emission.
func reverseBits(number uint16, bitLength byte) uint16 {
	shift := (16 - bitLength) & 15
	return bits.Reverse16(number << shift)
}
// maxNode returns a sentinel node that sorts after every real literalNode.
func maxNode() literalNode {
	return literalNode{literal: math.MaxUint16, freq: math.MaxUint16}
}
// newHuffmanEncoder returns an encoder with room for size codes. The slice
// capacity is rounded up to the next power of two so the encoder can be
// re-sliced for differently sized tables without reallocating.
func newHuffmanEncoder(size int) *huffmanEncoder {
	pow2 := 1 << uint(bits.Len32(uint32(size-1)))
	return &huffmanEncoder{codes: make([]hcode, size, pow2)}
}
// generateFixedLiteralEncoding builds the fixed literal/length Huffman table
// defined by RFC 1951 section 3.2.6.
func generateFixedLiteralEncoding() *huffmanEncoder {
	h := newHuffmanEncoder(literalCount)
	for ch := uint16(0); ch < literalCount; ch++ {
		var code uint16
		var size uint8
		switch {
		case ch < 144:
			// 8 bits: 00110000 .. 10111111
			code, size = ch+48, 8
		case ch < 256:
			// 9 bits: 110010000 .. 111111111
			code, size = ch+400-144, 9
		case ch < 280:
			// 7 bits: 0000000 .. 0010111
			code, size = ch-256, 7
		default:
			// 8 bits: 11000000 .. 11000111
			code, size = ch+192-280, 8
		}
		h.codes[ch] = newhcode(reverseBits(code, size), size)
	}
	return h
}
// generateFixedOffsetEncoding builds the fixed offset table:
// 30 codes, each 5 bits wide (RFC 1951 section 3.2.6).
func generateFixedOffsetEncoding() *huffmanEncoder {
	h := newHuffmanEncoder(30)
	for ch := uint16(0); ch < 30; ch++ {
		h.codes[ch] = newhcode(reverseBits(ch, 5), 5)
	}
	return h
}
// fixedLiteralEncoding is the RFC 1951 fixed literal/length table,
// built once at package initialization.
var fixedLiteralEncoding = generateFixedLiteralEncoding()

// fixedOffsetEncoding is the RFC 1951 fixed offset table,
// built once at package initialization.
var fixedOffsetEncoding = generateFixedOffsetEncoding()
// bitLength returns the total number of bits required to encode the given
// frequency table using the currently assigned codes.
func (h *huffmanEncoder) bitLength(freq []uint16) int {
	total := 0
	for i, f := range freq {
		if f == 0 {
			continue
		}
		total += int(f) * int(h.codes[i].len())
	}
	return total
}
// bitLengthRaw returns the number of bits needed to encode every byte of b
// as a literal using the currently assigned codes.
func (h *huffmanEncoder) bitLengthRaw(b []byte) int {
	total := 0
	for _, v := range b {
		total += int(h.codes[v].len())
	}
	return total
}
// canReuseBits returns the number of bits needed to encode freq with the
// current codes, or math.MaxInt32 if the encoder cannot be reused because a
// required symbol has no code assigned.
func (h *huffmanEncoder) canReuseBits(freq []uint16) int {
	total := 0
	for i, f := range freq {
		if f == 0 {
			continue
		}
		c := h.codes[i]
		if c.zero() {
			// A needed symbol is missing from the table: no reuse possible.
			return math.MaxInt32
		}
		total += int(f) * int(c.len())
	}
	return total
}
// bitCounts returns the number of literals assigned to each bit size in the
// Huffman encoding.
//
// This method is only called when list.length >= 3.
// The cases of 0, 1, and 2 literals are handled by special case code.
//
// list is the array of literals with non-zero frequencies and their
// associated frequencies, in order of increasing frequency, with a special
// sentinel element of frequency MaxInt32 appended as its last element.
//
// maxBits is the maximum number of bits that should be used to encode any
// literal. It must be less than 16.
//
// The return value is a slice in which slice[i] indicates the number of
// literals that should be encoded in i bits.
func (h *huffmanEncoder) bitCounts(list []literalNode, maxBits int32) []int32 {
	if maxBits >= maxBitsLimit {
		panic("flate: maxBits too large")
	}
	n := int32(len(list))
	list = list[0 : n+1]
	list[n] = maxNode()

	// The tree can't have greater depth than n - 1, no matter what. This
	// saves a little bit of work in some small cases
	if maxBits > n-1 {
		maxBits = n - 1
	}

	// Create information about each of the levels.
	// A bogus "Level 0" whose sole purpose is so that
	// level1.prev.needed==0.  This makes level1.nextPairFreq
	// be a legitimate value that never gets chosen.
	var levels [maxBitsLimit]levelInfo
	// leafCounts[i] counts the number of literals at the left
	// of ancestors of the rightmost node at level i.
	// leafCounts[i][j] is the number of literals at the left
	// of the level j ancestor.
	var leafCounts [maxBitsLimit][maxBitsLimit]int32

	// Descending to only have 1 bounds check.
	l2f := int32(list[2].freq)
	l1f := int32(list[1].freq)
	l0f := int32(list[0].freq) + int32(list[1].freq)

	for level := int32(1); level <= maxBits; level++ {
		// For every level, the first two items are the first two characters.
		// We initialize the levels as if we had already figured this out.
		levels[level] = levelInfo{
			level:        level,
			lastFreq:     l1f,
			nextCharFreq: l2f,
			nextPairFreq: l0f,
		}
		leafCounts[level][level] = 2
		if level == 1 {
			levels[level].nextPairFreq = math.MaxInt32
		}
	}

	// We need a total of 2*n - 2 items at top level and have already generated 2.
	levels[maxBits].needed = 2*n - 4

	level := uint32(maxBits)
	for level < 16 {
		l := &levels[level]
		if l.nextPairFreq == math.MaxInt32 && l.nextCharFreq == math.MaxInt32 {
			// We've run out of both leafs and pairs.
			// End all calculations for this level.
			// To make sure we never come back to this level or any lower level,
			// set nextPairFreq impossibly large.
			l.needed = 0
			levels[level+1].nextPairFreq = math.MaxInt32
			level++
			continue
		}

		prevFreq := l.lastFreq
		if l.nextCharFreq < l.nextPairFreq {
			// The next item on this row is a leaf node.
			n := leafCounts[level][level] + 1
			l.lastFreq = l.nextCharFreq
			// Lower leafCounts are the same as the previous node.
			leafCounts[level][level] = n
			e := list[n]
			if e.literal < math.MaxUint16 {
				l.nextCharFreq = int32(e.freq)
			} else {
				// Sentinel reached: no more leaves at this level.
				l.nextCharFreq = math.MaxInt32
			}
		} else {
			// The next item on this row is a pair from the previous row.
			// nextPairFreq isn't valid until we generate two
			// more values in the level below
			l.lastFreq = l.nextPairFreq
			// Take leaf counts from the lower level, except counts[level] remains the same.
			// The array-copy branch is kept enabled; the copy() alternative
			// below is retained for reference.
			if true {
				save := leafCounts[level][level]
				leafCounts[level] = leafCounts[level-1]
				leafCounts[level][level] = save
			} else {
				copy(leafCounts[level][:level], leafCounts[level-1][:level])
			}
			levels[l.level-1].needed = 2
		}

		if l.needed--; l.needed == 0 {
			// We've done everything we need to do for this level.
			// Continue calculating one level up.  Fill in nextPairFreq
			// of that level with the sum of the two nodes we've just calculated on
			// this level.
			if l.level == maxBits {
				// All done!
				break
			}
			levels[l.level+1].nextPairFreq = prevFreq + l.lastFreq
			level++
		} else {
			// If we stole from below, move down temporarily to replenish it.
			for levels[level-1].needed > 0 {
				level--
			}
		}
	}

	// Something is wrong if at the end, the top level is null or hasn't used
	// all of the leaves.
	if leafCounts[maxBits][maxBits] != n {
		panic("leafCounts[maxBits][maxBits] != n")
	}

	bitCount := h.bitCount[:maxBits+1]
	bits := 1
	counts := &leafCounts[maxBits]
	for level := maxBits; level > 0; level-- {
		// chain.leafCount gives the number of literals requiring at least "bits"
		// bits to encode.
		bitCount[bits] = counts[level] - counts[level-1]
		bits++
	}
	return bitCount
}
// assignEncodingAndSize looks at the leaves and assigns them a bit count and
// an encoding as specified in RFC 1951 3.2.2.
//
// bitCount[i] is the number of literals to encode with i bits; list is in
// order of increasing frequency, so the longest codes go to the least
// frequent literals at the front of the list.
func (h *huffmanEncoder) assignEncodingAndSize(bitCount []int32, list []literalNode) {
	code := uint16(0)
	for n, bits := range bitCount {
		code <<= 1
		if n == 0 || bits == 0 {
			continue
		}
		// The literals list[len(list)-bits] .. list[len(list)-1]
		// are encoded using "bits" bits, and get the values
		// code, code + 1, ....  The code values are
		// assigned in literal order (not frequency order).
		chunk := list[len(list)-int(bits):]

		sortByLiteral(chunk)
		for _, node := range chunk {
			h.codes[node.literal] = newhcode(reverseBits(code, uint8(n)), uint8(n))
			code++
		}
		list = list[0 : len(list)-int(bits)]
	}
}
// generate updates this Huffman Code object to be the minimum code for the
// specified frequency count.
//
// freq is an array of frequencies, in which freq[i] gives the frequency of
// literal i. maxBits is the maximum number of bits to use for any literal.
// Symbols with zero frequency get the zero (absent) code.
func (h *huffmanEncoder) generate(freq []uint16, maxBits int32) {
	list := h.freqcache[:len(freq)+1]
	codes := h.codes[:len(freq)]
	// Number of non-zero literals
	count := 0
	// Set list to be the set of all non-zero literals and their frequencies
	for i, f := range freq {
		if f != 0 {
			list[count] = literalNode{uint16(i), f}
			count++
		} else {
			codes[i] = 0
		}
	}
	list[count] = literalNode{}

	list = list[:count]
	if count <= 2 {
		// Handle the small cases here, because they are awkward for the general case code. With
		// two or fewer literals, everything has bit length 1.
		for i, node := range list {
			// "list" is in order of increasing literal value.
			h.codes[node.literal].set(uint16(i), 1)
		}
		return
	}
	sortByFreq(list)

	// Get the number of literals for each bit count
	bitCount := h.bitCounts(list, maxBits)
	// And do the assignment
	h.assignEncodingAndSize(bitCount, list)
}
// atLeastOne clamps v to the range [1, 15].
func atLeastOne(v float32) float32 {
	switch {
	case v < 1:
		return 1
	case v > 15:
		return 15
	default:
		return v
	}
}
// histogram accumulates the byte counts of b into h (which must have at
// least 256 usable entries). Inputs of 8 KiB or more are delegated to
// histogramSplit, which counts four streams per iteration.
//
// The original guarded the split path with "if true && ..."; the dead
// constant toggle is removed here — behavior is unchanged.
func histogram(b []byte, h []uint16) {
	if len(b) >= 8<<10 {
		// Split for bigger inputs
		histogramSplit(b, h)
		return
	}
	h = h[:256]
	for _, t := range b {
		h[t]++
	}
}
// histogramSplit accumulates the byte counts of b into h (at least 256
// entries), processing four interleaved quarters of b per loop iteration.
// Tested, and slightly faster than a 2-way split; writing to separate
// arrays and combining is also slightly slower.
func histogramSplit(b []byte, h []uint16) {
	h = h[:256]
	// Peel off leading bytes until the remaining length is a multiple of 4.
	for len(b)%4 != 0 {
		h[b[0]]++
		b = b[1:]
	}
	quarter := len(b) / 4
	q0 := b[:quarter]
	q1 := b[quarter : 2*quarter]
	q2 := b[2*quarter : 3*quarter]
	q3 := b[3*quarter:]
	for i := range q0 {
		h[q0[i]]++
		h[q1[i]]++
		h[q2[i]]++
		h[q3[i]]++
	}
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package flate
// sortByFreq sorts data in place by increasing frequency, breaking ties by
// literal value. It uses an introsort (quicksort with a depth-limited
// heapsort fallback); the sort is not guaranteed to be stable.
func sortByFreq(data []literalNode) {
	quickSortByFreq(data, 0, len(data), maxDepth(len(data)))
}
// quickSortByFreq sorts data[a:b) by frequency (ties broken by literal),
// recursing on the smaller partition and looping on the larger one.
//
// NOTE(review): when the maxDepth budget is exhausted this falls back to
// heapSort, whose comparisons are by literal, not frequency — mirrors the
// upstream code; confirm the fallback is unreachable for the small inputs
// (n <= literalCount) this package sorts.
func quickSortByFreq(data []literalNode, a, b, maxDepth int) {
	for b-a > 12 { // Use ShellSort for slices <= 12 elements
		if maxDepth == 0 {
			heapSort(data, a, b)
			return
		}
		maxDepth--
		mlo, mhi := doPivotByFreq(data, a, b)
		// Avoiding recursion on the larger subproblem guarantees
		// a stack depth of at most lg(b-a).
		if mlo-a < b-mhi {
			quickSortByFreq(data, a, mlo, maxDepth)
			a = mhi // i.e., quickSortByFreq(data, mhi, b)
		} else {
			quickSortByFreq(data, mhi, b, maxDepth)
			b = mlo // i.e., quickSortByFreq(data, a, mlo)
		}
	}
	if b-a > 1 {
		// Do ShellSort pass with gap 6
		// It could be written in this simplified form cause b-a <= 12
		for i := a + 6; i < b; i++ {
			if data[i].freq == data[i-6].freq && data[i].literal < data[i-6].literal || data[i].freq < data[i-6].freq {
				data[i], data[i-6] = data[i-6], data[i]
			}
		}
		insertionSortByFreq(data, a, b)
	}
}
// doPivotByFreq partitions data[lo:hi) around a median-of-three pivot,
// ordering by frequency with ties broken by literal value. It returns
// midlo, midhi such that elements equal to the pivot end up in
// data[midlo:midhi], following the scheme of Go's pre-pdqsort sort.go.
func doPivotByFreq(data []literalNode, lo, hi int) (midlo, midhi int) {
	m := int(uint(lo+hi) >> 1) // Written like this to avoid integer overflow.
	if hi-lo > 40 {
		// Tukey's ``Ninther,'' median of three medians of three.
		s := (hi - lo) / 8
		medianOfThreeSortByFreq(data, lo, lo+s, lo+2*s)
		medianOfThreeSortByFreq(data, m, m-s, m+s)
		medianOfThreeSortByFreq(data, hi-1, hi-1-s, hi-1-2*s)
	}
	medianOfThreeSortByFreq(data, lo, m, hi-1)

	// Invariants are:
	//	data[lo] = pivot (set up by ChoosePivot)
	//	data[lo < i < a] < pivot
	//	data[a <= i < b] <= pivot
	//	data[b <= i < c] unexamined
	//	data[c <= i < hi-1] > pivot
	//	data[hi-1] >= pivot
	pivot := lo
	a, c := lo+1, hi-1

	for ; a < c && (data[a].freq == data[pivot].freq && data[a].literal < data[pivot].literal || data[a].freq < data[pivot].freq); a++ {
	}
	b := a
	for {
		for ; b < c && (data[pivot].freq == data[b].freq && data[pivot].literal > data[b].literal || data[pivot].freq > data[b].freq); b++ { // data[b] <= pivot
		}
		for ; b < c && (data[pivot].freq == data[c-1].freq && data[pivot].literal < data[c-1].literal || data[pivot].freq < data[c-1].freq); c-- { // data[c-1] > pivot
		}
		if b >= c {
			break
		}
		// data[b] > pivot; data[c-1] <= pivot
		data[b], data[c-1] = data[c-1], data[b]
		b++
		c--
	}
	// If hi-c<3 then there are duplicates (by property of median of nine).
	// Let's be a bit more conservative, and set border to 5.
	protect := hi-c < 5
	if !protect && hi-c < (hi-lo)/4 {
		// Lets test some points for equality to pivot
		dups := 0
		if data[pivot].freq == data[hi-1].freq && data[pivot].literal > data[hi-1].literal || data[pivot].freq > data[hi-1].freq { // data[hi-1] = pivot
			data[c], data[hi-1] = data[hi-1], data[c]
			c++
			dups++
		}
		if data[b-1].freq == data[pivot].freq && data[b-1].literal > data[pivot].literal || data[b-1].freq > data[pivot].freq { // data[b-1] = pivot
			b--
			dups++
		}
		// m-lo = (hi-lo)/2 > 6
		// b-lo > (hi-lo)*3/4-1 > 8
		// ==> m < b ==> data[m] <= pivot
		if data[m].freq == data[pivot].freq && data[m].literal > data[pivot].literal || data[m].freq > data[pivot].freq { // data[m] = pivot
			data[m], data[b-1] = data[b-1], data[m]
			b--
			dups++
		}
		// if at least 2 points are equal to pivot, assume skewed distribution
		protect = dups > 1
	}
	if protect {
		// Protect against a lot of duplicates
		// Add invariant:
		//	data[a <= i < b] unexamined
		//	data[b <= i < c] = pivot
		for {
			for ; a < b && (data[b-1].freq == data[pivot].freq && data[b-1].literal > data[pivot].literal || data[b-1].freq > data[pivot].freq); b-- { // data[b] == pivot
			}
			for ; a < b && (data[a].freq == data[pivot].freq && data[a].literal < data[pivot].literal || data[a].freq < data[pivot].freq); a++ { // data[a] < pivot
			}
			if a >= b {
				break
			}
			// data[a] == pivot; data[b-1] < pivot
			data[a], data[b-1] = data[b-1], data[a]
			a++
			b--
		}
	}
	// Swap pivot into middle
	data[pivot], data[b-1] = data[b-1], data[pivot]
	return b - 1, c
}
// insertionSortByFreq sorts data[a:b) in place by frequency, breaking
// frequency ties by literal value. Intended for small ranges.
func insertionSortByFreq(data []literalNode, a, b int) {
	for i := a + 1; i < b; i++ {
		for j := i; j > a; j-- {
			prev, cur := data[j-1], data[j]
			less := cur.freq < prev.freq ||
				(cur.freq == prev.freq && cur.literal < prev.literal)
			if !less {
				break
			}
			data[j], data[j-1] = prev, cur
		}
	}
}
// The partitioning scheme here loosely follows Bentley and McIlroy,
// ``Engineering a Sort Function,'' SP&E November 1993.

// medianOfThreeSortByFreq moves the median of the three values data[m0],
// data[m1], data[m2] into data[m1]. Ordering is by frequency, with ties
// broken by literal value.
func medianOfThreeSortByFreq(data []literalNode, m1, m0, m2 int) {
	// sort 3 elements
	if data[m1].freq == data[m0].freq && data[m1].literal < data[m0].literal || data[m1].freq < data[m0].freq {
		data[m1], data[m0] = data[m0], data[m1]
	}
	// data[m0] <= data[m1]
	if data[m2].freq == data[m1].freq && data[m2].literal < data[m1].literal || data[m2].freq < data[m1].freq {
		data[m2], data[m1] = data[m1], data[m2]
		// data[m0] <= data[m2] && data[m1] < data[m2]
		if data[m1].freq == data[m0].freq && data[m1].literal < data[m0].literal || data[m1].freq < data[m0].freq {
			data[m1], data[m0] = data[m0], data[m1]
		}
	}
	// now data[m0] <= data[m1] <= data[m2]
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package flate
// sortByLiteral sorts data in place in increasing literal order using an
// introsort (quicksort with a depth-limited heapsort fallback).
// The sort is not guaranteed to be stable.
func sortByLiteral(data []literalNode) {
	quickSort(data, 0, len(data), maxDepth(len(data)))
}
// quickSort sorts data[a:b) by literal value, recursing on the smaller
// partition and looping on the larger one; falls back to heapSort when the
// maxDepth budget is exhausted.
func quickSort(data []literalNode, a, b, maxDepth int) {
	for b-a > 12 { // Use ShellSort for slices <= 12 elements
		if maxDepth == 0 {
			heapSort(data, a, b)
			return
		}
		maxDepth--
		mlo, mhi := doPivot(data, a, b)
		// Avoiding recursion on the larger subproblem guarantees
		// a stack depth of at most lg(b-a).
		if mlo-a < b-mhi {
			quickSort(data, a, mlo, maxDepth)
			a = mhi // i.e., quickSort(data, mhi, b)
		} else {
			quickSort(data, mhi, b, maxDepth)
			b = mlo // i.e., quickSort(data, a, mlo)
		}
	}
	if b-a > 1 {
		// Do ShellSort pass with gap 6
		// It could be written in this simplified form cause b-a <= 12
		for i := a + 6; i < b; i++ {
			if data[i].literal < data[i-6].literal {
				data[i], data[i-6] = data[i-6], data[i]
			}
		}
		insertionSort(data, a, b)
	}
}
// heapSort sorts data[a:b) in increasing literal order (comparisons in
// siftDown are by literal). first is the base offset of the heap.
func heapSort(data []literalNode, a, b int) {
	first := a
	lo := 0
	hi := b - a

	// Build heap with greatest element at top.
	for i := (hi - 1) / 2; i >= 0; i-- {
		siftDown(data, i, hi, first)
	}

	// Pop elements, largest first, into end of data.
	for i := hi - 1; i >= 0; i-- {
		data[first], data[first+i] = data[first+i], data[first]
		siftDown(data, lo, i, first)
	}
}
// siftDown implements the heap property on data[lo, hi).
// first is an offset into the array where the root of the heap lies.
// Ordering is by literal value.
func siftDown(data []literalNode, lo, hi, first int) {
	root := lo
	for {
		child := 2*root + 1
		if child >= hi {
			break
		}
		// Pick the larger of the two children.
		if child+1 < hi && data[first+child].literal < data[first+child+1].literal {
			child++
		}
		if data[first+root].literal > data[first+child].literal {
			// Heap property restored.
			return
		}
		data[first+root], data[first+child] = data[first+child], data[first+root]
		root = child
	}
}
// doPivot partitions data[lo:hi) around a median-of-three pivot ordered by
// literal value. It returns midlo, midhi such that elements equal to the
// pivot end up in data[midlo:midhi], following the scheme of Go's
// pre-pdqsort sort.go.
func doPivot(data []literalNode, lo, hi int) (midlo, midhi int) {
	m := int(uint(lo+hi) >> 1) // Written like this to avoid integer overflow.
	if hi-lo > 40 {
		// Tukey's ``Ninther,'' median of three medians of three.
		s := (hi - lo) / 8
		medianOfThree(data, lo, lo+s, lo+2*s)
		medianOfThree(data, m, m-s, m+s)
		medianOfThree(data, hi-1, hi-1-s, hi-1-2*s)
	}
	medianOfThree(data, lo, m, hi-1)

	// Invariants are:
	//	data[lo] = pivot (set up by ChoosePivot)
	//	data[lo < i < a] < pivot
	//	data[a <= i < b] <= pivot
	//	data[b <= i < c] unexamined
	//	data[c <= i < hi-1] > pivot
	//	data[hi-1] >= pivot
	pivot := lo
	a, c := lo+1, hi-1

	for ; a < c && data[a].literal < data[pivot].literal; a++ {
	}
	b := a
	for {
		for ; b < c && data[pivot].literal > data[b].literal; b++ { // data[b] <= pivot
		}
		for ; b < c && data[pivot].literal < data[c-1].literal; c-- { // data[c-1] > pivot
		}
		if b >= c {
			break
		}
		// data[b] > pivot; data[c-1] <= pivot
		data[b], data[c-1] = data[c-1], data[b]
		b++
		c--
	}
	// If hi-c<3 then there are duplicates (by property of median of nine).
	// Let's be a bit more conservative, and set border to 5.
	protect := hi-c < 5
	if !protect && hi-c < (hi-lo)/4 {
		// Lets test some points for equality to pivot
		dups := 0
		if data[pivot].literal > data[hi-1].literal { // data[hi-1] = pivot
			data[c], data[hi-1] = data[hi-1], data[c]
			c++
			dups++
		}
		if data[b-1].literal > data[pivot].literal { // data[b-1] = pivot
			b--
			dups++
		}
		// m-lo = (hi-lo)/2 > 6
		// b-lo > (hi-lo)*3/4-1 > 8
		// ==> m < b ==> data[m] <= pivot
		if data[m].literal > data[pivot].literal { // data[m] = pivot
			data[m], data[b-1] = data[b-1], data[m]
			b--
			dups++
		}
		// if at least 2 points are equal to pivot, assume skewed distribution
		protect = dups > 1
	}
	if protect {
		// Protect against a lot of duplicates
		// Add invariant:
		//	data[a <= i < b] unexamined
		//	data[b <= i < c] = pivot
		for {
			for ; a < b && data[b-1].literal > data[pivot].literal; b-- { // data[b] == pivot
			}
			for ; a < b && data[a].literal < data[pivot].literal; a++ { // data[a] < pivot
			}
			if a >= b {
				break
			}
			// data[a] == pivot; data[b-1] < pivot
			data[a], data[b-1] = data[b-1], data[a]
			a++
			b--
		}
	}
	// Swap pivot into middle
	data[pivot], data[b-1] = data[b-1], data[pivot]
	return b - 1, c
}
// insertionSort sorts data[a:b) in place by increasing literal value.
// Intended for small ranges.
func insertionSort(data []literalNode, a, b int) {
	for i := a + 1; i < b; i++ {
		for j := i; j > a; j-- {
			if data[j-1].literal <= data[j].literal {
				break
			}
			data[j], data[j-1] = data[j-1], data[j]
		}
	}
}
// maxDepth returns a threshold at which quicksort should switch
// to heapsort. It returns 2*ceil(lg(n+1)).
func maxDepth(n int) int {
	depth := 0
	for v := n; v > 0; v >>= 1 {
		depth++
	}
	return 2 * depth
}
// medianOfThree moves the median of the three values data[m0], data[m1],
// data[m2] into data[m1]. Comparisons are by literal value only.
func medianOfThree(data []literalNode, m1, m0, m2 int) {
	// sort 3 elements
	if data[m1].literal < data[m0].literal {
		data[m1], data[m0] = data[m0], data[m1]
	}
	// data[m0] <= data[m1]
	if data[m2].literal < data[m1].literal {
		data[m2], data[m1] = data[m1], data[m2]
		// data[m0] <= data[m2] && data[m1] < data[m2]
		if data[m1].literal < data[m0].literal {
			data[m1], data[m0] = data[m0], data[m1]
		}
	}
	// now data[m0] <= data[m1] <= data[m2]
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package flate implements the DEFLATE compressed data format, described in
// RFC 1951. The gzip and zlib packages implement access to DEFLATE-based file
// formats.
package flate
import (
"bufio"
"compress/flate"
"fmt"
"io"
"math/bits"
"sync"
)
const (
	maxCodeLen     = 16 // max length of Huffman code
	maxCodeLenMask = 15 // mask for max length of Huffman code
	// The next three numbers come from the RFC section 3.2.7, with the
	// additional proviso in section 3.2.5 which implies that distance codes
	// 30 and 31 should never occur in compressed data.
	maxNumLit  = 286
	maxNumDist = 30
	numCodes   = 19 // number of codes in Huffman meta-code
	// debugDecode enables verbose logging of decode failures.
	debugDecode = false
)
// lengthExtra holds the value of (match length - 3) together with the number
// of extra bits for one length code.
type lengthExtra struct {
	length, extra uint8
}

// decCodeToLen maps each length code to its base length (minus 3) and extra
// bit count, per RFC 1951 section 3.2.5. The last valid entry (index 28,
// length 258) takes no extra bits; the final three entries are padding for
// invalid codes.
var decCodeToLen = [32]lengthExtra{
	{length: 0x0, extra: 0x0}, {length: 0x1, extra: 0x0}, {length: 0x2, extra: 0x0}, {length: 0x3, extra: 0x0},
	{length: 0x4, extra: 0x0}, {length: 0x5, extra: 0x0}, {length: 0x6, extra: 0x0}, {length: 0x7, extra: 0x0},
	{length: 0x8, extra: 0x1}, {length: 0xa, extra: 0x1}, {length: 0xc, extra: 0x1}, {length: 0xe, extra: 0x1},
	{length: 0x10, extra: 0x2}, {length: 0x14, extra: 0x2}, {length: 0x18, extra: 0x2}, {length: 0x1c, extra: 0x2},
	{length: 0x20, extra: 0x3}, {length: 0x28, extra: 0x3}, {length: 0x30, extra: 0x3}, {length: 0x38, extra: 0x3},
	{length: 0x40, extra: 0x4}, {length: 0x50, extra: 0x4}, {length: 0x60, extra: 0x4}, {length: 0x70, extra: 0x4},
	{length: 0x80, extra: 0x5}, {length: 0xa0, extra: 0x5}, {length: 0xc0, extra: 0x5}, {length: 0xe0, extra: 0x5},
	{length: 0xff, extra: 0x0}, {length: 0x0, extra: 0x0}, {length: 0x0, extra: 0x0}, {length: 0x0, extra: 0x0}}
// bitMask32 maps n in [0, 31] to a mask covering the n lowest bits,
// i.e. bitMask32[n] == 1<<n - 1. Supports masks of up to 32 bits.
var bitMask32 = [32]uint32{
	0x0, 0x1, 0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f,
	0xff, 0x1ff, 0x3ff, 0x7ff, 0xfff, 0x1fff, 0x3fff, 0x7fff,
	0xffff, 0x1ffff, 0x3ffff, 0x7ffff, 0xfffff, 0x1fffff, 0x3fffff, 0x7fffff,
	0xffffff, 0x1ffffff, 0x3ffffff, 0x7ffffff, 0xfffffff, 0x1fffffff, 0x3fffffff, 0x7fffffff,
}
// fixedOnce guards the one-time initialization of fixedHuffmanDecoder,
// which is performed lazily upon first use.
var fixedOnce sync.Once

// fixedHuffmanDecoder is the shared decoder for fixed-Huffman blocks.
var fixedHuffmanDecoder huffmanDecoder
// A CorruptInputError reports the presence of corrupt input at a given offset.
// It is aliased to the stdlib type so errors interoperate with compress/flate.
type CorruptInputError = flate.CorruptInputError

// An InternalError reports an error in the flate code itself.
type InternalError string

// Error implements the error interface.
func (e InternalError) Error() string { return "flate: internal error: " + string(e) }

// A ReadError reports an error encountered while reading input.
//
// Deprecated: No longer returned.
type ReadError = flate.ReadError

// A WriteError reports an error encountered while writing output.
//
// Deprecated: No longer returned.
type WriteError = flate.WriteError
// Resetter resets a ReadCloser returned by NewReader or NewReaderDict
// to switch to a new underlying Reader. This permits reusing a ReadCloser
// instead of allocating a new one.
type Resetter interface {
	// Reset discards any buffered data and resets the Resetter as if it was
	// newly initialized with the given reader.
	Reset(r io.Reader, dict []byte) error
}
// The data structure for decoding Huffman tables is based on that of
// zlib. There is a lookup table of a fixed bit width (huffmanChunkBits).
// For codes smaller than the table width, there are multiple entries
// (each combination of trailing bits has the same value). For codes
// larger than the table width, the table contains a link to an overflow
// table. The width of each entry in the link table is the maximum code
// size minus the chunk width.
//
// Note that you can do a lookup in the table even without all bits
// filled. Since the extra bits are zero, and the DEFLATE Huffman codes
// have the property that shorter codes come before longer ones, the
// bit length estimate in the result is a lower bound on the actual
// number of bits.
//
// See the following:
// http://www.gzip.org/algorithm.txt
// chunk & 15 is number of bits
// chunk >> 4 is value, including table link
const (
	// huffmanChunkBits is the bit width of the first-level lookup table.
	huffmanChunkBits  = 9
	huffmanNumChunks  = 1 << huffmanChunkBits // size of the first-level table
	huffmanCountMask  = 15                    // chunk & 15 is number of bits
	huffmanValueShift = 4                     // chunk >> 4 is value, including table link
)
// huffmanDecoder is the chunked lookup-table decoder described in the
// comment above; populated by init from a slice of code lengths.
type huffmanDecoder struct {
	maxRead  int                       // the maximum number of bits we can read and not overread
	chunks   *[huffmanNumChunks]uint16 // chunks as described above
	links    [][]uint16                // overflow links
	linkMask uint32                    // mask the width of the link table
}
// init initializes Huffman decoding tables from an array of code lengths.
// Following this function, h is guaranteed to be initialized into a complete
// tree (i.e., neither over-subscribed nor under-subscribed). The exception is a
// degenerate case where the tree has only a single symbol with length 1. Empty
// trees are permitted. It returns false if the lengths do not describe a
// valid (complete or accepted-degenerate) code.
func (h *huffmanDecoder) init(lengths []int) bool {
	// Sanity enables additional runtime tests during Huffman
	// table construction. It's intended to be used during
	// development to supplement the currently ad-hoc unit tests.
	const sanity = false

	if h.chunks == nil {
		h.chunks = new([huffmanNumChunks]uint16)
	}

	// Reset everything while keeping the reusable chunk and link storage.
	if h.maxRead != 0 {
		*h = huffmanDecoder{chunks: h.chunks, links: h.links}
	}

	// Count number of codes of each length,
	// compute maxRead and max length.
	var count [maxCodeLen]int
	var min, max int
	for _, n := range lengths {
		if n == 0 {
			continue
		}
		if min == 0 || n < min {
			min = n
		}
		if n > max {
			max = n
		}
		count[n&maxCodeLenMask]++
	}

	// Empty tree. The decompressor.huffSym function will fail later if the tree
	// is used. Technically, an empty tree is only valid for the HDIST tree and
	// not the HCLEN and HLIT tree. However, a stream with an empty HCLEN tree
	// is guaranteed to fail since it will attempt to use the tree to decode the
	// codes for the HLIT and HDIST trees. Similarly, an empty HLIT tree is
	// guaranteed to fail later since the compressed data section must be
	// composed of at least one symbol (the end-of-block marker).
	if max == 0 {
		return true
	}

	// Canonical Huffman: the first code of each length follows from the
	// counts of shorter codes.
	code := 0
	var nextcode [maxCodeLen]int
	for i := min; i <= max; i++ {
		code <<= 1
		nextcode[i&maxCodeLenMask] = code
		code += count[i&maxCodeLenMask]
	}

	// Check that the coding is complete (i.e., that we've
	// assigned all 2-to-the-max possible bit sequences).
	// Exception: To be compatible with zlib, we also need to
	// accept degenerate single-code codings. See also
	// TestDegenerateHuffmanCoding.
	if code != 1<<uint(max) && !(code == 1 && max == 1) {
		if debugDecode {
			fmt.Println("coding failed, code, max:", code, max, code == 1<<uint(max), code == 1 && max == 1, "(one should be true)")
		}
		return false
	}

	h.maxRead = min

	chunks := h.chunks[:]
	for i := range chunks {
		chunks[i] = 0
	}

	if max > huffmanChunkBits {
		// Codes longer than the first-level table width need overflow tables.
		numLinks := 1 << (uint(max) - huffmanChunkBits)
		h.linkMask = uint32(numLinks - 1)

		// create link tables
		link := nextcode[huffmanChunkBits+1] >> 1
		if cap(h.links) < huffmanNumChunks-link {
			h.links = make([][]uint16, huffmanNumChunks-link)
		} else {
			h.links = h.links[:huffmanNumChunks-link]
		}
		for j := uint(link); j < huffmanNumChunks; j++ {
			reverse := int(bits.Reverse16(uint16(j)))
			reverse >>= uint(16 - huffmanChunkBits)
			off := j - uint(link)
			if sanity && h.chunks[reverse] != 0 {
				panic("impossible: overwriting existing chunk")
			}
			h.chunks[reverse] = uint16(off<<huffmanValueShift | (huffmanChunkBits + 1))
			if cap(h.links[off]) < numLinks {
				h.links[off] = make([]uint16, numLinks)
			} else {
				h.links[off] = h.links[off][:numLinks]
			}
		}
	} else {
		h.links = h.links[:0]
	}

	for i, n := range lengths {
		if n == 0 {
			continue
		}
		code := nextcode[n]
		nextcode[n]++
		chunk := uint16(i<<huffmanValueShift | n)
		// DEFLATE codes are read LSB-first, so entries are stored bit-reversed.
		reverse := int(bits.Reverse16(uint16(code)))
		reverse >>= uint(16 - n)
		if n <= huffmanChunkBits {
			// Short code: replicate across every table slot whose low n
			// bits match the code.
			for off := reverse; off < len(h.chunks); off += 1 << uint(n) {
				// We should never need to overwrite
				// an existing chunk. Also, 0 is
				// never a valid chunk, because the
				// lower 4 "count" bits should be
				// between 1 and 15.
				if sanity && h.chunks[off] != 0 {
					panic("impossible: overwriting existing chunk")
				}
				h.chunks[off] = chunk
			}
		} else {
			j := reverse & (huffmanNumChunks - 1)
			if sanity && h.chunks[j]&huffmanCountMask != huffmanChunkBits+1 {
				// Longer codes should have been
				// associated with a link table above.
				panic("impossible: not an indirect chunk")
			}
			value := h.chunks[j] >> huffmanValueShift
			linktab := h.links[value]
			reverse >>= huffmanChunkBits
			for off := reverse; off < len(linktab); off += 1 << uint(n-huffmanChunkBits) {
				if sanity && linktab[off] != 0 {
					panic("impossible: overwriting existing chunk")
				}
				linktab[off] = chunk
			}
		}
	}

	if sanity {
		// Above we've sanity checked that we never overwrote
		// an existing entry. Here we additionally check that
		// we filled the tables completely.
		for i, chunk := range h.chunks {
			if chunk == 0 {
				// As an exception, in the degenerate
				// single-code case, we allow odd
				// chunks to be missing.
				if code == 1 && i%2 == 1 {
					continue
				}
				panic("impossible: missing chunk")
			}
		}
		for _, linktab := range h.links {
			for _, chunk := range linktab {
				if chunk == 0 {
					panic("impossible: missing chunk")
				}
			}
		}
	}

	return true
}
// Reader is the actual read interface needed by NewReader.
// If the passed in io.Reader does not also have ReadByte,
// the NewReader will introduce its own buffering.
// See makeReader, which wraps a plain io.Reader in a bufio.Reader
// when the ReadByte method is missing.
type Reader interface {
	io.Reader
	io.ByteReader
}
// step identifies the next state of the decompression state machine;
// doStep dispatches on it. It starts at iota+1 so the zero value is
// invalid and trips the panic in doStep if the machine was never set up.
type step uint8

const (
	copyData             step = iota + 1 // resume copying a stored block (copyData method)
	nextBlock                            // parse the next block header
	huffmanBytesBuffer                   // huffman block; input asserted to *bytes.Buffer
	huffmanBytesReader                   // huffman block; input asserted to *bytes.Reader
	huffmanBufioReader                   // huffman block; input asserted to *bufio.Reader
	huffmanStringsReader                 // huffman block; input asserted to *strings.Reader
	huffmanGenericReader                 // huffman block; generic Reader path
)
// flushMode tells the decompressor when to return data to the caller.
type flushMode uint8

const (
	syncFlush    flushMode = iota // return data after a sync flush (zero-length stored) block
	partialFlush                  // return data after each block
)
// Decompress state.
type decompressor struct {
	// Input source.
	r       Reader
	roffset int64 // number of bytes consumed from r; used in CorruptInputError positions

	// Huffman decoders for literal/length, distance.
	h1, h2 huffmanDecoder

	// Length arrays used to define Huffman codes.
	bits     *[maxNumLit + maxNumDist]int
	codebits *[numCodes]int

	// Output history, buffer.
	dict dictDecoder

	// Next step in the decompression,
	// and decompression state.
	step      step
	stepState int    // resume point (readLiteral vs copyHistory) inside a huffman step
	err       error  // sticky error; io.EOF marks a cleanly finished stream
	toRead    []byte // decompressed bytes waiting to be handed out by Read/WriteTo
	hl, hd    *huffmanDecoder
	copyLen   int
	copyDist  int

	// Temporary buffer (avoids repeated allocation).
	buf [4]byte

	// Input bits, in top of b.
	b  uint32
	nb uint // number of valid bits in b

	final     bool      // current block had BFINAL set
	flushMode flushMode // when to surface buffered output (sync vs partial flush)
}
// nextBlock reads the 3-bit block header (BFINAL + BTYPE, RFC 1951
// section 3.2.3) and dispatches to the matching block decoder:
// stored, fixed-Huffman, or dynamic-Huffman. Reserved BTYPE 3 is an error.
func (f *decompressor) nextBlock() {
	for f.nb < 1+2 {
		if f.err = f.moreBits(); f.err != nil {
			return
		}
	}
	// BFINAL: set on the last block of the stream.
	f.final = f.b&1 == 1
	f.b >>= 1
	// BTYPE: 0 = stored, 1 = fixed Huffman, 2 = dynamic Huffman, 3 = reserved.
	typ := f.b & 3
	f.b >>= 2
	f.nb -= 1 + 2
	switch typ {
	case 0:
		f.dataBlock()
		if debugDecode {
			fmt.Println("stored block")
		}
	case 1:
		// compressed, fixed Huffman tables
		f.hl = &fixedHuffmanDecoder
		f.hd = nil
		f.huffmanBlockDecoder()
		if debugDecode {
			// Fixed typo: was "predefinied".
			fmt.Println("predefined huffman block")
		}
	case 2:
		// compressed, dynamic Huffman tables
		if f.err = f.readHuffman(); f.err != nil {
			break
		}
		f.hl = &f.h1
		f.hd = &f.h2
		f.huffmanBlockDecoder()
		if debugDecode {
			fmt.Println("dynamic huffman block")
		}
	default:
		// 3 is reserved.
		if debugDecode {
			fmt.Println("reserved data block encountered")
		}
		f.err = CorruptInputError(f.roffset)
	}
}
// Read implements io.Reader, decompressing into b.
// It first drains any pending output in f.toRead; only once pending
// output is fully consumed is a stored error (including io.EOF)
// returned, so no decompressed data is ever lost to an error.
func (f *decompressor) Read(b []byte) (int, error) {
	for {
		if len(f.toRead) > 0 {
			n := copy(b, f.toRead)
			f.toRead = f.toRead[n:]
			if len(f.toRead) == 0 {
				// Pending data exhausted: report the stored error
				// (e.g. io.EOF) together with the final bytes.
				return n, f.err
			}
			return n, nil
		}
		if f.err != nil {
			return 0, f.err
		}
		// Advance the state machine; it fills f.toRead and/or sets f.err.
		f.doStep()
		if f.err != nil && len(f.toRead) == 0 {
			f.toRead = f.dict.readFlush() // Flush what's left in case of error
		}
	}
}
// WriteTo implements the io.WriterTo interface for io.Copy and friends.
// It streams all decompressed output to w, flushing any data left in the
// window exactly once when an error (including io.EOF) is hit.
func (f *decompressor) WriteTo(w io.Writer) (int64, error) {
	total := int64(0)
	flushed := false
	for {
		if len(f.toRead) > 0 {
			n, err := w.Write(f.toRead)
			total += int64(n)
			if err != nil {
				f.err = err
				return total, err
			}
			if n != len(f.toRead) {
				return total, io.ErrShortWrite
			}
			f.toRead = f.toRead[:0]
		}
		// Only terminate once the final flush has been written out.
		if f.err != nil && flushed {
			if f.err == io.EOF {
				// Clean end of stream.
				return total, nil
			}
			return total, f.err
		}
		if f.err == nil {
			f.doStep()
		}
		if len(f.toRead) == 0 && f.err != nil && !flushed {
			f.toRead = f.dict.readFlush() // Flush what's left in case of error
			flushed = true
		}
	}
}
// Close reports any error encountered during decompression.
// A cleanly finished stream (stored error io.EOF) is reported as nil.
func (f *decompressor) Close() error {
	if f.err != io.EOF {
		return f.err
	}
	return nil
}
// RFC 1951 section 3.2.7.
// Compression with dynamic Huffman codes
//
// codeOrder is the fixed permutation in which the code-length code
// lengths appear in the block header (most useful symbols first, so
// trailing zero lengths can be omitted).
var codeOrder = [...]int{16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15}
func (f *decompressor) readHuffman() error {
// HLIT[5], HDIST[5], HCLEN[4].
for f.nb < 5+5+4 {
if err := f.moreBits(); err != nil {
return err
}
}
nlit := int(f.b&0x1F) + 257
if nlit > maxNumLit {
if debugDecode {
fmt.Println("nlit > maxNumLit", nlit)
}
return CorruptInputError(f.roffset)
}
f.b >>= 5
ndist := int(f.b&0x1F) + 1
if ndist > maxNumDist {
if debugDecode {
fmt.Println("ndist > maxNumDist", ndist)
}
return CorruptInputError(f.roffset)
}
f.b >>= 5
nclen := int(f.b&0xF) + 4
// numCodes is 19, so nclen is always valid.
f.b >>= 4
f.nb -= 5 + 5 + 4
// (HCLEN+4)*3 bits: code lengths in the magic codeOrder order.
for i := 0; i < nclen; i++ {
for f.nb < 3 {
if err := f.moreBits(); err != nil {
return err
}
}
f.codebits[codeOrder[i]] = int(f.b & 0x7)
f.b >>= 3
f.nb -= 3
}
for i := nclen; i < len(codeOrder); i++ {
f.codebits[codeOrder[i]] = 0
}
if !f.h1.init(f.codebits[0:]) {
if debugDecode {
fmt.Println("init codebits failed")
}
return CorruptInputError(f.roffset)
}
// HLIT + 257 code lengths, HDIST + 1 code lengths,
// using the code length Huffman code.
for i, n := 0, nlit+ndist; i < n; {
x, err := f.huffSym(&f.h1)
if err != nil {
return err
}
if x < 16 {
// Actual length.
f.bits[i] = x
i++
continue
}
// Repeat previous length or zero.
var rep int
var nb uint
var b int
switch x {
default:
return InternalError("unexpected length code")
case 16:
rep = 3
nb = 2
if i == 0 {
if debugDecode {
fmt.Println("i==0")
}
return CorruptInputError(f.roffset)
}
b = f.bits[i-1]
case 17:
rep = 3
nb = 3
b = 0
case 18:
rep = 11
nb = 7
b = 0
}
for f.nb < nb {
if err := f.moreBits(); err != nil {
if debugDecode {
fmt.Println("morebits:", err)
}
return err
}
}
rep += int(f.b & uint32(1<<(nb®SizeMaskUint32)-1))
f.b >>= nb & regSizeMaskUint32
f.nb -= nb
if i+rep > n {
if debugDecode {
fmt.Println("i+rep > n", i, rep, n)
}
return CorruptInputError(f.roffset)
}
for j := 0; j < rep; j++ {
f.bits[i] = b
i++
}
}
if !f.h1.init(f.bits[0:nlit]) || !f.h2.init(f.bits[nlit:nlit+ndist]) {
if debugDecode {
fmt.Println("init2 failed")
}
return CorruptInputError(f.roffset)
}
// As an optimization, we can initialize the maxRead bits to read at a time
// for the HLIT tree to the length of the EOB marker since we know that
// every block must terminate with one. This preserves the property that
// we never read any extra bytes after the end of the DEFLATE stream.
if f.h1.maxRead < f.bits[endBlockMarker] {
f.h1.maxRead = f.bits[endBlockMarker]
}
if !f.final {
// If not the final block, the smallest block possible is
// a predefined table, BTYPE=01, with a single EOB marker.
// This will take up 3 + 7 bits.
f.h1.maxRead += 10
}
return nil
}
// Copy a single uncompressed data block from input to output.
func (f *decompressor) dataBlock() {
	// Uncompressed.
	// Discard current half-byte.
	left := (f.nb) & 7
	f.nb -= left
	f.b >>= left

	// Whole bytes still buffered in f.b count toward the 4-byte LEN/NLEN
	// header; only the remainder is read from f.r.
	offBytes := f.nb >> 3
	// Unfilled values will be overwritten.
	f.buf[0] = uint8(f.b)
	f.buf[1] = uint8(f.b >> 8)
	f.buf[2] = uint8(f.b >> 16)
	f.buf[3] = uint8(f.b >> 24)

	f.roffset += int64(offBytes)
	f.nb, f.b = 0, 0

	// Length then ones-complement of length.
	nr, err := io.ReadFull(f.r, f.buf[offBytes:4])
	f.roffset += int64(nr)
	if err != nil {
		f.err = noEOF(err)
		return
	}
	n := uint16(f.buf[0]) | uint16(f.buf[1])<<8
	nn := uint16(f.buf[2]) | uint16(f.buf[3])<<8
	if nn != ^n {
		// NLEN must be the one's complement of LEN (RFC 1951 section 3.2.4).
		if debugDecode {
			ncomp := ^n
			fmt.Println("uint16(nn) != uint16(^n)", nn, ncomp)
		}
		f.err = CorruptInputError(f.roffset)
		return
	}

	if n == 0 {
		// Zero-length stored block: in syncFlush mode this surfaces
		// buffered output (it is the sync-flush marker).
		if f.flushMode == syncFlush {
			f.toRead = f.dict.readFlush()
		}
		f.finishBlock()
		return
	}

	f.copyLen = int(n)
	f.copyData()
}
// copyData copies f.copyLen bytes from the underlying reader into f.hist.
// It pauses for reads when f.hist is full, recording step=copyData so the
// remainder is copied on the next doStep after the caller drains f.toRead.
func (f *decompressor) copyData() {
	buf := f.dict.writeSlice()
	if len(buf) > f.copyLen {
		buf = buf[:f.copyLen]
	}

	cnt, err := io.ReadFull(f.r, buf)
	f.roffset += int64(cnt)
	f.copyLen -= cnt
	f.dict.writeMark(cnt)
	if err != nil {
		// EOF mid-block means truncated input.
		f.err = noEOF(err)
		return
	}

	if f.dict.availWrite() == 0 || f.copyLen > 0 {
		// Window full or block not finished: flush and resume later.
		f.toRead = f.dict.readFlush()
		f.step = copyData
		return
	}
	f.finishBlock()
}
// finishBlock runs at the end of every block. On the final block it
// flushes any remaining window data and records io.EOF as the sticky
// "done" marker; in partialFlush mode it surfaces the block's output
// immediately. The next step is always nextBlock.
func (f *decompressor) finishBlock() {
	if f.final {
		if f.dict.availRead() > 0 {
			f.toRead = f.dict.readFlush()
		}
		f.err = io.EOF
	} else if f.flushMode == partialFlush && f.dict.availRead() > 0 {
		f.toRead = f.dict.readFlush()
	}

	f.step = nextBlock
}
// doStep runs the single state-machine step recorded in f.step.
// Each step produces output in f.toRead, sets f.err, or both, and
// records the next step before returning.
func (f *decompressor) doStep() {
	switch f.step {
	case copyData:
		f.copyData()
	case nextBlock:
		f.nextBlock()
	case huffmanBytesBuffer:
		f.huffmanBytesBuffer()
	case huffmanBytesReader:
		f.huffmanBytesReader()
	case huffmanBufioReader:
		f.huffmanBufioReader()
	case huffmanStringsReader:
		f.huffmanStringsReader()
	case huffmanGenericReader:
		f.huffmanGenericReader()
	default:
		// step values start at iota+1; the zero value reaching here
		// means the decompressor was never initialized.
		panic("BUG: unexpected step state")
	}
}
// noEOF returns err, unless err == io.EOF, in which case it returns io.ErrUnexpectedEOF.
func noEOF(e error) error {
if e == io.EOF {
return io.ErrUnexpectedEOF
}
return e
}
// moreBits reads one byte from the input and appends its 8 bits above
// the bits already held in f.b, advancing roffset. io.EOF is converted
// to io.ErrUnexpectedEOF because the caller still expected bits.
func (f *decompressor) moreBits() error {
	c, err := f.r.ReadByte()
	if err != nil {
		return noEOF(err)
	}
	f.roffset++
	f.b |= uint32(c) << (f.nb & regSizeMaskUint32)
	f.nb += 8
	return nil
}
// Read the next Huffman-encoded symbol from f according to h.
// Decoding walks the chunk table (and link tables for codes longer than
// huffmanChunkBits); a decoded count of 0 marks an invalid bit sequence
// and yields CorruptInputError.
func (f *decompressor) huffSym(h *huffmanDecoder) (int, error) {
	// Since a huffmanDecoder can be empty or be composed of a degenerate tree
	// with single element, huffSym must error on these two edge cases. In both
	// cases, the chunks slice will be 0 for the invalid sequence, leading it
	// satisfy the n == 0 check below.
	n := uint(h.maxRead)
	// Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
	// but is smart enough to keep local variables in registers, so use nb and b,
	// inline call to moreBits and reassign b,nb back to f on return.
	nb, b := f.nb, f.b
	for {
		for nb < n {
			c, err := f.r.ReadByte()
			if err != nil {
				// Write back cached bit state before returning.
				f.b = b
				f.nb = nb
				return 0, noEOF(err)
			}
			f.roffset++
			b |= uint32(c) << (nb & regSizeMaskUint32)
			nb += 8
		}
		chunk := h.chunks[b&(huffmanNumChunks-1)]
		n = uint(chunk & huffmanCountMask)
		if n > huffmanChunkBits {
			// Long code: second-level lookup through the link table.
			chunk = h.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&h.linkMask]
			n = uint(chunk & huffmanCountMask)
		}
		if n <= nb {
			if n == 0 {
				f.b = b
				f.nb = nb
				if debugDecode {
					fmt.Println("huffsym: n==0")
				}
				f.err = CorruptInputError(f.roffset)
				return 0, f.err
			}
			// Consume exactly n bits and return the symbol.
			f.b = b >> (n & regSizeMaskUint32)
			f.nb = nb - n
			return int(chunk >> huffmanValueShift), nil
		}
	}
}
// makeReader returns r unchanged when it already satisfies Reader
// (io.Reader + io.ByteReader); otherwise it wraps r in a bufio.Reader
// to supply the missing ReadByte method.
func makeReader(r io.Reader) Reader {
	rr, ok := r.(Reader)
	if !ok {
		return bufio.NewReader(r)
	}
	return rr
}
// fixedHuffmanDecoderInit lazily initializes fixedHuffmanDecoder exactly
// once with the fixed literal/length code lengths from RFC 1951.
func fixedHuffmanDecoderInit() {
	fixedOnce.Do(func() {
		// These come from the RFC section 3.2.6.
		var bits [288]int
		for i := 0; i < 144; i++ {
			bits[i] = 8
		}
		for i := 144; i < 256; i++ {
			bits[i] = 9
		}
		for i := 256; i < 280; i++ {
			bits[i] = 7
		}
		for i := 280; i < 288; i++ {
			bits[i] = 8
		}
		fixedHuffmanDecoder.init(bits[:])
	})
}
// Reset discards the current state and switches to reading from r,
// reusing the existing allocations (bit arrays, Huffman decoders, and
// window) and priming the window with dict. It always returns nil.
func (f *decompressor) Reset(r io.Reader, dict []byte) error {
	*f = decompressor{
		r:        makeReader(r),
		bits:     f.bits,
		codebits: f.codebits,
		h1:       f.h1,
		h2:       f.h2,
		dict:     f.dict,
		step:     nextBlock,
	}
	f.dict.init(maxMatchOffset, dict)
	return nil
}
// ReaderOpt configures the decompressor created by NewReaderOpts.
type ReaderOpt func(*decompressor)
// WithPartialBlock tells decompressor to return after each block,
// so it can read data written with partial flush.
func WithPartialBlock() ReaderOpt {
	return func(f *decompressor) {
		f.flushMode = partialFlush
	}
}
// WithDict initializes the reader with a preset dictionary,
// re-initializing the window with the given bytes as history.
func WithDict(dict []byte) ReaderOpt {
	return func(f *decompressor) {
		f.dict.init(maxMatchOffset, dict)
	}
}
// NewReaderOpts returns a new reader with the provided options applied.
// Defaults (empty dictionary, sync-flush mode) are set first, then each
// option is applied in order, so later options override earlier state.
func NewReaderOpts(r io.Reader, opts ...ReaderOpt) io.ReadCloser {
	fixedHuffmanDecoderInit()

	var f decompressor
	f.r = makeReader(r)
	f.bits = new([maxNumLit + maxNumDist]int)
	f.codebits = new([numCodes]int)
	f.step = nextBlock
	f.dict.init(maxMatchOffset, nil)
	for _, opt := range opts {
		opt(&f)
	}

	return &f
}
// NewReader returns a new ReadCloser that can be used
// to read the uncompressed version of r.
// If r does not also implement io.ByteReader,
// the decompressor may read more data than necessary from r.
// It is the caller's responsibility to call Close on the ReadCloser
// when finished reading.
//
// The ReadCloser returned by NewReader also implements Resetter.
func NewReader(r io.Reader) io.ReadCloser {
	return NewReaderOpts(r)
}
// NewReaderDict is like NewReader but initializes the reader
// with a preset dictionary. The returned Reader behaves as if
// the uncompressed data stream started with the given dictionary,
// which has already been read. NewReaderDict is typically used
// to read data compressed by NewWriterDict.
//
// The ReadCloser returned by NewReaderDict also implements Resetter.
func NewReaderDict(r io.Reader, dict []byte) io.ReadCloser {
	return NewReaderOpts(r, WithDict(dict))
}
// Code generated by go generate gen_inflate.go. DO NOT EDIT.
package flate
import (
"bufio"
"bytes"
"fmt"
"math/bits"
"strings"
)
// Decode a single Huffman block from f.
// hl and hd are the Huffman states for the lit/length values
// and the distance values, respectively. If hd == nil, using the
// fixed distance encoding associated with fixed Huffman blocks.
func (f *decompressor) huffmanBytesBuffer() {
const (
stateInit = iota // Zero value must be stateInit
stateDict
)
fr := f.r.(*bytes.Buffer)
// Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
// but is smart enough to keep local variables in registers, so use nb and b,
// inline call to moreBits and reassign b,nb back to f on return.
fnb, fb, dict := f.nb, f.b, &f.dict
switch f.stepState {
case stateInit:
goto readLiteral
case stateDict:
goto copyHistory
}
readLiteral:
// Read literal and/or (length, distance) according to RFC section 3.2.3.
{
var v int
{
// Inlined v, err := f.huffSym(f.hl)
// Since a huffmanDecoder can be empty or be composed of a degenerate tree
// with single element, huffSym must error on these two edge cases. In both
// cases, the chunks slice will be 0 for the invalid sequence, leading it
// satisfy the n == 0 check below.
n := uint(f.hl.maxRead)
for {
for fnb < n {
c, err := fr.ReadByte()
if err != nil {
f.b, f.nb = fb, fnb
f.err = noEOF(err)
return
}
f.roffset++
fb |= uint32(c) << (fnb & regSizeMaskUint32)
fnb += 8
}
chunk := f.hl.chunks[fb&(huffmanNumChunks-1)]
n = uint(chunk & huffmanCountMask)
if n > huffmanChunkBits {
chunk = f.hl.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hl.linkMask]
n = uint(chunk & huffmanCountMask)
}
if n <= fnb {
if n == 0 {
f.b, f.nb = fb, fnb
if debugDecode {
fmt.Println("huffsym: n==0")
}
f.err = CorruptInputError(f.roffset)
return
}
fb = fb >> (n & regSizeMaskUint32)
fnb = fnb - n
v = int(chunk >> huffmanValueShift)
break
}
}
}
var length int
switch {
case v < 256:
dict.writeByte(byte(v))
if dict.availWrite() == 0 {
f.toRead = dict.readFlush()
f.step = huffmanBytesBuffer
f.stepState = stateInit
f.b, f.nb = fb, fnb
return
}
goto readLiteral
case v == 256:
f.b, f.nb = fb, fnb
f.finishBlock()
return
// otherwise, reference to older data
case v < 265:
length = v - (257 - 3)
case v < maxNumLit:
val := decCodeToLen[(v - 257)]
length = int(val.length) + 3
n := uint(val.extra)
for fnb < n {
c, err := fr.ReadByte()
if err != nil {
f.b, f.nb = fb, fnb
if debugDecode {
fmt.Println("morebits n>0:", err)
}
f.err = err
return
}
f.roffset++
fb |= uint32(c) << (fnb & regSizeMaskUint32)
fnb += 8
}
length += int(fb & bitMask32[n])
fb >>= n & regSizeMaskUint32
fnb -= n
default:
if debugDecode {
fmt.Println(v, ">= maxNumLit")
}
f.err = CorruptInputError(f.roffset)
f.b, f.nb = fb, fnb
return
}
var dist uint32
if f.hd == nil {
for fnb < 5 {
c, err := fr.ReadByte()
if err != nil {
f.b, f.nb = fb, fnb
if debugDecode {
fmt.Println("morebits f.nb<5:", err)
}
f.err = err
return
}
f.roffset++
fb |= uint32(c) << (fnb & regSizeMaskUint32)
fnb += 8
}
dist = uint32(bits.Reverse8(uint8(fb & 0x1F << 3)))
fb >>= 5
fnb -= 5
} else {
// Since a huffmanDecoder can be empty or be composed of a degenerate tree
// with single element, huffSym must error on these two edge cases. In both
// cases, the chunks slice will be 0 for the invalid sequence, leading it
// satisfy the n == 0 check below.
n := uint(f.hd.maxRead)
// Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
// but is smart enough to keep local variables in registers, so use nb and b,
// inline call to moreBits and reassign b,nb back to f on return.
for {
for fnb < n {
c, err := fr.ReadByte()
if err != nil {
f.b, f.nb = fb, fnb
f.err = noEOF(err)
return
}
f.roffset++
fb |= uint32(c) << (fnb & regSizeMaskUint32)
fnb += 8
}
chunk := f.hd.chunks[fb&(huffmanNumChunks-1)]
n = uint(chunk & huffmanCountMask)
if n > huffmanChunkBits {
chunk = f.hd.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hd.linkMask]
n = uint(chunk & huffmanCountMask)
}
if n <= fnb {
if n == 0 {
f.b, f.nb = fb, fnb
if debugDecode {
fmt.Println("huffsym: n==0")
}
f.err = CorruptInputError(f.roffset)
return
}
fb = fb >> (n & regSizeMaskUint32)
fnb = fnb - n
dist = uint32(chunk >> huffmanValueShift)
break
}
}
}
switch {
case dist < 4:
dist++
case dist < maxNumDist:
nb := uint(dist-2) >> 1
// have 1 bit in bottom of dist, need nb more.
extra := (dist & 1) << (nb & regSizeMaskUint32)
for fnb < nb {
c, err := fr.ReadByte()
if err != nil {
f.b, f.nb = fb, fnb
if debugDecode {
fmt.Println("morebits f.nb<nb:", err)
}
f.err = err
return
}
f.roffset++
fb |= uint32(c) << (fnb & regSizeMaskUint32)
fnb += 8
}
extra |= fb & bitMask32[nb]
fb >>= nb & regSizeMaskUint32
fnb -= nb
dist = 1<<((nb+1)®SizeMaskUint32) + 1 + extra
// slower: dist = bitMask32[nb+1] + 2 + extra
default:
f.b, f.nb = fb, fnb
if debugDecode {
fmt.Println("dist too big:", dist, maxNumDist)
}
f.err = CorruptInputError(f.roffset)
return
}
// No check on length; encoding can be prescient.
if dist > uint32(dict.histSize()) {
f.b, f.nb = fb, fnb
if debugDecode {
fmt.Println("dist > dict.histSize():", dist, dict.histSize())
}
f.err = CorruptInputError(f.roffset)
return
}
f.copyLen, f.copyDist = length, int(dist)
goto copyHistory
}
copyHistory:
// Perform a backwards copy according to RFC section 3.2.3.
{
cnt := dict.tryWriteCopy(f.copyDist, f.copyLen)
if cnt == 0 {
cnt = dict.writeCopy(f.copyDist, f.copyLen)
}
f.copyLen -= cnt
if dict.availWrite() == 0 || f.copyLen > 0 {
f.toRead = dict.readFlush()
f.step = huffmanBytesBuffer // We need to continue this work
f.stepState = stateDict
f.b, f.nb = fb, fnb
return
}
goto readLiteral
}
// Not reached
}
// Decode a single Huffman block from f.
// hl and hd are the Huffman states for the lit/length values
// and the distance values, respectively. If hd == nil, using the
// fixed distance encoding associated with fixed Huffman blocks.
func (f *decompressor) huffmanBytesReader() {
const (
stateInit = iota // Zero value must be stateInit
stateDict
)
fr := f.r.(*bytes.Reader)
// Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
// but is smart enough to keep local variables in registers, so use nb and b,
// inline call to moreBits and reassign b,nb back to f on return.
fnb, fb, dict := f.nb, f.b, &f.dict
switch f.stepState {
case stateInit:
goto readLiteral
case stateDict:
goto copyHistory
}
readLiteral:
// Read literal and/or (length, distance) according to RFC section 3.2.3.
{
var v int
{
// Inlined v, err := f.huffSym(f.hl)
// Since a huffmanDecoder can be empty or be composed of a degenerate tree
// with single element, huffSym must error on these two edge cases. In both
// cases, the chunks slice will be 0 for the invalid sequence, leading it
// satisfy the n == 0 check below.
n := uint(f.hl.maxRead)
for {
for fnb < n {
c, err := fr.ReadByte()
if err != nil {
f.b, f.nb = fb, fnb
f.err = noEOF(err)
return
}
f.roffset++
fb |= uint32(c) << (fnb & regSizeMaskUint32)
fnb += 8
}
chunk := f.hl.chunks[fb&(huffmanNumChunks-1)]
n = uint(chunk & huffmanCountMask)
if n > huffmanChunkBits {
chunk = f.hl.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hl.linkMask]
n = uint(chunk & huffmanCountMask)
}
if n <= fnb {
if n == 0 {
f.b, f.nb = fb, fnb
if debugDecode {
fmt.Println("huffsym: n==0")
}
f.err = CorruptInputError(f.roffset)
return
}
fb = fb >> (n & regSizeMaskUint32)
fnb = fnb - n
v = int(chunk >> huffmanValueShift)
break
}
}
}
var length int
switch {
case v < 256:
dict.writeByte(byte(v))
if dict.availWrite() == 0 {
f.toRead = dict.readFlush()
f.step = huffmanBytesReader
f.stepState = stateInit
f.b, f.nb = fb, fnb
return
}
goto readLiteral
case v == 256:
f.b, f.nb = fb, fnb
f.finishBlock()
return
// otherwise, reference to older data
case v < 265:
length = v - (257 - 3)
case v < maxNumLit:
val := decCodeToLen[(v - 257)]
length = int(val.length) + 3
n := uint(val.extra)
for fnb < n {
c, err := fr.ReadByte()
if err != nil {
f.b, f.nb = fb, fnb
if debugDecode {
fmt.Println("morebits n>0:", err)
}
f.err = err
return
}
f.roffset++
fb |= uint32(c) << (fnb & regSizeMaskUint32)
fnb += 8
}
length += int(fb & bitMask32[n])
fb >>= n & regSizeMaskUint32
fnb -= n
default:
if debugDecode {
fmt.Println(v, ">= maxNumLit")
}
f.err = CorruptInputError(f.roffset)
f.b, f.nb = fb, fnb
return
}
var dist uint32
if f.hd == nil {
for fnb < 5 {
c, err := fr.ReadByte()
if err != nil {
f.b, f.nb = fb, fnb
if debugDecode {
fmt.Println("morebits f.nb<5:", err)
}
f.err = err
return
}
f.roffset++
fb |= uint32(c) << (fnb & regSizeMaskUint32)
fnb += 8
}
dist = uint32(bits.Reverse8(uint8(fb & 0x1F << 3)))
fb >>= 5
fnb -= 5
} else {
// Since a huffmanDecoder can be empty or be composed of a degenerate tree
// with single element, huffSym must error on these two edge cases. In both
// cases, the chunks slice will be 0 for the invalid sequence, leading it
// satisfy the n == 0 check below.
n := uint(f.hd.maxRead)
// Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
// but is smart enough to keep local variables in registers, so use nb and b,
// inline call to moreBits and reassign b,nb back to f on return.
for {
for fnb < n {
c, err := fr.ReadByte()
if err != nil {
f.b, f.nb = fb, fnb
f.err = noEOF(err)
return
}
f.roffset++
fb |= uint32(c) << (fnb & regSizeMaskUint32)
fnb += 8
}
chunk := f.hd.chunks[fb&(huffmanNumChunks-1)]
n = uint(chunk & huffmanCountMask)
if n > huffmanChunkBits {
chunk = f.hd.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hd.linkMask]
n = uint(chunk & huffmanCountMask)
}
if n <= fnb {
if n == 0 {
f.b, f.nb = fb, fnb
if debugDecode {
fmt.Println("huffsym: n==0")
}
f.err = CorruptInputError(f.roffset)
return
}
fb = fb >> (n & regSizeMaskUint32)
fnb = fnb - n
dist = uint32(chunk >> huffmanValueShift)
break
}
}
}
switch {
case dist < 4:
dist++
case dist < maxNumDist:
nb := uint(dist-2) >> 1
// have 1 bit in bottom of dist, need nb more.
extra := (dist & 1) << (nb & regSizeMaskUint32)
for fnb < nb {
c, err := fr.ReadByte()
if err != nil {
f.b, f.nb = fb, fnb
if debugDecode {
fmt.Println("morebits f.nb<nb:", err)
}
f.err = err
return
}
f.roffset++
fb |= uint32(c) << (fnb & regSizeMaskUint32)
fnb += 8
}
extra |= fb & bitMask32[nb]
fb >>= nb & regSizeMaskUint32
fnb -= nb
dist = 1<<((nb+1)®SizeMaskUint32) + 1 + extra
// slower: dist = bitMask32[nb+1] + 2 + extra
default:
f.b, f.nb = fb, fnb
if debugDecode {
fmt.Println("dist too big:", dist, maxNumDist)
}
f.err = CorruptInputError(f.roffset)
return
}
// No check on length; encoding can be prescient.
if dist > uint32(dict.histSize()) {
f.b, f.nb = fb, fnb
if debugDecode {
fmt.Println("dist > dict.histSize():", dist, dict.histSize())
}
f.err = CorruptInputError(f.roffset)
return
}
f.copyLen, f.copyDist = length, int(dist)
goto copyHistory
}
copyHistory:
// Perform a backwards copy according to RFC section 3.2.3.
{
cnt := dict.tryWriteCopy(f.copyDist, f.copyLen)
if cnt == 0 {
cnt = dict.writeCopy(f.copyDist, f.copyLen)
}
f.copyLen -= cnt
if dict.availWrite() == 0 || f.copyLen > 0 {
f.toRead = dict.readFlush()
f.step = huffmanBytesReader // We need to continue this work
f.stepState = stateDict
f.b, f.nb = fb, fnb
return
}
goto readLiteral
}
// Not reached
}
// Decode a single Huffman block from f.
// hl and hd are the Huffman states for the lit/length values
// and the distance values, respectively. If hd == nil, using the
// fixed distance encoding associated with fixed Huffman blocks.
func (f *decompressor) huffmanBufioReader() {
const (
stateInit = iota // Zero value must be stateInit
stateDict
)
fr := f.r.(*bufio.Reader)
// Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
// but is smart enough to keep local variables in registers, so use nb and b,
// inline call to moreBits and reassign b,nb back to f on return.
fnb, fb, dict := f.nb, f.b, &f.dict
switch f.stepState {
case stateInit:
goto readLiteral
case stateDict:
goto copyHistory
}
readLiteral:
// Read literal and/or (length, distance) according to RFC section 3.2.3.
{
var v int
{
// Inlined v, err := f.huffSym(f.hl)
// Since a huffmanDecoder can be empty or be composed of a degenerate tree
// with single element, huffSym must error on these two edge cases. In both
// cases, the chunks slice will be 0 for the invalid sequence, leading it
// satisfy the n == 0 check below.
n := uint(f.hl.maxRead)
for {
for fnb < n {
c, err := fr.ReadByte()
if err != nil {
f.b, f.nb = fb, fnb
f.err = noEOF(err)
return
}
f.roffset++
fb |= uint32(c) << (fnb & regSizeMaskUint32)
fnb += 8
}
chunk := f.hl.chunks[fb&(huffmanNumChunks-1)]
n = uint(chunk & huffmanCountMask)
if n > huffmanChunkBits {
chunk = f.hl.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hl.linkMask]
n = uint(chunk & huffmanCountMask)
}
if n <= fnb {
if n == 0 {
f.b, f.nb = fb, fnb
if debugDecode {
fmt.Println("huffsym: n==0")
}
f.err = CorruptInputError(f.roffset)
return
}
fb = fb >> (n & regSizeMaskUint32)
fnb = fnb - n
v = int(chunk >> huffmanValueShift)
break
}
}
}
var length int
switch {
case v < 256:
dict.writeByte(byte(v))
if dict.availWrite() == 0 {
f.toRead = dict.readFlush()
f.step = huffmanBufioReader
f.stepState = stateInit
f.b, f.nb = fb, fnb
return
}
goto readLiteral
case v == 256:
f.b, f.nb = fb, fnb
f.finishBlock()
return
// otherwise, reference to older data
case v < 265:
length = v - (257 - 3)
case v < maxNumLit:
val := decCodeToLen[(v - 257)]
length = int(val.length) + 3
n := uint(val.extra)
for fnb < n {
c, err := fr.ReadByte()
if err != nil {
f.b, f.nb = fb, fnb
if debugDecode {
fmt.Println("morebits n>0:", err)
}
f.err = err
return
}
f.roffset++
fb |= uint32(c) << (fnb & regSizeMaskUint32)
fnb += 8
}
length += int(fb & bitMask32[n])
fb >>= n & regSizeMaskUint32
fnb -= n
default:
if debugDecode {
fmt.Println(v, ">= maxNumLit")
}
f.err = CorruptInputError(f.roffset)
f.b, f.nb = fb, fnb
return
}
var dist uint32
if f.hd == nil {
for fnb < 5 {
c, err := fr.ReadByte()
if err != nil {
f.b, f.nb = fb, fnb
if debugDecode {
fmt.Println("morebits f.nb<5:", err)
}
f.err = err
return
}
f.roffset++
fb |= uint32(c) << (fnb & regSizeMaskUint32)
fnb += 8
}
dist = uint32(bits.Reverse8(uint8(fb & 0x1F << 3)))
fb >>= 5
fnb -= 5
} else {
// Since a huffmanDecoder can be empty or be composed of a degenerate tree
// with single element, huffSym must error on these two edge cases. In both
// cases, the chunks slice will be 0 for the invalid sequence, leading it
// satisfy the n == 0 check below.
n := uint(f.hd.maxRead)
// Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
// but is smart enough to keep local variables in registers, so use nb and b,
// inline call to moreBits and reassign b,nb back to f on return.
for {
for fnb < n {
c, err := fr.ReadByte()
if err != nil {
f.b, f.nb = fb, fnb
f.err = noEOF(err)
return
}
f.roffset++
fb |= uint32(c) << (fnb & regSizeMaskUint32)
fnb += 8
}
chunk := f.hd.chunks[fb&(huffmanNumChunks-1)]
n = uint(chunk & huffmanCountMask)
if n > huffmanChunkBits {
chunk = f.hd.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hd.linkMask]
n = uint(chunk & huffmanCountMask)
}
if n <= fnb {
if n == 0 {
f.b, f.nb = fb, fnb
if debugDecode {
fmt.Println("huffsym: n==0")
}
f.err = CorruptInputError(f.roffset)
return
}
fb = fb >> (n & regSizeMaskUint32)
fnb = fnb - n
dist = uint32(chunk >> huffmanValueShift)
break
}
}
}
switch {
case dist < 4:
dist++
case dist < maxNumDist:
nb := uint(dist-2) >> 1
// have 1 bit in bottom of dist, need nb more.
extra := (dist & 1) << (nb & regSizeMaskUint32)
for fnb < nb {
c, err := fr.ReadByte()
if err != nil {
f.b, f.nb = fb, fnb
if debugDecode {
fmt.Println("morebits f.nb<nb:", err)
}
f.err = err
return
}
f.roffset++
fb |= uint32(c) << (fnb & regSizeMaskUint32)
fnb += 8
}
extra |= fb & bitMask32[nb]
fb >>= nb & regSizeMaskUint32
fnb -= nb
dist = 1<<((nb+1)®SizeMaskUint32) + 1 + extra
// slower: dist = bitMask32[nb+1] + 2 + extra
default:
f.b, f.nb = fb, fnb
if debugDecode {
fmt.Println("dist too big:", dist, maxNumDist)
}
f.err = CorruptInputError(f.roffset)
return
}
// No check on length; encoding can be prescient.
if dist > uint32(dict.histSize()) {
f.b, f.nb = fb, fnb
if debugDecode {
fmt.Println("dist > dict.histSize():", dist, dict.histSize())
}
f.err = CorruptInputError(f.roffset)
return
}
f.copyLen, f.copyDist = length, int(dist)
goto copyHistory
}
copyHistory:
// Perform a backwards copy according to RFC section 3.2.3.
{
cnt := dict.tryWriteCopy(f.copyDist, f.copyLen)
if cnt == 0 {
cnt = dict.writeCopy(f.copyDist, f.copyLen)
}
f.copyLen -= cnt
if dict.availWrite() == 0 || f.copyLen > 0 {
f.toRead = dict.readFlush()
f.step = huffmanBufioReader // We need to continue this work
f.stepState = stateDict
f.b, f.nb = fb, fnb
return
}
goto readLiteral
}
// Not reached
}
// Decode a single Huffman block from f.
// hl and hd are the Huffman states for the lit/length values
// and the distance values, respectively. If hd == nil, using the
// fixed distance encoding associated with fixed Huffman blocks.
func (f *decompressor) huffmanStringsReader() {
const (
stateInit = iota // Zero value must be stateInit
stateDict
)
fr := f.r.(*strings.Reader)
// Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
// but is smart enough to keep local variables in registers, so use nb and b,
// inline call to moreBits and reassign b,nb back to f on return.
fnb, fb, dict := f.nb, f.b, &f.dict
switch f.stepState {
case stateInit:
goto readLiteral
case stateDict:
goto copyHistory
}
readLiteral:
// Read literal and/or (length, distance) according to RFC section 3.2.3.
{
var v int
{
// Inlined v, err := f.huffSym(f.hl)
// Since a huffmanDecoder can be empty or be composed of a degenerate tree
// with single element, huffSym must error on these two edge cases. In both
// cases, the chunks slice will be 0 for the invalid sequence, leading it
// satisfy the n == 0 check below.
n := uint(f.hl.maxRead)
for {
for fnb < n {
c, err := fr.ReadByte()
if err != nil {
f.b, f.nb = fb, fnb
f.err = noEOF(err)
return
}
f.roffset++
fb |= uint32(c) << (fnb & regSizeMaskUint32)
fnb += 8
}
chunk := f.hl.chunks[fb&(huffmanNumChunks-1)]
n = uint(chunk & huffmanCountMask)
if n > huffmanChunkBits {
chunk = f.hl.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hl.linkMask]
n = uint(chunk & huffmanCountMask)
}
if n <= fnb {
if n == 0 {
f.b, f.nb = fb, fnb
if debugDecode {
fmt.Println("huffsym: n==0")
}
f.err = CorruptInputError(f.roffset)
return
}
fb = fb >> (n & regSizeMaskUint32)
fnb = fnb - n
v = int(chunk >> huffmanValueShift)
break
}
}
}
var length int
switch {
case v < 256:
dict.writeByte(byte(v))
if dict.availWrite() == 0 {
f.toRead = dict.readFlush()
f.step = huffmanStringsReader
f.stepState = stateInit
f.b, f.nb = fb, fnb
return
}
goto readLiteral
case v == 256:
f.b, f.nb = fb, fnb
f.finishBlock()
return
// otherwise, reference to older data
case v < 265:
length = v - (257 - 3)
case v < maxNumLit:
val := decCodeToLen[(v - 257)]
length = int(val.length) + 3
n := uint(val.extra)
for fnb < n {
c, err := fr.ReadByte()
if err != nil {
f.b, f.nb = fb, fnb
if debugDecode {
fmt.Println("morebits n>0:", err)
}
f.err = err
return
}
f.roffset++
fb |= uint32(c) << (fnb & regSizeMaskUint32)
fnb += 8
}
length += int(fb & bitMask32[n])
fb >>= n & regSizeMaskUint32
fnb -= n
default:
if debugDecode {
fmt.Println(v, ">= maxNumLit")
}
f.err = CorruptInputError(f.roffset)
f.b, f.nb = fb, fnb
return
}
var dist uint32
if f.hd == nil {
for fnb < 5 {
c, err := fr.ReadByte()
if err != nil {
f.b, f.nb = fb, fnb
if debugDecode {
fmt.Println("morebits f.nb<5:", err)
}
f.err = err
return
}
f.roffset++
fb |= uint32(c) << (fnb & regSizeMaskUint32)
fnb += 8
}
dist = uint32(bits.Reverse8(uint8(fb & 0x1F << 3)))
fb >>= 5
fnb -= 5
} else {
// Since a huffmanDecoder can be empty or be composed of a degenerate tree
// with single element, huffSym must error on these two edge cases. In both
// cases, the chunks slice will be 0 for the invalid sequence, leading it
// satisfy the n == 0 check below.
n := uint(f.hd.maxRead)
// Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
// but is smart enough to keep local variables in registers, so use nb and b,
// inline call to moreBits and reassign b,nb back to f on return.
for {
for fnb < n {
c, err := fr.ReadByte()
if err != nil {
f.b, f.nb = fb, fnb
f.err = noEOF(err)
return
}
f.roffset++
fb |= uint32(c) << (fnb & regSizeMaskUint32)
fnb += 8
}
chunk := f.hd.chunks[fb&(huffmanNumChunks-1)]
n = uint(chunk & huffmanCountMask)
if n > huffmanChunkBits {
chunk = f.hd.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hd.linkMask]
n = uint(chunk & huffmanCountMask)
}
if n <= fnb {
if n == 0 {
f.b, f.nb = fb, fnb
if debugDecode {
fmt.Println("huffsym: n==0")
}
f.err = CorruptInputError(f.roffset)
return
}
fb = fb >> (n & regSizeMaskUint32)
fnb = fnb - n
dist = uint32(chunk >> huffmanValueShift)
break
}
}
}
switch {
case dist < 4:
dist++
case dist < maxNumDist:
nb := uint(dist-2) >> 1
// have 1 bit in bottom of dist, need nb more.
extra := (dist & 1) << (nb & regSizeMaskUint32)
for fnb < nb {
c, err := fr.ReadByte()
if err != nil {
f.b, f.nb = fb, fnb
if debugDecode {
fmt.Println("morebits f.nb<nb:", err)
}
f.err = err
return
}
f.roffset++
fb |= uint32(c) << (fnb & regSizeMaskUint32)
fnb += 8
}
extra |= fb & bitMask32[nb]
fb >>= nb & regSizeMaskUint32
fnb -= nb
dist = 1<<((nb+1)®SizeMaskUint32) + 1 + extra
// slower: dist = bitMask32[nb+1] + 2 + extra
default:
f.b, f.nb = fb, fnb
if debugDecode {
fmt.Println("dist too big:", dist, maxNumDist)
}
f.err = CorruptInputError(f.roffset)
return
}
// No check on length; encoding can be prescient.
if dist > uint32(dict.histSize()) {
f.b, f.nb = fb, fnb
if debugDecode {
fmt.Println("dist > dict.histSize():", dist, dict.histSize())
}
f.err = CorruptInputError(f.roffset)
return
}
f.copyLen, f.copyDist = length, int(dist)
goto copyHistory
}
copyHistory:
// Perform a backwards copy according to RFC section 3.2.3.
{
cnt := dict.tryWriteCopy(f.copyDist, f.copyLen)
if cnt == 0 {
cnt = dict.writeCopy(f.copyDist, f.copyLen)
}
f.copyLen -= cnt
if dict.availWrite() == 0 || f.copyLen > 0 {
f.toRead = dict.readFlush()
f.step = huffmanStringsReader // We need to continue this work
f.stepState = stateDict
f.b, f.nb = fb, fnb
return
}
goto readLiteral
}
// Not reached
}
// Decode a single Huffman block from f.
// hl and hd are the Huffman states for the lit/length values
// and the distance values, respectively. If hd == nil, using the
// fixed distance encoding associated with fixed Huffman blocks.
func (f *decompressor) huffmanGenericReader() {
const (
stateInit = iota // Zero value must be stateInit
stateDict
)
fr := f.r.(Reader)
// Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
// but is smart enough to keep local variables in registers, so use nb and b,
// inline call to moreBits and reassign b,nb back to f on return.
fnb, fb, dict := f.nb, f.b, &f.dict
switch f.stepState {
case stateInit:
goto readLiteral
case stateDict:
goto copyHistory
}
readLiteral:
// Read literal and/or (length, distance) according to RFC section 3.2.3.
{
var v int
{
// Inlined v, err := f.huffSym(f.hl)
// Since a huffmanDecoder can be empty or be composed of a degenerate tree
// with single element, huffSym must error on these two edge cases. In both
// cases, the chunks slice will be 0 for the invalid sequence, leading it
// satisfy the n == 0 check below.
n := uint(f.hl.maxRead)
for {
for fnb < n {
c, err := fr.ReadByte()
if err != nil {
f.b, f.nb = fb, fnb
f.err = noEOF(err)
return
}
f.roffset++
fb |= uint32(c) << (fnb & regSizeMaskUint32)
fnb += 8
}
chunk := f.hl.chunks[fb&(huffmanNumChunks-1)]
n = uint(chunk & huffmanCountMask)
if n > huffmanChunkBits {
chunk = f.hl.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hl.linkMask]
n = uint(chunk & huffmanCountMask)
}
if n <= fnb {
if n == 0 {
f.b, f.nb = fb, fnb
if debugDecode {
fmt.Println("huffsym: n==0")
}
f.err = CorruptInputError(f.roffset)
return
}
fb = fb >> (n & regSizeMaskUint32)
fnb = fnb - n
v = int(chunk >> huffmanValueShift)
break
}
}
}
var length int
switch {
case v < 256:
dict.writeByte(byte(v))
if dict.availWrite() == 0 {
f.toRead = dict.readFlush()
f.step = huffmanGenericReader
f.stepState = stateInit
f.b, f.nb = fb, fnb
return
}
goto readLiteral
case v == 256:
f.b, f.nb = fb, fnb
f.finishBlock()
return
// otherwise, reference to older data
case v < 265:
length = v - (257 - 3)
case v < maxNumLit:
val := decCodeToLen[(v - 257)]
length = int(val.length) + 3
n := uint(val.extra)
for fnb < n {
c, err := fr.ReadByte()
if err != nil {
f.b, f.nb = fb, fnb
if debugDecode {
fmt.Println("morebits n>0:", err)
}
f.err = err
return
}
f.roffset++
fb |= uint32(c) << (fnb & regSizeMaskUint32)
fnb += 8
}
length += int(fb & bitMask32[n])
fb >>= n & regSizeMaskUint32
fnb -= n
default:
if debugDecode {
fmt.Println(v, ">= maxNumLit")
}
f.err = CorruptInputError(f.roffset)
f.b, f.nb = fb, fnb
return
}
var dist uint32
if f.hd == nil {
for fnb < 5 {
c, err := fr.ReadByte()
if err != nil {
f.b, f.nb = fb, fnb
if debugDecode {
fmt.Println("morebits f.nb<5:", err)
}
f.err = err
return
}
f.roffset++
fb |= uint32(c) << (fnb & regSizeMaskUint32)
fnb += 8
}
dist = uint32(bits.Reverse8(uint8(fb & 0x1F << 3)))
fb >>= 5
fnb -= 5
} else {
// Since a huffmanDecoder can be empty or be composed of a degenerate tree
// with single element, huffSym must error on these two edge cases. In both
// cases, the chunks slice will be 0 for the invalid sequence, leading it
// satisfy the n == 0 check below.
n := uint(f.hd.maxRead)
// Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
// but is smart enough to keep local variables in registers, so use nb and b,
// inline call to moreBits and reassign b,nb back to f on return.
for {
for fnb < n {
c, err := fr.ReadByte()
if err != nil {
f.b, f.nb = fb, fnb
f.err = noEOF(err)
return
}
f.roffset++
fb |= uint32(c) << (fnb & regSizeMaskUint32)
fnb += 8
}
chunk := f.hd.chunks[fb&(huffmanNumChunks-1)]
n = uint(chunk & huffmanCountMask)
if n > huffmanChunkBits {
chunk = f.hd.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hd.linkMask]
n = uint(chunk & huffmanCountMask)
}
if n <= fnb {
if n == 0 {
f.b, f.nb = fb, fnb
if debugDecode {
fmt.Println("huffsym: n==0")
}
f.err = CorruptInputError(f.roffset)
return
}
fb = fb >> (n & regSizeMaskUint32)
fnb = fnb - n
dist = uint32(chunk >> huffmanValueShift)
break
}
}
}
switch {
case dist < 4:
dist++
case dist < maxNumDist:
nb := uint(dist-2) >> 1
// have 1 bit in bottom of dist, need nb more.
extra := (dist & 1) << (nb & regSizeMaskUint32)
for fnb < nb {
c, err := fr.ReadByte()
if err != nil {
f.b, f.nb = fb, fnb
if debugDecode {
fmt.Println("morebits f.nb<nb:", err)
}
f.err = err
return
}
f.roffset++
fb |= uint32(c) << (fnb & regSizeMaskUint32)
fnb += 8
}
extra |= fb & bitMask32[nb]
fb >>= nb & regSizeMaskUint32
fnb -= nb
dist = 1<<((nb+1)®SizeMaskUint32) + 1 + extra
// slower: dist = bitMask32[nb+1] + 2 + extra
default:
f.b, f.nb = fb, fnb
if debugDecode {
fmt.Println("dist too big:", dist, maxNumDist)
}
f.err = CorruptInputError(f.roffset)
return
}
// No check on length; encoding can be prescient.
if dist > uint32(dict.histSize()) {
f.b, f.nb = fb, fnb
if debugDecode {
fmt.Println("dist > dict.histSize():", dist, dict.histSize())
}
f.err = CorruptInputError(f.roffset)
return
}
f.copyLen, f.copyDist = length, int(dist)
goto copyHistory
}
copyHistory:
// Perform a backwards copy according to RFC section 3.2.3.
{
cnt := dict.tryWriteCopy(f.copyDist, f.copyLen)
if cnt == 0 {
cnt = dict.writeCopy(f.copyDist, f.copyLen)
}
f.copyLen -= cnt
if dict.availWrite() == 0 || f.copyLen > 0 {
f.toRead = dict.readFlush()
f.step = huffmanGenericReader // We need to continue this work
f.stepState = stateDict
f.b, f.nb = fb, fnb
return
}
goto readLiteral
}
// Not reached
}
// huffmanBlockDecoder selects the Huffman block decoder specialized for the
// concrete type of the underlying reader, falling back to the generic
// implementation for any other source.
func (f *decompressor) huffmanBlockDecoder() {
	if _, ok := f.r.(*bytes.Buffer); ok {
		f.huffmanBytesBuffer()
		return
	}
	if _, ok := f.r.(*bytes.Reader); ok {
		f.huffmanBytesReader()
		return
	}
	if _, ok := f.r.(*bufio.Reader); ok {
		f.huffmanBufioReader()
		return
	}
	if _, ok := f.r.(*strings.Reader); ok {
		f.huffmanStringsReader()
		return
	}
	// Plain Reader implementations and anything else share the generic path.
	f.huffmanGenericReader()
}
package flate
import (
"fmt"
"github.com/klauspost/compress/internal/le"
)
// fastEncL1 is the level 1 (BestSpeed) encoder. It embeds fastGen, which
// maintains the history buffer for cross-block matching, and keeps a single
// hash table of the most recent position for each hashed byte sequence.
type fastEncL1 struct {
	fastGen
	table [tableSize]tableEntry // most recent offset per hash bucket
}
// Encode compresses src into dst tokens using the level 1 (BestSpeed)
// algorithm: a single hash table, one candidate per position, and inlined
// literal/match emission for speed.
func (e *fastEncL1) Encode(dst *tokens, src []byte) {
	const (
		inputMargin            = 12 - 1
		minNonLiteralBlockSize = 1 + 1 + inputMargin
		hashBytes              = 5 // number of bytes hashed per table entry
	)
	if debugDeflate && e.cur < 0 {
		panic(fmt.Sprint("e.cur < 0: ", e.cur))
	}

	// Protect against e.cur wraparound.
	for e.cur >= bufferReset {
		if len(e.hist) == 0 {
			// No history: just clear the table and reset the base offset.
			for i := range e.table[:] {
				e.table[i] = tableEntry{}
			}
			e.cur = maxMatchOffset
			break
		}
		// Shift down everything in the table that isn't already too far away.
		minOff := e.cur + int32(len(e.hist)) - maxMatchOffset
		for i := range e.table[:] {
			v := e.table[i].offset
			if v <= minOff {
				v = 0
			} else {
				v = v - e.cur + maxMatchOffset
			}
			e.table[i].offset = v
		}
		e.cur = maxMatchOffset
	}

	s := e.addBlock(src)

	// This check isn't in the Snappy implementation, but there, the caller
	// instead of the callee handles this case.
	if len(src) < minNonLiteralBlockSize {
		// We do not fill the token table.
		// This will be picked up by caller.
		dst.n = uint16(len(src))
		return
	}

	// Override src
	src = e.hist
	nextEmit := s

	// sLimit is when to stop looking for offset/length copies. The inputMargin
	// lets us use a fast path for emitLiteral in the main loop, while we are
	// looking for copies.
	sLimit := int32(len(src) - inputMargin)

	// nextEmit is where in src the next emitLiteral should start from.
	cv := load6432(src, s)

	for {
		const skipLog = 5 // skip length grows as 1 + (bytes since last match)>>skipLog
		const doEvery = 2

		nextS := s
		var candidate tableEntry
		var t int32
		for {
			nextHash := hashLen(cv, tableBits, hashBytes)
			candidate = e.table[nextHash]
			nextS = s + doEvery + (s-nextEmit)>>skipLog
			if nextS > sLimit {
				goto emitRemainder
			}

			now := load6432(src, nextS)
			e.table[nextHash] = tableEntry{offset: s + e.cur}
			nextHash = hashLen(now, tableBits, hashBytes)
			t = candidate.offset - e.cur
			if s-t < maxMatchOffset && uint32(cv) == load3232(src, t) {
				e.table[nextHash] = tableEntry{offset: nextS + e.cur}
				break
			}

			// Do one right away...
			cv = now
			s = nextS
			nextS++
			candidate = e.table[nextHash]
			now >>= 8
			e.table[nextHash] = tableEntry{offset: s + e.cur}

			t = candidate.offset - e.cur
			if s-t < maxMatchOffset && uint32(cv) == load3232(src, t) {
				e.table[nextHash] = tableEntry{offset: nextS + e.cur}
				break
			}
			cv = now
			s = nextS
		}

		// A 4-byte match has been found. We'll later see if more than 4 bytes
		// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
		// them as literal bytes.
		for {
			// Invariant: we have a 4-byte match at s, and no need to emit any
			// literal bytes prior to s.

			// Extend the 4-byte match as long as possible.
			l := e.matchlenLong(int(s+4), int(t+4), src) + 4

			// Extend backwards
			for t > 0 && s > nextEmit && le.Load8(src, t-1) == le.Load8(src, s-1) {
				s--
				t--
				l++
			}
			if nextEmit < s {
				if false {
					emitLiteral(dst, src[nextEmit:s])
				} else {
					// Inlined literal emission, faster than emitLiteral.
					for _, v := range src[nextEmit:s] {
						dst.tokens[dst.n] = token(v)
						dst.litHist[v]++
						dst.n++
					}
				}
			}

			// Save the match found
			if false {
				dst.AddMatchLong(l, uint32(s-t-baseMatchOffset))
			} else {
				// Inlined...
				xoffset := uint32(s - t - baseMatchOffset)
				xlength := l
				oc := offsetCode(xoffset)
				xoffset |= oc << 16
				// Long matches are split into chunks of at most 258 bytes,
				// never leaving a remainder shorter than baseMatchLength.
				for xlength > 0 {
					xl := xlength
					if xl > 258 {
						if xl > 258+baseMatchLength {
							xl = 258
						} else {
							xl = 258 - baseMatchLength
						}
					}
					xlength -= xl
					xl -= baseMatchLength
					dst.extraHist[lengthCodes1[uint8(xl)]]++
					dst.offHist[oc]++
					dst.tokens[dst.n] = token(matchType | uint32(xl)<<lengthShift | xoffset)
					dst.n++
				}
			}
			s += l
			nextEmit = s
			if nextS >= s {
				s = nextS + 1
			}
			if s >= sLimit {
				// Index first pair after match end.
				if int(s+l+8) < len(src) {
					cv := load6432(src, s)
					e.table[hashLen(cv, tableBits, hashBytes)] = tableEntry{offset: s + e.cur}
				}
				goto emitRemainder
			}

			// We could immediately start working at s now, but to improve
			// compression we first update the hash table at s-2 and at s. If
			// another emitCopy is not our next move, also calculate nextHash
			// at s+1. At least on GOARCH=amd64, these three hash calculations
			// are faster as one load64 call (with some shifts) instead of
			// three load32 calls.
			x := load6432(src, s-2)
			o := e.cur + s - 2
			prevHash := hashLen(x, tableBits, hashBytes)
			e.table[prevHash] = tableEntry{offset: o}
			x >>= 16
			currHash := hashLen(x, tableBits, hashBytes)
			candidate = e.table[currHash]
			e.table[currHash] = tableEntry{offset: o + 2}

			t = candidate.offset - e.cur
			if s-t > maxMatchOffset || uint32(x) != load3232(src, t) {
				cv = x >> 8
				s++
				break
			}
		}
	}

emitRemainder:
	if int(nextEmit) < len(src) {
		// If nothing was added, don't encode literals.
		if dst.n == 0 {
			return
		}
		emitLiteral(dst, src[nextEmit:])
	}
}
package flate
import "fmt"
// fastEncL2 is the level 2 encoder. It embeds fastGen, which maintains the
// history buffer for cross-block matching, and uses a larger hash table
// (bTableSize) than level 1 for better match finding.
type fastEncL2 struct {
	fastGen
	table [bTableSize]tableEntry // most recent offset per hash bucket
}
// Encode uses a similar algorithm to level 1, but is capable
// of matching across blocks giving better compression at a small slowdown.
func (e *fastEncL2) Encode(dst *tokens, src []byte) {
	const (
		inputMargin            = 12 - 1
		minNonLiteralBlockSize = 1 + 1 + inputMargin
		hashBytes              = 5 // number of bytes hashed per table entry
	)

	if debugDeflate && e.cur < 0 {
		panic(fmt.Sprint("e.cur < 0: ", e.cur))
	}

	// Protect against e.cur wraparound.
	for e.cur >= bufferReset {
		if len(e.hist) == 0 {
			// No history: just clear the table and reset the base offset.
			for i := range e.table[:] {
				e.table[i] = tableEntry{}
			}
			e.cur = maxMatchOffset
			break
		}
		// Shift down everything in the table that isn't already too far away.
		minOff := e.cur + int32(len(e.hist)) - maxMatchOffset
		for i := range e.table[:] {
			v := e.table[i].offset
			if v <= minOff {
				v = 0
			} else {
				v = v - e.cur + maxMatchOffset
			}
			e.table[i].offset = v
		}
		e.cur = maxMatchOffset
	}

	s := e.addBlock(src)

	// This check isn't in the Snappy implementation, but there, the caller
	// instead of the callee handles this case.
	if len(src) < minNonLiteralBlockSize {
		// We do not fill the token table.
		// This will be picked up by caller.
		dst.n = uint16(len(src))
		return
	}

	// Override src
	src = e.hist
	nextEmit := s

	// sLimit is when to stop looking for offset/length copies. The inputMargin
	// lets us use a fast path for emitLiteral in the main loop, while we are
	// looking for copies.
	sLimit := int32(len(src) - inputMargin)

	// nextEmit is where in src the next emitLiteral should start from.
	cv := load6432(src, s)
	for {
		// When should we start skipping if we haven't found matches in a long while.
		const skipLog = 5
		const doEvery = 2

		nextS := s
		var candidate tableEntry
		for {
			nextHash := hashLen(cv, bTableBits, hashBytes)
			s = nextS
			nextS = s + doEvery + (s-nextEmit)>>skipLog
			if nextS > sLimit {
				goto emitRemainder
			}
			candidate = e.table[nextHash]
			now := load6432(src, nextS)
			e.table[nextHash] = tableEntry{offset: s + e.cur}
			nextHash = hashLen(now, bTableBits, hashBytes)

			offset := s - (candidate.offset - e.cur)
			if offset < maxMatchOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) {
				e.table[nextHash] = tableEntry{offset: nextS + e.cur}
				break
			}

			// Do one right away...
			cv = now
			s = nextS
			nextS++
			candidate = e.table[nextHash]
			now >>= 8
			e.table[nextHash] = tableEntry{offset: s + e.cur}

			offset = s - (candidate.offset - e.cur)
			if offset < maxMatchOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) {
				break
			}
			cv = now
		}

		// A 4-byte match has been found. We'll later see if more than 4 bytes
		// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
		// them as literal bytes.

		// Call emitCopy, and then see if another emitCopy could be our next
		// move. Repeat until we find no match for the input immediately after
		// what was consumed by the last emitCopy call.
		//
		// If we exit this loop normally then we need to call emitLiteral next,
		// though we don't yet know how big the literal will be. We handle that
		// by proceeding to the next iteration of the main loop. We also can
		// exit this loop via goto if we get close to exhausting the input.
		for {
			// Invariant: we have a 4-byte match at s, and no need to emit any
			// literal bytes prior to s.

			// Extend the 4-byte match as long as possible.
			t := candidate.offset - e.cur
			l := e.matchlenLong(int(s+4), int(t+4), src) + 4

			// Extend backwards
			for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
				s--
				t--
				l++
			}
			if nextEmit < s {
				if false {
					emitLiteral(dst, src[nextEmit:s])
				} else {
					// Inlined literal emission, faster than emitLiteral.
					for _, v := range src[nextEmit:s] {
						dst.tokens[dst.n] = token(v)
						dst.litHist[v]++
						dst.n++
					}
				}
			}

			dst.AddMatchLong(l, uint32(s-t-baseMatchOffset))
			s += l
			nextEmit = s
			if nextS >= s {
				s = nextS + 1
			}

			if s >= sLimit {
				// Index first pair after match end.
				if int(s+l+8) < len(src) {
					cv := load6432(src, s)
					e.table[hashLen(cv, bTableBits, hashBytes)] = tableEntry{offset: s + e.cur}
				}
				goto emitRemainder
			}

			// Store every second hash in-between, but offset by 1.
			for i := s - l + 2; i < s-5; i += 7 {
				x := load6432(src, i)
				nextHash := hashLen(x, bTableBits, hashBytes)
				e.table[nextHash] = tableEntry{offset: e.cur + i}
				// Skip one
				x >>= 16
				nextHash = hashLen(x, bTableBits, hashBytes)
				e.table[nextHash] = tableEntry{offset: e.cur + i + 2}
				// Skip one
				x >>= 16
				nextHash = hashLen(x, bTableBits, hashBytes)
				e.table[nextHash] = tableEntry{offset: e.cur + i + 4}
			}

			// We could immediately start working at s now, but to improve
			// compression we first update the hash table at s-2 to s. If
			// another emitCopy is not our next move, also calculate nextHash
			// at s+1. At least on GOARCH=amd64, these three hash calculations
			// are faster as one load64 call (with some shifts) instead of
			// three load32 calls.
			x := load6432(src, s-2)
			o := e.cur + s - 2
			prevHash := hashLen(x, bTableBits, hashBytes)
			prevHash2 := hashLen(x>>8, bTableBits, hashBytes)
			e.table[prevHash] = tableEntry{offset: o}
			e.table[prevHash2] = tableEntry{offset: o + 1}
			currHash := hashLen(x>>16, bTableBits, hashBytes)
			candidate = e.table[currHash]
			e.table[currHash] = tableEntry{offset: o + 2}

			offset := s - (candidate.offset - e.cur)
			if offset > maxMatchOffset || uint32(x>>16) != load3232(src, candidate.offset-e.cur) {
				cv = x >> 24
				s++
				break
			}
		}
	}

emitRemainder:
	if int(nextEmit) < len(src) {
		// If nothing was added, don't encode literals.
		if dst.n == 0 {
			return
		}

		emitLiteral(dst, src[nextEmit:])
	}
}
package flate
import "fmt"
// fastEncL3 is the level 3 encoder. Unlike levels 1-2 it keeps the two most
// recent offsets per hash bucket (tableEntryPrev), so each position can be
// checked against two candidates.
type fastEncL3 struct {
	fastGen
	table [1 << 16]tableEntryPrev // current + previous offset per hash bucket
}
// Encode uses a similar algorithm to level 2, will check up to two candidates.
func (e *fastEncL3) Encode(dst *tokens, src []byte) {
	const (
		inputMargin            = 12 - 1
		minNonLiteralBlockSize = 1 + 1 + inputMargin
		tableBits              = 16
		tableSize              = 1 << tableBits
		hashBytes              = 5 // number of bytes hashed per table entry
	)

	if debugDeflate && e.cur < 0 {
		panic(fmt.Sprint("e.cur < 0: ", e.cur))
	}

	// Protect against e.cur wraparound.
	for e.cur >= bufferReset {
		if len(e.hist) == 0 {
			// No history: just clear the table and reset the base offset.
			for i := range e.table[:] {
				e.table[i] = tableEntryPrev{}
			}
			e.cur = maxMatchOffset
			break
		}
		// Shift down everything in the table that isn't already too far away.
		minOff := e.cur + int32(len(e.hist)) - maxMatchOffset
		for i := range e.table[:] {
			v := e.table[i]
			if v.Cur.offset <= minOff {
				v.Cur.offset = 0
			} else {
				v.Cur.offset = v.Cur.offset - e.cur + maxMatchOffset
			}
			if v.Prev.offset <= minOff {
				v.Prev.offset = 0
			} else {
				v.Prev.offset = v.Prev.offset - e.cur + maxMatchOffset
			}
			e.table[i] = v
		}
		e.cur = maxMatchOffset
	}

	s := e.addBlock(src)

	// Skip if too small.
	if len(src) < minNonLiteralBlockSize {
		// We do not fill the token table.
		// This will be picked up by caller.
		dst.n = uint16(len(src))
		return
	}

	// Override src
	src = e.hist
	nextEmit := s

	// sLimit is when to stop looking for offset/length copies. The inputMargin
	// lets us use a fast path for emitLiteral in the main loop, while we are
	// looking for copies.
	sLimit := int32(len(src) - inputMargin)

	// nextEmit is where in src the next emitLiteral should start from.
	cv := load6432(src, s)
	for {
		const skipLog = 7
		nextS := s
		var candidate tableEntry
		for {
			nextHash := hashLen(cv, tableBits, hashBytes)
			s = nextS
			nextS = s + 1 + (s-nextEmit)>>skipLog
			if nextS > sLimit {
				goto emitRemainder
			}
			candidates := e.table[nextHash]
			now := load6432(src, nextS)

			// Safe offset distance until s + 4...
			minOffset := e.cur + s - (maxMatchOffset - 4)
			e.table[nextHash] = tableEntryPrev{Prev: candidates.Cur, Cur: tableEntry{offset: s + e.cur}}

			// Check both candidates
			candidate = candidates.Cur
			if candidate.offset < minOffset {
				cv = now
				// Previous will also be invalid, we have nothing.
				continue
			}

			if uint32(cv) == load3232(src, candidate.offset-e.cur) {
				if candidates.Prev.offset < minOffset || uint32(cv) != load3232(src, candidates.Prev.offset-e.cur) {
					break
				}
				// Both match and are valid, pick longest.
				offset := s - (candidate.offset - e.cur)
				o2 := s - (candidates.Prev.offset - e.cur)
				l1, l2 := matchLen(src[s+4:], src[s-offset+4:]), matchLen(src[s+4:], src[s-o2+4:])
				if l2 > l1 {
					candidate = candidates.Prev
				}
				break
			} else {
				// We only check if value mismatches.
				// Offset will always be invalid in other cases.
				candidate = candidates.Prev
				if candidate.offset > minOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) {
					break
				}
			}
			cv = now
		}

		// Call emitCopy, and then see if another emitCopy could be our next
		// move. Repeat until we find no match for the input immediately after
		// what was consumed by the last emitCopy call.
		//
		// If we exit this loop normally then we need to call emitLiteral next,
		// though we don't yet know how big the literal will be. We handle that
		// by proceeding to the next iteration of the main loop. We also can
		// exit this loop via goto if we get close to exhausting the input.
		for {
			// Invariant: we have a 4-byte match at s, and no need to emit any
			// literal bytes prior to s.

			// Extend the 4-byte match as long as possible.
			//
			t := candidate.offset - e.cur
			l := e.matchlenLong(int(s+4), int(t+4), src) + 4

			// Extend backwards
			for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
				s--
				t--
				l++
			}
			if nextEmit < s {
				if false {
					emitLiteral(dst, src[nextEmit:s])
				} else {
					// Inlined literal emission, faster than emitLiteral.
					for _, v := range src[nextEmit:s] {
						dst.tokens[dst.n] = token(v)
						dst.litHist[v]++
						dst.n++
					}
				}
			}

			dst.AddMatchLong(l, uint32(s-t-baseMatchOffset))
			s += l
			nextEmit = s
			if nextS >= s {
				s = nextS + 1
			}

			if s >= sLimit {
				t += l
				// Index first pair after match end.
				if int(t+8) < len(src) && t > 0 {
					cv = load6432(src, t)
					nextHash := hashLen(cv, tableBits, hashBytes)
					e.table[nextHash] = tableEntryPrev{
						Prev: e.table[nextHash].Cur,
						Cur:  tableEntry{offset: e.cur + t},
					}
				}
				goto emitRemainder
			}

			// Store every 5th hash in-between.
			for i := s - l + 2; i < s-5; i += 6 {
				nextHash := hashLen(load6432(src, i), tableBits, hashBytes)
				e.table[nextHash] = tableEntryPrev{
					Prev: e.table[nextHash].Cur,
					Cur:  tableEntry{offset: e.cur + i}}
			}
			// We could immediately start working at s now, but to improve
			// compression we first update the hash table at s-2 to s.
			x := load6432(src, s-2)
			prevHash := hashLen(x, tableBits, hashBytes)

			e.table[prevHash] = tableEntryPrev{
				Prev: e.table[prevHash].Cur,
				Cur:  tableEntry{offset: e.cur + s - 2},
			}
			x >>= 8
			prevHash = hashLen(x, tableBits, hashBytes)

			e.table[prevHash] = tableEntryPrev{
				Prev: e.table[prevHash].Cur,
				Cur:  tableEntry{offset: e.cur + s - 1},
			}
			x >>= 8
			currHash := hashLen(x, tableBits, hashBytes)
			candidates := e.table[currHash]
			cv = x
			e.table[currHash] = tableEntryPrev{
				Prev: candidates.Cur,
				Cur:  tableEntry{offset: s + e.cur},
			}

			// Check both candidates
			candidate = candidates.Cur
			minOffset := e.cur + s - (maxMatchOffset - 4)

			if candidate.offset > minOffset {
				if uint32(cv) == load3232(src, candidate.offset-e.cur) {
					// Found a match...
					continue
				}
				candidate = candidates.Prev
				if candidate.offset > minOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) {
					// Match at prev...
					continue
				}
			}
			cv = x >> 8
			s++
			break
		}
	}

emitRemainder:
	if int(nextEmit) < len(src) {
		// If nothing was added, don't encode literals.
		if dst.n == 0 {
			return
		}

		emitLiteral(dst, src[nextEmit:])
	}
}
package flate
import "fmt"
// fastEncL4 is the level 4 encoder. It keeps two hash tables: a short-hash
// table (4 bytes) for frequent short matches and a long-hash table (7 bytes,
// via hash7) for higher-quality long matches.
type fastEncL4 struct {
	fastGen
	table  [tableSize]tableEntry // short (4-byte) hash table
	bTable [tableSize]tableEntry // long (7-byte) hash table
}
// Encode compresses src into dst tokens using the level 4 algorithm:
// a short (4-byte) and a long (7-byte) hash table are consulted for each
// position, preferring the long candidate when both match.
func (e *fastEncL4) Encode(dst *tokens, src []byte) {
	const (
		inputMargin            = 12 - 1
		minNonLiteralBlockSize = 1 + 1 + inputMargin
		hashShortBytes         = 4
	)
	if debugDeflate && e.cur < 0 {
		panic(fmt.Sprint("e.cur < 0: ", e.cur))
	}
	// Protect against e.cur wraparound.
	for e.cur >= bufferReset {
		if len(e.hist) == 0 {
			// No history: just clear both tables and reset the base offset.
			for i := range e.table[:] {
				e.table[i] = tableEntry{}
			}
			for i := range e.bTable[:] {
				e.bTable[i] = tableEntry{}
			}
			e.cur = maxMatchOffset
			break
		}
		// Shift down everything in the table that isn't already too far away.
		minOff := e.cur + int32(len(e.hist)) - maxMatchOffset
		for i := range e.table[:] {
			v := e.table[i].offset
			if v <= minOff {
				v = 0
			} else {
				v = v - e.cur + maxMatchOffset
			}
			e.table[i].offset = v
		}
		for i := range e.bTable[:] {
			v := e.bTable[i].offset
			if v <= minOff {
				v = 0
			} else {
				v = v - e.cur + maxMatchOffset
			}
			e.bTable[i].offset = v
		}
		e.cur = maxMatchOffset
	}

	s := e.addBlock(src)

	// This check isn't in the Snappy implementation, but there, the caller
	// instead of the callee handles this case.
	if len(src) < minNonLiteralBlockSize {
		// We do not fill the token table.
		// This will be picked up by caller.
		dst.n = uint16(len(src))
		return
	}

	// Override src
	src = e.hist
	nextEmit := s

	// sLimit is when to stop looking for offset/length copies. The inputMargin
	// lets us use a fast path for emitLiteral in the main loop, while we are
	// looking for copies.
	sLimit := int32(len(src) - inputMargin)

	// nextEmit is where in src the next emitLiteral should start from.
	cv := load6432(src, s)
	for {
		const skipLog = 6
		const doEvery = 1

		nextS := s
		var t int32
		for {
			nextHashS := hashLen(cv, tableBits, hashShortBytes)
			nextHashL := hash7(cv, tableBits)

			s = nextS
			nextS = s + doEvery + (s-nextEmit)>>skipLog
			if nextS > sLimit {
				goto emitRemainder
			}
			// Fetch a short+long candidate
			sCandidate := e.table[nextHashS]
			lCandidate := e.bTable[nextHashL]
			next := load6432(src, nextS)
			entry := tableEntry{offset: s + e.cur}
			e.table[nextHashS] = entry
			e.bTable[nextHashL] = entry

			t = lCandidate.offset - e.cur
			if s-t < maxMatchOffset && uint32(cv) == load3232(src, t) {
				// We got a long match. Use that.
				break
			}

			t = sCandidate.offset - e.cur
			if s-t < maxMatchOffset && uint32(cv) == load3232(src, t) {
				// Found a 4 match...
				lCandidate = e.bTable[hash7(next, tableBits)]

				// If the next long is a candidate, check if we should use that instead...
				lOff := lCandidate.offset - e.cur
				if nextS-lOff < maxMatchOffset && load3232(src, lOff) == uint32(next) {
					// Fixed: lOff is a position, so the candidate bytes start at
					// src[lOff+4:]. (The previous code indexed src[nextS-lOff+4:],
					// which is the match *distance*, not a position, and compared
					// unrelated bytes.)
					l1, l2 := matchLen(src[s+4:], src[t+4:]), matchLen(src[nextS+4:], src[lOff+4:])
					if l2 > l1 {
						s = nextS
						t = lCandidate.offset - e.cur
					}
				}
				break
			}
			cv = next
		}

		// A 4-byte match has been found. We'll later see if more than 4 bytes
		// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
		// them as literal bytes.

		// Extend the 4-byte match as long as possible.
		l := e.matchlenLong(int(s+4), int(t+4), src) + 4

		// Extend backwards
		for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
			s--
			t--
			l++
		}
		if nextEmit < s {
			if false {
				emitLiteral(dst, src[nextEmit:s])
			} else {
				// Inlined literal emission, faster than emitLiteral.
				for _, v := range src[nextEmit:s] {
					dst.tokens[dst.n] = token(v)
					dst.litHist[v]++
					dst.n++
				}
			}
		}
		if debugDeflate {
			if t >= s {
				panic("s-t")
			}
			if (s - t) > maxMatchOffset {
				panic(fmt.Sprintln("mmo", t))
			}
			if l < baseMatchLength {
				panic("bml")
			}
		}

		dst.AddMatchLong(l, uint32(s-t-baseMatchOffset))
		s += l
		nextEmit = s
		if nextS >= s {
			s = nextS + 1
		}

		if s >= sLimit {
			// Index first pair after match end.
			if int(s+8) < len(src) {
				cv := load6432(src, s)
				e.table[hashLen(cv, tableBits, hashShortBytes)] = tableEntry{offset: s + e.cur}
				e.bTable[hash7(cv, tableBits)] = tableEntry{offset: s + e.cur}
			}
			goto emitRemainder
		}

		// Store every 3rd hash in-between
		if true {
			i := nextS
			if i < s-1 {
				cv := load6432(src, i)
				t := tableEntry{offset: i + e.cur}
				t2 := tableEntry{offset: t.offset + 1}
				e.bTable[hash7(cv, tableBits)] = t
				e.bTable[hash7(cv>>8, tableBits)] = t2
				e.table[hashLen(cv>>8, tableBits, hashShortBytes)] = t2

				i += 3
				for ; i < s-1; i += 3 {
					cv := load6432(src, i)
					t := tableEntry{offset: i + e.cur}
					t2 := tableEntry{offset: t.offset + 1}
					e.bTable[hash7(cv, tableBits)] = t
					e.bTable[hash7(cv>>8, tableBits)] = t2
					e.table[hashLen(cv>>8, tableBits, hashShortBytes)] = t2
				}
			}
		}

		// We could immediately start working at s now, but to improve
		// compression we first update the hash table at s-1 and at s.
		x := load6432(src, s-1)
		o := e.cur + s - 1
		prevHashS := hashLen(x, tableBits, hashShortBytes)
		prevHashL := hash7(x, tableBits)
		e.table[prevHashS] = tableEntry{offset: o}
		e.bTable[prevHashL] = tableEntry{offset: o}
		cv = x >> 8
	}

emitRemainder:
	if int(nextEmit) < len(src) {
		// If nothing was added, don't encode literals.
		if dst.n == 0 {
			return
		}

		emitLiteral(dst, src[nextEmit:])
	}
}
package flate
import "fmt"
// fastEncL5 is the level 5 encoder. It combines a short (4-byte) hash table
// with a long (7-byte) two-entry table (tableEntryPrev), so each position can
// be checked against up to three candidates.
type fastEncL5 struct {
	fastGen
	table  [tableSize]tableEntry     // short (4-byte) hash table
	bTable [tableSize]tableEntryPrev // long (7-byte) hash table, current + previous
}
func (e *fastEncL5) Encode(dst *tokens, src []byte) {
const (
inputMargin = 12 - 1
minNonLiteralBlockSize = 1 + 1 + inputMargin
hashShortBytes = 4
)
if debugDeflate && e.cur < 0 {
panic(fmt.Sprint("e.cur < 0: ", e.cur))
}
// Protect against e.cur wraparound.
for e.cur >= bufferReset {
if len(e.hist) == 0 {
for i := range e.table[:] {
e.table[i] = tableEntry{}
}
for i := range e.bTable[:] {
e.bTable[i] = tableEntryPrev{}
}
e.cur = maxMatchOffset
break
}
// Shift down everything in the table that isn't already too far away.
minOff := e.cur + int32(len(e.hist)) - maxMatchOffset
for i := range e.table[:] {
v := e.table[i].offset
if v <= minOff {
v = 0
} else {
v = v - e.cur + maxMatchOffset
}
e.table[i].offset = v
}
for i := range e.bTable[:] {
v := e.bTable[i]
if v.Cur.offset <= minOff {
v.Cur.offset = 0
v.Prev.offset = 0
} else {
v.Cur.offset = v.Cur.offset - e.cur + maxMatchOffset
if v.Prev.offset <= minOff {
v.Prev.offset = 0
} else {
v.Prev.offset = v.Prev.offset - e.cur + maxMatchOffset
}
}
e.bTable[i] = v
}
e.cur = maxMatchOffset
}
s := e.addBlock(src)
// This check isn't in the Snappy implementation, but there, the caller
// instead of the callee handles this case.
if len(src) < minNonLiteralBlockSize {
// We do not fill the token table.
// This will be picked up by caller.
dst.n = uint16(len(src))
return
}
// Override src
src = e.hist
nextEmit := s
// sLimit is when to stop looking for offset/length copies. The inputMargin
// lets us use a fast path for emitLiteral in the main loop, while we are
// looking for copies.
sLimit := int32(len(src) - inputMargin)
// nextEmit is where in src the next emitLiteral should start from.
cv := load6432(src, s)
for {
const skipLog = 6
const doEvery = 1
nextS := s
var l int32
var t int32
for {
nextHashS := hashLen(cv, tableBits, hashShortBytes)
nextHashL := hash7(cv, tableBits)
s = nextS
nextS = s + doEvery + (s-nextEmit)>>skipLog
if nextS > sLimit {
goto emitRemainder
}
// Fetch a short+long candidate
sCandidate := e.table[nextHashS]
lCandidate := e.bTable[nextHashL]
next := load6432(src, nextS)
entry := tableEntry{offset: s + e.cur}
e.table[nextHashS] = entry
eLong := &e.bTable[nextHashL]
eLong.Cur, eLong.Prev = entry, eLong.Cur
nextHashS = hashLen(next, tableBits, hashShortBytes)
nextHashL = hash7(next, tableBits)
t = lCandidate.Cur.offset - e.cur
if s-t < maxMatchOffset {
if uint32(cv) == load3232(src, t) {
// Store the next match
e.table[nextHashS] = tableEntry{offset: nextS + e.cur}
eLong := &e.bTable[nextHashL]
eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur
t2 := lCandidate.Prev.offset - e.cur
if s-t2 < maxMatchOffset && uint32(cv) == load3232(src, t2) {
l = e.matchlen(int(s+4), int(t+4), src) + 4
ml1 := e.matchlen(int(s+4), int(t2+4), src) + 4
if ml1 > l {
t = t2
l = ml1
break
}
}
break
}
t = lCandidate.Prev.offset - e.cur
if s-t < maxMatchOffset && uint32(cv) == load3232(src, t) {
// Store the next match
e.table[nextHashS] = tableEntry{offset: nextS + e.cur}
eLong := &e.bTable[nextHashL]
eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur
break
}
}
t = sCandidate.offset - e.cur
if s-t < maxMatchOffset && uint32(cv) == load3232(src, t) {
// Found a 4 match...
l = e.matchlen(int(s+4), int(t+4), src) + 4
lCandidate = e.bTable[nextHashL]
// Store the next match
e.table[nextHashS] = tableEntry{offset: nextS + e.cur}
eLong := &e.bTable[nextHashL]
eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur
// If the next long is a candidate, use that...
t2 := lCandidate.Cur.offset - e.cur
if nextS-t2 < maxMatchOffset {
if load3232(src, t2) == uint32(next) {
ml := e.matchlen(int(nextS+4), int(t2+4), src) + 4
if ml > l {
t = t2
s = nextS
l = ml
break
}
}
// If the previous long is a candidate, use that...
t2 = lCandidate.Prev.offset - e.cur
if nextS-t2 < maxMatchOffset && load3232(src, t2) == uint32(next) {
ml := e.matchlen(int(nextS+4), int(t2+4), src) + 4
if ml > l {
t = t2
s = nextS
l = ml
break
}
}
}
break
}
cv = next
}
// A 4-byte match has been found. We'll later see if more than 4 bytes
// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
// them as literal bytes.
if l == 0 {
// Extend the 4-byte match as long as possible.
l = e.matchlenLong(int(s+4), int(t+4), src) + 4
} else if l == maxMatchLength {
l += e.matchlenLong(int(s+l), int(t+l), src)
}
// Try to locate a better match by checking the end of best match...
if sAt := s + l; l < 30 && sAt < sLimit {
// Allow some bytes at the beginning to mismatch.
// Sweet spot is 2/3 bytes depending on input.
// 3 is only a little better when it is but sometimes a lot worse.
// The skipped bytes are tested in Extend backwards,
// and still picked up as part of the match if they do.
const skipBeginning = 2
eLong := e.bTable[hash7(load6432(src, sAt), tableBits)].Cur.offset
t2 := eLong - e.cur - l + skipBeginning
s2 := s + skipBeginning
off := s2 - t2
if t2 >= 0 && off < maxMatchOffset && off > 0 {
if l2 := e.matchlenLong(int(s2), int(t2), src); l2 > l {
t = t2
l = l2
s = s2
}
}
}
// Extend backwards
for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
s--
t--
l++
}
if nextEmit < s {
if false {
emitLiteral(dst, src[nextEmit:s])
} else {
for _, v := range src[nextEmit:s] {
dst.tokens[dst.n] = token(v)
dst.litHist[v]++
dst.n++
}
}
}
if debugDeflate {
if t >= s {
panic(fmt.Sprintln("s-t", s, t))
}
if (s - t) > maxMatchOffset {
panic(fmt.Sprintln("mmo", s-t))
}
if l < baseMatchLength {
panic("bml")
}
}
dst.AddMatchLong(l, uint32(s-t-baseMatchOffset))
s += l
nextEmit = s
if nextS >= s {
s = nextS + 1
}
if s >= sLimit {
goto emitRemainder
}
// Store every 3rd hash in-between.
if true {
const hashEvery = 3
i := s - l + 1
if i < s-1 {
cv := load6432(src, i)
t := tableEntry{offset: i + e.cur}
e.table[hashLen(cv, tableBits, hashShortBytes)] = t
eLong := &e.bTable[hash7(cv, tableBits)]
eLong.Cur, eLong.Prev = t, eLong.Cur
// Do an long at i+1
cv >>= 8
t = tableEntry{offset: t.offset + 1}
eLong = &e.bTable[hash7(cv, tableBits)]
eLong.Cur, eLong.Prev = t, eLong.Cur
// We only have enough bits for a short entry at i+2
cv >>= 8
t = tableEntry{offset: t.offset + 1}
e.table[hashLen(cv, tableBits, hashShortBytes)] = t
// Skip one - otherwise we risk hitting 's'
i += 4
for ; i < s-1; i += hashEvery {
cv := load6432(src, i)
t := tableEntry{offset: i + e.cur}
t2 := tableEntry{offset: t.offset + 1}
eLong := &e.bTable[hash7(cv, tableBits)]
eLong.Cur, eLong.Prev = t, eLong.Cur
e.table[hashLen(cv>>8, tableBits, hashShortBytes)] = t2
}
}
}
// We could immediately start working at s now, but to improve
// compression we first update the hash table at s-1 and at s.
x := load6432(src, s-1)
o := e.cur + s - 1
prevHashS := hashLen(x, tableBits, hashShortBytes)
prevHashL := hash7(x, tableBits)
e.table[prevHashS] = tableEntry{offset: o}
eLong := &e.bTable[prevHashL]
eLong.Cur, eLong.Prev = tableEntry{offset: o}, eLong.Cur
cv = x >> 8
}
emitRemainder:
if int(nextEmit) < len(src) {
// If nothing was added, don't encode literals.
if dst.n == 0 {
return
}
emitLiteral(dst, src[nextEmit:])
}
}
// fastEncL5Window is a level 5 encoder,
// but with a custom window size.
// Unlike the regular level 5 encoder it does not embed fastGen;
// it carries its own history/offset state so maxOffset can differ
// from the package-wide maxMatchOffset.
type fastEncL5Window struct {
	hist      []byte // sliding history buffer; matches are found within it
	cur       int32  // offset of the start of hist in the global coordinate space
	maxOffset int32  // custom window size; maximum allowed match offset
	table     [tableSize]tableEntry     // short (4-byte) hash table
	bTable    [tableSize]tableEntryPrev // long (7-byte) hash table with one level of chaining
}
// Encode compresses src into dst tokens using level 5 heuristics,
// honoring the encoder's custom window (e.maxOffset) instead of the
// package default maxMatchOffset. It searches a short (4-byte) and a
// long (7-byte) hash table in parallel and keeps one previous entry
// per long-hash bucket.
func (e *fastEncL5Window) Encode(dst *tokens, src []byte) {
	const (
		inputMargin            = 12 - 1
		minNonLiteralBlockSize = 1 + 1 + inputMargin
		hashShortBytes         = 4
	)
	// Use the configured window rather than the package-wide constant.
	maxMatchOffset := e.maxOffset
	if debugDeflate && e.cur < 0 {
		panic(fmt.Sprint("e.cur < 0: ", e.cur))
	}

	// Protect against e.cur wraparound.
	for e.cur >= bufferReset {
		if len(e.hist) == 0 {
			// No history to preserve: simply clear both tables.
			for i := range e.table[:] {
				e.table[i] = tableEntry{}
			}
			for i := range e.bTable[:] {
				e.bTable[i] = tableEntryPrev{}
			}
			e.cur = maxMatchOffset
			break
		}
		// Shift down everything in the table that isn't already too far away.
		minOff := e.cur + int32(len(e.hist)) - maxMatchOffset
		for i := range e.table[:] {
			v := e.table[i].offset
			if v <= minOff {
				v = 0
			} else {
				v = v - e.cur + maxMatchOffset
			}
			e.table[i].offset = v
		}
		for i := range e.bTable[:] {
			v := e.bTable[i]
			if v.Cur.offset <= minOff {
				v.Cur.offset = 0
				v.Prev.offset = 0
			} else {
				v.Cur.offset = v.Cur.offset - e.cur + maxMatchOffset
				if v.Prev.offset <= minOff {
					v.Prev.offset = 0
				} else {
					v.Prev.offset = v.Prev.offset - e.cur + maxMatchOffset
				}
			}
			e.bTable[i] = v
		}
		e.cur = maxMatchOffset
	}

	s := e.addBlock(src)

	// This check isn't in the Snappy implementation, but there, the caller
	// instead of the callee handles this case.
	if len(src) < minNonLiteralBlockSize {
		// We do not fill the token table.
		// This will be picked up by caller.
		dst.n = uint16(len(src))
		return
	}

	// Override src
	src = e.hist
	nextEmit := s

	// sLimit is when to stop looking for offset/length copies. The inputMargin
	// lets us use a fast path for emitLiteral in the main loop, while we are
	// looking for copies.
	sLimit := int32(len(src) - inputMargin)

	// nextEmit is where in src the next emitLiteral should start from.
	cv := load6432(src, s)
	for {
		const skipLog = 6
		const doEvery = 1

		nextS := s
		var l int32
		var t int32
		for {
			nextHashS := hashLen(cv, tableBits, hashShortBytes)
			nextHashL := hash7(cv, tableBits)

			s = nextS
			nextS = s + doEvery + (s-nextEmit)>>skipLog
			if nextS > sLimit {
				goto emitRemainder
			}
			// Fetch a short+long candidate
			sCandidate := e.table[nextHashS]
			lCandidate := e.bTable[nextHashL]
			next := load6432(src, nextS)
			entry := tableEntry{offset: s + e.cur}
			e.table[nextHashS] = entry
			eLong := &e.bTable[nextHashL]
			eLong.Cur, eLong.Prev = entry, eLong.Cur

			nextHashS = hashLen(next, tableBits, hashShortBytes)
			nextHashL = hash7(next, tableBits)

			t = lCandidate.Cur.offset - e.cur
			if s-t < maxMatchOffset {
				if uint32(cv) == load3232(src, t) {
					// Long candidate matched at least 4 bytes.
					// Store the next match
					e.table[nextHashS] = tableEntry{offset: nextS + e.cur}
					eLong := &e.bTable[nextHashL]
					eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur

					// Also try the previous long candidate; keep the longer match.
					t2 := lCandidate.Prev.offset - e.cur
					if s-t2 < maxMatchOffset && uint32(cv) == load3232(src, t2) {
						l = e.matchlen(s+4, t+4, src) + 4
						ml1 := e.matchlen(s+4, t2+4, src) + 4
						if ml1 > l {
							t = t2
							l = ml1
							break
						}
					}
					break
				}
				t = lCandidate.Prev.offset - e.cur
				if s-t < maxMatchOffset && uint32(cv) == load3232(src, t) {
					// Store the next match
					e.table[nextHashS] = tableEntry{offset: nextS + e.cur}
					eLong := &e.bTable[nextHashL]
					eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur
					break
				}
			}

			t = sCandidate.offset - e.cur
			if s-t < maxMatchOffset && uint32(cv) == load3232(src, t) {
				// Found a 4 match...
				l = e.matchlen(s+4, t+4, src) + 4
				lCandidate = e.bTable[nextHashL]
				// Store the next match
				e.table[nextHashS] = tableEntry{offset: nextS + e.cur}
				eLong := &e.bTable[nextHashL]
				eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur

				// If the next long is a candidate, use that...
				t2 := lCandidate.Cur.offset - e.cur
				if nextS-t2 < maxMatchOffset {
					if load3232(src, t2) == uint32(next) {
						ml := e.matchlen(nextS+4, t2+4, src) + 4
						if ml > l {
							t = t2
							s = nextS
							l = ml
							break
						}
					}
					// If the previous long is a candidate, use that...
					t2 = lCandidate.Prev.offset - e.cur
					if nextS-t2 < maxMatchOffset && load3232(src, t2) == uint32(next) {
						ml := e.matchlen(nextS+4, t2+4, src) + 4
						if ml > l {
							t = t2
							s = nextS
							l = ml
							break
						}
					}
				}
				break
			}
			cv = next
		}

		// A 4-byte match has been found. We'll later see if more than 4 bytes
		// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
		// them as literal bytes.

		if l == 0 {
			// Extend the 4-byte match as long as possible.
			l = e.matchlenLong(s+4, t+4, src) + 4
		} else if l == maxMatchLength {
			l += e.matchlenLong(s+l, t+l, src)
		}

		// Try to locate a better match by checking the end of best match...
		if sAt := s + l; l < 30 && sAt < sLimit {
			// Allow some bytes at the beginning to mismatch.
			// Sweet spot is 2/3 bytes depending on input.
			// 3 is only a little better when it is but sometimes a lot worse.
			// The skipped bytes are tested in Extend backwards,
			// and still picked up as part of the match if they do.
			const skipBeginning = 2
			eLong := e.bTable[hash7(load6432(src, sAt), tableBits)].Cur.offset
			t2 := eLong - e.cur - l + skipBeginning
			s2 := s + skipBeginning
			off := s2 - t2
			if t2 >= 0 && off < maxMatchOffset && off > 0 {
				if l2 := e.matchlenLong(s2, t2, src); l2 > l {
					t = t2
					l = l2
					s = s2
				}
			}
		}

		// Extend backwards
		for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
			s--
			t--
			l++
		}
		if nextEmit < s {
			if false {
				emitLiteral(dst, src[nextEmit:s])
			} else {
				// Inlined literal emission (faster than emitLiteral here).
				for _, v := range src[nextEmit:s] {
					dst.tokens[dst.n] = token(v)
					dst.litHist[v]++
					dst.n++
				}
			}
		}
		if debugDeflate {
			if t >= s {
				panic(fmt.Sprintln("s-t", s, t))
			}
			if (s - t) > maxMatchOffset {
				panic(fmt.Sprintln("mmo", s-t))
			}
			if l < baseMatchLength {
				panic("bml")
			}
		}

		dst.AddMatchLong(l, uint32(s-t-baseMatchOffset))
		s += l
		nextEmit = s
		if nextS >= s {
			s = nextS + 1
		}

		if s >= sLimit {
			goto emitRemainder
		}

		// Store every 3rd hash in-between.
		if true {
			const hashEvery = 3
			i := s - l + 1
			if i < s-1 {
				cv := load6432(src, i)
				t := tableEntry{offset: i + e.cur}
				e.table[hashLen(cv, tableBits, hashShortBytes)] = t
				eLong := &e.bTable[hash7(cv, tableBits)]
				eLong.Cur, eLong.Prev = t, eLong.Cur

				// Do an long at i+1
				cv >>= 8
				t = tableEntry{offset: t.offset + 1}
				eLong = &e.bTable[hash7(cv, tableBits)]
				eLong.Cur, eLong.Prev = t, eLong.Cur

				// We only have enough bits for a short entry at i+2
				cv >>= 8
				t = tableEntry{offset: t.offset + 1}
				e.table[hashLen(cv, tableBits, hashShortBytes)] = t

				// Skip one - otherwise we risk hitting 's'
				i += 4
				for ; i < s-1; i += hashEvery {
					cv := load6432(src, i)
					t := tableEntry{offset: i + e.cur}
					t2 := tableEntry{offset: t.offset + 1}
					eLong := &e.bTable[hash7(cv, tableBits)]
					eLong.Cur, eLong.Prev = t, eLong.Cur
					e.table[hashLen(cv>>8, tableBits, hashShortBytes)] = t2
				}
			}
		}

		// We could immediately start working at s now, but to improve
		// compression we first update the hash table at s-1 and at s.
		x := load6432(src, s-1)
		o := e.cur + s - 1
		prevHashS := hashLen(x, tableBits, hashShortBytes)
		prevHashL := hash7(x, tableBits)
		e.table[prevHashS] = tableEntry{offset: o}
		eLong := &e.bTable[prevHashL]
		eLong.Cur, eLong.Prev = tableEntry{offset: o}, eLong.Cur
		cv = x >> 8
	}

emitRemainder:
	if int(nextEmit) < len(src) {
		// If nothing was added, don't encode literals.
		if dst.n == 0 {
			return
		}

		emitLiteral(dst, src[nextEmit:])
	}
}
// Reset the encoding table.
func (e *fastEncL5Window) Reset() {
	// Keep the same allocation across blocks when it is already big enough.
	if cap(e.hist) < allocHistory {
		e.hist = make([]byte, 0, allocHistory)
	}

	// Advance the current offset so every stored table entry becomes
	// unreachable (out of window). If e.cur is already above bufferReset,
	// the tables will be wiped on the next Encode anyway.
	histLen := int32(len(e.hist))
	if e.cur <= int32(bufferReset) {
		e.cur += e.maxOffset + histLen
	}
	e.hist = e.hist[:0]
}
// addBlock appends src to the history buffer, sliding the window down
// first when the buffer would overflow. It returns the position of src
// within e.hist.
func (e *fastEncL5Window) addBlock(src []byte) int32 {
	window := e.maxOffset
	if len(e.hist)+len(src) > cap(e.hist) {
		switch {
		case cap(e.hist) == 0:
			e.hist = make([]byte, 0, allocHistory)
		case cap(e.hist) < int(window*2):
			panic("unexpected buffer size")
		default:
			// Slide: keep only the last `window` bytes of history.
			shift := int32(len(e.hist)) - window
			copy(e.hist[:window], e.hist[shift:])
			e.cur += shift
			e.hist = e.hist[:window]
		}
	}
	start := int32(len(e.hist))
	e.hist = append(e.hist, src...)
	return start
}
// matchlen will return the match length between offsets s and t in src.
// The maximum length returned is maxMatchLength - 4.
// It is assumed that s > t, that t >= 0 and s < len(src).
func (e *fastEncL5Window) matchlen(s, t int32, src []byte) int32 {
	// Gate on debugDeflate for consistency with matchlenLong
	// (was debugDecode, which belongs to the decompressor).
	if debugDeflate {
		if t >= s {
			panic(fmt.Sprint("t >=s:", t, s))
		}
		if int(s) >= len(src) {
			panic(fmt.Sprint("s >= len(src):", s, len(src)))
		}
		if t < 0 {
			panic(fmt.Sprint("t < 0:", t))
		}
		// Report the encoder's configured window (e.maxOffset), which is
		// what was actually checked - not the package-wide constant.
		if s-t > e.maxOffset {
			panic(fmt.Sprint(s, "-", t, "(", s-t, ") > maxMatchOffset (", e.maxOffset, ")"))
		}
	}
	// Cap the comparison window so we never report more than maxMatchLength-4.
	s1 := int(s) + maxMatchLength - 4
	if s1 > len(src) {
		s1 = len(src)
	}

	// Extend the match to be as long as possible.
	return int32(matchLen(src[s:s1], src[t:]))
}
// matchlenLong will return the match length between offsets s and t in src.
// Unlike matchlen, the result is not capped at maxMatchLength - 4.
// It is assumed that s > t, that t >= 0 and s < len(src).
func (e *fastEncL5Window) matchlenLong(s, t int32, src []byte) int32 {
	if debugDeflate {
		if t >= s {
			panic(fmt.Sprint("t >=s:", t, s))
		}
		if int(s) >= len(src) {
			panic(fmt.Sprint("s >= len(src):", s, len(src)))
		}
		if t < 0 {
			panic(fmt.Sprint("t < 0:", t))
		}
		// Report the encoder's configured window (e.maxOffset), which is
		// what was actually checked - not the package-wide constant.
		if s-t > e.maxOffset {
			panic(fmt.Sprint(s, "-", t, "(", s-t, ") > maxMatchOffset (", e.maxOffset, ")"))
		}
	}
	// Extend the match to be as long as possible.
	return int32(matchLen(src[s:], src[t:]))
}
package flate
import "fmt"
// fastEncL6 is the level 6 encoder. It extends the level 5 scheme with a
// repeat-offset check and denser table updates (see Encode).
type fastEncL6 struct {
	fastGen
	table  [tableSize]tableEntry     // short (4-byte) hash table
	bTable [tableSize]tableEntryPrev // long (7-byte) hash table with one level of chaining
}
// Encode compresses src into dst tokens using level 6 heuristics.
// Compared to level 5 it additionally tracks the last used match offset
// ("repeat") and tries it at each candidate position, checks both the
// current and previous long candidate at the end of a found match, and
// indexes the skipped region more densely.
//
// Fix: the sanity-check block below was gated on `if false`, unlike every
// sibling encoder which uses `debugDeflate`. It now uses debugDeflate so
// the checks are enabled together with the rest of the debug asserts
// (no behavior change in release builds; debugDeflate is a false const).
func (e *fastEncL6) Encode(dst *tokens, src []byte) {
	const (
		inputMargin            = 12 - 1
		minNonLiteralBlockSize = 1 + 1 + inputMargin
		hashShortBytes         = 4
	)
	if debugDeflate && e.cur < 0 {
		panic(fmt.Sprint("e.cur < 0: ", e.cur))
	}

	// Protect against e.cur wraparound.
	for e.cur >= bufferReset {
		if len(e.hist) == 0 {
			// No history to preserve: simply clear both tables.
			for i := range e.table[:] {
				e.table[i] = tableEntry{}
			}
			for i := range e.bTable[:] {
				e.bTable[i] = tableEntryPrev{}
			}
			e.cur = maxMatchOffset
			break
		}
		// Shift down everything in the table that isn't already too far away.
		minOff := e.cur + int32(len(e.hist)) - maxMatchOffset
		for i := range e.table[:] {
			v := e.table[i].offset
			if v <= minOff {
				v = 0
			} else {
				v = v - e.cur + maxMatchOffset
			}
			e.table[i].offset = v
		}
		for i := range e.bTable[:] {
			v := e.bTable[i]
			if v.Cur.offset <= minOff {
				v.Cur.offset = 0
				v.Prev.offset = 0
			} else {
				v.Cur.offset = v.Cur.offset - e.cur + maxMatchOffset
				if v.Prev.offset <= minOff {
					v.Prev.offset = 0
				} else {
					v.Prev.offset = v.Prev.offset - e.cur + maxMatchOffset
				}
			}
			e.bTable[i] = v
		}
		e.cur = maxMatchOffset
	}

	s := e.addBlock(src)

	// This check isn't in the Snappy implementation, but there, the caller
	// instead of the callee handles this case.
	if len(src) < minNonLiteralBlockSize {
		// We do not fill the token table.
		// This will be picked up by caller.
		dst.n = uint16(len(src))
		return
	}

	// Override src
	src = e.hist
	nextEmit := s

	// sLimit is when to stop looking for offset/length copies. The inputMargin
	// lets us use a fast path for emitLiteral in the main loop, while we are
	// looking for copies.
	sLimit := int32(len(src) - inputMargin)

	// nextEmit is where in src the next emitLiteral should start from.
	cv := load6432(src, s)
	// Repeat MUST be > 1 and within range
	repeat := int32(1)
	for {
		const skipLog = 7
		const doEvery = 1

		nextS := s
		var l int32
		var t int32
		for {
			nextHashS := hashLen(cv, tableBits, hashShortBytes)
			nextHashL := hash7(cv, tableBits)
			s = nextS
			nextS = s + doEvery + (s-nextEmit)>>skipLog
			if nextS > sLimit {
				goto emitRemainder
			}
			// Fetch a short+long candidate
			sCandidate := e.table[nextHashS]
			lCandidate := e.bTable[nextHashL]
			next := load6432(src, nextS)
			entry := tableEntry{offset: s + e.cur}
			e.table[nextHashS] = entry
			eLong := &e.bTable[nextHashL]
			eLong.Cur, eLong.Prev = entry, eLong.Cur

			// Calculate hashes of 'next'
			nextHashS = hashLen(next, tableBits, hashShortBytes)
			nextHashL = hash7(next, tableBits)

			t = lCandidate.Cur.offset - e.cur
			if s-t < maxMatchOffset {
				if uint32(cv) == load3232(src, t) {
					// Long candidate matches at least 4 bytes.

					// Store the next match
					e.table[nextHashS] = tableEntry{offset: nextS + e.cur}
					eLong := &e.bTable[nextHashL]
					eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur

					// Check the previous long candidate as well.
					t2 := lCandidate.Prev.offset - e.cur
					if s-t2 < maxMatchOffset && uint32(cv) == load3232(src, t2) {
						l = e.matchlen(int(s+4), int(t+4), src) + 4
						ml1 := e.matchlen(int(s+4), int(t2+4), src) + 4
						if ml1 > l {
							t = t2
							l = ml1
							break
						}
					}
					break
				}
				// Current value did not match, but check if previous long value does.
				t = lCandidate.Prev.offset - e.cur
				if s-t < maxMatchOffset && uint32(cv) == load3232(src, t) {
					// Store the next match
					e.table[nextHashS] = tableEntry{offset: nextS + e.cur}
					eLong := &e.bTable[nextHashL]
					eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur
					break
				}
			}

			t = sCandidate.offset - e.cur
			if s-t < maxMatchOffset && uint32(cv) == load3232(src, t) {
				// Found a 4 match...
				l = e.matchlen(int(s+4), int(t+4), src) + 4

				// Look up next long candidate (at nextS)
				lCandidate = e.bTable[nextHashL]

				// Store the next match
				e.table[nextHashS] = tableEntry{offset: nextS + e.cur}
				eLong := &e.bTable[nextHashL]
				eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur

				// Check repeat at s + repOff
				const repOff = 1
				t2 := s - repeat + repOff
				if load3232(src, t2) == uint32(cv>>(8*repOff)) {
					ml := e.matchlen(int(s+4+repOff), int(t2+4), src) + 4
					if ml > l {
						t = t2
						l = ml
						s += repOff
						// Not worth checking more.
						break
					}
				}

				// If the next long is a candidate, use that...
				t2 = lCandidate.Cur.offset - e.cur
				if nextS-t2 < maxMatchOffset {
					if load3232(src, t2) == uint32(next) {
						ml := e.matchlen(int(nextS+4), int(t2+4), src) + 4
						if ml > l {
							t = t2
							s = nextS
							l = ml
							// This is ok, but check previous as well.
						}
					}
					// If the previous long is a candidate, use that...
					t2 = lCandidate.Prev.offset - e.cur
					if nextS-t2 < maxMatchOffset && load3232(src, t2) == uint32(next) {
						ml := e.matchlen(int(nextS+4), int(t2+4), src) + 4
						if ml > l {
							t = t2
							s = nextS
							l = ml
							break
						}
					}
				}
				break
			}
			cv = next
		}

		// A 4-byte match has been found. We'll later see if more than 4 bytes
		// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
		// them as literal bytes.

		// Extend the 4-byte match as long as possible.
		if l == 0 {
			l = e.matchlenLong(int(s+4), int(t+4), src) + 4
		} else if l == maxMatchLength {
			l += e.matchlenLong(int(s+l), int(t+l), src)
		}

		// Try to locate a better match by checking the end-of-match...
		if sAt := s + l; sAt < sLimit {
			// Allow some bytes at the beginning to mismatch.
			// Sweet spot is 2/3 bytes depending on input.
			// 3 is only a little better when it is but sometimes a lot worse.
			// The skipped bytes are tested in Extend backwards,
			// and still picked up as part of the match if they do.
			const skipBeginning = 2
			eLong := &e.bTable[hash7(load6432(src, sAt), tableBits)]
			// Test current
			t2 := eLong.Cur.offset - e.cur - l + skipBeginning
			s2 := s + skipBeginning
			off := s2 - t2
			if off < maxMatchOffset {
				if off > 0 && t2 >= 0 {
					if l2 := e.matchlenLong(int(s2), int(t2), src); l2 > l {
						t = t2
						l = l2
						s = s2
					}
				}
				// Test next:
				t2 = eLong.Prev.offset - e.cur - l + skipBeginning
				off := s2 - t2
				if off > 0 && off < maxMatchOffset && t2 >= 0 {
					if l2 := e.matchlenLong(int(s2), int(t2), src); l2 > l {
						t = t2
						l = l2
						s = s2
					}
				}
			}
		}

		// Extend backwards
		for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
			s--
			t--
			l++
		}
		if nextEmit < s {
			if false {
				emitLiteral(dst, src[nextEmit:s])
			} else {
				for _, v := range src[nextEmit:s] {
					dst.tokens[dst.n] = token(v)
					dst.litHist[v]++
					dst.n++
				}
			}
		}
		if debugDeflate {
			if t >= s {
				panic(fmt.Sprintln("s-t", s, t))
			}
			if (s - t) > maxMatchOffset {
				panic(fmt.Sprintln("mmo", s-t))
			}
			if l < baseMatchLength {
				panic("bml")
			}
		}

		dst.AddMatchLong(l, uint32(s-t-baseMatchOffset))
		// Remember the offset for the repeat check next iteration.
		repeat = s - t
		s += l
		nextEmit = s
		if nextS >= s {
			s = nextS + 1
		}

		if s >= sLimit {
			// Index after match end.
			for i := nextS + 1; i < int32(len(src))-8; i += 2 {
				cv := load6432(src, i)
				e.table[hashLen(cv, tableBits, hashShortBytes)] = tableEntry{offset: i + e.cur}
				eLong := &e.bTable[hash7(cv, tableBits)]
				eLong.Cur, eLong.Prev = tableEntry{offset: i + e.cur}, eLong.Cur
			}
			goto emitRemainder
		}

		// Store every long hash in-between and every second short.
		if true {
			for i := nextS + 1; i < s-1; i += 2 {
				cv := load6432(src, i)
				t := tableEntry{offset: i + e.cur}
				t2 := tableEntry{offset: t.offset + 1}
				eLong := &e.bTable[hash7(cv, tableBits)]
				eLong2 := &e.bTable[hash7(cv>>8, tableBits)]
				e.table[hashLen(cv, tableBits, hashShortBytes)] = t
				eLong.Cur, eLong.Prev = t, eLong.Cur
				eLong2.Cur, eLong2.Prev = t2, eLong2.Cur
			}
		}

		// We could immediately start working at s now, but to improve
		// compression we first update the hash table at s-1 and at s.
		cv = load6432(src, s)
	}

emitRemainder:
	if int(nextEmit) < len(src) {
		// If nothing was added, don't encode literals.
		if dst.n == 0 {
			return
		}

		emitLiteral(dst, src[nextEmit:])
	}
}
// Copyright 2019+ Klaus Post. All rights reserved.
// License information can be found in the LICENSE file.
package flate
import (
"math/bits"
"github.com/klauspost/compress/internal/le"
)
// matchLen returns the maximum common prefix length of a and b.
// a must be the shortest of the two.
func matchLen(a, b []byte) (n int) {
	// Compare 8 bytes at a time; the first differing bit pinpoints
	// the mismatching byte via its trailing-zero count.
	for n+8 <= len(a) {
		if diff := le.Load64(a, n) ^ le.Load64(b, n); diff != 0 {
			return n + bits.TrailingZeros64(diff)>>3
		}
		n += 8
	}
	// Fewer than 8 bytes remain: finish byte by byte.
	for n < len(a) && a[n] == b[n] {
		n++
	}
	return n
}
package flate
import (
"io"
"math"
"sync"
"github.com/klauspost/compress/internal/le"
)
const (
	// maxStatelessBlock is the most input bytes compressed per stateless
	// block. Offsets in statelessEnc are int16, hence MaxInt16.
	maxStatelessBlock = math.MaxInt16
	// dictionary will be taken from maxStatelessBlock, so limit it.
	maxStatelessDict = 8 << 10

	// Hash table sizing for the stateless encoder (see hashSL).
	slTableBits  = 13
	slTableSize  = 1 << slTableBits
	slTableShift = 32 - slTableBits
)
// statelessWriter compresses each Write call independently via
// StatelessDeflate, keeping no compression state between calls.
type statelessWriter struct {
	dst    io.Writer // destination for compressed output
	closed bool      // set once Close has emitted the EOF block
}
// Close emits the final EOF block. Calling Close more than once is a no-op.
func (s *statelessWriter) Close() error {
	if !s.closed {
		s.closed = true
		// Emit EOF block
		return StatelessDeflate(s.dst, nil, true, nil)
	}
	return nil
}
// Write compresses p as an independent stateless block.
// On success it reports the full length of p as written.
func (s *statelessWriter) Write(p []byte) (n int, err error) {
	if err := StatelessDeflate(s.dst, p, false, nil); err != nil {
		return 0, err
	}
	return len(p), nil
}
// Reset re-targets the writer at w and clears the closed flag so the
// writer can be reused after Close.
func (s *statelessWriter) Reset(w io.Writer) {
	s.dst = w
	s.closed = false
}
// NewStatelessWriter will do compression but without maintaining any state
// between Write calls.
// There will be no memory kept between Write calls,
// but compression and speed will be suboptimal.
// Because of this, the size of actual Write calls will affect output size.
func NewStatelessWriter(dst io.Writer) io.WriteCloser {
	w := &statelessWriter{dst: dst}
	return w
}
// bitWriterPool contains bit writers that can be reused, so
// StatelessDeflate does not allocate a fresh huffmanBitWriter per call.
var bitWriterPool = sync.Pool{
	New: func() interface{} {
		return newHuffmanBitWriter(nil)
	},
}
// StatelessDeflate allows compressing directly to a Writer without retaining state.
// When returning everything will be flushed.
// Up to 8KB of an optional dictionary can be given which is presumed to precede the block.
// Longer dictionaries will be truncated and will still produce valid output.
// Sending nil dictionary is perfectly fine.
func StatelessDeflate(out io.Writer, in []byte, eof bool, dict []byte) error {
	var dst tokens
	bw := bitWriterPool.Get().(*huffmanBitWriter)
	bw.reset(out)
	defer func() {
		// don't keep a reference to our output
		bw.reset(nil)
		bitWriterPool.Put(bw)
	}()
	if eof && len(in) == 0 {
		// Just write an EOF block.
		// Could be faster...
		bw.writeStoredHeader(0, true)
		bw.flush()
		return bw.err
	}

	// Truncate dict
	if len(dict) > maxStatelessDict {
		dict = dict[len(dict)-maxStatelessDict:]
	}

	// For subsequent loops, keep shallow dict reference to avoid alloc+copy.
	var inDict []byte

	for len(in) > 0 {
		// Limit each block so dict+block fits the int16 offset space.
		todo := in
		if len(inDict) > 0 {
			if len(todo) > maxStatelessBlock-maxStatelessDict {
				todo = todo[:maxStatelessBlock-maxStatelessDict]
			}
		} else if len(todo) > maxStatelessBlock-len(dict) {
			todo = todo[:maxStatelessBlock-len(dict)]
		}
		inOrg := in
		in = in[len(todo):]
		uncompressed := todo
		if len(dict) > 0 {
			// combine dict and source
			bufLen := len(todo) + len(dict)
			combined := make([]byte, bufLen)
			copy(combined, dict)
			copy(combined[len(dict):], todo)
			todo = combined
		}
		// Compress
		if len(inDict) == 0 {
			statelessEnc(&dst, todo, int16(len(dict)))
		} else {
			// inDict already holds dict+data contiguously; no copy needed.
			statelessEnc(&dst, inDict[:maxStatelessDict+len(todo)], maxStatelessDict)
		}
		isEof := eof && len(in) == 0

		if dst.n == 0 {
			// No matches found: store the block verbatim.
			bw.writeStoredHeader(len(uncompressed), isEof)
			if bw.err != nil {
				return bw.err
			}
			bw.writeBytes(uncompressed)
		} else if int(dst.n) > len(uncompressed)-len(uncompressed)>>4 {
			// If we removed less than 1/16th, huffman compress the block.
			bw.writeBlockHuff(isEof, uncompressed, len(in) == 0)
		} else {
			bw.writeBlockDynamic(&dst, isEof, uncompressed, len(in) == 0)
		}
		if len(in) > 0 {
			// Retain a dict if we have more
			inDict = inOrg[len(uncompressed)-maxStatelessDict:]
			dict = nil
			dst.Reset()
		}
		if bw.err != nil {
			return bw.err
		}
	}
	if !eof {
		// Align, only a stored block can do that.
		bw.writeStoredHeader(0, false)
	}
	bw.flush()
	return bw.err
}
// hashSL hashes the low 4 bytes of u into slTableBits bits using a
// multiplicative hash.
func hashSL(u uint32) uint32 {
	const prime = 0x1e35a7bd
	return (u * prime) >> slTableShift
}
// load3216 loads 4 bytes from b at offset i, little-endian,
// for the int16-offset stateless encoder.
func load3216(b []byte, i int16) uint32 {
	return le.Load32(b, i)
}
// load6416 loads 8 bytes from b at offset i, little-endian,
// for the int16-offset stateless encoder.
func load6416(b []byte, i int16) uint64 {
	return le.Load64(b, i)
}
// statelessEnc compresses src[startAt:] into dst tokens using a local
// hash table that lives only for this call. src[:startAt] is treated as
// a preceding dictionary: it is indexed but never emitted. Offsets are
// int16, so len(src) must not exceed maxStatelessBlock.
func statelessEnc(dst *tokens, src []byte, startAt int16) {
	const (
		inputMargin            = 12 - 1
		minNonLiteralBlockSize = 1 + 1 + inputMargin
	)

	type tableEntry struct {
		offset int16
	}

	// Stack-allocated table; no state survives the call.
	var table [slTableSize]tableEntry

	// This check isn't in the Snappy implementation, but there, the caller
	// instead of the callee handles this case.
	if len(src)-int(startAt) < minNonLiteralBlockSize {
		// We do not fill the token table.
		// This will be picked up by caller.
		dst.n = 0
		return
	}
	// Index until startAt
	if startAt > 0 {
		cv := load3232(src, 0)
		for i := int16(0); i < startAt; i++ {
			table[hashSL(cv)] = tableEntry{offset: i}
			cv = (cv >> 8) | (uint32(src[i+4]) << 24)
		}
	}

	s := startAt + 1
	nextEmit := startAt
	// sLimit is when to stop looking for offset/length copies. The inputMargin
	// lets us use a fast path for emitLiteral in the main loop, while we are
	// looking for copies.
	sLimit := int16(len(src) - inputMargin)

	// nextEmit is where in src the next emitLiteral should start from.
	cv := load3216(src, s)

	for {
		const skipLog = 5
		const doEvery = 2

		nextS := s
		var candidate tableEntry
		for {
			nextHash := hashSL(cv)
			candidate = table[nextHash]
			nextS = s + doEvery + (s-nextEmit)>>skipLog
			// nextS <= 0 guards against int16 overflow of the skip step.
			if nextS > sLimit || nextS <= 0 {
				goto emitRemainder
			}

			now := load6416(src, nextS)
			table[nextHash] = tableEntry{offset: s}
			nextHash = hashSL(uint32(now))

			if cv == load3216(src, candidate.offset) {
				table[nextHash] = tableEntry{offset: nextS}
				break
			}

			// Do one right away...
			cv = uint32(now)
			s = nextS
			nextS++
			candidate = table[nextHash]
			now >>= 8
			table[nextHash] = tableEntry{offset: s}

			if cv == load3216(src, candidate.offset) {
				table[nextHash] = tableEntry{offset: nextS}
				break
			}
			cv = uint32(now)
			s = nextS
		}

		// A 4-byte match has been found. We'll later see if more than 4 bytes
		// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
		// them as literal bytes.
		for {
			// Invariant: we have a 4-byte match at s, and no need to emit any
			// literal bytes prior to s.

			// Extend the 4-byte match as long as possible.
			t := candidate.offset
			l := int16(matchLen(src[s+4:], src[t+4:]) + 4)

			// Extend backwards
			for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
				s--
				t--
				l++
			}
			if nextEmit < s {
				if false {
					emitLiteral(dst, src[nextEmit:s])
				} else {
					// Inlined literal emission.
					for _, v := range src[nextEmit:s] {
						dst.tokens[dst.n] = token(v)
						dst.litHist[v]++
						dst.n++
					}
				}
			}

			// Save the match found
			dst.AddMatchLong(int32(l), uint32(s-t-baseMatchOffset))
			s += l
			nextEmit = s
			if nextS >= s {
				s = nextS + 1
			}
			if s >= sLimit {
				goto emitRemainder
			}

			// We could immediately start working at s now, but to improve
			// compression we first update the hash table at s-2 and at s. If
			// another emitCopy is not our next move, also calculate nextHash
			// at s+1. At least on GOARCH=amd64, these three hash calculations
			// are faster as one load64 call (with some shifts) instead of
			// three load32 calls.
			x := load6416(src, s-2)
			o := s - 2
			prevHash := hashSL(uint32(x))
			table[prevHash] = tableEntry{offset: o}
			x >>= 16
			currHash := hashSL(uint32(x))
			candidate = table[currHash]
			table[currHash] = tableEntry{offset: o + 2}

			if uint32(x) != load3216(src, candidate.offset) {
				cv = uint32(x >> 8)
				s++
				break
			}
		}
	}

emitRemainder:
	if int(nextEmit) < len(src) {
		// If nothing was added, don't encode literals.
		if dst.n == 0 {
			return
		}
		emitLiteral(dst, src[nextEmit:])
	}
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package flate
import (
"bytes"
"encoding/binary"
"fmt"
"io"
"math"
)
// A token packs type, length and offset into a single 32-bit value:
const (
	// bits 0-16 	xoffset = offset - MIN_OFFSET_SIZE, or literal - 16 bits
	// bits 16-22	offsetcode - 5 bits
	// bits 22-30   xlength = length - MIN_MATCH_LENGTH - 8 bits
	// bits 30-32   type   0 = literal  1=EOF  2=Match   3=Unused - 2 bits
	lengthShift         = 22
	offsetMask          = 1<<lengthShift - 1 // masks out the xoffset+offsetcode bits
	typeMask            = 3 << 30
	literalType         = 0 << 30
	matchType           = 1 << 30
	matchOffsetOnlyMask = 0xffff
)
// The length code for length X (MIN_MATCH_LENGTH <= X <= MAX_MATCH_LENGTH)
// is lengthCodes[length - MIN_MATCH_LENGTH].
// Values are relative codes 0-28; the final alphabet code is derived
// from these when the token is written out.
var lengthCodes = [256]uint8{
	0, 1, 2, 3, 4, 5, 6, 7, 8, 8,
	9, 9, 10, 10, 11, 11, 12, 12, 12, 12,
	13, 13, 13, 13, 14, 14, 14, 14, 15, 15,
	15, 15, 16, 16, 16, 16, 16, 16, 16, 16,
	17, 17, 17, 17, 17, 17, 17, 17, 18, 18,
	18, 18, 18, 18, 18, 18, 19, 19, 19, 19,
	19, 19, 19, 19, 20, 20, 20, 20, 20, 20,
	20, 20, 20, 20, 20, 20, 20, 20, 20, 20,
	21, 21, 21, 21, 21, 21, 21, 21, 21, 21,
	21, 21, 21, 21, 21, 21, 22, 22, 22, 22,
	22, 22, 22, 22, 22, 22, 22, 22, 22, 22,
	22, 22, 23, 23, 23, 23, 23, 23, 23, 23,
	23, 23, 23, 23, 23, 23, 23, 23, 24, 24,
	24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
	24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
	24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
	25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
	25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
	25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
	25, 25, 26, 26, 26, 26, 26, 26, 26, 26,
	26, 26, 26, 26, 26, 26, 26, 26, 26, 26,
	26, 26, 26, 26, 26, 26, 26, 26, 26, 26,
	26, 26, 26, 26, 27, 27, 27, 27, 27, 27,
	27, 27, 27, 27, 27, 27, 27, 27, 27, 27,
	27, 27, 27, 27, 27, 27, 27, 27, 27, 27,
	27, 27, 27, 27, 27, 28,
}
// lengthCodes1 is length codes, but starting at 1
// (i.e. lengthCodes1[i] == lengthCodes[i] + 1), which saves an
// increment when the shifted code is needed.
var lengthCodes1 = [256]uint8{
	1, 2, 3, 4, 5, 6, 7, 8, 9, 9,
	10, 10, 11, 11, 12, 12, 13, 13, 13, 13,
	14, 14, 14, 14, 15, 15, 15, 15, 16, 16,
	16, 16, 17, 17, 17, 17, 17, 17, 17, 17,
	18, 18, 18, 18, 18, 18, 18, 18, 19, 19,
	19, 19, 19, 19, 19, 19, 20, 20, 20, 20,
	20, 20, 20, 20, 21, 21, 21, 21, 21, 21,
	21, 21, 21, 21, 21, 21, 21, 21, 21, 21,
	22, 22, 22, 22, 22, 22, 22, 22, 22, 22,
	22, 22, 22, 22, 22, 22, 23, 23, 23, 23,
	23, 23, 23, 23, 23, 23, 23, 23, 23, 23,
	23, 23, 24, 24, 24, 24, 24, 24, 24, 24,
	24, 24, 24, 24, 24, 24, 24, 24, 25, 25,
	25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
	25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
	25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
	26, 26, 26, 26, 26, 26, 26, 26, 26, 26,
	26, 26, 26, 26, 26, 26, 26, 26, 26, 26,
	26, 26, 26, 26, 26, 26, 26, 26, 26, 26,
	26, 26, 27, 27, 27, 27, 27, 27, 27, 27,
	27, 27, 27, 27, 27, 27, 27, 27, 27, 27,
	27, 27, 27, 27, 27, 27, 27, 27, 27, 27,
	27, 27, 27, 27, 28, 28, 28, 28, 28, 28,
	28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
	28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
	28, 28, 28, 28, 28, 29,
}
// offsetCodes maps offsets 0-255 to their offset code (0-15).
// Offsets of 256 and above are handled via offsetCodes14 using off>>7
// (see offsetCode below).
var offsetCodes = [256]uint32{
	0, 1, 2, 3, 4, 4, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7,
	8, 8, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9,
	10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
	11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
	12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
	12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
	13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
	13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
	14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
	14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
	14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
	14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
	15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
	15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
	15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
	15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
}
// offsetCodes14 are offsetCodes, but with 14 added.
// Indexed with uint8(off>>7) by offsetCode for offsets >= 256, so a
// single table lookup covers the larger offset tiers without branching.
var offsetCodes14 = [256]uint32{
	14, 15, 16, 17, 18, 18, 19, 19, 20, 20, 20, 20, 21, 21, 21, 21,
	22, 22, 22, 22, 22, 22, 22, 22, 23, 23, 23, 23, 23, 23, 23, 23,
	24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
	25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
	26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26,
	26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26,
	27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27,
	27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27,
	28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
	28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
	28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
	28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
	29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29,
	29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29,
	29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29,
	29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29,
}
// token is a packed literal or match in 32 bits; AddMatch combines
// matchType, the length (shifted by lengthShift) and the offset field.
type token uint32

// tokens accumulates the tokens of one block together with the
// histograms needed for entropy coding.
type tokens struct {
	extraHist [32]uint16  // codes 256->maxnumlit
	offHist   [32]uint16  // offset codes
	litHist   [256]uint16 // codes 0->255
	nFilled   int         // number of zero entries bumped to 1 by Fill
	n         uint16      // Must be able to contain maxStoreBlockSize
	tokens    [maxStoreBlockSize + 1]token
}
// Reset clears the token buffer and all histograms so t can be reused.
// It is a no-op when nothing has been added since the last reset.
func (t *tokens) Reset() {
	if t.n == 0 {
		return
	}
	t.n = 0
	t.nFilled = 0
	// Assigning zero-valued arrays clears the histograms in one step.
	t.litHist = [256]uint16{}
	t.extraHist = [32]uint16{}
	t.offHist = [32]uint16{}
}
// Fill bumps every zero histogram bucket to one so later entropy
// estimates never see an empty symbol. nFilled records how many
// literal/length buckets were synthesized; the offset buckets are
// normalized against the match count instead (see EstimatedBits),
// so they do not contribute to nFilled.
func (t *tokens) Fill() {
	if t.n == 0 {
		return
	}
	lits := t.litHist[:]
	for i := range lits {
		if lits[i] == 0 {
			lits[i] = 1
			t.nFilled++
		}
	}
	extras := t.extraHist[:literalCount-256]
	for i := range extras {
		if extras[i] == 0 {
			t.nFilled++
			extras[i] = 1
		}
	}
	offs := t.offHist[:offsetCodeCount]
	for i := range offs {
		if offs[i] == 0 {
			offs[i] = 1
		}
	}
}
// indexTokens builds a fresh tokens value containing in, with all
// histograms populated.
func indexTokens(in []token) tokens {
	var result tokens
	result.indexTokens(in)
	return result
}
// indexTokens replaces the contents of t with in, re-deriving the
// literal, length and offset histograms from the packed tokens.
func (t *tokens) indexTokens(in []token) {
	t.Reset()
	for _, tk := range in {
		if tk >= matchType {
			t.AddMatch(uint32(tk.length()), tk.offset()&matchOffsetOnlyMask)
			continue
		}
		t.AddLiteral(tk.literal())
	}
}
// emitLiteral appends each byte of lit to dst as a literal token and
// updates the literal histogram. (It has no return value; the old
// comment about a byte count was stale.)
func emitLiteral(dst *tokens, lit []byte) {
	n := dst.n
	for _, c := range lit {
		dst.tokens[n] = token(c)
		dst.litHist[c]++
		n++
	}
	dst.n = n
}
// AddLiteral appends a single literal byte and counts it in litHist.
func (t *tokens) AddLiteral(lit byte) {
	t.litHist[lit]++
	t.tokens[t.n] = token(lit)
	t.n++
}
// mFastLog2 is a fast approximation of log2 for positive float32 values.
// It extracts the exponent from the IEEE-754 bits and corrects with a
// small polynomial on the mantissa.
// from https://stackoverflow.com/a/28730362
func mFastLog2(val float32) float32 {
	bits := int32(math.Float32bits(val))
	// Exponent contribution (biased by 128; the polynomial below
	// compensates for the extra unit).
	result := (float32)(((bits >> 23) & 255) - 128)
	// Force the exponent to 0 so the remainder is the mantissa in [1,2).
	bits &= -0x7f800001
	bits += 127 << 23
	m := math.Float32frombits(uint32(bits))
	// Quadratic approximation of log2 over the mantissa range.
	result += ((-0.34484843)*m+2.02466578)*m - 0.67487759
	return result
}
// EstimatedBits returns a lower-bound estimate, in bits, of an optimal
// entropy encoding of the current block: the Shannon entropy of the
// literal/length and offset histograms plus the fixed extra bits each
// length and offset code carries.
func (t *tokens) EstimatedBits() int {
	var (
		entropy  float32 // accumulated -p*log2(p) terms, scaled by counts
		extra    int     // fixed extra bits for length/offset codes
		nMatches int
	)
	total := int(t.n) + t.nFilled
	if total > 0 {
		inv := 1.0 / float32(total)
		for _, c := range t.litHist[:] {
			if c > 0 {
				f := float32(c)
				entropy += atLeastOne(-mFastLog2(f*inv)) * f
			}
		}
		// Just add 15 for EOB.
		entropy += 15
		// Slot 0 is EOB, so length codes start at index 1.
		for i, c := range t.extraHist[1 : literalCount-256] {
			if c > 0 {
				f := float32(c)
				entropy += atLeastOne(-mFastLog2(f*inv)) * f
				extra += int(lengthExtraBits[i&31]) * int(c)
				nMatches += int(c)
			}
		}
	}
	if nMatches > 0 {
		// Offsets are normalized against the match count, not total.
		inv := 1.0 / float32(nMatches)
		for i, c := range t.offHist[:offsetCodeCount] {
			if c > 0 {
				f := float32(c)
				entropy += atLeastOne(-mFastLog2(f*inv)) * f
				extra += int(offsetExtraBits[i&31]) * int(c)
			}
		}
	}
	return int(entropy) + extra
}
// AddMatch adds a match to the tokens, updating the length and offset
// histograms and packing the offset code into the stored token.
// This function is very sensitive to inlining and right on the border.
func (t *tokens) AddMatch(xlength uint32, xoffset uint32) {
	if debugDeflate {
		switch {
		case xlength >= maxMatchLength+baseMatchLength:
			panic(fmt.Errorf("invalid length: %v", xlength))
		case xoffset >= maxMatchOffset+baseMatchOffset:
			panic(fmt.Errorf("invalid offset: %v", xoffset))
		}
	}
	code := offsetCode(xoffset)
	// Stash the offset code in bits 16+ of the offset field.
	xoffset |= code << 16
	t.extraHist[lengthCodes1[uint8(xlength)]]++
	t.offHist[code&31]++
	t.tokens[t.n] = token(matchType | xlength<<lengthShift | xoffset)
	t.n++
}
// AddMatchLong adds a match to the tokens, potentially longer than the
// maximum match length, emitting it as a series of matches of at most
// 258 bytes each.
// Length should NOT have the base subtracted, only offset should.
func (t *tokens) AddMatchLong(xlength int32, xoffset uint32) {
	if debugDeflate {
		if xoffset >= maxMatchOffset+baseMatchOffset {
			panic(fmt.Errorf("invalid offset: %v", xoffset))
		}
	}
	offCode := offsetCode(xoffset)
	// Stash the offset code in bits 16+ of the offset field.
	xoffset |= offCode << 16
	for xlength > 0 {
		chunk := xlength
		if chunk > 258 {
			if chunk > 258+baseMatchLength {
				chunk = 258
			} else {
				// Leave at least baseMatchLength for the final pass.
				chunk = 258 - baseMatchLength
			}
		}
		xlength -= chunk
		chunk -= baseMatchLength
		t.extraHist[lengthCodes1[uint8(chunk)]]++
		t.offHist[offCode&31]++
		t.tokens[t.n] = token(matchType | uint32(chunk)<<lengthShift | xoffset)
		t.n++
	}
}
// AddEOB appends the end-of-block marker, counting it in histogram
// slot 0 (reserved for EOB by lengthCodes1's +1 shift).
func (t *tokens) AddEOB() {
	t.extraHist[0]++
	t.tokens[t.n] = token(endBlockMarker)
	t.n++
}
// Slice returns the tokens recorded so far.
// The result aliases t's internal buffer and is only valid until t is
// mutated again.
func (t *tokens) Slice() []token {
	return t.tokens[:t.n]
}
// VarInt returns the tokens serialized as unsigned varints.
// The buffer is sized for the worst case and trimmed to the bytes
// actually written.
func (t *tokens) VarInt() []byte {
	buf := make([]byte, binary.MaxVarintLen32*int(t.n))
	n := 0
	for _, tok := range t.tokens[:t.n] {
		n += binary.PutUvarint(buf[n:], uint64(tok))
	}
	return buf[:n]
}
// FromVarInt restores t to the varint encoded tokens provided.
// Any data in t is removed.
//
// Fix: the previous implementation called bytes.NewReader, but the
// "bytes" package is not in this file's import list, so it could not
// compile. Decoding now walks the buffer directly with binary.Uvarint,
// which also avoids allocating a reader.
func (t *tokens) FromVarInt(b []byte) error {
	var toks []token
	for len(b) > 0 {
		v, n := binary.Uvarint(b)
		if n <= 0 {
			// n == 0: buffer ended mid-value; n < 0: value overflows
			// 64 bits. Both indicate a corrupt encoding.
			return io.ErrUnexpectedEOF
		}
		toks = append(toks, token(v))
		b = b[n:]
	}
	t.indexTokens(toks)
	return nil
}
// typ returns the type bits of a token (literal vs. match).
func (t token) typ() uint32 { return uint32(t) & typeMask }

// literal returns the literal byte of a literal token.
func (t token) literal() uint8 { return uint8(t) }

// offset returns the offset field of a match token, including the
// offset-code bits packed in by AddMatch/AddMatchLong.
func (t token) offset() uint32 { return uint32(t) & offsetMask }

// length returns the stored length byte of a match token.
func (t token) length() uint8 { return uint8(t >> lengthShift) }

// lengthCode converts a length byte to its code via the lengthCodes table.
func lengthCode(len uint8) uint8 { return lengthCodes[len] }
// offsetCode returns the offset code corresponding to a specific offset.
//
// Fix: removed the unreachable `if false { ... }` branch that shadowed
// this logic with an equivalent three-tier lookup; it was dead code.
// The uint8 conversions let the compiler elide bounds checks, which is
// why both tables are exactly 256 entries.
func offsetCode(off uint32) uint32 {
	if off < uint32(len(offsetCodes)) {
		// Small offsets (0-255) are looked up directly.
		return offsetCodes[uint8(off)]
	}
	// Larger offsets share codes in steps of 128; offsetCodes14 is the
	// base table with 14 added, indexed by off>>7.
	return offsetCodes14[uint8(off>>7)]
}
// Copyright 2018 Klaus Post. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Based on work Copyright (c) 2013, Yann Collet, released under BSD License.
package fse
import (
"encoding/binary"
"errors"
"io"
)
// bitReader reads a bitstream in reverse.
// The last set bit indicates the start of the stream and is used
// for aligning the input.
type bitReader struct {
	in       []byte
	off      uint   // next byte to read is at in[off - 1]
	value    uint64 // bit container; bits are consumed from the top
	bitsRead uint8  // bits of value already consumed (64 == empty)
}
// init initializes and resets the bit reader.
// The stream is consumed from the end toward the start; the highest set
// bit of the final byte is a sentinel marking where decoding begins.
func (b *bitReader) init(in []byte) error {
	if len(in) < 1 {
		return errors.New("corrupt stream: too short")
	}
	b.in = in
	b.off = uint(len(in))
	// The highest bit of the last byte indicates where to start
	v := in[len(in)-1]
	if v == 0 {
		// A zero last byte has no sentinel bit.
		return errors.New("corrupt stream, did not find end of stream")
	}
	b.bitsRead = 64
	b.value = 0
	if len(in) >= 8 {
		// Fast path: load eight bytes in one go.
		b.fillFastStart()
	} else {
		b.fill()
		b.fill()
	}
	// Skip the padding bits above (and including) the sentinel bit.
	b.bitsRead += 8 - uint8(highBits(uint32(v)))
	return nil
}
// getBits will return n bits. n can be 0.
// When the container is exhausted (bitsRead >= 64) it returns 0 rather
// than reading past the end.
func (b *bitReader) getBits(n uint8) uint16 {
	if n == 0 || b.bitsRead >= 64 {
		return 0
	}
	return b.getBitsFast(n)
}
// getBitsFast requires that at least one bit is requested every time.
// There are no checks if the buffer is filled.
func (b *bitReader) getBitsFast(n uint8) uint16 {
	const regMask = 64 - 1
	// Shift the wanted bits to the top of the register, then back down
	// to the bottom; the masks keep both shift amounts in [0,63].
	v := uint16((b.value << (b.bitsRead & regMask)) >> ((regMask + 1 - n) & regMask))
	b.bitsRead += n
	return v
}
// fillFast tops the container back up to at least 32 available bits.
// The caller must guarantee at least 4 unread bytes remain.
func (b *bitReader) fillFast() {
	if b.bitsRead < 32 {
		return
	}
	// Read 4 bytes little-endian from just before the current offset.
	low := binary.LittleEndian.Uint32(b.in[b.off-4:])
	b.value = b.value<<32 | uint64(low)
	b.bitsRead -= 32
	b.off -= 4
}
// fill tops the container back up to at least 32 available bits,
// falling back to a byte-at-a-time loop near the start of the input.
func (b *bitReader) fill() {
	if b.bitsRead < 32 {
		return
	}
	if b.off > 4 {
		// Fast path: 4 bytes available, read them little-endian.
		low := binary.LittleEndian.Uint32(b.in[b.off-4:])
		b.value = b.value<<32 | uint64(low)
		b.bitsRead -= 32
		b.off -= 4
		return
	}
	// Fewer than 4 bytes left: drain them one at a time.
	for b.off > 0 {
		b.value = b.value<<8 | uint64(b.in[b.off-1])
		b.bitsRead -= 8
		b.off--
	}
}
// fillFastStart() assumes the bitreader is empty and there is at least 8 bytes to read.
func (b *bitReader) fillFastStart() {
	// Do single re-slice to avoid bounds checks.
	b.value = binary.LittleEndian.Uint64(b.in[b.off-8:])
	b.bitsRead = 0
	b.off -= 8
}
// finished returns true if all bits have been read from the bit stream:
// the container is exhausted and no input bytes remain.
func (b *bitReader) finished() bool {
	return b.bitsRead >= 64 && b.off == 0
}
// close the bitstream and returns an error if out-of-buffer reads occurred.
func (b *bitReader) close() error {
	// Release reference.
	b.in = nil
	if b.bitsRead > 64 {
		// More bits were consumed than the stream contained.
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Copyright 2018 Klaus Post. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Based on work Copyright (c) 2013, Yann Collet, released under BSD License.
package fse
import "fmt"
// bitWriter accumulates bits in a 64-bit container and appends whole
// bytes to out. The first bit written becomes the LSB of the first
// output byte.
type bitWriter struct {
	bitContainer uint64
	nBits        uint8 // bits currently held in bitContainer
	out          []byte
}

// bitMask16 holds (1<<n)-1 for n up to 16, padded so bits&31 indexing
// never needs a bounds check. Entries past 25 are zero and unused.
var bitMask16 = [32]uint16{
	0, 1, 3, 7, 0xF, 0x1F, 0x3F, 0x7F,
	0xFF, 0x1FF, 0x3FF, 0x7FF, 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF,
	0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF,
	0xFFFF, 0xFFFF,
}

// addBits16NC adds up to 16 bits, masking value to the requested width.
// It performs no space check; the caller must flush often enough.
func (b *bitWriter) addBits16NC(value uint16, bits uint8) {
	b.bitContainer |= uint64(value&bitMask16[bits&31]) << (b.nBits & 63)
	b.nBits += bits
}

// addBits16Clean adds up to 16 bits without masking; value must not
// contain more set bits than indicated. No space check is performed.
func (b *bitWriter) addBits16Clean(value uint16, bits uint8) {
	b.bitContainer |= uint64(value) << (b.nBits & 63)
	b.nBits += bits
}

// addBits16ZeroNC adds up to 16 bits and tolerates bits == 0.
// The shift pair clears any bits of value above the requested width.
// No space check is performed.
func (b *bitWriter) addBits16ZeroNC(value uint16, bits uint8) {
	if bits == 0 {
		return
	}
	shift := (16 - bits) & 15
	value = (value << shift) >> shift
	b.bitContainer |= uint64(value) << (b.nBits & 63)
	b.nBits += bits
}

// flush writes out every pending full byte, leaving at most 7 bits in
// the container, so at least 56 bits are writable afterwards.
// Using flush32 is faster, but leaves less space for writing.
func (b *bitWriter) flush() {
	full := b.nBits >> 3
	if full > 8 {
		panic(fmt.Errorf("bits (%d) > 64", b.nBits))
	}
	// Emit the full bytes, least significant first.
	for i := uint8(0); i < full; i++ {
		b.out = append(b.out, byte(b.bitContainer>>(i*8)))
	}
	// full == 8 shifts by 64, which Go defines as producing 0.
	b.bitContainer >>= full << 3
	b.nBits &= 7
}

// flush32 drains 32 bits when the container is at least half full,
// guaranteeing 32 writable bits on return.
func (b *bitWriter) flush32() {
	if b.nBits < 32 {
		return
	}
	b.out = append(b.out,
		byte(b.bitContainer),
		byte(b.bitContainer>>8),
		byte(b.bitContainer>>16),
		byte(b.bitContainer>>24))
	b.bitContainer >>= 32
	b.nBits -= 32
}

// flushAlign writes all remaining bits (rounding up to whole bytes)
// and leaves the writer byte-aligned and empty.
func (b *bitWriter) flushAlign() {
	pending := (b.nBits + 7) >> 3
	for i := uint8(0); i < pending; i++ {
		b.out = append(b.out, byte(b.bitContainer>>(i*8)))
	}
	b.bitContainer = 0
	b.nBits = 0
}

// close writes the terminating alignment bit and flushes the final
// byte(s) to the output.
func (b *bitWriter) close() {
	// End mark.
	b.addBits16Clean(1, 1)
	// Flush until the next byte boundary.
	b.flushAlign()
}

// reset clears the container and continues writing by appending to out.
func (b *bitWriter) reset(out []byte) {
	b.bitContainer = 0
	b.nBits = 0
	b.out = out
}
// Copyright 2018 Klaus Post. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Based on work Copyright (c) 2013, Yann Collet, released under BSD License.
package fse
// byteReader reads little endian values from a byte stream.
// The position is advanced manually by the caller and no bounds
// checks are performed.
type byteReader struct {
	b   []byte
	off int
}

// init sets the input buffer and rewinds the reader.
func (b *byteReader) init(in []byte) {
	b.b = in
	b.off = 0
}

// advance moves the read position forward n bytes.
func (b *byteReader) advance(n uint) {
	b.off += int(n)
}

// Uint32 returns the little endian uint32 at the current offset.
func (b byteReader) Uint32() uint32 {
	v := b.b[b.off:]
	v = v[:4] // single bounds check for the four reads below
	return uint32(v[0]) | uint32(v[1])<<8 | uint32(v[2])<<16 | uint32(v[3])<<24
}

// unread returns the portion of the input not yet consumed.
func (b byteReader) unread() []byte {
	return b.b[b.off:]
}

// remain reports how many bytes are left to read.
func (b byteReader) remain() int {
	return len(b.b) - b.off
}
// Copyright 2018 Klaus Post. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Based on work Copyright (c) 2013, Yann Collet, released under BSD License.
package fse
import (
"errors"
"fmt"
)
// Compress the input bytes. Input must be < 2GB.
// Provide a Scratch buffer to avoid memory allocations.
// Note that the output is also kept in the scratch buffer.
// If input is too hard to compress, ErrIncompressible is returned.
// If input is a single byte value repeated ErrUseRLE is returned.
func Compress(in []byte, s *Scratch) ([]byte, error) {
	if len(in) <= 1 {
		return nil, ErrIncompressible
	}
	if len(in) > (2<<30)-1 {
		return nil, errors.New("input too big, must be < 2GB")
	}
	s, err := s.prepare(in)
	if err != nil {
		return nil, err
	}

	// Create histogram, if none was provided.
	maxCount := s.maxCount
	if maxCount == 0 {
		maxCount = s.countSimple(in)
	}
	// Reset for next run.
	s.clearCount = true
	s.maxCount = 0
	if maxCount == len(in) {
		// One symbol, use RLE
		return nil, ErrUseRLE
	}
	if maxCount == 1 || maxCount < (len(in)>>7) {
		// Each symbol present maximum once or too well distributed.
		return nil, ErrIncompressible
	}

	// Derive the table size and the normalized histogram, then write
	// the header that lets the decoder rebuild the tables.
	s.optimalTableLog()
	err = s.normalizeCount()
	if err != nil {
		return nil, err
	}
	err = s.writeCount()
	if err != nil {
		return nil, err
	}

	// Debug-only sanity check of the normalized counts; disabled by
	// the constant condition.
	if false {
		err = s.validateNorm()
		if err != nil {
			return nil, err
		}
	}

	err = s.buildCTable()
	if err != nil {
		return nil, err
	}
	err = s.compress(in)
	if err != nil {
		return nil, err
	}
	s.Out = s.bw.out
	// Check if we compressed.
	if len(s.Out) >= len(in) {
		return nil, ErrIncompressible
	}
	return s.Out, nil
}
// cState contains the compression state of a stream.
type cState struct {
	bw         *bitWriter // shared output bitstream
	stateTable []uint16   // next-state lookup, taken from the cTable
	state      uint16     // current FSE state
}
// init will initialize the compression state to the first symbol of the stream.
// NOTE(review): the tableLog parameter is unused by this implementation;
// it appears to be kept only for the call signature — confirm.
func (c *cState) init(bw *bitWriter, ct *cTable, tableLog uint8, first symbolTransform) {
	c.bw = bw
	c.stateTable = ct.stateTable
	// Number of bits the first symbol would emit, rounded to nearest.
	nbBitsOut := (first.deltaNbBits + (1 << 15)) >> 16
	im := int32((nbBitsOut << 16) - first.deltaNbBits)
	// Index into the state table for this symbol's first state.
	lu := (im >> nbBitsOut) + first.deltaFindState
	c.state = c.stateTable[lu]
}
// encode the output symbol provided and write it to the bitstream.
// Assumes the symbol always emits at least one bit (s.zeroBits unset).
func (c *cState) encode(symbolTT symbolTransform) {
	// deltaNbBits carries the bit count in its upper 16 bits.
	nbBitsOut := (uint32(c.state) + symbolTT.deltaNbBits) >> 16
	dstState := int32(c.state>>(nbBitsOut&15)) + symbolTT.deltaFindState
	c.bw.addBits16NC(c.state, uint8(nbBitsOut))
	c.state = c.stateTable[dstState]
}
// encodeZero encodes the output symbol provided and writes it to the
// bitstream. Unlike encode, it tolerates symbols that emit zero bits
// (uses addBits16ZeroNC).
func (c *cState) encodeZero(symbolTT symbolTransform) {
	nbBitsOut := (uint32(c.state) + symbolTT.deltaNbBits) >> 16
	dstState := int32(c.state>>(nbBitsOut&15)) + symbolTT.deltaFindState
	c.bw.addBits16ZeroNC(c.state, uint8(nbBitsOut))
	c.state = c.stateTable[dstState]
}
// flush will write the tablelog to the output and flush the remaining full bytes.
// The final state is written with exactly tableLog bits; the decoder
// reads it back to seed decoding.
func (c *cState) flush(tableLog uint8) {
	c.bw.flush32()
	c.bw.addBits16NC(c.state, tableLog)
	c.bw.flush()
}
// compress is the main compression loop that will encode the input from the last byte to the first.
// Two interleaved states (c1, c2) each encode every second byte; the
// head of the input is handled specially so the main loop always sees
// a multiple of four bytes.
func (s *Scratch) compress(src []byte) error {
	if len(src) <= 2 {
		return errors.New("compress: src too small")
	}
	tt := s.ct.symbolTT[:256]
	s.bw.reset(s.Out)

	// Our two states each encodes every second byte.
	// Last byte encoded (first byte decoded) will always be encoded by c1.
	var c1, c2 cState

	// Encode so remaining size is divisible by 4.
	ip := len(src)
	if ip&1 == 1 {
		// Odd length: consume three bytes so the remainder is even.
		c1.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-1]])
		c2.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-2]])
		c1.encodeZero(tt[src[ip-3]])
		ip -= 3
	} else {
		c2.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-1]])
		c1.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-2]])
		ip -= 2
	}
	if ip&2 != 0 {
		// Consume two more bytes so the remainder is a multiple of 4.
		c2.encodeZero(tt[src[ip-1]])
		c1.encodeZero(tt[src[ip-2]])
		ip -= 2
	}
	src = src[:ip]

	// Main compression loop. The variant is picked once, based on
	// whether any symbol can emit zero bits and on whether four
	// symbols fit in the bit container between flushes.
	switch {
	case !s.zeroBits && s.actualTableLog <= 8:
		// We can encode 4 symbols without requiring a flush.
		// We do not need to check if any output is 0 bits.
		for ; len(src) >= 4; src = src[:len(src)-4] {
			s.bw.flush32()
			v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1]
			c2.encode(tt[v0])
			c1.encode(tt[v1])
			c2.encode(tt[v2])
			c1.encode(tt[v3])
		}
	case !s.zeroBits:
		// We do not need to check if any output is 0 bits.
		// Larger tableLog: flush mid-group as well.
		for ; len(src) >= 4; src = src[:len(src)-4] {
			s.bw.flush32()
			v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1]
			c2.encode(tt[v0])
			c1.encode(tt[v1])
			s.bw.flush32()
			c2.encode(tt[v2])
			c1.encode(tt[v3])
		}
	case s.actualTableLog <= 8:
		// We can encode 4 symbols without requiring a flush
		for ; len(src) >= 4; src = src[:len(src)-4] {
			s.bw.flush32()
			v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1]
			c2.encodeZero(tt[v0])
			c1.encodeZero(tt[v1])
			c2.encodeZero(tt[v2])
			c1.encodeZero(tt[v3])
		}
	default:
		// Worst case: zero-bit symbols possible and large tableLog.
		for ; len(src) >= 4; src = src[:len(src)-4] {
			s.bw.flush32()
			v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1]
			c2.encodeZero(tt[v0])
			c1.encodeZero(tt[v1])
			s.bw.flush32()
			c2.encodeZero(tt[v2])
			c1.encodeZero(tt[v3])
		}
	}

	// Flush final state.
	// Used to initialize state when decoding.
	c2.flush(s.actualTableLog)
	c1.flush(s.actualTableLog)
	s.bw.close()
	return nil
}
// writeCount will write the normalized histogram count to header.
// This is read back by readNCount.
// The counts are packed into a bitstream: runs of zero symbols are
// run-length encoded, and each count is written with a variable number
// of bits that shrinks as the remaining probability mass shrinks.
func (s *Scratch) writeCount() error {
	var (
		tableLog  = s.actualTableLog
		tableSize = 1 << tableLog
		previous0 bool
		charnum   uint16

		// Worst-case header size: tableLog bits per symbol plus slack.
		maxHeaderSize = ((int(s.symbolLen)*int(tableLog) + 4 + 2) >> 3) + 3

		// Write Table Size
		bitStream = uint32(tableLog - minTablelog)
		bitCount  = uint(4)
		remaining = int16(tableSize + 1) /* +1 for extra accuracy */
		threshold = int16(tableSize)
		nbBits    = uint(tableLog + 1)
	)
	if cap(s.Out) < maxHeaderSize {
		s.Out = make([]byte, 0, s.br.remain()+maxHeaderSize)
	}
	outP := uint(0)
	out := s.Out[:maxHeaderSize]

	// stops at 1
	for remaining > 1 {
		if previous0 {
			// The previous symbol had count 1 (encoded 0 probability):
			// run-length encode the following zero-count symbols.
			start := charnum
			for s.norm[charnum] == 0 {
				charnum++
			}
			// Runs of 24 zeros are flagged with 0xFFFF markers.
			for charnum >= start+24 {
				start += 24
				bitStream += uint32(0xFFFF) << bitCount
				out[outP] = byte(bitStream)
				out[outP+1] = byte(bitStream >> 8)
				outP += 2
				bitStream >>= 16
			}
			// Remaining run length in base-3 chunks of 2 bits each.
			for charnum >= start+3 {
				start += 3
				bitStream += 3 << bitCount
				bitCount += 2
			}
			bitStream += uint32(charnum-start) << bitCount
			bitCount += 2
			if bitCount > 16 {
				// Flush two bytes when the 32-bit accumulator fills up.
				out[outP] = byte(bitStream)
				out[outP+1] = byte(bitStream >> 8)
				outP += 2
				bitStream >>= 16
				bitCount -= 16
			}
		}

		count := s.norm[charnum]
		charnum++
		// Counts below max fit in one fewer bit; see the bitCount-- below.
		max := (2*threshold - 1) - remaining
		if count < 0 {
			// -1 encodes a low-probability symbol; it uses 1 slot.
			remaining += count
		} else {
			remaining -= count
		}
		count++ // +1 for extra accuracy
		if count >= threshold {
			count += max // [0..max[ [max..threshold[ (...) [threshold+max 2*threshold[
		}
		bitStream += uint32(count) << bitCount
		bitCount += nbBits
		if count < max {
			bitCount--
		}
		previous0 = count == 1
		if remaining < 1 {
			return errors.New("internal error: remaining<1")
		}
		// Shrink the field width as the remaining mass halves.
		for remaining < threshold {
			nbBits--
			threshold >>= 1
		}

		if bitCount > 16 {
			out[outP] = byte(bitStream)
			out[outP+1] = byte(bitStream >> 8)
			outP += 2
			bitStream >>= 16
			bitCount -= 16
		}
	}

	// Flush whatever bits remain, rounded up to whole bytes.
	out[outP] = byte(bitStream)
	out[outP+1] = byte(bitStream >> 8)
	outP += (bitCount + 7) / 8

	if charnum > s.symbolLen {
		return errors.New("internal error: charnum > s.symbolLen")
	}
	s.Out = out[:outP]
	return nil
}
// symbolTransform contains the state transform for a symbol:
// deltaNbBits packs the bit-count information in its upper 16 bits and
// deltaFindState offsets into the state table.
type symbolTransform struct {
	deltaFindState int32
	deltaNbBits    uint32
}

// String renders the transform in a human readable form.
func (s symbolTransform) String() string {
	out := fmt.Sprintf("dnbits: %08x, fs:%d", s.deltaNbBits, s.deltaFindState)
	return out
}
// cTable contains tables used for compression.
type cTable struct {
	tableSymbol []byte            // symbol for each table position
	stateTable  []uint16          // next-state values, sorted by symbol
	symbolTT    []symbolTransform // per-symbol encode transforms (256 entries)
}
// allocCtable ensures the compression tables in s.ct are sized for the
// current actualTableLog, reusing existing capacity when it suffices.
func (s *Scratch) allocCtable() {
	tableSize := 1 << s.actualTableLog
	if cap(s.ct.tableSymbol) < tableSize {
		s.ct.tableSymbol = make([]byte, tableSize)
	}
	s.ct.tableSymbol = s.ct.tableSymbol[:tableSize]

	if cap(s.ct.stateTable) < tableSize {
		s.ct.stateTable = make([]uint16, tableSize)
	}
	s.ct.stateTable = s.ct.stateTable[:tableSize]

	// symbolTT always covers the full byte alphabet.
	if cap(s.ct.symbolTT) < 256 {
		s.ct.symbolTT = make([]symbolTransform, 256)
	}
	s.ct.symbolTT = s.ct.symbolTT[:256]
}
// buildCTable will populate the compression table so it is ready to be used.
// It computes symbol start positions from the normalized counts, spreads
// the symbols over the table, and derives the per-symbol transforms.
func (s *Scratch) buildCTable() error {
	tableSize := uint32(1 << s.actualTableLog)
	highThreshold := tableSize - 1
	var cumul [maxSymbolValue + 2]int16

	s.allocCtable()
	tableSymbol := s.ct.tableSymbol[:tableSize]
	// symbol start positions
	{
		cumul[0] = 0
		for ui, v := range s.norm[:s.symbolLen-1] {
			u := byte(ui) // one less than reference
			if v == -1 {
				// Low proba symbol: give it one slot at the top of
				// the table (filled downward from highThreshold).
				cumul[u+1] = cumul[u] + 1
				tableSymbol[highThreshold] = u
				highThreshold--
			} else {
				cumul[u+1] = cumul[u] + v
			}
		}
		// Encode last symbol separately to avoid overflowing u
		u := int(s.symbolLen - 1)
		v := s.norm[s.symbolLen-1]
		if v == -1 {
			// Low proba symbol
			cumul[u+1] = cumul[u] + 1
			tableSymbol[highThreshold] = byte(u)
			highThreshold--
		} else {
			cumul[u+1] = cumul[u] + v
		}
		if uint32(cumul[s.symbolLen]) != tableSize {
			return fmt.Errorf("internal error: expected cumul[s.symbolLen] (%d) == tableSize (%d)", cumul[s.symbolLen], tableSize)
		}
		cumul[s.symbolLen] = int16(tableSize) + 1
	}
	// Spread symbols
	s.zeroBits = false
	{
		step := tableStep(tableSize)
		tableMask := tableSize - 1
		var position uint32
		// if any symbol > largeLimit, we may have 0 bits output.
		largeLimit := int16(1 << (s.actualTableLog - 1))
		for ui, v := range s.norm[:s.symbolLen] {
			symbol := byte(ui)
			if v > largeLimit {
				s.zeroBits = true
			}
			// Place each occurrence at `step` intervals, skipping the
			// slots above highThreshold already taken by -1 symbols.
			for nbOccurrences := int16(0); nbOccurrences < v; nbOccurrences++ {
				tableSymbol[position] = symbol
				position = (position + step) & tableMask
				for position > highThreshold {
					position = (position + step) & tableMask
				} /* Low proba area */
			}
		}

		// Check if we have gone through all positions
		if position != 0 {
			return errors.New("position!=0")
		}
	}

	// Build table
	table := s.ct.stateTable
	{
		tsi := int(tableSize)
		for u, v := range tableSymbol {
			// TableU16 : sorted by symbol order; gives next state value
			table[cumul[v]] = uint16(tsi + u)
			cumul[v]++
		}
	}

	// Build Symbol Transformation Table
	{
		total := int16(0)
		symbolTT := s.ct.symbolTT[:s.symbolLen]
		tableLog := s.actualTableLog
		tl := (uint32(tableLog) << 16) - (1 << tableLog)
		for i, v := range s.norm[:s.symbolLen] {
			switch v {
			case 0:
				// Symbol never occurs; no transform needed.
			case -1, 1:
				symbolTT[i].deltaNbBits = tl
				symbolTT[i].deltaFindState = int32(total - 1)
				total++
			default:
				// deltaNbBits packs maxBitsOut in the upper 16 bits,
				// minus the state threshold where one fewer bit is used.
				maxBitsOut := uint32(tableLog) - highBits(uint32(v-1))
				minStatePlus := uint32(v) << maxBitsOut
				symbolTT[i].deltaNbBits = (maxBitsOut << 16) - minStatePlus
				symbolTT[i].deltaFindState = int32(total - v)
				total += v
			}
		}
		if total != int16(tableSize) {
			return fmt.Errorf("total mismatch %d (got) != %d (want)", total, tableSize)
		}
	}
	return nil
}
// countSimple builds a histogram of in into s.count and returns the
// largest single count. s.symbolLen is updated to one past the highest
// symbol present. Does not update s.clearCount.
func (s *Scratch) countSimple(in []byte) (max int) {
	for _, v := range in {
		s.count[v]++
	}
	var biggest uint32
	symLen := s.symbolLen
	for i, v := range s.count[:] {
		if v == 0 {
			continue
		}
		if v > biggest {
			biggest = v
		}
		symLen = uint16(i) + 1
	}
	s.symbolLen = symLen
	return int(biggest)
}
// minTableLog provides the minimum logSize to safely represent a distribution.
func (s *Scratch) minTableLog() uint8 {
	// Bits needed relative to the source size...
	minBitsSrc := highBits(uint32(s.br.remain()-1)) + 1
	// ...and relative to the symbol alphabet size.
	minBitsSymbols := highBits(uint32(s.symbolLen-1)) + 2
	if minBitsSrc < minBitsSymbols {
		return uint8(minBitsSrc)
	}
	return uint8(minBitsSymbols)
}
// optimalTableLog calculates and sets the optimal tableLog in
// s.actualTableLog: the configured TableLog, reduced when the input is
// small, raised to the distribution's minimum, and clamped to the
// supported [minTablelog, maxTableLog] range.
func (s *Scratch) optimalTableLog() {
	logSize := s.TableLog
	if maxBitsSrc := uint8(highBits(uint32(s.br.remain()-1))) - 2; maxBitsSrc < logSize {
		// Accuracy can be reduced for small inputs.
		logSize = maxBitsSrc
	}
	if minBits := s.minTableLog(); minBits > logSize {
		// Need a minimum to safely represent all symbol values.
		logSize = minBits
	}
	if logSize < minTablelog {
		logSize = minTablelog
	}
	if logSize > maxTableLog {
		logSize = maxTableLog
	}
	s.actualTableLog = logSize
}
var rtbTable = [...]uint32{0, 473195, 504333, 520860, 550000, 700000, 750000, 830000}
// normalizeCount will normalize the count of the symbols so
// the total is equal to the table size.
// Uses 62-bit fixed-point arithmetic; falls back to normalizeCount2
// when the rounding error concentrated on the largest symbol would be
// too big.
func (s *Scratch) normalizeCount() error {
	var (
		tableLog          = s.actualTableLog
		scale             = 62 - uint64(tableLog)
		step              = (1 << 62) / uint64(s.br.remain())
		vStep             = uint64(1) << (scale - 20)
		stillToDistribute = int16(1 << tableLog)
		largest           int
		largestP          int16
		// Symbols at or below this count get the special -1 probability.
		lowThreshold = (uint32)(s.br.remain() >> tableLog)
	)
	for i, cnt := range s.count[:s.symbolLen] {
		// already handled
		// if (count[s] == s.length) return 0; /* rle special case */

		if cnt == 0 {
			s.norm[i] = 0
			continue
		}
		if cnt <= lowThreshold {
			// Low-probability symbol: encoded as -1, consumes one slot.
			s.norm[i] = -1
			stillToDistribute--
		} else {
			proba := (int16)((uint64(cnt) * step) >> scale)
			if proba < 8 {
				// Small probabilities: round up when the fractional
				// part beats the per-proba threshold.
				restToBeat := vStep * uint64(rtbTable[proba])
				v := uint64(cnt)*step - (uint64(proba) << scale)
				if v > restToBeat {
					proba++
				}
			}
			if proba > largestP {
				largestP = proba
				largest = i
			}
			s.norm[i] = proba
			stillToDistribute -= proba
		}
	}
	if -stillToDistribute >= (s.norm[largest] >> 1) {
		// corner case, need another normalization method
		return s.normalizeCount2()
	}
	// Dump the remaining slack onto the most frequent symbol.
	s.norm[largest] += stillToDistribute
	return nil
}
// Secondary normalization method.
// To be used when primary method fails.
// It first pins low-count symbols to 1, then distributes the remaining
// table slots proportionally over the rest with fixed-point weights.
func (s *Scratch) normalizeCount2() error {
	const notYetAssigned = -2
	var (
		distributed  uint32
		total        = uint32(s.br.remain())
		tableLog     = s.actualTableLog
		lowThreshold = total >> tableLog
		lowOne       = (total * 3) >> (tableLog + 1)
	)
	for i, cnt := range s.count[:s.symbolLen] {
		if cnt == 0 {
			s.norm[i] = 0
			continue
		}
		if cnt <= lowThreshold {
			// Very rare symbol: -1 (one slot).
			s.norm[i] = -1
			distributed++
			total -= cnt
			continue
		}
		if cnt <= lowOne {
			// Rare symbol: probability 1.
			s.norm[i] = 1
			distributed++
			total -= cnt
			continue
		}
		s.norm[i] = notYetAssigned
	}
	toDistribute := (1 << tableLog) - distributed
	// NOTE(review): if every symbol was assigned above, toDistribute is 0
	// and the division below would panic — presumably unreachable given
	// the callers; confirm.
	if (total / toDistribute) > lowOne {
		// risk of rounding to zero: raise the "low" threshold and
		// re-scan the unassigned symbols.
		lowOne = (total * 3) / (toDistribute * 2)
		for i, cnt := range s.count[:s.symbolLen] {
			if (s.norm[i] == notYetAssigned) && (cnt <= lowOne) {
				s.norm[i] = 1
				distributed++
				total -= cnt
				continue
			}
		}
		toDistribute = (1 << tableLog) - distributed
	}
	if distributed == uint32(s.symbolLen)+1 {
		// all values are pretty poor;
		// probably incompressible data (should have already been detected);
		// find max, then give all remaining points to max
		var maxV int
		var maxC uint32
		for i, cnt := range s.count[:s.symbolLen] {
			if cnt > maxC {
				maxV = i
				maxC = cnt
			}
		}
		s.norm[maxV] += int16(toDistribute)
		return nil
	}

	if total == 0 {
		// all of the symbols were low enough for the lowOne or lowThreshold
		// Spread the leftover slots round-robin over symbols with norm>0.
		for i := uint32(0); toDistribute > 0; i = (i + 1) % (uint32(s.symbolLen)) {
			if s.norm[i] > 0 {
				toDistribute--
				s.norm[i]++
			}
		}
		return nil
	}

	var (
		vStepLog = 62 - uint64(tableLog)
		mid      = uint64((1 << (vStepLog - 1)) - 1)
		rStep    = (((1 << vStepLog) * uint64(toDistribute)) + mid) / uint64(total) // scale on remaining
		tmpTotal = mid
	)
	for i, cnt := range s.count[:s.symbolLen] {
		if s.norm[i] == notYetAssigned {
			// Weight is the number of fixed-point steps this symbol's
			// count advances the running total.
			var (
				end    = tmpTotal + uint64(cnt)*rStep
				sStart = uint32(tmpTotal >> vStepLog)
				sEnd   = uint32(end >> vStepLog)
				weight = sEnd - sStart
			)
			if weight < 1 {
				return errors.New("weight < 1")
			}
			s.norm[i] = int16(weight)
			tmpTotal = end
		}
	}
	return nil
}
// validateNorm validates the normalized histogram table: the absolute
// values must sum to exactly 1<<actualTableLog and no counts may exist
// beyond symbolLen. On failure the table is dumped for debugging via
// the deferred printer (which inspects the named return err).
func (s *Scratch) validateNorm() (err error) {
	var total int
	for _, v := range s.norm[:s.symbolLen] {
		if v >= 0 {
			total += int(v)
		} else {
			// -1 entries occupy one slot each.
			total -= int(v)
		}
	}
	defer func() {
		if err == nil {
			return
		}
		fmt.Printf("selected TableLog: %d, Symbol length: %d\n", s.actualTableLog, s.symbolLen)
		for i, v := range s.norm[:s.symbolLen] {
			fmt.Printf("%3d: %5d -> %4d \n", i, s.count[i], v)
		}
	}()
	if total != (1 << s.actualTableLog) {
		return fmt.Errorf("warning: Total == %d != %d", total, 1<<s.actualTableLog)
	}
	for i, v := range s.count[s.symbolLen:] {
		if v != 0 {
			return fmt.Errorf("warning: Found symbol out of range, %d after cut", i)
		}
	}
	return nil
}
package fse
import (
"errors"
"fmt"
)
const (
	// tablelogAbsoluteMax is the largest table log accepted when reading
	// a stream header; larger values are rejected as corrupt.
	tablelogAbsoluteMax = 15
)
// Decompress a block of data.
// You can provide a scratch buffer to avoid allocations.
// If nil is provided a temporary one will be allocated.
// It is possible, but by no way guaranteed that corrupt data will
// return an error.
// It is up to the caller to verify integrity of the returned data.
// Use a predefined Scratch to set maximum acceptable output size.
func Decompress(b []byte, s *Scratch) ([]byte, error) {
	var err error
	if s, err = s.prepare(b); err != nil {
		return nil, err
	}
	s.Out = s.Out[:0]
	// Header: symbol distribution.
	if err = s.readNCount(); err != nil {
		return nil, err
	}
	// Decoding tables from the distribution.
	if err = s.buildDtable(); err != nil {
		return nil, err
	}
	// Payload.
	if err = s.decompress(); err != nil {
		return nil, err
	}
	return s.Out, nil
}
// readNCount will read the symbol distribution so decoding tables can be constructed.
// Counts are decoded into s.norm and the stream's tableLog into s.actualTableLog.
// Returns an error for malformed or corrupt headers.
func (s *Scratch) readNCount() error {
	var (
		charnum   uint16 // next symbol to receive a count
		previous0 bool   // previous count was zero -> a zero-run may follow
		b         = &s.br
	)
	iend := b.remain()
	if iend < 4 {
		return errors.New("input too small")
	}
	bitStream := b.Uint32()
	nbBits := uint((bitStream & 0xF) + minTablelog) // extract tableLog
	if nbBits > tablelogAbsoluteMax {
		return errors.New("tableLog too large")
	}
	bitStream >>= 4
	bitCount := uint(4)
	s.actualTableLog = uint8(nbBits)
	// remaining starts one above the table size so the loop ends exactly
	// when the distributed counts fill the table.
	remaining := int32((1 << nbBits) + 1)
	threshold := int32(1 << nbBits)
	gotTotal := int32(0)
	nbBits++
	for remaining > 1 {
		if previous0 {
			// Decode a run of zero counts: each 0xFFFF extends the run by
			// 24 symbols, each 2-bit value of 3 by 3 more, and the final
			// 2 bits add 0-3.
			n0 := charnum
			for (bitStream & 0xFFFF) == 0xFFFF {
				n0 += 24
				if b.off < iend-5 {
					b.advance(2)
					bitStream = b.Uint32() >> bitCount
				} else {
					// Near end of input: keep consuming the cached word.
					bitStream >>= 16
					bitCount += 16
				}
			}
			for (bitStream & 3) == 3 {
				n0 += 3
				bitStream >>= 2
				bitCount += 2
			}
			n0 += uint16(bitStream & 3)
			bitCount += 2
			if n0 > maxSymbolValue {
				return errors.New("maxSymbolValue too small")
			}
			// All symbols skipped by the run get a zero count.
			for charnum < n0 {
				s.norm[charnum&0xff] = 0
				charnum++
			}
			if b.off <= iend-7 || b.off+int(bitCount>>3) <= iend-4 {
				b.advance(bitCount >> 3)
				bitCount &= 7
				bitStream = b.Uint32() >> bitCount
			} else {
				bitStream >>= 2
			}
		}
		// Counts use a variable-length code: values below 'max' take
		// nbBits-1 bits, larger ones take nbBits bits.
		max := (2*(threshold) - 1) - (remaining)
		var count int32
		if (int32(bitStream) & (threshold - 1)) < max {
			count = int32(bitStream) & (threshold - 1)
			bitCount += nbBits - 1
		} else {
			count = int32(bitStream) & (2*threshold - 1)
			if count >= threshold {
				count -= max
			}
			bitCount += nbBits
		}
		count-- // extra accuracy
		if count < 0 {
			// -1 means +1
			remaining += count
			gotTotal -= count
		} else {
			remaining -= count
			gotTotal += count
		}
		s.norm[charnum&0xff] = int16(count)
		charnum++
		previous0 = count == 0
		// Shrink the coding threshold as fewer table slots remain.
		for remaining < threshold {
			nbBits--
			threshold >>= 1
		}
		if b.off <= iend-7 || b.off+int(bitCount>>3) <= iend-4 {
			b.advance(bitCount >> 3)
			bitCount &= 7
		} else {
			// Clamp to the final readable 4-byte window.
			bitCount -= (uint)(8 * (len(b.b) - 4 - b.off))
			b.off = len(b.b) - 4
		}
		bitStream = b.Uint32() >> (bitCount & 31)
	}
	s.symbolLen = charnum
	// Sanity checks on the decoded distribution.
	if s.symbolLen <= 1 {
		return fmt.Errorf("symbolLen (%d) too small", s.symbolLen)
	}
	if s.symbolLen > maxSymbolValue+1 {
		return fmt.Errorf("symbolLen (%d) too big", s.symbolLen)
	}
	if remaining != 1 {
		return fmt.Errorf("corruption detected (remaining %d != 1)", remaining)
	}
	if bitCount > 32 {
		return fmt.Errorf("corruption detected (bitCount %d > 32)", bitCount)
	}
	if gotTotal != 1<<s.actualTableLog {
		return fmt.Errorf("corruption detected (total %d != %d)", gotTotal, 1<<s.actualTableLog)
	}
	b.advance((bitCount + 7) >> 3)
	return nil
}
// decSymbol contains information about a state entry,
// Including the state offset base, the output symbol and
// the number of bits to read for the low part of the destination state.
type decSymbol struct {
	newState uint16 // base of the next state; low bits read from the stream are added
	symbol   uint8  // byte emitted when this state is visited
	nbBits   uint8  // number of low bits to read for the next state
}
// allocDtable ensures the decoding tables are sized for the current
// actualTableLog, reusing existing capacity where possible.
func (s *Scratch) allocDtable() {
	want := 1 << s.actualTableLog
	if cap(s.decTable) >= want {
		s.decTable = s.decTable[:want]
	} else {
		s.decTable = make([]decSymbol, want)
	}
	if cap(s.ct.tableSymbol) >= 256 {
		s.ct.tableSymbol = s.ct.tableSymbol[:256]
	} else {
		s.ct.tableSymbol = make([]byte, 256)
	}
	if cap(s.ct.stateTable) >= 256 {
		s.ct.stateTable = s.ct.stateTable[:256]
	} else {
		s.ct.stateTable = make([]uint16, 256)
	}
}
// buildDtable will build the decoding table from the normalized counts
// in s.norm, following the FSE reference layout: low-probability (-1)
// symbols at the top, remaining symbols spread with tableStep.
func (s *Scratch) buildDtable() error {
	tableSize := uint32(1 << s.actualTableLog)
	highThreshold := tableSize - 1
	s.allocDtable()
	symbolNext := s.ct.stateTable[:256]
	// Init, lay down lowprob symbols
	s.zeroBits = false
	{
		largeLimit := int16(1 << (s.actualTableLog - 1))
		for i, v := range s.norm[:s.symbolLen] {
			if v == -1 {
				// Probability -1 symbols occupy cells from the top down.
				s.decTable[highThreshold].symbol = uint8(i)
				highThreshold--
				symbolNext[i] = 1
			} else {
				if v >= largeLimit {
					// A symbol holding more than half the table can be
					// decoded with zero bits; disables the fast path.
					s.zeroBits = true
				}
				symbolNext[i] = uint16(v)
			}
		}
	}
	// Spread symbols
	{
		tableMask := tableSize - 1
		step := tableStep(tableSize)
		position := uint32(0)
		for ss, v := range s.norm[:s.symbolLen] {
			for i := 0; i < int(v); i++ {
				s.decTable[position].symbol = uint8(ss)
				position = (position + step) & tableMask
				for position > highThreshold {
					// lowprob area
					position = (position + step) & tableMask
				}
			}
		}
		if position != 0 {
			// position must reach all cells once, otherwise normalizedCounter is incorrect
			return errors.New("corrupted input (position != 0)")
		}
	}
	// Build Decoding table
	{
		tableSize := uint16(1 << s.actualTableLog)
		for u, v := range s.decTable {
			symbol := v.symbol
			nextState := symbolNext[symbol]
			symbolNext[symbol] = nextState + 1
			// More frequent symbols (higher nextState) need fewer bits.
			nBits := s.actualTableLog - byte(highBits(uint32(nextState)))
			s.decTable[u].nbBits = nBits
			newState := (nextState << nBits) - tableSize
			if newState >= tableSize {
				return fmt.Errorf("newState (%d) outside table size (%d)", newState, tableSize)
			}
			if newState == uint16(u) && nBits == 0 {
				// Seems weird that this is possible with nbits > 0.
				return fmt.Errorf("newState (%d) == oldState (%d) and no bits", newState, u)
			}
			s.decTable[u].newState = newState
		}
	}
	return nil
}
// decompress will decompress the bitstream.
// Two interleaved decoder states are advanced in parallel; the fast path
// (taken when no state can emit zero bits) uses nextFast.
// If the buffer is over-read an error is returned.
func (s *Scratch) decompress() error {
	br := &s.bits
	if err := br.init(s.br.unread()); err != nil {
		return err
	}
	var s1, s2 decoder
	// Initialize and decode first state and symbol.
	s1.init(br, s.decTable, s.actualTableLog)
	s2.init(br, s.decTable, s.actualTableLog)
	// Use temp table to avoid bound checks/append penalty.
	var tmp = s.ct.tableSymbol[:256]
	var off uint8
	// Main part
	if !s.zeroBits {
		for br.off >= 8 {
			br.fillFast()
			tmp[off+0] = s1.nextFast()
			tmp[off+1] = s2.nextFast()
			br.fillFast()
			tmp[off+2] = s1.nextFast()
			tmp[off+3] = s2.nextFast()
			off += 4
			// When off is 0, we have overflowed and should write.
			if off == 0 {
				s.Out = append(s.Out, tmp...)
				if len(s.Out) >= s.DecompressLimit {
					return fmt.Errorf("output size (%d) > DecompressLimit (%d)", len(s.Out), s.DecompressLimit)
				}
			}
		}
	} else {
		for br.off >= 8 {
			br.fillFast()
			tmp[off+0] = s1.next()
			tmp[off+1] = s2.next()
			br.fillFast()
			tmp[off+2] = s1.next()
			tmp[off+3] = s2.next()
			off += 4
			// When off is 0, we have overflowed and should write.
			if off == 0 {
				s.Out = append(s.Out, tmp...)
				if len(s.Out) >= s.DecompressLimit {
					return fmt.Errorf("output size (%d) > DecompressLimit (%d)", len(s.Out), s.DecompressLimit)
				}
			}
		}
	}
	// Flush whatever partial buffer remains.
	s.Out = append(s.Out, tmp[:off]...)
	// Final bits, a bit more expensive check
	for {
		if s1.finished() {
			// Both states flush their last symbol; order matters.
			s.Out = append(s.Out, s1.final(), s2.final())
			break
		}
		br.fill()
		s.Out = append(s.Out, s1.next())
		if s2.finished() {
			s.Out = append(s.Out, s2.final(), s1.final())
			break
		}
		s.Out = append(s.Out, s2.next())
		if len(s.Out) >= s.DecompressLimit {
			return fmt.Errorf("output size (%d) > DecompressLimit (%d)", len(s.Out), s.DecompressLimit)
		}
	}
	return br.close()
}
// decoder keeps track of the current state and updates it from the bitstream.
type decoder struct {
	state uint16      // current index into dt
	br    *bitReader  // shared input bitstream
	dt    []decSymbol // decoding table (Scratch.decTable)
}
// init prepares the decoder and loads the initial state from the stream.
func (d *decoder) init(in *bitReader, dt []decSymbol, tableLog uint8) {
	d.br = in
	d.dt = dt
	// The initial state is stored as tableLog raw bits.
	d.state = in.getBits(tableLog)
}
// next returns the next symbol and sets the next state.
// At least tablelog bits must be available in the bit reader.
func (d *decoder) next() uint8 {
	entry := d.dt[d.state]
	d.state = entry.newState + d.br.getBits(entry.nbBits)
	return entry.symbol
}
// finished reports whether the bitstream is exhausted and the next
// state transition would require reading more input bits.
func (d *decoder) finished() bool {
	if !d.br.finished() {
		return false
	}
	return d.dt[d.state].nbBits > 0
}
// final returns the symbol of the current state without advancing it.
func (d *decoder) final() uint8 {
	entry := d.dt[d.state]
	return entry.symbol
}
// nextFast returns the next symbol and sets the next state.
// This can only be used if no symbols are 0 bits.
// At least tablelog bits must be available in the bit reader.
func (d *decoder) nextFast() uint8 {
	entry := d.dt[d.state]
	// getBitsFast skips the zero-bit check, hence the precondition above.
	d.state = entry.newState + d.br.getBitsFast(entry.nbBits)
	return entry.symbol
}
// Copyright 2018 Klaus Post. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Based on work Copyright (c) 2013, Yann Collet, released under BSD License.
// Package fse provides Finite State Entropy encoding and decoding.
//
// Finite State Entropy encoding provides a fast near-optimal symbol encoding/decoding
// for byte blocks as implemented in zstd.
//
// See https://github.com/klauspost/compress/tree/master/fse for more information.
package fse
import (
"errors"
"fmt"
"math/bits"
)
const (
	/*!MEMORY_USAGE :
	 * Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.)
	 * Increasing memory usage improves compression ratio
	 * Reduced memory usage can improve speed, due to cache effect
	 * Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */
	maxMemoryUsage     = 14
	defaultMemoryUsage = 13

	// Table logs are two below the memory usage (table entries are 4 bytes).
	maxTableLog     = maxMemoryUsage - 2
	maxTablesize    = 1 << maxTableLog
	defaultTablelog = defaultMemoryUsage - 2
	// minTablelog is the smallest tableLog a stream may declare.
	minTablelog = 5
	// maxSymbolValue is the highest symbol (byte) value supported.
	maxSymbolValue = 255
)
var (
	// ErrIncompressible is returned when input is judged to be too hard to compress.
	ErrIncompressible = errors.New("input is not compressible")

	// ErrUseRLE is returned from the compressor when the input is a single byte value repeated.
	// Callers should store such blocks with run-length encoding instead.
	ErrUseRLE = errors.New("input is single value repeated")
)
// Scratch provides temporary storage for compression and decompression.
type Scratch struct {
	// Private
	count    [maxSymbolValue + 1]uint32 // histogram of the current block
	norm     [maxSymbolValue + 1]int16  // normalized (table-scaled) counts
	br       byteReader                 // input reader used for header parsing
	bits     bitReader                  // bit reader used for payload decoding
	bw       bitWriter                  // bit writer (compression side)
	ct       cTable                     // Compression tables.
	decTable []decSymbol                // Decompression table.
	maxCount int                        // count of the most probable symbol

	// Per block parameters.
	// These can be used to override compression parameters of the block.
	// Do not touch, unless you know what you are doing.

	// Out is output buffer.
	// If the scratch is re-used before the caller is done processing the output,
	// set this field to nil.
	// Otherwise the output buffer will be re-used for next Compression/Decompression step
	// and allocation will be avoided.
	Out []byte

	// DecompressLimit limits the maximum decoded size acceptable.
	// If > 0 decompression will stop when approximately this many bytes
	// has been decoded.
	// If 0, maximum size will be 2GB.
	DecompressLimit int

	symbolLen      uint16 // Length of active part of the symbol table.
	actualTableLog uint8  // Selected tablelog.
	zeroBits       bool   // no bits has prob > 50%.
	clearCount     bool   // clear count

	// MaxSymbolValue will override the maximum symbol value of the next block.
	MaxSymbolValue uint8

	// TableLog will attempt to override the tablelog for the next block.
	TableLog uint8
}
// Histogram exposes the 256-entry histogram so the caller can populate
// it and skip that step in the compression, or inspect it afterwards.
// To indicate that you have populated it, call HistogramFinished with
// the highest populated symbol and the count of the most populated
// entry; both are accepted at face value.
// The returned slice is always length 256.
func (s *Scratch) Histogram() []uint32 {
	return s.count[:]
}
// HistogramFinished marks the histogram as externally populated.
// maxSymbol is the index of the highest set symbol of the next data
// segment; maxCount is the number of entries in the most populated
// entry. Both are accepted at face value.
func (s *Scratch) HistogramFinished(maxSymbol uint8, maxCount int) {
	// A non-zero maxCount means the histogram holds data and must be
	// cleared before the next run.
	s.clearCount = maxCount != 0
	s.maxCount = maxCount
	s.symbolLen = uint16(maxSymbol) + 1
}
// prepare allocates and configures scratch tables shared by the
// compression and decompression paths, applying defaults for any
// unset public fields. Returns the (possibly newly allocated) Scratch.
func (s *Scratch) prepare(in []byte) (*Scratch, error) {
	if s == nil {
		s = &Scratch{}
	}
	// Defaults for unset public parameters.
	if s.MaxSymbolValue == 0 {
		s.MaxSymbolValue = 255
	}
	if s.TableLog == 0 {
		s.TableLog = defaultTablelog
	}
	if s.TableLog > maxTableLog {
		return nil, fmt.Errorf("tableLog (%d) > maxTableLog (%d)", s.TableLog, maxTableLog)
	}
	if s.DecompressLimit == 0 {
		// Max size 2GB.
		s.DecompressLimit = (2 << 30) - 1
	}
	if cap(s.Out) == 0 {
		s.Out = make([]byte, 0, len(in))
	}
	// Wipe a previously used histogram, unless the caller pre-filled it.
	if s.clearCount && s.maxCount == 0 {
		for i := range s.count {
			s.count[i] = 0
		}
		s.clearCount = false
	}
	s.br.init(in)
	return s, nil
}
// tableStep returns the symbol-spreading stride for a table of the given
// size ((size>>1)+(size>>3)+3), chosen so repeated stepping visits every
// table cell exactly once.
func tableStep(tableSize uint32) uint32 {
	step := tableSize>>1 + tableSize>>3
	return step + 3
}
// highBits returns the index of the highest set bit of val, i.e.
// floor(log2(val)). val is expected to be > 0; for val == 0 the result
// wraps to math.MaxUint32 (bits.Len32(0)-1 == -1).
// The previous named result parameter `n` was never used and only
// produced lint noise; the signature is otherwise unchanged.
func highBits(val uint32) uint32 {
	return uint32(bits.Len32(val) - 1)
}
// Copyright 2018 Klaus Post. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Based on work Copyright (c) 2013, Yann Collet, released under BSD License.
package huff0
import (
"errors"
"fmt"
"io"
"github.com/klauspost/compress/internal/le"
)
// bitReaderBytes reads a bitstream in reverse, a whole byte at a time.
// The last set bit indicates the start of the stream and is used
// for aligning the input.
type bitReaderBytes struct {
	in       []byte
	off      uint   // next byte to read is at in[off - 1]
	value    uint64 // bit window; bits are consumed from the high end
	bitsRead uint8  // bits already consumed from value
}
// init initializes and resets the bit reader.
// The stream is read back-to-front; the highest set bit of the final
// byte marks where the valid bits begin.
func (b *bitReaderBytes) init(in []byte) error {
	if len(in) < 1 {
		return errors.New("corrupt stream: too short")
	}
	b.in = in
	b.off = uint(len(in))
	// The highest bit of the last byte indicates where to start
	v := in[len(in)-1]
	if v == 0 {
		return errors.New("corrupt stream, did not find end of stream")
	}
	b.bitsRead = 64
	b.value = 0
	if len(in) >= 8 {
		b.fillFastStart()
	} else {
		// Short input: two fills pull in everything available.
		b.fill()
		b.fill()
	}
	// Skip padding up to and including the end-of-stream marker bit.
	b.advance(8 - uint8(highBit32(uint32(v))))
	return nil
}
// peekByteFast returns the next 8 bits without consuming them.
// The caller must guarantee the container holds at least one byte;
// there are no checks.
func (b *bitReaderBytes) peekByteFast() uint8 {
	return uint8(b.value >> 56)
}
// advance consumes n bits by shifting them out of the high end of the
// container. The &63 mask keeps the shift defined for any n.
func (b *bitReaderBytes) advance(n uint8) {
	b.value <<= n & 63
	b.bitsRead += n
}
// fillFast() will make sure at least 32 bits are available.
// There must be at least 4 bytes available.
func (b *bitReaderBytes) fillFast() {
	if b.bitsRead < 32 {
		return
	}
	// Load 4 bytes little-endian and merge them below the unread bits.
	low := le.Load32(b.in, b.off-4)
	b.value |= uint64(low) << (b.bitsRead - 32)
	b.bitsRead -= 32
	b.off -= 4
}
// fillFastStart() assumes the bitReaderBytes is empty and there is at least 8 bytes to read.
func (b *bitReaderBytes) fillFastStart() {
	// Load a full 64-bit word in a single read.
	b.value = le.Load64(b.in, b.off-8)
	b.bitsRead = 0
	b.off -= 8
}
// fill() will make sure at least 32 bits are available.
func (b *bitReaderBytes) fill() {
	if b.bitsRead < 32 {
		return
	}
	if b.off >= 4 {
		// Fast path: 4 whole bytes are available.
		low := le.Load32(b.in, b.off-4)
		b.value |= uint64(low) << (b.bitsRead - 32)
		b.bitsRead -= 32
		b.off -= 4
		return
	}
	// Tail: fewer than 4 bytes remain; pull them in one at a time.
	for b.off > 0 {
		b.value |= uint64(b.in[b.off-1]) << (b.bitsRead - 8)
		b.bitsRead -= 8
		b.off--
	}
}
// finished reports whether every bit has been consumed from the stream.
func (b *bitReaderBytes) finished() bool {
	return b.bitsRead >= 64 && b.off == 0
}
// remaining returns the number of unread bits: unread input bytes plus
// whatever is still in the container.
func (b *bitReaderBytes) remaining() uint {
	return uint(64-b.bitsRead) + b.off*8
}
// close the bitstream and returns an error if out-of-buffer reads occurred.
func (b *bitReaderBytes) close() error {
	// Release the input reference.
	b.in = nil
	if left := b.remaining(); left > 0 {
		return fmt.Errorf("corrupt input: %d bits remain on stream", left)
	}
	if b.bitsRead > 64 {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// bitReaderShifted reads a bitstream in reverse, serving variable-width
// bit groups via peekBitsFast.
// The last set bit indicates the start of the stream and is used
// for aligning the input.
type bitReaderShifted struct {
	in       []byte
	off      uint   // next byte to read is at in[off - 1]
	value    uint64 // bit window; bits are consumed from the high end
	bitsRead uint8  // bits already consumed from value
}
// init initializes and resets the bit reader.
// The stream is read back-to-front; the highest set bit of the final
// byte marks where the valid bits begin.
func (b *bitReaderShifted) init(in []byte) error {
	if len(in) < 1 {
		return errors.New("corrupt stream: too short")
	}
	b.in = in
	b.off = uint(len(in))
	// The highest bit of the last byte indicates where to start
	v := in[len(in)-1]
	if v == 0 {
		return errors.New("corrupt stream, did not find end of stream")
	}
	b.bitsRead = 64
	b.value = 0
	if len(in) >= 8 {
		b.fillFastStart()
	} else {
		// Short input: two fills pull in everything available.
		b.fill()
		b.fill()
	}
	// Skip padding up to and including the end-of-stream marker bit.
	b.advance(8 - uint8(highBit32(uint32(v))))
	return nil
}
// peekBitsFast returns the next n bits without consuming them.
// The caller must request at least one bit; there are no fill checks.
func (b *bitReaderShifted) peekBitsFast(n uint8) uint16 {
	shift := (64 - n) & 63
	return uint16(b.value >> shift)
}
// advance consumes n bits by shifting them out of the high end of the
// container. The &63 mask keeps the shift defined for any n.
func (b *bitReaderShifted) advance(n uint8) {
	b.value <<= n & 63
	b.bitsRead += n
}
// fillFast() will make sure at least 32 bits are available.
// There must be at least 4 bytes available.
func (b *bitReaderShifted) fillFast() {
	if b.bitsRead < 32 {
		return
	}
	// Load 4 bytes little-endian and merge them below the unread bits.
	low := le.Load32(b.in, b.off-4)
	b.value |= uint64(low) << ((b.bitsRead - 32) & 63)
	b.bitsRead -= 32
	b.off -= 4
}
// fillFastStart() assumes the bitReaderShifted is empty and there is at least 8 bytes to read.
func (b *bitReaderShifted) fillFastStart() {
	// Load a full 64-bit word in a single read.
	b.value = le.Load64(b.in, b.off-8)
	b.bitsRead = 0
	b.off -= 8
}
// fill() will make sure at least 32 bits are available.
func (b *bitReaderShifted) fill() {
	if b.bitsRead < 32 {
		return
	}
	// Fast path whenever 4 whole bytes are available. Using >= 4 (the
	// previous code used > 4) matches bitReaderBytes.fill and also takes
	// the single 32-bit load for the final word; le.Load32(b.in, 0) is
	// in-bounds when off == 4, and the byte-at-a-time loop assembles the
	// exact same value, so behavior is unchanged.
	if b.off >= 4 {
		low := le.Load32(b.in, b.off-4)
		b.value |= uint64(low) << ((b.bitsRead - 32) & 63)
		b.bitsRead -= 32
		b.off -= 4
		return
	}
	// Tail: fewer than 4 bytes remain; pull them in one at a time.
	for b.off > 0 {
		b.value |= uint64(b.in[b.off-1]) << ((b.bitsRead - 8) & 63)
		b.bitsRead -= 8
		b.off--
	}
}
// remaining returns the number of unread bits: unread input bytes plus
// whatever is still in the container.
func (b *bitReaderShifted) remaining() uint {
	return uint(64-b.bitsRead) + b.off*8
}
// close the bitstream and returns an error if out-of-buffer reads occurred.
func (b *bitReaderShifted) close() error {
	// Release the input reference.
	b.in = nil
	if left := b.remaining(); left > 0 {
		return fmt.Errorf("corrupt input: %d bits remain on stream", left)
	}
	if b.bitsRead > 64 {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Copyright 2018 Klaus Post. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Based on work Copyright (c) 2013, Yann Collet, released under BSD License.
package huff0
// bitWriter will write bits.
// First bit will be LSB of the first byte of output.
type bitWriter struct {
	bitContainer uint64 // pending bits, accumulated LSB first
	nBits        uint8  // number of valid bits in bitContainer
	out          []byte // flushed output bytes
}
// addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated.
// It will not check if there is space for them, so the caller must ensure that it has flushed recently.
func (b *bitWriter) addBits16Clean(value uint16, bits uint8) {
	shift := b.nBits & 63
	b.bitContainer |= uint64(value) << shift
	b.nBits += bits
}
// encSymbol appends the code for one symbol (up to 16 bits). The caller
// must have flushed recently enough for the bits to fit.
func (b *bitWriter) encSymbol(ct cTable, symbol byte) {
	e := ct[symbol]
	b.bitContainer |= uint64(e.val) << (b.nBits & 63)
	if false { // debug guard, compiled out
		if e.nBits == 0 {
			panic("nbits 0")
		}
	}
	b.nBits += e.nBits
}
// encTwoSymbols appends the codes for two symbols (up to 32 bits). The
// caller must have flushed recently enough for the bits to fit.
func (b *bitWriter) encTwoSymbols(ct cTable, av, bv byte) {
	ea, eb := ct[av], ct[bv]
	// Pack the second code directly above the first before merging.
	merged := uint64(ea.val) | uint64(eb.val)<<(ea.nBits&63)
	b.bitContainer |= merged << (b.nBits & 63)
	if false { // debug guard, compiled out
		if ea.nBits == 0 {
			panic("nbitsA 0")
		}
		if eb.nBits == 0 {
			panic("nbitsB 0")
		}
	}
	b.nBits += ea.nBits + eb.nBits
}
// encFourSymbols adds up to 32 bits from four symbols.
// It will not check if there is space for them,
// so the caller must ensure that b has been flushed recently.
func (b *bitWriter) encFourSymbols(encA, encB, encC, encD cTableEntry) {
	// Bit offsets of each code within the combined word.
	offB := encA.nBits
	offC := offB + encB.nBits
	offD := offC + encC.nBits
	total := offD + encD.nBits
	merged := uint64(encA.val) |
		uint64(encB.val)<<(offB&63) |
		uint64(encC.val)<<(offC&63) |
		uint64(encD.val)<<(offD&63)
	b.bitContainer |= merged << (b.nBits & 63)
	b.nBits += total
}
// flush32 will flush out, so there are at least 32 bits available for writing.
func (b *bitWriter) flush32() {
	if b.nBits < 32 {
		return
	}
	// Emit the low 32 bits little-endian and keep the rest pending.
	c := b.bitContainer
	b.out = append(b.out, byte(c), byte(c>>8), byte(c>>16), byte(c>>24))
	b.bitContainer = c >> 32
	b.nBits -= 32
}
// flushAlign will flush remaining full bytes and align to next byte boundary.
func (b *bitWriter) flushAlign() {
	full := (b.nBits + 7) >> 3 // round up to whole bytes
	for i := uint8(0); i < full; i++ {
		b.out = append(b.out, byte(b.bitContainer>>(i*8)))
	}
	b.bitContainer = 0
	b.nBits = 0
}
// close will write the alignment bit and write the final byte(s)
// to the output.
func (b *bitWriter) close() {
	// End-of-stream marker bit; the decoder aligns on the highest set bit.
	b.addBits16Clean(1, 1)
	b.flushAlign()
}
package huff0
import (
"fmt"
"math"
"runtime"
"sync"
)
// Compress1X will compress the input.
// The output can be decoded using Decompress1X.
// Supply a Scratch object. The scratch object contains state about re-use,
// So when sharing across independent encodes, be sure to set the re-use policy.
func Compress1X(in []byte, s *Scratch) (out []byte, reUsed bool, err error) {
	if s, err = s.prepare(in); err != nil {
		return nil, false, err
	}
	return compress(in, s, s.compress1X)
}
// Compress4X will compress the input. The input is split into 4 independent blocks
// and compressed similar to Compress1X.
// The output can be decoded using Decompress4X.
// Supply a Scratch object. The scratch object contains state about re-use,
// So when sharing across independent encodes, be sure to set the re-use policy.
func Compress4X(in []byte, s *Scratch) (out []byte, reUsed bool, err error) {
	if s, err = s.prepare(in); err != nil {
		return nil, false, err
	}
	if false {
		// TODO: compress4Xp only slightly faster.
		const parallelThreshold = 8 << 10
		if len(in) < parallelThreshold || runtime.GOMAXPROCS(0) == 1 {
			return compress(in, s, s.compress4X)
		}
		return compress(in, s, s.compress4Xp)
	}
	return compress(in, s, s.compress4X)
}
// compress runs the supplied single/four-stream compressor after deciding
// whether to reuse the previous table, build a new one, or reject the
// block (ErrIncompressible/ErrUseRLE). reUsed reports whether the
// previous table produced the returned output.
func compress(in []byte, s *Scratch, compressor func(src []byte) ([]byte, error)) (out []byte, reUsed bool, err error) {
	// Nuke previous table if we cannot reuse anyway.
	if s.Reuse == ReusePolicyNone {
		s.prevTable = s.prevTable[:0]
	}
	// Create histogram, if none was provided.
	maxCount := s.maxCount
	var canReuse = false
	if maxCount == 0 {
		maxCount, canReuse = s.countSimple(in)
	} else {
		canReuse = s.canUseTable(s.prevTable)
	}
	// We want the output size to be less than this:
	wantSize := len(in)
	if s.WantLogLess > 0 {
		wantSize -= wantSize >> s.WantLogLess
	}
	// Reset for next run.
	s.clearCount = true
	s.maxCount = 0
	if maxCount >= len(in) {
		if maxCount > len(in) {
			return nil, false, fmt.Errorf("maxCount (%d) > length (%d)", maxCount, len(in))
		}
		if len(in) == 1 {
			return nil, false, ErrIncompressible
		}
		// One symbol, use RLE
		return nil, false, ErrUseRLE
	}
	if maxCount == 1 || maxCount < (len(in)>>7) {
		// Each symbol present maximum once or too well distributed.
		return nil, false, ErrIncompressible
	}
	if s.Reuse == ReusePolicyMust && !canReuse {
		// We must reuse, but we can't.
		return nil, false, ErrIncompressible
	}
	if (s.Reuse == ReusePolicyPrefer || s.Reuse == ReusePolicyMust) && canReuse {
		// Try the previous table first, restoring the current one after.
		keepTable := s.cTable
		keepTL := s.actualTableLog
		s.cTable = s.prevTable
		s.actualTableLog = s.prevTableLog
		s.Out, err = compressor(in)
		s.cTable = keepTable
		s.actualTableLog = keepTL
		if err == nil && len(s.Out) < wantSize {
			s.OutData = s.Out
			return s.Out, true, nil
		}
		if s.Reuse == ReusePolicyMust {
			return nil, false, ErrIncompressible
		}
		// Do not attempt to re-use later.
		s.prevTable = s.prevTable[:0]
	}
	// Calculate new table.
	err = s.buildCTable()
	if err != nil {
		return nil, false, err
	}
	if false && !s.canUseTable(s.cTable) {
		panic("invalid table generated")
	}
	if s.Reuse == ReusePolicyAllow && canReuse {
		// Reuse anyway if the old table likely encodes at least as well
		// as the new table plus the cost of emitting its header.
		hSize := len(s.Out)
		oldSize := s.prevTable.estimateSize(s.count[:s.symbolLen])
		newSize := s.cTable.estimateSize(s.count[:s.symbolLen])
		if oldSize <= hSize+newSize || hSize+12 >= wantSize {
			// Retain cTable even if we re-use.
			keepTable := s.cTable
			keepTL := s.actualTableLog
			s.cTable = s.prevTable
			s.actualTableLog = s.prevTableLog
			s.Out, err = compressor(in)
			// Restore ctable.
			s.cTable = keepTable
			s.actualTableLog = keepTL
			if err != nil {
				return nil, false, err
			}
			if len(s.Out) >= wantSize {
				return nil, false, ErrIncompressible
			}
			s.OutData = s.Out
			return s.Out, true, nil
		}
	}
	// Use new table
	err = s.cTable.write(s)
	if err != nil {
		s.OutTable = nil
		return nil, false, err
	}
	s.OutTable = s.Out
	// Compress using new table
	s.Out, err = compressor(in)
	if err != nil {
		s.OutTable = nil
		return nil, false, err
	}
	if len(s.Out) >= wantSize {
		s.OutTable = nil
		return nil, false, ErrIncompressible
	}
	// Move current table into previous.
	s.prevTable, s.prevTableLog, s.cTable = s.cTable, s.actualTableLog, s.prevTable[:0]
	s.OutData = s.Out[len(s.OutTable):]
	return s.Out, false, nil
}
// EstimateSizes will estimate the data sizes:
// the table header size (tableSz), the compressed payload size using a
// freshly built table (dataSz), and the payload size using the previous
// table (reuseSz). A value of -1 means "not applicable".
func EstimateSizes(in []byte, s *Scratch) (tableSz, dataSz, reuseSz int, err error) {
	s, err = s.prepare(in)
	if err != nil {
		return 0, 0, 0, err
	}
	// Create histogram, if none was provided.
	tableSz, dataSz, reuseSz = -1, -1, -1
	maxCount := s.maxCount
	var canReuse = false
	if maxCount == 0 {
		maxCount, canReuse = s.countSimple(in)
	} else {
		canReuse = s.canUseTable(s.prevTable)
	}
	// We want the output size to be less than this:
	// NOTE(review): wantSize is computed for symmetry with compress() but
	// is not referenced below — confirm whether a size check was intended.
	wantSize := len(in)
	if s.WantLogLess > 0 {
		wantSize -= wantSize >> s.WantLogLess
	}
	// Reset for next run.
	s.clearCount = true
	s.maxCount = 0
	if maxCount >= len(in) {
		if maxCount > len(in) {
			return 0, 0, 0, fmt.Errorf("maxCount (%d) > length (%d)", maxCount, len(in))
		}
		if len(in) == 1 {
			return 0, 0, 0, ErrIncompressible
		}
		// One symbol, use RLE
		return 0, 0, 0, ErrUseRLE
	}
	if maxCount == 1 || maxCount < (len(in)>>7) {
		// Each symbol present maximum once or too well distributed.
		return 0, 0, 0, ErrIncompressible
	}
	// Calculate new table.
	err = s.buildCTable()
	if err != nil {
		return 0, 0, 0, err
	}
	if false && !s.canUseTable(s.cTable) {
		panic("invalid table generated")
	}
	tableSz, err = s.cTable.estTableSize(s)
	if err != nil {
		return 0, 0, 0, err
	}
	if canReuse {
		reuseSz = s.prevTable.estimateSize(s.count[:s.symbolLen])
	}
	dataSz = s.cTable.estimateSize(s.count[:s.symbolLen])
	// Restore
	return tableSz, dataSz, reuseSz, nil
}
// compress1X encodes src as a single stream, appending to s.Out.
func (s *Scratch) compress1X(src []byte) ([]byte, error) {
	out := s.compress1xDo(s.Out, src)
	return out, nil
}
// compress1xDo encodes src with the current cTable, appending the
// bitstream to dst. Symbols are written back-to-front because the
// decoder reads the stream in reverse.
func (s *Scratch) compress1xDo(dst, src []byte) []byte {
	var bw = bitWriter{out: dst}
	// N is length divisible by 4.
	n := len(src)
	n -= n & 3
	cTable := s.cTable[:256]
	// Encode last bytes.
	for i := len(src) & 3; i > 0; i-- {
		bw.encSymbol(cTable, src[n+i-1])
	}
	n -= 4
	if s.actualTableLog <= 8 {
		// Four codes fit within 32 bits: one flush per group of four.
		for ; n >= 0; n -= 4 {
			tmp := src[n : n+4]
			// tmp should be len 4
			bw.flush32()
			bw.encFourSymbols(cTable[tmp[3]], cTable[tmp[2]], cTable[tmp[1]], cTable[tmp[0]])
		}
	} else {
		// Wider codes: flush after every two symbols.
		for ; n >= 0; n -= 4 {
			tmp := src[n : n+4]
			// tmp should be len 4
			bw.flush32()
			bw.encTwoSymbols(cTable, tmp[3], tmp[2])
			bw.flush32()
			bw.encTwoSymbols(cTable, tmp[1], tmp[0])
		}
	}
	bw.close()
	return bw.out
}
// sixZeros is the placeholder appended for the three little-endian
// uint16 jump-table entries before the 4X block lengths are known.
var sixZeros [6]byte
// compress4X splits src into 4 roughly equal segments, compresses each
// as an independent 1X stream, and prefixes a jump table holding the
// first three compressed lengths (little-endian uint16 each).
func (s *Scratch) compress4X(src []byte) ([]byte, error) {
	if len(src) < 12 {
		return nil, ErrIncompressible
	}
	segmentSize := (len(src) + 3) / 4
	// Add placeholder for output length
	offsetIdx := len(s.Out)
	s.Out = append(s.Out, sixZeros[:]...)
	for i := 0; i < 4; i++ {
		toDo := src
		if len(toDo) > segmentSize {
			toDo = toDo[:segmentSize]
		}
		src = src[len(toDo):]
		idx := len(s.Out)
		s.Out = s.compress1xDo(s.Out, toDo)
		if len(s.Out)-idx > math.MaxUint16 {
			// We cannot store the size in the jump table
			return nil, ErrIncompressible
		}
		// Write compressed length as little endian before block.
		if i < 3 {
			// Last length is not written.
			length := len(s.Out) - idx
			s.Out[i*2+offsetIdx] = byte(length)
			s.Out[i*2+offsetIdx+1] = byte(length >> 8)
		}
	}
	return s.Out, nil
}
// compress4Xp will compress 4 streams using separate goroutines.
// Fix: the previous code did `s.Out = s.Out[:6]`, truncating the output
// buffer and writing jump-table lengths at absolute offsets 0..5. That
// destroys any table header compress() has already appended to s.Out.
// This version appends a placeholder and writes at offsetIdx-relative
// positions, exactly like compress4X.
func (s *Scratch) compress4Xp(src []byte) ([]byte, error) {
	if len(src) < 12 {
		return nil, ErrIncompressible
	}
	// Add placeholder for the three little-endian uint16 block lengths.
	offsetIdx := len(s.Out)
	s.Out = append(s.Out, sixZeros[:]...)

	segmentSize := (len(src) + 3) / 4
	var wg sync.WaitGroup
	wg.Add(4)
	for i := 0; i < 4; i++ {
		toDo := src
		if len(toDo) > segmentSize {
			toDo = toDo[:segmentSize]
		}
		src = src[len(toDo):]

		// Separate goroutine for each block; each encodes into its own
		// scratch buffer so they never share state.
		go func(i int) {
			s.tmpOut[i] = s.compress1xDo(s.tmpOut[i][:0], toDo)
			wg.Done()
		}(i)
	}
	wg.Wait()
	for i := 0; i < 4; i++ {
		o := s.tmpOut[i]
		if len(o) > math.MaxUint16 {
			// We cannot store the size in the jump table
			return nil, ErrIncompressible
		}
		// Write compressed length as little endian before block.
		if i < 3 {
			// Last length is not written.
			s.Out[offsetIdx+i*2] = byte(len(o))
			s.Out[offsetIdx+i*2+1] = byte(len(o) >> 8)
		}
		// Write output.
		s.Out = append(s.Out, o...)
	}
	return s.Out, nil
}
// countSimple builds a histogram of in into s.count and returns the
// largest single count. reuse reports whether every present symbol has a
// code in the previous table. Does not update s.clearCount.
func (s *Scratch) countSimple(in []byte) (max int, reuse bool) {
	reuse = true
	_ = s.count // Assert that s != nil to speed up the following loop.
	for _, v := range in {
		s.count[v]++
	}
	biggest := uint32(0)
	prevLen := len(s.prevTable)
	if prevLen == 0 {
		// No previous table: only track the maximum and symbol length.
		for i, c := range s.count[:] {
			if c == 0 {
				continue
			}
			if c > biggest {
				biggest = c
			}
			s.symbolLen = uint16(i) + 1
		}
		return int(biggest), false
	}
	for i, c := range s.count[:] {
		if c == 0 {
			continue
		}
		if c > biggest {
			biggest = c
		}
		s.symbolLen = uint16(i) + 1
		// Reuse is impossible if a present symbol has no previous code.
		if i >= prevLen || s.prevTable[i].nBits == 0 {
			reuse = false
		}
	}
	return int(biggest), reuse
}
// canUseTable reports whether table c assigns a code to every symbol
// present in the current histogram.
func (s *Scratch) canUseTable(c cTable) bool {
	if len(c) < int(s.symbolLen) {
		return false
	}
	for i := range s.count[:s.symbolLen] {
		if s.count[i] != 0 && c[i].nBits == 0 {
			return false
		}
	}
	return true
}
//lint:ignore U1000 used for debugging
// validateTable reports whether table c is usable for the current
// histogram and no code exceeds the selected table log.
func (s *Scratch) validateTable(c cTable) bool {
	if len(c) < int(s.symbolLen) {
		return false
	}
	for i, cnt := range s.count[:s.symbolLen] {
		if cnt == 0 {
			continue
		}
		if c[i].nBits == 0 || c[i].nBits > s.actualTableLog {
			return false
		}
	}
	return true
}
// minTableLog provides the minimum logSize to safely represent a distribution.
func (s *Scratch) minTableLog() uint8 {
	srcBits := highBit32(uint32(s.srcLen)) + 1
	symBits := highBit32(uint32(s.symbolLen-1)) + 2
	// Return the smaller bound (equal values give the same result).
	if symBits < srcBits {
		return uint8(symBits)
	}
	return uint8(srcBits)
}
// optimalTableLog calculates and sets the optimal tableLog in s.actualTableLog
func (s *Scratch) optimalTableLog() {
	tl := s.TableLog
	if maxSrc := uint8(highBit32(uint32(s.srcLen-1))) - 1; maxSrc < tl {
		// Accuracy can be reduced for small inputs.
		tl = maxSrc
	}
	if mb := s.minTableLog(); mb > tl {
		// Need a minimum to safely represent all symbol values.
		tl = mb
	}
	// Clamp to the supported range.
	if tl < minTablelog {
		tl = minTablelog
	}
	if tl > tableLogMax {
		tl = tableLogMax
	}
	s.actualTableLog = tl
}
// cTableEntry is one symbol's Huffman code: its bit pattern and length.
type cTableEntry struct {
	val   uint16 // code bits for the symbol
	nBits uint8  // code length in bits; 0 means no code assigned
	// We have 8 bits extra
}
// huffNodesMask wraps indexes into the Huffman node scratch array.
const huffNodesMask = huffNodesLen - 1
// buildCTable builds the Huffman compression table from the histogram in
// s.count. It sorts symbols via huffSort, builds the tree bottom-up in
// s.nodes, limits code lengths with setMaxHeight, and fills s.cTable with
// (val, nBits) per symbol in symbol order.
func (s *Scratch) buildCTable() error {
	s.optimalTableLog()
	s.huffSort()
	if cap(s.cTable) < maxSymbolValue+1 {
		s.cTable = make([]cTableEntry, s.symbolLen, maxSymbolValue+1)
	} else {
		s.cTable = s.cTable[:s.symbolLen]
		for i := range s.cTable {
			s.cTable[i] = cTableEntry{}
		}
	}

	var startNode = int16(s.symbolLen)
	nonNullRank := s.symbolLen - 1

	nodeNb := startNode
	huffNode := s.nodes[1 : huffNodesLen+1]

	// This overlays the slice above, but allows "-1" index lookups.
	// Different from reference implementation.
	huffNode0 := s.nodes[0 : huffNodesLen+1]

	// Skip trailing symbols with zero count.
	for huffNode[nonNullRank].count() == 0 {
		nonNullRank--
	}

	// Combine the two smallest leaves into the first internal node.
	lowS := int16(nonNullRank)
	nodeRoot := nodeNb + lowS - 1
	lowN := nodeNb
	huffNode[nodeNb].setCount(huffNode[lowS].count() + huffNode[lowS-1].count())
	huffNode[lowS].setParent(nodeNb)
	huffNode[lowS-1].setParent(nodeNb)
	nodeNb++
	lowS -= 2
	for n := nodeNb; n <= nodeRoot; n++ {
		huffNode[n].setCount(1 << 30)
	}
	// fake entry, strong barrier
	huffNode0[0].setCount(1 << 31)

	// create parents
	for nodeNb <= nodeRoot {
		var n1, n2 int16
		if huffNode0[lowS+1].count() < huffNode0[lowN+1].count() {
			n1 = lowS
			lowS--
		} else {
			n1 = lowN
			lowN++
		}
		if huffNode0[lowS+1].count() < huffNode0[lowN+1].count() {
			n2 = lowS
			lowS--
		} else {
			n2 = lowN
			lowN++
		}

		huffNode[nodeNb].setCount(huffNode0[n1+1].count() + huffNode0[n2+1].count())
		huffNode0[n1+1].setParent(nodeNb)
		huffNode0[n2+1].setParent(nodeNb)
		nodeNb++
	}

	// distribute weights (unlimited tree height)
	huffNode[nodeRoot].setNbBits(0)
	for n := nodeRoot - 1; n >= startNode; n-- {
		huffNode[n].setNbBits(huffNode[huffNode[n].parent()].nbBits() + 1)
	}
	for n := uint16(0); n <= nonNullRank; n++ {
		huffNode[n].setNbBits(huffNode[huffNode[n].parent()].nbBits() + 1)
	}
	// Enforce the maximum code length; this may lower s.actualTableLog.
	s.actualTableLog = s.setMaxHeight(int(nonNullRank))
	maxNbBits := s.actualTableLog

	// fill result into tree (val, nbBits)
	if maxNbBits > tableLogMax {
		return fmt.Errorf("internal error: maxNbBits (%d) > tableLogMax (%d)", maxNbBits, tableLogMax)
	}
	var nbPerRank [tableLogMax + 1]uint16
	var valPerRank [16]uint16
	for _, v := range huffNode[:nonNullRank+1] {
		nbPerRank[v.nbBits()]++
	}
	// determine starting value per rank
	{
		min := uint16(0)
		for n := maxNbBits; n > 0; n-- {
			// get starting value within each rank
			valPerRank[n] = min
			min += nbPerRank[n]
			min >>= 1
		}
	}

	// push nbBits per symbol, symbol order
	for _, v := range huffNode[:nonNullRank+1] {
		s.cTable[v.symbol()].nBits = v.nbBits()
	}

	// assign value within rank, symbol order
	t := s.cTable[:s.symbolLen]
	for n, val := range t {
		nbits := val.nBits & 15
		v := valPerRank[nbits]
		t[n].val = v
		valPerRank[nbits] = v + 1
	}
	return nil
}
// huffSort will sort symbols, decreasing order.
// Symbols are bucketed by the bit length of their count, then
// insertion-sorted within each bucket, producing a globally
// decreasing order in s.nodes[1:].
func (s *Scratch) huffSort() {
	type rankPos struct {
		base    uint32
		current uint32
	}

	// Clear nodes
	nodes := s.nodes[:huffNodesLen+1]
	s.nodes = nodes
	nodes = nodes[1 : huffNodesLen+1]

	// Sort into buckets based on length of symbol count.
	var rank [32]rankPos
	for _, v := range s.count[:s.symbolLen] {
		r := highBit32(v+1) & 31
		rank[r].base++
	}
	// maxBitLength is log2(BlockSizeMax) + 1
	const maxBitLength = 18 + 1
	// Suffix sums: rank[r].base becomes the start position of bucket r.
	for n := maxBitLength; n > 0; n-- {
		rank[n-1].base += rank[n].base
	}
	for n := range rank[:maxBitLength] {
		rank[n].current = rank[n].base
	}
	// Place each symbol, shifting smaller counts right within its bucket.
	for n, c := range s.count[:s.symbolLen] {
		r := (highBit32(c+1) + 1) & 31
		pos := rank[r].current
		rank[r].current++
		prev := nodes[(pos-1)&huffNodesMask]
		for pos > rank[r].base && c > prev.count() {
			nodes[pos&huffNodesMask] = prev
			pos--
			prev = nodes[(pos-1)&huffNodesMask]
		}
		nodes[pos&huffNodesMask] = makeNodeElt(c, byte(n))
	}
}
// setMaxHeight enforces the maximum code length s.actualTableLog on the
// tree in s.nodes by truncating over-long codes and then repaying the
// accumulated "cost" by lengthening shorter codes (mirrors the reference
// HUF_setMaxHeight). lastNonNull is the index of the last symbol with a
// nonzero count. It returns the largest code length actually in use.
func (s *Scratch) setMaxHeight(lastNonNull int) uint8 {
	maxNbBits := s.actualTableLog
	huffNode := s.nodes[1 : huffNodesLen+1]
	//huffNode = huffNode[: huffNodesLen]

	largestBits := huffNode[lastNonNull].nbBits()

	// early exit : no elt > maxNbBits
	if largestBits <= maxNbBits {
		return largestBits
	}
	totalCost := int(0)
	baseCost := int(1) << (largestBits - maxNbBits)
	n := uint32(lastNonNull)

	// Truncate every over-long code to maxNbBits, accumulating the cost.
	for huffNode[n].nbBits() > maxNbBits {
		totalCost += baseCost - (1 << (largestBits - huffNode[n].nbBits()))
		huffNode[n].setNbBits(maxNbBits)
		n--
	}
	// n stops at huffNode[n].nbBits <= maxNbBits
	for huffNode[n].nbBits() == maxNbBits {
		n--
	}
	// n end at index of smallest symbol using < maxNbBits

	// renorm totalCost
	totalCost >>= largestBits - maxNbBits /* note : totalCost is necessarily a multiple of baseCost */

	// repay normalized cost
	{
		const noSymbol = 0xF0F0F0F0
		var rankLast [tableLogMax + 2]uint32

		for i := range rankLast[:] {
			rankLast[i] = noSymbol
		}

		// Get pos of last (smallest) symbol per rank
		{
			currentNbBits := maxNbBits
			for pos := int(n); pos >= 0; pos-- {
				if huffNode[pos].nbBits() >= currentNbBits {
					continue
				}
				currentNbBits = huffNode[pos].nbBits() // < maxNbBits
				rankLast[maxNbBits-currentNbBits] = uint32(pos)
			}
		}

		for totalCost > 0 {
			nBitsToDecrease := uint8(highBit32(uint32(totalCost))) + 1

			// Pick the rank to lengthen: prefer a higher rank unless a
			// lower-rank symbol is cheaper to demote.
			for ; nBitsToDecrease > 1; nBitsToDecrease-- {
				highPos := rankLast[nBitsToDecrease]
				lowPos := rankLast[nBitsToDecrease-1]
				if highPos == noSymbol {
					continue
				}
				if lowPos == noSymbol {
					break
				}
				highTotal := huffNode[highPos].count()
				lowTotal := 2 * huffNode[lowPos].count()
				if highTotal <= lowTotal {
					break
				}
			}
			// only triggered when no more rank 1 symbol left => find closest one (note : there is necessarily at least one !)
			// HUF_MAX_TABLELOG test just to please gcc 5+; but it should not be necessary
			// FIXME: try to remove
			for (nBitsToDecrease <= tableLogMax) && (rankLast[nBitsToDecrease] == noSymbol) {
				nBitsToDecrease++
			}
			totalCost -= 1 << (nBitsToDecrease - 1)
			if rankLast[nBitsToDecrease-1] == noSymbol {
				// this rank is no longer empty
				rankLast[nBitsToDecrease-1] = rankLast[nBitsToDecrease]
			}
			huffNode[rankLast[nBitsToDecrease]].setNbBits(1 +
				huffNode[rankLast[nBitsToDecrease]].nbBits())
			if rankLast[nBitsToDecrease] == 0 {
				/* special case, reached largest symbol */
				rankLast[nBitsToDecrease] = noSymbol
			} else {
				rankLast[nBitsToDecrease]--
				if huffNode[rankLast[nBitsToDecrease]].nbBits() != maxNbBits-nBitsToDecrease {
					rankLast[nBitsToDecrease] = noSymbol /* this rank is now empty */
				}
			}
		}

		for totalCost < 0 { /* Sometimes, cost correction overshoot */
			if rankLast[1] == noSymbol { /* special case : no rank 1 symbol (using maxNbBits-1); let's create one from largest rank 0 (using maxNbBits) */
				for huffNode[n].nbBits() == maxNbBits {
					n--
				}
				huffNode[n+1].setNbBits(huffNode[n+1].nbBits() - 1)
				rankLast[1] = n + 1
				totalCost++
				continue
			}
			huffNode[rankLast[1]+1].setNbBits(huffNode[rankLast[1]+1].nbBits() - 1)
			rankLast[1]++
			totalCost++
		}
	}
	return maxNbBits
}
// A nodeElt packs the four logical fields
//
//	count  uint32 (bits  0-31)
//	parent uint16 (bits 32-47)
//	symbol byte   (bits 48-55)
//	nbBits uint8  (bits 56-63)
//
// into one integer so the compiler always loads and stores whole
// nodeElts instead of separate fields.
type nodeElt uint64

// makeNodeElt builds a leaf node; parent and nbBits start at zero.
func makeNodeElt(count uint32, symbol byte) nodeElt {
	return nodeElt(symbol)<<48 | nodeElt(count)
}

func (e *nodeElt) count() uint32  { return uint32(*e) }
func (e *nodeElt) parent() uint16 { return uint16(*e >> 32) }
func (e *nodeElt) symbol() byte   { return byte(*e >> 48) }
func (e *nodeElt) nbBits() uint8  { return uint8(*e >> 56) }

// Each setter replaces its packed field and leaves the others untouched.
func (e *nodeElt) setCount(c uint32) { *e = *e>>32<<32 | nodeElt(c) }
func (e *nodeElt) setParent(p int16) { *e = *e&^(0xffff<<32) | nodeElt(uint16(p))<<32 }
func (e *nodeElt) setNbBits(n uint8) { *e = *e<<8>>8 | nodeElt(n)<<56 }
package huff0
import (
"errors"
"fmt"
"io"
"sync"
"github.com/klauspost/compress/fse"
)
// dTable holds the decoding table built from a set of Huffman weights.
type dTable struct {
	single []dEntrySingle
}

// single-symbols decoding
type dEntrySingle struct {
	// entry packs the code bit length in the low 8 bits and the decoded
	// symbol in the high 8 bits (see how ReadTable fills it).
	entry uint16
}

// Uses special code for all tables that are < 8 bits.
const use8BitTables = true
// ReadTable will read a table from the input.
// The size of the input may be larger than the table definition.
// Any content remaining after the table definition will be returned.
// If no Scratch is provided a new one is allocated.
// The returned Scratch can be used for encoding or decoding input using this table.
func ReadTable(in []byte, s *Scratch) (s2 *Scratch, remain []byte, err error) {
	s, err = s.prepare(nil)
	if err != nil {
		return s, nil, err
	}
	if len(in) <= 1 {
		return s, nil, errors.New("input too small for table")
	}
	iSize := in[0]
	in = in[1:]
	if iSize >= 128 {
		// Uncompressed weights: header byte is 127 + number of weights,
		// and each weight is stored in a nibble.
		oSize := iSize - 127
		iSize = (oSize + 1) / 2
		if int(iSize) > len(in) {
			return s, nil, errors.New("input too small for table")
		}
		for n := uint8(0); n < oSize; n += 2 {
			v := in[n/2]
			s.huffWeight[n] = v >> 4
			s.huffWeight[n+1] = v & 15
		}
		s.symbolLen = uint16(oSize)
		in = in[iSize:]
	} else {
		if len(in) < int(iSize) {
			return s, nil, fmt.Errorf("input too small for table, want %d bytes, have %d", iSize, len(in))
		}
		// FSE compressed weights
		s.fse.DecompressLimit = 255
		hw := s.huffWeight[:]
		s.fse.Out = hw
		b, err := fse.Decompress(in[:iSize], s.fse)
		s.fse.Out = nil
		if err != nil {
			return s, nil, fmt.Errorf("fse decompress returned: %w", err)
		}
		if len(b) > 255 {
			return s, nil, errors.New("corrupt input: output table too large")
		}
		s.symbolLen = uint16(len(b))
		in = in[iSize:]
	}

	// collect weight stats
	var rankStats [16]uint32
	weightTotal := uint32(0)
	for _, v := range s.huffWeight[:s.symbolLen] {
		if v > tableLogMax {
			return s, nil, errors.New("corrupt input: weight too large")
		}
		v2 := v & 15
		rankStats[v2]++
		// (1 << (v2-1)) is slower since the compiler cannot prove that v2 isn't 0.
		weightTotal += (1 << v2) >> 1
	}
	if weightTotal == 0 {
		return s, nil, errors.New("corrupt input: weights zero")
	}

	// get last non-null symbol weight (implied, total must be 2^n)
	{
		tableLog := highBit32(weightTotal) + 1
		if tableLog > tableLogMax {
			return s, nil, errors.New("corrupt input: tableLog too big")
		}
		s.actualTableLog = uint8(tableLog)
		// determine last weight
		{
			total := uint32(1) << tableLog
			rest := total - weightTotal
			verif := uint32(1) << highBit32(rest)
			lastWeight := highBit32(rest) + 1
			if verif != rest {
				// last value must be a clean power of 2
				return s, nil, errors.New("corrupt input: last value not power of two")
			}
			s.huffWeight[s.symbolLen] = uint8(lastWeight)
			s.symbolLen++
			rankStats[lastWeight]++
		}
	}

	if (rankStats[1] < 2) || (rankStats[1]&1 != 0) {
		// by construction : at least 2 elts of rank 1, must be even
		return s, nil, errors.New("corrupt input: min elt size, even check failed ")
	}

	// TODO: Choose between single/double symbol decoding

	// Calculate starting value for each rank
	{
		var nextRankStart uint32
		for n := uint8(1); n < s.actualTableLog+1; n++ {
			current := nextRankStart
			nextRankStart += rankStats[n] << (n - 1)
			rankStats[n] = current
		}
	}

	// fill DTable (always full size)
	tSize := 1 << tableLogMax
	if len(s.dt.single) != tSize {
		s.dt.single = make([]dEntrySingle, tSize)
	}
	cTable := s.prevTable
	if cap(cTable) < maxSymbolValue+1 {
		cTable = make([]cTableEntry, 0, maxSymbolValue+1)
	}
	cTable = cTable[:maxSymbolValue+1]
	s.prevTable = cTable[:s.symbolLen]
	s.prevTableLog = s.actualTableLog

	// Fill both the decoding table and the matching encoder table.
	for n, w := range s.huffWeight[:s.symbolLen] {
		if w == 0 {
			cTable[n] = cTableEntry{
				val:   0,
				nBits: 0,
			}
			continue
		}
		length := (uint32(1) << w) >> 1
		d := dEntrySingle{
			// Low byte: bit length; high byte: decoded symbol.
			entry: uint16(s.actualTableLog+1-w) | (uint16(n) << 8),
		}
		rank := &rankStats[w]
		cTable[n] = cTableEntry{
			val:   uint16(*rank >> (w - 1)),
			nBits: uint8(d.entry),
		}

		// Replicate the entry across every table slot it covers.
		single := s.dt.single[*rank : *rank+length]
		for i := range single {
			single[i] = d
		}
		*rank += length
	}
	return s, in, nil
}
// Decompress1X will decompress a 1X encoded stream.
// The length of the supplied input must match the end of a block exactly.
// Before this is called, the table must be initialized with ReadTable unless
// the encoder re-used the table.
//
// Deprecated: Use the stateless Decoder() to get a concurrent version.
func (s *Scratch) Decompress1X(in []byte) (out []byte, err error) {
	// Grow the reusable output buffer to the maximum allowed size.
	if cap(s.Out) < s.MaxDecodedSize {
		s.Out = make([]byte, s.MaxDecodedSize)
	}
	// Empty slice with capacity capped at MaxDecodedSize.
	out = s.Out[:0:s.MaxDecodedSize]
	out, err = s.Decoder().Decompress1X(out, in)
	s.Out = out
	return out, err
}
// Decompress4X will decompress a 4X encoded stream.
// Before this is called, the table must be initialized with ReadTable unless
// the encoder re-used the table.
// The length of the supplied input must match the end of a block exactly.
// The destination size of the uncompressed data must be known and provided.
//
// Deprecated: Use the stateless Decoder() to get a concurrent version.
func (s *Scratch) Decompress4X(in []byte, dstSize int) (out []byte, err error) {
	if dstSize > s.MaxDecodedSize {
		return nil, ErrMaxDecodedSizeExceeded
	}
	// Grow to the maximum size so the buffer can be reused for any
	// later request.
	if cap(s.Out) < dstSize {
		s.Out = make([]byte, s.MaxDecodedSize)
	}
	// Empty slice with capacity capped at the requested size.
	out = s.Out[:0:dstSize]
	out, err = s.Decoder().Decompress4X(out, in)
	s.Out = out
	return out, err
}
// Decoder will return a stateless decoder that can be used by multiple
// decompressors concurrently.
// Before this is called, the table must be initialized with ReadTable.
// The Decoder is still linked to the scratch buffer so that cannot be reused.
// However, it is safe to discard the scratch.
func (s *Scratch) Decoder() *Decoder {
	d := Decoder{
		dt:             s.dt,
		actualTableLog: s.actualTableLog,
		bufs:           &s.decPool,
	}
	return &d
}
// Decoder provides stateless decoding.
type Decoder struct {
	dt             dTable     // decoding table built by ReadTable
	actualTableLog uint8      // number of bits used by the table's codes
	bufs           *sync.Pool // pool of *[4][256]byte scratch buffers (see buffer)
}
// buffer returns a scratch buffer from the pool, allocating a fresh one
// when the pool is empty or holds an unexpected type.
func (d *Decoder) buffer() *[4][256]byte {
	if buf, ok := d.bufs.Get().(*[4][256]byte); ok {
		return buf
	}
	return new([4][256]byte)
}
// decompress1X8Bit will decompress a 1X encoded stream with tablelog <= 8.
// The cap of the output buffer will be the maximum decompressed size.
// The length of the supplied input must match the end of a block exactly.
func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) {
	if d.actualTableLog == 8 {
		return d.decompress1X8BitExactly(dst, src)
	}
	var br bitReaderBytes
	err := br.init(src)
	if err != nil {
		return dst, err
	}
	maxDecodedSize := cap(dst)
	dst = dst[:0]

	// Avoid bounds check by always having full sized table.
	dt := d.dt.single[:256]

	// Use temp table to avoid bound checks/append penalty.
	bufs := d.buffer()
	buf := &bufs[0]
	var off uint8

	// The switch unrolls the hot loop once per table log so the extra
	// byte shift is a compile-time constant in each case.
	switch d.actualTableLog {
	case 8:
		const shift = 0
		for br.off >= 4 {
			br.fillFast()
			v := dt[uint8(br.value>>(56+shift))]
			br.advance(uint8(v.entry))
			buf[off+0] = uint8(v.entry >> 8)
			v = dt[uint8(br.value>>(56+shift))]
			br.advance(uint8(v.entry))
			buf[off+1] = uint8(v.entry >> 8)
			v = dt[uint8(br.value>>(56+shift))]
			br.advance(uint8(v.entry))
			buf[off+2] = uint8(v.entry >> 8)
			v = dt[uint8(br.value>>(56+shift))]
			br.advance(uint8(v.entry))
			buf[off+3] = uint8(v.entry >> 8)
			off += 4
			// off wraps to 0 every 256 bytes; flush the temp buffer.
			if off == 0 {
				if len(dst)+256 > maxDecodedSize {
					br.close()
					d.bufs.Put(bufs)
					return nil, ErrMaxDecodedSizeExceeded
				}
				dst = append(dst, buf[:]...)
			}
		}
	case 7:
		const shift = 8 - 7
		for br.off >= 4 {
			br.fillFast()
			v := dt[uint8(br.value>>(56+shift))]
			br.advance(uint8(v.entry))
			buf[off+0] = uint8(v.entry >> 8)
			v = dt[uint8(br.value>>(56+shift))]
			br.advance(uint8(v.entry))
			buf[off+1] = uint8(v.entry >> 8)
			v = dt[uint8(br.value>>(56+shift))]
			br.advance(uint8(v.entry))
			buf[off+2] = uint8(v.entry >> 8)
			v = dt[uint8(br.value>>(56+shift))]
			br.advance(uint8(v.entry))
			buf[off+3] = uint8(v.entry >> 8)
			off += 4
			if off == 0 {
				if len(dst)+256 > maxDecodedSize {
					br.close()
					d.bufs.Put(bufs)
					return nil, ErrMaxDecodedSizeExceeded
				}
				dst = append(dst, buf[:]...)
			}
		}
	case 6:
		const shift = 8 - 6
		for br.off >= 4 {
			br.fillFast()
			v := dt[uint8(br.value>>(56+shift))]
			br.advance(uint8(v.entry))
			buf[off+0] = uint8(v.entry >> 8)
			v = dt[uint8(br.value>>(56+shift))]
			br.advance(uint8(v.entry))
			buf[off+1] = uint8(v.entry >> 8)
			v = dt[uint8(br.value>>(56+shift))]
			br.advance(uint8(v.entry))
			buf[off+2] = uint8(v.entry >> 8)
			v = dt[uint8(br.value>>(56+shift))]
			br.advance(uint8(v.entry))
			buf[off+3] = uint8(v.entry >> 8)
			off += 4
			if off == 0 {
				if len(dst)+256 > maxDecodedSize {
					d.bufs.Put(bufs)
					br.close()
					return nil, ErrMaxDecodedSizeExceeded
				}
				dst = append(dst, buf[:]...)
			}
		}
	case 5:
		const shift = 8 - 5
		for br.off >= 4 {
			br.fillFast()
			v := dt[uint8(br.value>>(56+shift))]
			br.advance(uint8(v.entry))
			buf[off+0] = uint8(v.entry >> 8)
			v = dt[uint8(br.value>>(56+shift))]
			br.advance(uint8(v.entry))
			buf[off+1] = uint8(v.entry >> 8)
			v = dt[uint8(br.value>>(56+shift))]
			br.advance(uint8(v.entry))
			buf[off+2] = uint8(v.entry >> 8)
			v = dt[uint8(br.value>>(56+shift))]
			br.advance(uint8(v.entry))
			buf[off+3] = uint8(v.entry >> 8)
			off += 4
			if off == 0 {
				if len(dst)+256 > maxDecodedSize {
					d.bufs.Put(bufs)
					br.close()
					return nil, ErrMaxDecodedSizeExceeded
				}
				dst = append(dst, buf[:]...)
			}
		}
	case 4:
		const shift = 8 - 4
		for br.off >= 4 {
			br.fillFast()
			v := dt[uint8(br.value>>(56+shift))]
			br.advance(uint8(v.entry))
			buf[off+0] = uint8(v.entry >> 8)
			v = dt[uint8(br.value>>(56+shift))]
			br.advance(uint8(v.entry))
			buf[off+1] = uint8(v.entry >> 8)
			v = dt[uint8(br.value>>(56+shift))]
			br.advance(uint8(v.entry))
			buf[off+2] = uint8(v.entry >> 8)
			v = dt[uint8(br.value>>(56+shift))]
			br.advance(uint8(v.entry))
			buf[off+3] = uint8(v.entry >> 8)
			off += 4
			if off == 0 {
				if len(dst)+256 > maxDecodedSize {
					d.bufs.Put(bufs)
					br.close()
					return nil, ErrMaxDecodedSizeExceeded
				}
				dst = append(dst, buf[:]...)
			}
		}
	case 3:
		const shift = 8 - 3
		for br.off >= 4 {
			br.fillFast()
			v := dt[uint8(br.value>>(56+shift))]
			br.advance(uint8(v.entry))
			buf[off+0] = uint8(v.entry >> 8)
			v = dt[uint8(br.value>>(56+shift))]
			br.advance(uint8(v.entry))
			buf[off+1] = uint8(v.entry >> 8)
			v = dt[uint8(br.value>>(56+shift))]
			br.advance(uint8(v.entry))
			buf[off+2] = uint8(v.entry >> 8)
			v = dt[uint8(br.value>>(56+shift))]
			br.advance(uint8(v.entry))
			buf[off+3] = uint8(v.entry >> 8)
			off += 4
			if off == 0 {
				if len(dst)+256 > maxDecodedSize {
					d.bufs.Put(bufs)
					br.close()
					return nil, ErrMaxDecodedSizeExceeded
				}
				dst = append(dst, buf[:]...)
			}
		}
	case 2:
		const shift = 8 - 2
		for br.off >= 4 {
			br.fillFast()
			v := dt[uint8(br.value>>(56+shift))]
			br.advance(uint8(v.entry))
			buf[off+0] = uint8(v.entry >> 8)
			v = dt[uint8(br.value>>(56+shift))]
			br.advance(uint8(v.entry))
			buf[off+1] = uint8(v.entry >> 8)
			v = dt[uint8(br.value>>(56+shift))]
			br.advance(uint8(v.entry))
			buf[off+2] = uint8(v.entry >> 8)
			v = dt[uint8(br.value>>(56+shift))]
			br.advance(uint8(v.entry))
			buf[off+3] = uint8(v.entry >> 8)
			off += 4
			if off == 0 {
				if len(dst)+256 > maxDecodedSize {
					d.bufs.Put(bufs)
					br.close()
					return nil, ErrMaxDecodedSizeExceeded
				}
				dst = append(dst, buf[:]...)
			}
		}
	case 1:
		const shift = 8 - 1
		for br.off >= 4 {
			br.fillFast()
			v := dt[uint8(br.value>>(56+shift))]
			br.advance(uint8(v.entry))
			buf[off+0] = uint8(v.entry >> 8)
			v = dt[uint8(br.value>>(56+shift))]
			br.advance(uint8(v.entry))
			buf[off+1] = uint8(v.entry >> 8)
			v = dt[uint8(br.value>>(56+shift))]
			br.advance(uint8(v.entry))
			buf[off+2] = uint8(v.entry >> 8)
			v = dt[uint8(br.value>>(56+shift))]
			br.advance(uint8(v.entry))
			buf[off+3] = uint8(v.entry >> 8)
			off += 4
			if off == 0 {
				if len(dst)+256 > maxDecodedSize {
					d.bufs.Put(bufs)
					br.close()
					return nil, ErrMaxDecodedSizeExceeded
				}
				dst = append(dst, buf[:]...)
			}
		}
	default:
		d.bufs.Put(bufs)
		return nil, fmt.Errorf("invalid tablelog: %d", d.actualTableLog)
	}
	// Flush whatever remains in the temp buffer.
	if len(dst)+int(off) > maxDecodedSize {
		d.bufs.Put(bufs)
		br.close()
		return nil, ErrMaxDecodedSizeExceeded
	}
	dst = append(dst, buf[:off]...)

	// br < 4, so uint8 is fine
	bitsLeft := int8(uint8(br.off)*8 + (64 - br.bitsRead))
	shift := (8 - d.actualTableLog) & 7
	// Decode the tail one symbol at a time.
	for bitsLeft > 0 {
		if br.bitsRead >= 64-8 {
			for br.off > 0 {
				br.value |= uint64(br.in[br.off-1]) << (br.bitsRead - 8)
				br.bitsRead -= 8
				br.off--
			}
		}
		if len(dst) >= maxDecodedSize {
			br.close()
			d.bufs.Put(bufs)
			return nil, ErrMaxDecodedSizeExceeded
		}
		v := dt[br.peekByteFast()>>shift]
		nBits := uint8(v.entry)
		br.advance(nBits)
		bitsLeft -= int8(nBits)
		dst = append(dst, uint8(v.entry>>8))
	}
	d.bufs.Put(bufs)
	return dst, br.close()
}
// decompress1X8BitExactly will decompress a 1X encoded stream with tablelog
// of exactly 8 bits, so the peeked byte needs no shift.
// The cap of the output buffer will be the maximum decompressed size.
// The length of the supplied input must match the end of a block exactly.
func (d *Decoder) decompress1X8BitExactly(dst, src []byte) ([]byte, error) {
	var br bitReaderBytes
	err := br.init(src)
	if err != nil {
		return dst, err
	}
	maxDecodedSize := cap(dst)
	dst = dst[:0]

	// Avoid bounds check by always having full sized table.
	dt := d.dt.single[:256]

	// Use temp table to avoid bound checks/append penalty.
	bufs := d.buffer()
	buf := &bufs[0]
	var off uint8

	const shift = 56

	//fmt.Printf("mask: %b, tl:%d\n", mask, d.actualTableLog)
	for br.off >= 4 {
		br.fillFast()
		v := dt[uint8(br.value>>shift)]
		br.advance(uint8(v.entry))
		buf[off+0] = uint8(v.entry >> 8)
		v = dt[uint8(br.value>>shift)]
		br.advance(uint8(v.entry))
		buf[off+1] = uint8(v.entry >> 8)
		v = dt[uint8(br.value>>shift)]
		br.advance(uint8(v.entry))
		buf[off+2] = uint8(v.entry >> 8)
		v = dt[uint8(br.value>>shift)]
		br.advance(uint8(v.entry))
		buf[off+3] = uint8(v.entry >> 8)
		off += 4
		// off wraps to 0 every 256 bytes; flush the temp buffer.
		if off == 0 {
			if len(dst)+256 > maxDecodedSize {
				d.bufs.Put(bufs)
				br.close()
				return nil, ErrMaxDecodedSizeExceeded
			}
			dst = append(dst, buf[:]...)
		}
	}
	if len(dst)+int(off) > maxDecodedSize {
		d.bufs.Put(bufs)
		br.close()
		return nil, ErrMaxDecodedSizeExceeded
	}
	dst = append(dst, buf[:off]...)

	// br < 4, so uint8 is fine
	bitsLeft := int8(uint8(br.off)*8 + (64 - br.bitsRead))
	// Decode the tail one symbol at a time.
	for bitsLeft > 0 {
		if br.bitsRead >= 64-8 {
			for br.off > 0 {
				br.value |= uint64(br.in[br.off-1]) << (br.bitsRead - 8)
				br.bitsRead -= 8
				br.off--
			}
		}
		if len(dst) >= maxDecodedSize {
			d.bufs.Put(bufs)
			br.close()
			return nil, ErrMaxDecodedSizeExceeded
		}
		v := dt[br.peekByteFast()]
		nBits := uint8(v.entry)
		br.advance(nBits)
		bitsLeft -= int8(nBits)
		dst = append(dst, uint8(v.entry>>8))
	}
	d.bufs.Put(bufs)
	return dst, br.close()
}
// decompress4X8bit will decompress a 4X encoded stream with tablelog <= 8.
// The length of the supplied input must match the end of a block exactly.
// The *capacity* of the dst slice must match the destination size of
// the uncompressed data exactly.
func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) {
	if d.actualTableLog == 8 {
		return d.decompress4X8bitExactly(dst, src)
	}

	var br [4]bitReaderBytes
	// The first 6 bytes are a jump table: lengths of streams 0-2
	// (stream 3 runs to the end of src).
	start := 6
	for i := 0; i < 3; i++ {
		length := int(src[i*2]) | (int(src[i*2+1]) << 8)
		if start+length >= len(src) {
			return nil, errors.New("truncated input (or invalid offset)")
		}
		err := br[i].init(src[start : start+length])
		if err != nil {
			return nil, err
		}
		start += length
	}
	err := br[3].init(src[start:])
	if err != nil {
		return nil, err
	}

	// destination, offset to match first output
	dstSize := cap(dst)
	dst = dst[:dstSize]
	out := dst
	dstEvery := (dstSize + 3) / 4

	shift := (56 + (8 - d.actualTableLog)) & 63

	const tlSize = 1 << 8
	single := d.dt.single[:tlSize]

	// Use temp table to avoid bound checks/append penalty.
	buf := d.buffer()
	var off uint8
	var decoded int

	// Decode 4 values from each decoder/loop.
	const bufoff = 256
	for {
		if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 {
			break
		}

		{
			// Interleave 2 decodes.
			const stream = 0
			const stream2 = 1
			br1 := &br[stream]
			br2 := &br[stream2]
			br1.fillFast()
			br2.fillFast()

			v := single[uint8(br1.value>>shift)].entry
			v2 := single[uint8(br2.value>>shift)].entry
			br1.bitsRead += uint8(v)
			br1.value <<= v & 63
			br2.bitsRead += uint8(v2)
			br2.value <<= v2 & 63
			buf[stream][off] = uint8(v >> 8)
			buf[stream2][off] = uint8(v2 >> 8)

			v = single[uint8(br1.value>>shift)].entry
			v2 = single[uint8(br2.value>>shift)].entry
			br1.bitsRead += uint8(v)
			br1.value <<= v & 63
			br2.bitsRead += uint8(v2)
			br2.value <<= v2 & 63
			buf[stream][off+1] = uint8(v >> 8)
			buf[stream2][off+1] = uint8(v2 >> 8)

			v = single[uint8(br1.value>>shift)].entry
			v2 = single[uint8(br2.value>>shift)].entry
			br1.bitsRead += uint8(v)
			br1.value <<= v & 63
			br2.bitsRead += uint8(v2)
			br2.value <<= v2 & 63
			buf[stream][off+2] = uint8(v >> 8)
			buf[stream2][off+2] = uint8(v2 >> 8)

			v = single[uint8(br1.value>>shift)].entry
			v2 = single[uint8(br2.value>>shift)].entry
			br1.bitsRead += uint8(v)
			br1.value <<= v & 63
			br2.bitsRead += uint8(v2)
			br2.value <<= v2 & 63
			buf[stream][off+3] = uint8(v >> 8)
			buf[stream2][off+3] = uint8(v2 >> 8)
		}

		{
			const stream = 2
			const stream2 = 3
			br1 := &br[stream]
			br2 := &br[stream2]
			br1.fillFast()
			br2.fillFast()

			v := single[uint8(br1.value>>shift)].entry
			v2 := single[uint8(br2.value>>shift)].entry
			br1.bitsRead += uint8(v)
			br1.value <<= v & 63
			br2.bitsRead += uint8(v2)
			br2.value <<= v2 & 63
			buf[stream][off] = uint8(v >> 8)
			buf[stream2][off] = uint8(v2 >> 8)

			v = single[uint8(br1.value>>shift)].entry
			v2 = single[uint8(br2.value>>shift)].entry
			br1.bitsRead += uint8(v)
			br1.value <<= v & 63
			br2.bitsRead += uint8(v2)
			br2.value <<= v2 & 63
			buf[stream][off+1] = uint8(v >> 8)
			buf[stream2][off+1] = uint8(v2 >> 8)

			v = single[uint8(br1.value>>shift)].entry
			v2 = single[uint8(br2.value>>shift)].entry
			br1.bitsRead += uint8(v)
			br1.value <<= v & 63
			br2.bitsRead += uint8(v2)
			br2.value <<= v2 & 63
			buf[stream][off+2] = uint8(v >> 8)
			buf[stream2][off+2] = uint8(v2 >> 8)

			v = single[uint8(br1.value>>shift)].entry
			v2 = single[uint8(br2.value>>shift)].entry
			br1.bitsRead += uint8(v)
			br1.value <<= v & 63
			br2.bitsRead += uint8(v2)
			br2.value <<= v2 & 63
			buf[stream][off+3] = uint8(v >> 8)
			buf[stream2][off+3] = uint8(v2 >> 8)
		}

		off += 4

		// off wraps to 0 every 256 bytes; flush all four temp buffers.
		if off == 0 {
			if bufoff > dstEvery {
				d.bufs.Put(buf)
				return nil, errors.New("corruption detected: stream overrun 1")
			}
			// There must at least be 3 buffers left.
			if len(out)-bufoff < dstEvery*3 {
				d.bufs.Put(buf)
				return nil, errors.New("corruption detected: stream overrun 2")
			}
			//copy(out, buf[0][:])
			//copy(out[dstEvery:], buf[1][:])
			//copy(out[dstEvery*2:], buf[2][:])
			*(*[bufoff]byte)(out) = buf[0]
			*(*[bufoff]byte)(out[dstEvery:]) = buf[1]
			*(*[bufoff]byte)(out[dstEvery*2:]) = buf[2]
			*(*[bufoff]byte)(out[dstEvery*3:]) = buf[3]
			out = out[bufoff:]
			decoded += bufoff * 4
		}
	}
	if off > 0 {
		ioff := int(off)
		if len(out) < dstEvery*3+ioff {
			d.bufs.Put(buf)
			return nil, errors.New("corruption detected: stream overrun 3")
		}
		copy(out, buf[0][:off])
		copy(out[dstEvery:], buf[1][:off])
		copy(out[dstEvery*2:], buf[2][:off])
		copy(out[dstEvery*3:], buf[3][:off])
		decoded += int(off) * 4
		out = out[off:]
	}

	// Decode remaining.
	remainBytes := dstEvery - (decoded / 4)
	for i := range br {
		offset := dstEvery * i
		endsAt := offset + remainBytes
		if endsAt > len(out) {
			endsAt = len(out)
		}
		br := &br[i]
		bitsLeft := br.remaining()
		for bitsLeft > 0 {
			if br.finished() {
				d.bufs.Put(buf)
				return nil, io.ErrUnexpectedEOF
			}
			// Inlined refill of the bit buffer.
			if br.bitsRead >= 56 {
				if br.off >= 4 {
					v := br.in[br.off-4:]
					v = v[:4]
					low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
					br.value |= uint64(low) << (br.bitsRead - 32)
					br.bitsRead -= 32
					br.off -= 4
				} else {
					for br.off > 0 {
						br.value |= uint64(br.in[br.off-1]) << (br.bitsRead - 8)
						br.bitsRead -= 8
						br.off--
					}
				}
			}
			// end inline...
			if offset >= endsAt {
				d.bufs.Put(buf)
				return nil, errors.New("corruption detected: stream overrun 4")
			}

			// Read value and increment offset.
			v := single[uint8(br.value>>shift)].entry
			nBits := uint8(v)
			br.advance(nBits)
			bitsLeft -= uint(nBits)
			out[offset] = uint8(v >> 8)
			offset++
		}
		if offset != endsAt {
			d.bufs.Put(buf)
			return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt)
		}
		decoded += offset - dstEvery*i
		err = br.close()
		if err != nil {
			d.bufs.Put(buf)
			return nil, err
		}
	}
	d.bufs.Put(buf)
	if dstSize != decoded {
		return nil, errors.New("corruption detected: short output block")
	}
	return dst, nil
}
// decompress4X8bitExactly will decompress a 4X encoded stream where the
// table log is exactly 8 bits, so the peeked byte needs no shift.
// The length of the supplied input must match the end of a block exactly.
// The *capacity* of the dst slice must match the destination size of
// the uncompressed data exactly.
func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) {
	var br [4]bitReaderBytes
	// The first 6 bytes are a jump table: lengths of streams 0-2
	// (stream 3 runs to the end of src).
	start := 6
	for i := 0; i < 3; i++ {
		length := int(src[i*2]) | (int(src[i*2+1]) << 8)
		if start+length >= len(src) {
			return nil, errors.New("truncated input (or invalid offset)")
		}
		err := br[i].init(src[start : start+length])
		if err != nil {
			return nil, err
		}
		start += length
	}
	err := br[3].init(src[start:])
	if err != nil {
		return nil, err
	}

	// destination, offset to match first output
	dstSize := cap(dst)
	dst = dst[:dstSize]
	out := dst
	dstEvery := (dstSize + 3) / 4

	const shift = 56
	const tlSize = 1 << 8
	single := d.dt.single[:tlSize]

	// Use temp table to avoid bound checks/append penalty.
	buf := d.buffer()
	var off uint8
	var decoded int

	// Decode 4 values from each decoder/loop.
	const bufoff = 256
	for {
		if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 {
			break
		}

		{
			// Interleave 2 decodes.
			const stream = 0
			const stream2 = 1
			br1 := &br[stream]
			br2 := &br[stream2]
			br1.fillFast()
			br2.fillFast()

			v := single[uint8(br1.value>>shift)].entry
			v2 := single[uint8(br2.value>>shift)].entry
			br1.bitsRead += uint8(v)
			br1.value <<= v & 63
			br2.bitsRead += uint8(v2)
			br2.value <<= v2 & 63
			buf[stream][off] = uint8(v >> 8)
			buf[stream2][off] = uint8(v2 >> 8)

			v = single[uint8(br1.value>>shift)].entry
			v2 = single[uint8(br2.value>>shift)].entry
			br1.bitsRead += uint8(v)
			br1.value <<= v & 63
			br2.bitsRead += uint8(v2)
			br2.value <<= v2 & 63
			buf[stream][off+1] = uint8(v >> 8)
			buf[stream2][off+1] = uint8(v2 >> 8)

			v = single[uint8(br1.value>>shift)].entry
			v2 = single[uint8(br2.value>>shift)].entry
			br1.bitsRead += uint8(v)
			br1.value <<= v & 63
			br2.bitsRead += uint8(v2)
			br2.value <<= v2 & 63
			buf[stream][off+2] = uint8(v >> 8)
			buf[stream2][off+2] = uint8(v2 >> 8)

			v = single[uint8(br1.value>>shift)].entry
			v2 = single[uint8(br2.value>>shift)].entry
			br1.bitsRead += uint8(v)
			br1.value <<= v & 63
			br2.bitsRead += uint8(v2)
			br2.value <<= v2 & 63
			buf[stream][off+3] = uint8(v >> 8)
			buf[stream2][off+3] = uint8(v2 >> 8)
		}

		{
			const stream = 2
			const stream2 = 3
			br1 := &br[stream]
			br2 := &br[stream2]
			br1.fillFast()
			br2.fillFast()

			v := single[uint8(br1.value>>shift)].entry
			v2 := single[uint8(br2.value>>shift)].entry
			br1.bitsRead += uint8(v)
			br1.value <<= v & 63
			br2.bitsRead += uint8(v2)
			br2.value <<= v2 & 63
			buf[stream][off] = uint8(v >> 8)
			buf[stream2][off] = uint8(v2 >> 8)

			v = single[uint8(br1.value>>shift)].entry
			v2 = single[uint8(br2.value>>shift)].entry
			br1.bitsRead += uint8(v)
			br1.value <<= v & 63
			br2.bitsRead += uint8(v2)
			br2.value <<= v2 & 63
			buf[stream][off+1] = uint8(v >> 8)
			buf[stream2][off+1] = uint8(v2 >> 8)

			v = single[uint8(br1.value>>shift)].entry
			v2 = single[uint8(br2.value>>shift)].entry
			br1.bitsRead += uint8(v)
			br1.value <<= v & 63
			br2.bitsRead += uint8(v2)
			br2.value <<= v2 & 63
			buf[stream][off+2] = uint8(v >> 8)
			buf[stream2][off+2] = uint8(v2 >> 8)

			v = single[uint8(br1.value>>shift)].entry
			v2 = single[uint8(br2.value>>shift)].entry
			br1.bitsRead += uint8(v)
			br1.value <<= v & 63
			br2.bitsRead += uint8(v2)
			br2.value <<= v2 & 63
			buf[stream][off+3] = uint8(v >> 8)
			buf[stream2][off+3] = uint8(v2 >> 8)
		}

		off += 4

		// off wraps to 0 every 256 bytes; flush all four temp buffers.
		if off == 0 {
			if bufoff > dstEvery {
				d.bufs.Put(buf)
				return nil, errors.New("corruption detected: stream overrun 1")
			}
			// There must at least be 3 buffers left.
			if len(out)-bufoff < dstEvery*3 {
				d.bufs.Put(buf)
				return nil, errors.New("corruption detected: stream overrun 2")
			}
			*(*[bufoff]byte)(out) = buf[0]
			*(*[bufoff]byte)(out[dstEvery:]) = buf[1]
			*(*[bufoff]byte)(out[dstEvery*2:]) = buf[2]
			*(*[bufoff]byte)(out[dstEvery*3:]) = buf[3]
			out = out[bufoff:]
			decoded += bufoff * 4
		}
	}
	if off > 0 {
		ioff := int(off)
		if len(out) < dstEvery*3+ioff {
			// Return the scratch buffer to the pool before failing,
			// consistent with every other error path in this function
			// and with decompress4X8bit (was previously leaked here).
			d.bufs.Put(buf)
			return nil, errors.New("corruption detected: stream overrun 3")
		}
		copy(out, buf[0][:off])
		copy(out[dstEvery:], buf[1][:off])
		copy(out[dstEvery*2:], buf[2][:off])
		copy(out[dstEvery*3:], buf[3][:off])
		decoded += int(off) * 4
		out = out[off:]
	}

	// Decode remaining.
	remainBytes := dstEvery - (decoded / 4)
	for i := range br {
		offset := dstEvery * i
		endsAt := offset + remainBytes
		if endsAt > len(out) {
			endsAt = len(out)
		}
		br := &br[i]
		bitsLeft := br.remaining()
		for bitsLeft > 0 {
			if br.finished() {
				d.bufs.Put(buf)
				return nil, io.ErrUnexpectedEOF
			}
			// Inlined refill of the bit buffer.
			if br.bitsRead >= 56 {
				if br.off >= 4 {
					v := br.in[br.off-4:]
					v = v[:4]
					low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
					br.value |= uint64(low) << (br.bitsRead - 32)
					br.bitsRead -= 32
					br.off -= 4
				} else {
					for br.off > 0 {
						br.value |= uint64(br.in[br.off-1]) << (br.bitsRead - 8)
						br.bitsRead -= 8
						br.off--
					}
				}
			}
			// end inline...
			if offset >= endsAt {
				d.bufs.Put(buf)
				return nil, errors.New("corruption detected: stream overrun 4")
			}

			// Read value and increment offset.
			v := single[br.peekByteFast()].entry
			nBits := uint8(v)
			br.advance(nBits)
			bitsLeft -= uint(nBits)
			out[offset] = uint8(v >> 8)
			offset++
		}
		if offset != endsAt {
			d.bufs.Put(buf)
			return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt)
		}
		decoded += offset - dstEvery*i
		err = br.close()
		if err != nil {
			d.bufs.Put(buf)
			return nil, err
		}
	}
	d.bufs.Put(buf)
	if dstSize != decoded {
		return nil, errors.New("corruption detected: short output block")
	}
	return dst, nil
}
// matches will compare a decoding table to a coding table.
// Errors are written to the writer.
// Nothing will be written if table is ok.
func (s *Scratch) matches(ct cTable, w io.Writer) {
	// Nothing to verify without a populated decoding table.
	if s == nil || len(s.dt.single) == 0 {
		return
	}
	dt := s.dt.single[:1<<s.actualTableLog]
	tablelog := s.actualTableLog
	ok := 0     // symbols whose encoder and decoder entries fully agree
	broken := 0 // symbols with at least one mismatch
	for sym, enc := range ct {
		errs := 0
		broken++ // assume broken; decremented below when the symbol checks out
		if enc.nBits == 0 {
			// Symbol is not encoded; it must not appear anywhere in the decoding table.
			for _, dec := range dt {
				if uint8(dec.entry>>8) == byte(sym) {
					fmt.Fprintf(w, "symbol %x has decoder, but no encoder\n", sym)
					errs++
					break
				}
			}
			if errs == 0 {
				broken--
			}
			continue
		}
		// Unused bits in input
		ub := tablelog - enc.nBits
		top := enc.val << ub
		// decoder looks at top bits.
		dec := dt[top]
		if uint8(dec.entry) != enc.nBits {
			fmt.Fprintf(w, "symbol 0x%x bit size mismatch (enc: %d, dec:%d).\n", sym, enc.nBits, uint8(dec.entry))
			errs++
		}
		if uint8(dec.entry>>8) != uint8(sym) {
			fmt.Fprintf(w, "symbol 0x%x decoder output mismatch (enc: %d, dec:%d).\n", sym, sym, uint8(dec.entry>>8))
			errs++
		}
		if errs > 0 {
			fmt.Fprintf(w, "%d errors in base, stopping\n", errs)
			continue
		}
		// Ensure that all combinations are covered.
		// Every table slot whose top bits equal enc.val must decode to sym with enc.nBits.
		for i := uint16(0); i < (1 << ub); i++ {
			vval := top | i
			dec := dt[vval]
			if uint8(dec.entry) != enc.nBits {
				fmt.Fprintf(w, "symbol 0x%x bit size mismatch (enc: %d, dec:%d).\n", vval, enc.nBits, uint8(dec.entry))
				errs++
			}
			if uint8(dec.entry>>8) != uint8(sym) {
				fmt.Fprintf(w, "symbol 0x%x decoder output mismatch (enc: %d, dec:%d).\n", vval, sym, uint8(dec.entry>>8))
				errs++
			}
			if errs > 20 {
				fmt.Fprintf(w, "%d errors, stopping\n", errs)
				break
			}
		}
		if errs == 0 {
			ok++
			broken--
		}
	}
	if broken > 0 {
		fmt.Fprintf(w, "%d broken, %d ok\n", broken, ok)
	}
}
//go:build amd64 && !appengine && !noasm && gc
// +build amd64,!appengine,!noasm,gc
// This file contains the specialisation of Decoder.Decompress4X
// and Decoder.Decompress1X that use an asm implementation of their main loops.
package huff0
import (
"errors"
"fmt"
"github.com/klauspost/compress/internal/cpuinfo"
)
// decompress4x_main_loop_x86 is an x86 assembler implementation
// of Decompress4X when tablelog > 8.
//
//go:noescape
func decompress4x_main_loop_amd64(ctx *decompress4xContext)
// decompress4x_8b_loop_x86 is an x86 assembler implementation
// of Decompress4X when tablelog <= 8 which decodes 4 entries
// per loop.
//
//go:noescape
func decompress4x_8b_main_loop_amd64(ctx *decompress4xContext)
// fallback8BitSize is the size where using Go version is faster.
const fallback8BitSize = 800
// decompress4xContext is the state shared with the assembler
// implementations of Decompress4X.
type decompress4xContext struct {
	pbr      *[4]bitReaderShifted // one bit reader per interleaved stream
	peekBits uint8                // shift used when peeking: (64 - actualTableLog) & 63
	out      *byte                // start of the output buffer
	dstEvery int                  // distance between the four output streams
	tbl      *dEntrySingle        // decoding table
	decoded  int                  // set by asm: total bytes decoded across all 4 streams
	limit    *byte                // asm stops when the first stream's pointer reaches this
}
// Decompress4X will decompress a 4X encoded stream.
// The length of the supplied input must match the end of a block exactly.
// The *capacity* of the dst slice must match the destination size of
// the uncompressed data exactly.
func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
	if len(d.dt.single) == 0 {
		return nil, errors.New("no table loaded")
	}
	// 6 bytes of jump table plus at least 1 byte per stream.
	if len(src) < 6+(4*1) {
		return nil, errors.New("input too small")
	}
	use8BitTables := d.actualTableLog <= 8
	if cap(dst) < fallback8BitSize && use8BitTables {
		// Small outputs are faster with the pure-Go 8-bit decoder.
		return d.decompress4X8bit(dst, src)
	}
	var br [4]bitReaderShifted
	// Decode "jump table": three little-endian uint16 stream lengths;
	// the fourth stream takes whatever remains.
	start := 6
	for i := 0; i < 3; i++ {
		length := int(src[i*2]) | (int(src[i*2+1]) << 8)
		if start+length >= len(src) {
			return nil, errors.New("truncated input (or invalid offset)")
		}
		err := br[i].init(src[start : start+length])
		if err != nil {
			return nil, err
		}
		start += length
	}
	err := br[3].init(src[start:])
	if err != nil {
		return nil, err
	}
	// destination, offset to match first output
	dstSize := cap(dst)
	dst = dst[:dstSize]
	out := dst
	dstEvery := (dstSize + 3) / 4
	const tlSize = 1 << tableLogMax
	const tlMask = tlSize - 1
	single := d.dt.single[:tlSize]
	var decoded int
	// Run the assembler main loop only when there is enough output space
	// and every bit reader has at least 4 bytes buffered.
	if len(out) > 4*4 && !(br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4) {
		ctx := decompress4xContext{
			pbr:      &br,
			peekBits: uint8((64 - d.actualTableLog) & 63), // see: bitReaderShifted.peekBitsFast()
			out:      &out[0],
			dstEvery: dstEvery,
			tbl:      &single[0],
			limit:    &out[dstEvery-4], // Always stop decoding when first buffer gets here to avoid writing OOB on last.
		}
		if use8BitTables {
			decompress4x_8b_main_loop_amd64(&ctx)
		} else {
			decompress4x_main_loop_amd64(&ctx)
		}
		decoded = ctx.decoded
		out = out[decoded/4:]
	}
	// Decode remaining bytes of each stream with the generic Go loop.
	remainBytes := dstEvery - (decoded / 4)
	for i := range br {
		offset := dstEvery * i
		endsAt := offset + remainBytes
		if endsAt > len(out) {
			endsAt = len(out)
		}
		br := &br[i]
		bitsLeft := br.remaining()
		for bitsLeft > 0 {
			br.fill()
			if offset >= endsAt {
				return nil, errors.New("corruption detected: stream overrun 4")
			}
			// Read value and increment offset.
			val := br.peekBitsFast(d.actualTableLog)
			v := single[val&tlMask].entry
			nBits := uint8(v)
			br.advance(nBits)
			bitsLeft -= uint(nBits)
			out[offset] = uint8(v >> 8)
			offset++
		}
		if offset != endsAt {
			return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt)
		}
		decoded += offset - dstEvery*i
		err = br.close()
		if err != nil {
			return nil, err
		}
	}
	if dstSize != decoded {
		return nil, errors.New("corruption detected: short output block")
	}
	return dst, nil
}
// decompress1x_main_loop_amd64 is an x86 assembler implementation
// of Decompress1X when tablelog > 8.
//
//go:noescape
func decompress1x_main_loop_amd64(ctx *decompress1xContext)
// decompress1x_main_loop_bmi2 is an x86 (with BMI2) assembler implementation
// of Decompress1X when tablelog > 8.
//
//go:noescape
func decompress1x_main_loop_bmi2(ctx *decompress1xContext)
// decompress1xContext is the state shared with the assembler
// implementations of Decompress1X.
type decompress1xContext struct {
	pbr      *bitReaderShifted // input bit reader
	peekBits uint8             // shift used when peeking: (64 - actualTableLog) & 63
	out      *byte             // start of the output buffer
	outCap   int               // capacity of the output buffer
	tbl      *dEntrySingle     // decoding table
	decoded  int               // set by asm: bytes decoded, or error_max_decoded_size_exeeded
}
// Error reported by asm implementations when the output would exceed outCap.
// (Identifier misspelling "exeeded" is preserved; renaming would require a
// coordinated change at every use site.)
const error_max_decoded_size_exeeded = -1
// Decompress1X will decompress a 1X encoded stream.
// The cap of the output buffer will be the maximum decompressed size.
// The length of the supplied input must match the end of a block exactly.
func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) {
	if len(d.dt.single) == 0 {
		return nil, errors.New("no table loaded")
	}
	var br bitReaderShifted
	err := br.init(src)
	if err != nil {
		return dst, err
	}
	maxDecodedSize := cap(dst)
	dst = dst[:maxDecodedSize]
	const tlSize = 1 << tableLogMax
	const tlMask = tlSize - 1
	if maxDecodedSize >= 4 {
		// Bulk of the work is done in assembler; it leaves the tail
		// (and its position) in ctx/br for the Go loop below.
		ctx := decompress1xContext{
			pbr:      &br,
			out:      &dst[0],
			outCap:   maxDecodedSize,
			peekBits: uint8((64 - d.actualTableLog) & 63), // see: bitReaderShifted.peekBitsFast()
			tbl:      &d.dt.single[0],
		}
		if cpuinfo.HasBMI2() {
			decompress1x_main_loop_bmi2(&ctx)
		} else {
			decompress1x_main_loop_amd64(&ctx)
		}
		if ctx.decoded == error_max_decoded_size_exeeded {
			return nil, ErrMaxDecodedSizeExceeded
		}
		dst = dst[:ctx.decoded]
	}
	// Decode the remaining bits in Go.
	// br < 8, so uint8 is fine
	bitsLeft := uint8(br.off)*8 + 64 - br.bitsRead
	for bitsLeft > 0 {
		br.fill()
		if len(dst) >= maxDecodedSize {
			br.close()
			return nil, ErrMaxDecodedSizeExceeded
		}
		v := d.dt.single[br.peekBitsFast(d.actualTableLog)&tlMask]
		nBits := uint8(v.entry)
		br.advance(nBits)
		bitsLeft -= nBits
		dst = append(dst, uint8(v.entry>>8))
	}
	return dst, br.close()
}
// Package huff0 provides fast huffman encoding as used in zstd.
//
// See README.md at https://github.com/klauspost/compress/tree/master/huff0 for details.
package huff0
import (
"errors"
"fmt"
"math"
"math/bits"
"sync"
"github.com/klauspost/compress/fse"
)
const (
	// maxSymbolValue is the highest symbol value that can occur (byte alphabet).
	maxSymbolValue = 255

	// zstandard limits tablelog to 11, see:
	// https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#huffman-tree-description
	tableLogMax     = 11
	tableLogDefault = 11
	minTablelog     = 5

	// huffNodesLen is the size of the tree-builder scratch buffer (see Scratch.nodes).
	huffNodesLen = 512

	// BlockSizeMax is maximum input size for a single block uncompressed.
	BlockSizeMax = 1<<18 - 1
)
var (
	// ErrIncompressible is returned when input is judged to be too hard to compress.
	ErrIncompressible = errors.New("input is not compressible")

	// ErrUseRLE is returned from the compressor when the input is a single byte value repeated.
	ErrUseRLE = errors.New("input is single value repeated")

	// ErrTooBig is returned if input is too large for a single block.
	ErrTooBig = errors.New("input too big")

	// ErrMaxDecodedSizeExceeded is returned when decoded output would exceed
	// the configured maximum (see Scratch.MaxDecodedSize).
	ErrMaxDecodedSizeExceeded = errors.New("maximum output size exceeded")
)
// ReusePolicy controls if and how a previously generated table is reused
// for subsequent blocks.
type ReusePolicy uint8

const (
	// ReusePolicyAllow will allow reuse if it produces smaller output.
	ReusePolicyAllow ReusePolicy = iota

	// ReusePolicyPrefer will re-use aggressively if possible.
	// This will not check if a new table will produce smaller output,
	// except if the current table is impossible to use or
	// compressed output is bigger than input.
	ReusePolicyPrefer

	// ReusePolicyNone will disable re-use of tables.
	// This is slightly faster than ReusePolicyAllow but may produce larger output.
	ReusePolicyNone

	// ReusePolicyMust must allow reuse and produce smaller output.
	ReusePolicyMust
)
// Scratch holds per-stream state and reusable buffers for huff0
// compression and decompression. Re-using one Scratch avoids allocations.
type Scratch struct {
	count [maxSymbolValue + 1]uint32 // per-symbol histogram of the current input

	// Per block parameters.
	// These can be used to override compression parameters of the block.
	// Do not touch, unless you know what you are doing.

	// Out is output buffer.
	// If the scratch is re-used before the caller is done processing the output,
	// set this field to nil.
	// Otherwise the output buffer will be re-used for next Compression/Decompression step
	// and allocation will be avoided.
	Out []byte

	// OutTable will contain the table data only, if a new table has been generated.
	// Slice of the returned data.
	OutTable []byte

	// OutData will contain the compressed data.
	// Slice of the returned data.
	OutData []byte

	// MaxDecodedSize will set the maximum allowed output size.
	// This value will automatically be set to BlockSizeMax if not set.
	// Decoders will return ErrMaxDecodedSizeExceeded if this limit is exceeded.
	MaxDecodedSize int

	srcLen int // length of the input most recently passed to prepare

	// MaxSymbolValue will override the maximum symbol value of the next block.
	MaxSymbolValue uint8

	// TableLog will attempt to override the tablelog for the next block.
	// Must be <= 11 and >= 5.
	TableLog uint8

	// Reuse will specify the reuse policy
	Reuse ReusePolicy

	// WantLogLess allows to specify a log 2 reduction that should at least be achieved,
	// otherwise the block will be returned as incompressible.
	// The reduction should then at least be (input size >> WantLogLess)
	// If WantLogLess == 0 any improvement will do.
	WantLogLess uint8

	symbolLen      uint16 // Length of active part of the symbol table.
	maxCount       int    // count of the most probable symbol
	clearCount     bool   // clear count
	actualTableLog uint8  // Selected tablelog.
	prevTableLog   uint8  // Tablelog for previous table
	prevTable      cTable // Table used for previous compression.
	cTable         cTable // compression table
	dt             dTable // decompression table
	nodes          []nodeElt
	tmpOut         [4][]byte
	fse            *fse.Scratch
	decPool        sync.Pool // *[4][256]byte buffers.
	huffWeight     [maxSymbolValue + 1]byte
}
// TransferCTable will transfer the previously used compression table.
func (s *Scratch) TransferCTable(src *Scratch) {
	want := len(src.prevTable)
	if cap(s.prevTable) < want {
		// Allocate at full size once, so later transfers also fit.
		s.prevTable = make(cTable, 0, maxSymbolValue+1)
	}
	s.prevTable = s.prevTable[:want]
	copy(s.prevTable, src.prevTable)
	s.prevTableLog = src.prevTableLog
}
// prepare validates parameters and (re)initializes the scratch buffers
// for an input of len(in) bytes. It returns the Scratch to use (allocating
// one when s is nil) or an error for invalid sizes/settings.
func (s *Scratch) prepare(in []byte) (*Scratch, error) {
	if len(in) > BlockSizeMax {
		return nil, ErrTooBig
	}
	if s == nil {
		s = &Scratch{}
	}
	// Fill in defaults for unset per-block parameters.
	if s.MaxSymbolValue == 0 {
		s.MaxSymbolValue = maxSymbolValue
	}
	if s.TableLog == 0 {
		s.TableLog = tableLogDefault
	}
	if s.TableLog < minTablelog || s.TableLog > tableLogMax {
		return nil, fmt.Errorf(" invalid tableLog %d (%d -> %d)", s.TableLog, minTablelog, tableLogMax)
	}
	if s.MaxDecodedSize <= 0 || s.MaxDecodedSize > BlockSizeMax {
		s.MaxDecodedSize = BlockSizeMax
	}
	if s.clearCount && s.maxCount == 0 {
		// Zero the whole histogram in one assignment.
		s.count = [maxSymbolValue + 1]uint32{}
		s.clearCount = false
	}
	// Reset output buffers, keeping capacity where possible.
	if cap(s.Out) == 0 {
		s.Out = make([]byte, 0, len(in))
	}
	s.Out, s.OutTable, s.OutData = s.Out[:0], nil, nil
	// Tree-builder scratch.
	if cap(s.nodes) < huffNodesLen+1 {
		s.nodes = make([]nodeElt, 0, huffNodesLen+1)
	}
	s.nodes = s.nodes[:0]
	if s.fse == nil {
		s.fse = &fse.Scratch{}
	}
	s.srcLen = len(in)
	return s, nil
}
// cTable is a compression (encoding) table, indexed by symbol value.
type cTable []cTableEntry
// write appends the serialized table description for c to s.Out.
// The description is either FSE-compressed weights (preceded by a length
// byte < 128) or, as a fallback, raw 4-bit packed weights (preceded by
// 128|count). Must be kept in sync with estTableSize.
func (c cTable) write(s *Scratch) error {
	var (
		// precomputed conversion table
		bitsToWeight [tableLogMax + 1]byte
		huffLog      = s.actualTableLog
		// last weight is not saved.
		maxSymbolValue = uint8(s.symbolLen - 1)
		huffWeight     = s.huffWeight[:256]
	)
	const (
		maxFSETableLog = 6
	)
	// convert to weight
	bitsToWeight[0] = 0
	for n := uint8(1); n < huffLog+1; n++ {
		bitsToWeight[n] = huffLog + 1 - n
	}
	// Acquire histogram for FSE.
	hist := s.fse.Histogram()
	hist = hist[:256]
	// Only weights 0..15 can occur; clear just that range.
	for i := range hist[:16] {
		hist[i] = 0
	}
	for n := uint8(0); n < maxSymbolValue; n++ {
		v := bitsToWeight[c[n].nBits] & 15
		huffWeight[n] = v
		hist[v]++
	}
	// FSE compress if feasible.
	if maxSymbolValue >= 2 {
		huffMaxCnt := uint32(0)
		huffMax := uint8(0)
		for i, v := range hist[:16] {
			if v == 0 {
				continue
			}
			huffMax = byte(i)
			if v > huffMaxCnt {
				huffMaxCnt = v
			}
		}
		s.fse.HistogramFinished(huffMax, int(huffMaxCnt))
		s.fse.TableLog = maxFSETableLog
		b, err := fse.Compress(huffWeight[:maxSymbolValue], s.fse)
		if err == nil && len(b) < int(s.symbolLen>>1) {
			// Worthwhile: length byte followed by the FSE payload.
			s.Out = append(s.Out, uint8(len(b)))
			s.Out = append(s.Out, b...)
			return nil
		}
		// Unable to compress (RLE/uncompressible)
	}
	// write raw values as 4-bits (max : 15)
	if maxSymbolValue > (256 - 128) {
		// should not happen : likely means source cannot be compressed
		return ErrIncompressible
	}
	op := s.Out
	// special case, pack weights 4 bits/weight.
	op = append(op, 128|(maxSymbolValue-1))
	// be sure it doesn't cause msan issue in final combination
	huffWeight[maxSymbolValue] = 0
	for n := uint16(0); n < uint16(maxSymbolValue); n += 2 {
		op = append(op, (huffWeight[n]<<4)|huffWeight[n+1])
	}
	s.Out = op
	return nil
}
// estTableSize returns the number of bytes that write would append for c,
// without producing any output. It mirrors cTable.write; the two must be
// kept in sync.
func (c cTable) estTableSize(s *Scratch) (sz int, err error) {
	var (
		// precomputed conversion table
		bitsToWeight [tableLogMax + 1]byte
		huffLog      = s.actualTableLog
		// last weight is not saved.
		maxSymbolValue = uint8(s.symbolLen - 1)
		huffWeight     = s.huffWeight[:256]
	)
	const (
		maxFSETableLog = 6
	)
	// convert to weight
	bitsToWeight[0] = 0
	for n := uint8(1); n < huffLog+1; n++ {
		bitsToWeight[n] = huffLog + 1 - n
	}
	// Acquire histogram for FSE.
	hist := s.fse.Histogram()
	hist = hist[:256]
	// Only weights 0..15 can occur; clear just that range.
	for i := range hist[:16] {
		hist[i] = 0
	}
	for n := uint8(0); n < maxSymbolValue; n++ {
		v := bitsToWeight[c[n].nBits] & 15
		huffWeight[n] = v
		hist[v]++
	}
	// FSE compress if feasible.
	if maxSymbolValue >= 2 {
		huffMaxCnt := uint32(0)
		huffMax := uint8(0)
		for i, v := range hist[:16] {
			if v == 0 {
				continue
			}
			huffMax = byte(i)
			if v > huffMaxCnt {
				huffMaxCnt = v
			}
		}
		s.fse.HistogramFinished(huffMax, int(huffMaxCnt))
		s.fse.TableLog = maxFSETableLog
		b, err := fse.Compress(huffWeight[:maxSymbolValue], s.fse)
		if err == nil && len(b) < int(s.symbolLen>>1) {
			// Length byte + FSE payload.
			sz += 1 + len(b)
			return sz, nil
		}
		// Unable to compress (RLE/uncompressible)
	}
	// write raw values as 4-bits (max : 15)
	if maxSymbolValue > (256 - 128) {
		// should not happen : likely means source cannot be compressed
		return 0, ErrIncompressible
	}
	// special case, pack weights 4 bits/weight.
	sz += 1 + int(maxSymbolValue/2)
	return sz, nil
}
// estimateSize returns the estimated size in bytes of the input represented in the
// histogram supplied.
func (c cTable) estimateSize(hist []uint32) int {
	// Start at 7 so the final >>3 rounds the bit count up to whole bytes.
	total := uint32(7)
	entries := c[:len(hist)]
	for i := range entries {
		total += uint32(entries[i].nBits) * hist[i]
	}
	return int(total >> 3)
}
// minSize returns the minimum possible size considering the shannon limit.
func (s *Scratch) minSize(total int) int {
	// 7 extra bits round the final bit count up to whole bytes.
	bits := float64(7)
	fTotal := float64(total)
	for _, cnt := range s.count[:s.symbolLen] {
		if cnt == 0 {
			continue
		}
		n := float64(cnt)
		bits += math.Log2(fTotal/n) * n
	}
	return int(bits) >> 3
}
// highBit32 returns the zero-based position of the highest set bit of val.
// For val == 0 the subtraction wraps, yielding math.MaxUint32.
func highBit32(val uint32) (n uint32) {
	return uint32(bits.Len32(val)) - 1
}
// Copyright 2011 The Snappy-Go Authors. All rights reserved.
// Copyright (c) 2019 Klaus Post. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package s2
import (
"encoding/binary"
"errors"
"fmt"
"strconv"
"github.com/klauspost/compress/internal/race"
)
var (
// ErrCorrupt reports that the input is invalid.
ErrCorrupt = errors.New("s2: corrupt input")
// ErrCRC reports that the input failed CRC validation (streams only)
ErrCRC = errors.New("s2: corrupt input, crc mismatch")
// ErrTooLarge reports that the uncompressed length is too large.
ErrTooLarge = errors.New("s2: decoded block is too large")
// ErrUnsupported reports that the input isn't supported.
ErrUnsupported = errors.New("s2: unsupported input")
)
// DecodedLen returns the length of the decoded block.
func DecodedLen(src []byte) (int, error) {
	blockLen, _, err := decodedLen(src)
	return blockLen, err
}
// decodedLen returns the length of the decoded block and the number of bytes
// that the length header occupied.
func decodedLen(src []byte) (blockLen, headerLen int, err error) {
	length, hdr := binary.Uvarint(src)
	if hdr <= 0 || length > 0xffffffff {
		// Malformed varint, or a length the block format cannot express.
		return 0, 0, ErrCorrupt
	}
	// wordSize is 32 or 64 depending on the host platform.
	const wordSize = 32 << (^uint(0) >> 32 & 1)
	if wordSize == 32 && length > 0x7fffffff {
		// Would overflow int on 32-bit platforms.
		return 0, 0, ErrTooLarge
	}
	return int(length), hdr, nil
}
const (
	// decodeErrCodeCorrupt is the error code returned by the block decode
	// routines (e.g. s2Decode, s2DecodeDict) on corrupt input.
	decodeErrCodeCorrupt = 1
)
// Decode returns the decoded form of src. The returned slice may be a sub-
// slice of dst if dst was large enough to hold the entire decoded block.
// Otherwise, a newly allocated slice will be returned.
//
// The dst and src must not overlap. It is valid to pass a nil dst.
func Decode(dst, src []byte) ([]byte, error) {
	dLen, hdrLen, err := decodedLen(src)
	if err != nil {
		return nil, err
	}
	// Reuse dst when it can hold the whole block; otherwise allocate.
	if dLen > cap(dst) {
		dst = make([]byte, dLen)
	} else {
		dst = dst[:dLen]
	}
	race.WriteSlice(dst)
	race.ReadSlice(src[hdrLen:])
	if s2Decode(dst, src[hdrLen:]) != 0 {
		return nil, ErrCorrupt
	}
	return dst, nil
}
// s2DecodeDict writes the decoding of src to dst. It assumes that the varint-encoded
// length of the decompressed bytes has already been read, and that len(dst)
// equals that length.
//
// It returns 0 on success or a decodeErrCodeXxx error code on failure.
func s2DecodeDict(dst, src []byte, dict *Dict) int {
	if dict == nil {
		// No dictionary: use the plain block decoder.
		return s2Decode(dst, src)
	}
	const debug = false
	const debugErrs = debug
	if debug {
		fmt.Println("Starting decode, dst len:", len(dst))
	}
	// d = write position in dst, s = read position in src,
	// length = length of the current literal run or copy.
	var d, s, length int
	// offset starts as a back-reference into the dictionary: the distance
	// from the end of dict.dict to its repeat position.
	offset := len(dict.dict) - dict.repeat

	// Main loop: while at least 5 bytes remain, every tag plus its longest
	// possible header fits, so per-tag length checks can be skipped.
	// As long as we can read at least 5 bytes...
	for s < len(src)-5 {
		// Removing bounds checks is SLOWER, when if doing
		// in := src[s:s+5]
		// Checked on Go 1.18
		switch src[s] & 0x03 {
		case tagLiteral:
			x := uint32(src[s] >> 2)
			switch {
			case x < 60:
				// Literal length encoded directly in the tag byte.
				s++
			case x == 60:
				s += 2
				x = uint32(src[s-1])
			case x == 61:
				in := src[s : s+3]
				x = uint32(in[1]) | uint32(in[2])<<8
				s += 3
			case x == 62:
				in := src[s : s+4]
				// Load as 32 bit and shift down.
				x = uint32(in[0]) | uint32(in[1])<<8 | uint32(in[2])<<16 | uint32(in[3])<<24
				x >>= 8
				s += 4
			case x == 63:
				in := src[s : s+5]
				x = uint32(in[1]) | uint32(in[2])<<8 | uint32(in[3])<<16 | uint32(in[4])<<24
				s += 5
			}
			length = int(x) + 1
			if debug {
				fmt.Println("literals, length:", length, "d-after:", d+length)
			}
			// The strconv.IntSize == 32 clause catches int overflow of x+1 on 32-bit.
			if length > len(dst)-d || length > len(src)-s || (strconv.IntSize == 32 && length <= 0) {
				if debugErrs {
					fmt.Println("corrupt literal: length:", length, "d-left:", len(dst)-d, "src-left:", len(src)-s)
				}
				return decodeErrCodeCorrupt
			}
			copy(dst[d:], src[s:s+length])
			d += length
			s += length
			continue
		case tagCopy1:
			s += 2
			toffset := int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]))
			length = int(src[s-2]) >> 2 & 0x7
			if toffset == 0 {
				// Offset 0 means "repeat": reuse the previous offset and
				// read an extended length depending on the 3-bit length code.
				if debug {
					fmt.Print("(repeat) ")
				}
				// keep last offset
				switch length {
				case 5:
					length = int(src[s]) + 4
					s += 1
				case 6:
					in := src[s : s+2]
					length = int(uint32(in[0])|(uint32(in[1])<<8)) + (1 << 8)
					s += 2
				case 7:
					in := src[s : s+3]
					length = int((uint32(in[2])<<16)|(uint32(in[1])<<8)|uint32(in[0])) + (1 << 16)
					s += 3
				default: // 0-> 4
				}
			} else {
				offset = toffset
			}
			length += 4
		case tagCopy2:
			in := src[s : s+3]
			offset = int(uint32(in[1]) | uint32(in[2])<<8)
			length = 1 + int(in[0])>>2
			s += 3
		case tagCopy4:
			in := src[s : s+5]
			offset = int(uint32(in[1]) | uint32(in[2])<<8 | uint32(in[3])<<16 | uint32(in[4])<<24)
			length = 1 + int(in[0])>>2
			s += 5
		}
		if offset <= 0 || length > len(dst)-d {
			if debugErrs {
				fmt.Println("match error; offset:", offset, "length:", length, "dst-left:", len(dst)-d)
			}
			return decodeErrCodeCorrupt
		}
		// copy from dict: the offset reaches back beyond the start of dst.
		if d < offset {
			if d > MaxDictSrcOffset {
				if debugErrs {
					fmt.Println("dict after", MaxDictSrcOffset, "d:", d, "offset:", offset, "length:", length)
				}
				return decodeErrCodeCorrupt
			}
			startOff := len(dict.dict) - offset + d
			if startOff < 0 || startOff+length > len(dict.dict) {
				if debugErrs {
					fmt.Printf("offset (%d) + length (%d) bigger than dict (%d)\n", offset, length, len(dict.dict))
				}
				return decodeErrCodeCorrupt
			}
			if debug {
				fmt.Println("dict copy, length:", length, "offset:", offset, "d-after:", d+length, "dict start offset:", startOff)
			}
			copy(dst[d:d+length], dict.dict[startOff:])
			d += length
			continue
		}
		if debug {
			fmt.Println("copy, length:", length, "offset:", offset, "d-after:", d+length)
		}
		// Copy from an earlier sub-slice of dst to a later sub-slice.
		// If no overlap, use the built-in copy:
		if offset > length {
			copy(dst[d:d+length], dst[d-offset:])
			d += length
			continue
		}
		// Unlike the built-in copy function, this byte-by-byte copy always runs
		// forwards, even if the slices overlap. Conceptually, this is:
		//
		// d += forwardCopy(dst[d:d+length], dst[d-offset:])
		//
		// We align the slices into a and b and show the compiler they are the same size.
		// This allows the loop to run without bounds checks.
		a := dst[d : d+length]
		b := dst[d-offset:]
		b = b[:len(a)]
		for i := range a {
			a[i] = b[i]
		}
		d += length
	}

	// Tail loop: fewer than 6 bytes remain, so every advance of s is
	// followed by an explicit bounds check before src is read.
	// Remaining with extra checks...
	for s < len(src) {
		switch src[s] & 0x03 {
		case tagLiteral:
			x := uint32(src[s] >> 2)
			switch {
			case x < 60:
				s++
			case x == 60:
				s += 2
				if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
					if debugErrs {
						fmt.Println("src went oob")
					}
					return decodeErrCodeCorrupt
				}
				x = uint32(src[s-1])
			case x == 61:
				s += 3
				if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
					if debugErrs {
						fmt.Println("src went oob")
					}
					return decodeErrCodeCorrupt
				}
				x = uint32(src[s-2]) | uint32(src[s-1])<<8
			case x == 62:
				s += 4
				if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
					if debugErrs {
						fmt.Println("src went oob")
					}
					return decodeErrCodeCorrupt
				}
				x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16
			case x == 63:
				s += 5
				if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
					if debugErrs {
						fmt.Println("src went oob")
					}
					return decodeErrCodeCorrupt
				}
				x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24
			}
			length = int(x) + 1
			if length > len(dst)-d || length > len(src)-s || (strconv.IntSize == 32 && length <= 0) {
				if debugErrs {
					fmt.Println("corrupt literal: length:", length, "d-left:", len(dst)-d, "src-left:", len(src)-s)
				}
				return decodeErrCodeCorrupt
			}
			if debug {
				fmt.Println("literals, length:", length, "d-after:", d+length)
			}
			copy(dst[d:], src[s:s+length])
			d += length
			s += length
			continue
		case tagCopy1:
			s += 2
			if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
				if debugErrs {
					fmt.Println("src went oob")
				}
				return decodeErrCodeCorrupt
			}
			length = int(src[s-2]) >> 2 & 0x7
			toffset := int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]))
			if toffset == 0 {
				if debug {
					fmt.Print("(repeat) ")
				}
				// keep last offset
				switch length {
				case 5:
					s += 1
					if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
						if debugErrs {
							fmt.Println("src went oob")
						}
						return decodeErrCodeCorrupt
					}
					length = int(uint32(src[s-1])) + 4
				case 6:
					s += 2
					if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
						if debugErrs {
							fmt.Println("src went oob")
						}
						return decodeErrCodeCorrupt
					}
					length = int(uint32(src[s-2])|(uint32(src[s-1])<<8)) + (1 << 8)
				case 7:
					s += 3
					if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
						if debugErrs {
							fmt.Println("src went oob")
						}
						return decodeErrCodeCorrupt
					}
					length = int(uint32(src[s-3])|(uint32(src[s-2])<<8)|(uint32(src[s-1])<<16)) + (1 << 16)
				default: // 0-> 4
				}
			} else {
				offset = toffset
			}
			length += 4
		case tagCopy2:
			s += 3
			if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
				if debugErrs {
					fmt.Println("src went oob")
				}
				return decodeErrCodeCorrupt
			}
			length = 1 + int(src[s-3])>>2
			offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8)
		case tagCopy4:
			s += 5
			if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
				if debugErrs {
					fmt.Println("src went oob")
				}
				return decodeErrCodeCorrupt
			}
			length = 1 + int(src[s-5])>>2
			offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24)
		}
		if offset <= 0 || length > len(dst)-d {
			if debugErrs {
				fmt.Println("match error; offset:", offset, "length:", length, "dst-left:", len(dst)-d)
			}
			return decodeErrCodeCorrupt
		}
		// copy from dict
		if d < offset {
			if d > MaxDictSrcOffset {
				if debugErrs {
					fmt.Println("dict after", MaxDictSrcOffset, "d:", d, "offset:", offset, "length:", length)
				}
				return decodeErrCodeCorrupt
			}
			rOff := len(dict.dict) - (offset - d)
			if debug {
				fmt.Println("starting dict entry from dict offset", len(dict.dict)-rOff)
			}
			if rOff+length > len(dict.dict) {
				if debugErrs {
					fmt.Println("err: END offset", rOff+length, "bigger than dict", len(dict.dict), "dict offset:", rOff, "length:", length)
				}
				return decodeErrCodeCorrupt
			}
			if rOff < 0 {
				if debugErrs {
					fmt.Println("err: START offset", rOff, "less than 0", len(dict.dict), "dict offset:", rOff, "length:", length)
				}
				return decodeErrCodeCorrupt
			}
			copy(dst[d:d+length], dict.dict[rOff:])
			d += length
			continue
		}
		if debug {
			fmt.Println("copy, length:", length, "offset:", offset, "d-after:", d+length)
		}
		// Copy from an earlier sub-slice of dst to a later sub-slice.
		// If no overlap, use the built-in copy:
		if offset > length {
			copy(dst[d:d+length], dst[d-offset:])
			d += length
			continue
		}
		// Unlike the built-in copy function, this byte-by-byte copy always runs
		// forwards, even if the slices overlap. Conceptually, this is:
		//
		// d += forwardCopy(dst[d:d+length], dst[d-offset:])
		//
		// We align the slices into a and b and show the compiler they are the same size.
		// This allows the loop to run without bounds checks.
		a := dst[d : d+length]
		b := dst[d-offset:]
		b = b[:len(a)]
		for i := range a {
			a[i] = b[i]
		}
		d += length
	}

	// The whole of dst must have been produced exactly.
	if d != len(dst) {
		if debugErrs {
			fmt.Println("wanted length", len(dst), "got", d)
		}
		return decodeErrCodeCorrupt
	}
	return 0
}
// Copyright (c) 2022+ Klaus Post. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package s2
import (
"bytes"
"encoding/binary"
"sync"
)
const (
// MinDictSize is the minimum dictionary size when repeat has been read.
MinDictSize = 16
// MaxDictSize is the maximum dictionary size when repeat has been read.
MaxDictSize = 65536
// MaxDictSrcOffset is the maximum offset where a dictionary entry can start.
MaxDictSrcOffset = 65535
)
// Dict contains a dictionary that can be used for encoding and decoding s2
// blocks.
type Dict struct {
	dict   []byte // dictionary content; kept with >=16 bytes spare capacity (see NewDict)
	repeat int    // Repeat as index of dict

	// One-time initializers and encoder hash tables for the fast/better/best
	// encoder paths (built lazily elsewhere; not populated here).
	fast, better, best sync.Once
	fastTable          *[1 << 14]uint16
	betterTableShort   *[1 << 14]uint16
	betterTableLong    *[1 << 17]uint16
	bestTableShort     *[1 << 16]uint32
	bestTableLong      *[1 << 19]uint32
}
// NewDict will read a dictionary.
// It will return nil if the dictionary is invalid.
func NewDict(dict []byte) *Dict {
	if len(dict) == 0 {
		return nil
	}
	// The serialized form starts with the uvarint-encoded repeat index.
	repeat, n := binary.Uvarint(dict)
	if n <= 0 {
		return nil
	}
	payload := dict[n:]
	d := Dict{dict: payload}
	if cap(d.dict) < len(d.dict)+16 {
		// Re-allocate so the content carries at least 16 bytes spare capacity.
		d.dict = append(make([]byte, 0, len(d.dict)+16), d.dict...)
	}
	if len(payload) < MinDictSize || len(payload) > MaxDictSize {
		return nil
	}
	d.repeat = int(repeat)
	if d.repeat > len(payload) {
		return nil
	}
	return &d
}
// Bytes will return a serialized version of the dictionary.
// The output can be sent to NewDict.
func (d *Dict) Bytes() []byte {
	out := make([]byte, binary.MaxVarintLen16+len(d.dict))
	n := binary.PutUvarint(out, uint64(d.repeat))
	return append(out[:n], d.dict...)
}
// MakeDict will create a dictionary.
// 'data' must be at least MinDictSize.
// If data is longer than MaxDictSize only the last MaxDictSize bytes will be used.
// If searchStart is set the start repeat value will be set to the last
// match of this content.
// If no matches are found, it will attempt to find shorter matches.
// This content should match the typical start of a block.
// If at least 4 bytes cannot be matched, repeat is set to start of block.
func MakeDict(data []byte, searchStart []byte) *Dict {
	if len(data) == 0 {
		return nil
	}
	// Keep only the trailing MaxDictSize bytes.
	if len(data) > MaxDictSize {
		data = data[len(data)-MaxDictSize:]
	}
	d := Dict{dict: data}
	if cap(d.dict) < len(d.dict)+16 {
		// Re-allocate so the content carries at least 16 bytes spare capacity.
		d.dict = append(make([]byte, 0, len(d.dict)+16), d.dict...)
	}
	if len(data) < MinDictSize {
		return nil
	}
	// Find the longest match possible, last entry if multiple.
	for n := len(searchStart); n > 4; n-- {
		if idx := bytes.LastIndex(data, searchStart[:n]); idx >= 0 && idx <= len(data)-8 {
			d.repeat = idx
			break
		}
	}
	return &d
}
// MakeDictManual will create a dictionary.
// 'data' must be at least MinDictSize and less than or equal to MaxDictSize.
// A manual first repeat index into data must be provided.
// It must be less than len(data)-8.
func MakeDictManual(data []byte, firstIdx uint16) *Dict {
	// Validate size bounds and the repeat index (checked in this order,
	// so a too-short data never reaches the index comparison).
	switch {
	case len(data) < MinDictSize:
		return nil
	case int(firstIdx) >= len(data)-8:
		return nil
	case len(data) > MaxDictSize:
		return nil
	}
	d := Dict{dict: data, repeat: int(firstIdx)}
	if cap(d.dict) < len(d.dict)+16 {
		// Re-allocate so the content carries at least 16 bytes spare capacity.
		d.dict = append(make([]byte, 0, len(d.dict)+16), d.dict...)
	}
	return &d
}
// Encode returns the encoded form of src. The returned slice may be a sub-
// slice of dst if dst was large enough to hold the entire encoded block.
// Otherwise, a newly allocated slice will be returned.
//
// The dst and src must not overlap. It is valid to pass a nil dst.
//
// The blocks will require the same amount of memory to decode as encoding,
// and does not make for concurrent decoding.
// Also note that blocks do not contain CRC information, so corruption may be undetected.
//
// If you need to encode larger amounts of data, consider using
// the streaming interface which gives all of these features.
func (d *Dict) Encode(dst, src []byte) []byte {
	maxLen := MaxEncodedLen(len(src))
	if maxLen < 0 {
		panic(ErrTooLarge)
	}
	// Reuse dst when it has enough capacity; otherwise allocate.
	if cap(dst) < maxLen {
		dst = make([]byte, maxLen)
	} else {
		dst = dst[:maxLen]
	}
	// The block starts with the varint-encoded length of the decompressed bytes.
	dstP := binary.PutUvarint(dst, uint64(len(src)))
	switch {
	case len(src) == 0:
		return dst[:dstP]
	case len(src) < minNonLiteralBlockSize:
		// Too small for matching; emit as a single literal run.
		return dst[:dstP+emitLiteral(dst[dstP:], src)]
	}
	if n := encodeBlockDictGo(dst[dstP:], src, d); n > 0 {
		return dst[:dstP+n]
	}
	// Not compressible
	return dst[:dstP+emitLiteral(dst[dstP:], src)]
}
// EncodeBetter returns the encoded form of src. The returned slice may be a sub-
// slice of dst if dst was large enough to hold the entire encoded block.
// Otherwise, a newly allocated slice will be returned.
//
// EncodeBetter compresses better than Encode but typically with a
// 10-40% speed decrease on both compression and decompression.
//
// The dst and src must not overlap. It is valid to pass a nil dst.
//
// The blocks will require the same amount of memory to decode as encoding,
// and does not make for concurrent decoding.
// Also note that blocks do not contain CRC information, so corruption may be undetected.
//
// If you need to encode larger amounts of data, consider using
// the streaming interface which gives all of these features.
func (d *Dict) EncodeBetter(dst, src []byte) []byte {
	if n := MaxEncodedLen(len(src)); n < 0 {
		panic(ErrTooLarge)
	} else if cap(dst) < n {
		// Check capacity (not length) and reslice, consistent with
		// (*Dict).Encode: a dst passed as buf[:0] with sufficient capacity
		// is reused instead of forcing a fresh allocation.
		dst = make([]byte, n)
	} else {
		dst = dst[:n]
	}
	// The block starts with the varint-encoded length of the decompressed bytes.
	dstP := binary.PutUvarint(dst, uint64(len(src)))
	if len(src) == 0 {
		return dst[:dstP]
	}
	if len(src) < minNonLiteralBlockSize {
		// Too small for matching; emit as a single literal run.
		dstP += emitLiteral(dst[dstP:], src)
		return dst[:dstP]
	}
	n := encodeBlockBetterDict(dst[dstP:], src, d)
	if n > 0 {
		dstP += n
		return dst[:dstP]
	}
	// Not compressible
	dstP += emitLiteral(dst[dstP:], src)
	return dst[:dstP]
}
// EncodeBest returns the encoded form of src. The returned slice may be a sub-
// slice of dst if dst was large enough to hold the entire encoded block.
// Otherwise, a newly allocated slice will be returned.
//
// EncodeBest compresses as good as reasonably possible but with a
// big speed decrease.
//
// The dst and src must not overlap. It is valid to pass a nil dst.
//
// The blocks will require the same amount of memory to decode as encoding,
// and does not make for concurrent decoding.
// Also note that blocks do not contain CRC information, so corruption may be undetected.
//
// If you need to encode larger amounts of data, consider using
// the streaming interface which gives all of these features.
func (d *Dict) EncodeBest(dst, src []byte) []byte {
	if n := MaxEncodedLen(len(src)); n < 0 {
		panic(ErrTooLarge)
	} else if cap(dst) < n {
		dst = make([]byte, n)
	} else {
		// Reuse the caller's buffer. Checking capacity (not length), like
		// Encode does, honors the documented promise that a large-enough
		// dst is returned as a sub-slice.
		dst = dst[:n]
	}

	// The block starts with the varint-encoded length of the decompressed bytes.
	dstP := binary.PutUvarint(dst, uint64(len(src)))

	if len(src) == 0 {
		return dst[:dstP]
	}
	if len(src) < minNonLiteralBlockSize {
		// Too small for match searching; emit everything as literals.
		dstP += emitLiteral(dst[dstP:], src)
		return dst[:dstP]
	}
	n := encodeBlockBest(dst[dstP:], src, d)
	if n > 0 {
		dstP += n
		return dst[:dstP]
	}
	// Not compressible
	dstP += emitLiteral(dst[dstP:], src)
	return dst[:dstP]
}
// Decode returns the decoded form of src, using the dictionary.
// dst is reused when it has sufficient capacity; otherwise a new
// slice is allocated.
//
// dst and src must not overlap. A nil dst is valid.
func (d *Dict) Decode(dst, src []byte) ([]byte, error) {
	want, hdr, err := decodedLen(src)
	if err != nil {
		return nil, err
	}
	if cap(dst) >= want {
		dst = dst[:want]
	} else {
		dst = make([]byte, want)
	}
	if ret := s2DecodeDict(dst, src[hdr:], d); ret != 0 {
		return nil, ErrCorrupt
	}
	return dst, nil
}
// initFast lazily builds the hash table used by the fast dict encoder.
// Safe for concurrent callers via sync.Once.
func (d *Dict) initFast() {
	d.fast.Do(func() {
		const tableBits = 14
		var tbl [1 << tableBits]uint16

		// Stop early so a full 8-byte load is always valid.
		end := len(d.dict) - 8 - 2
		for pos := 0; pos < end; pos += 3 {
			v := load64(d.dict, pos)
			tbl[hash6(v, tableBits)] = uint16(pos)
			tbl[hash6(v>>8, tableBits)] = uint16(pos + 1)
			tbl[hash6(v>>16, tableBits)] = uint16(pos + 2)
		}
		d.fastTable = &tbl
	})
}
// initBetter lazily builds the long/short hash tables used by the
// "better" dict encoder. Safe for concurrent callers via sync.Once.
func (d *Dict) initBetter() {
	d.better.Do(func() {
		const (
			lTableBits = 17 // long-match hash table bits
			sTableBits = 14 // short-match hash table bits
		)
		var long [1 << lTableBits]uint16
		var short [1 << sTableBits]uint16

		// Stop early so a full 8-byte load is always valid.
		for pos := 0; pos < len(d.dict)-8; pos++ {
			v := load64(d.dict, pos)
			long[hash7(v, lTableBits)] = uint16(pos)
			short[hash4(v, sTableBits)] = uint16(pos)
		}
		d.betterTableShort = &short
		d.betterTableLong = &long
	})
}
// initBest lazily builds the long/short hash tables used by the "best"
// dict encoder. Each entry packs the newest position in the low 16 bits
// and the previous entry, shifted up, in the high bits, giving a short
// candidate history per bucket. Safe for concurrent callers via sync.Once.
func (d *Dict) initBest() {
	d.best.Do(func() {
		const (
			lTableBits = 19 // long-match hash table bits
			sTableBits = 16 // short-match hash table bits
		)
		var long [1 << lTableBits]uint32
		var short [1 << sTableBits]uint32

		// Stop early so a full 8-byte load is always valid.
		for pos := 0; pos < len(d.dict)-8; pos++ {
			v := load64(d.dict, pos)
			hl := hash8(v, lTableBits)
			hs := hash4(v, sTableBits)
			long[hl] = uint32(pos) | long[hl]<<16
			short[hs] = uint32(pos) | short[hs]<<16
		}
		d.bestTableShort = &short
		d.bestTableLong = &long
	})
}
// Copyright 2011 The Snappy-Go Authors. All rights reserved.
// Copyright (c) 2019 Klaus Post. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package s2
import (
"encoding/binary"
"math"
"math/bits"
"sync"
"github.com/klauspost/compress/internal/race"
)
// Encode returns src compressed as a single S2 block. When dst has
// enough capacity the result is written into it and a sub-slice is
// returned; otherwise a new buffer is allocated.
//
// dst and src must not overlap. A nil dst is valid.
//
// Decoding a block requires the same amount of memory as encoding it,
// and blocks cannot be decoded concurrently. Blocks carry no CRC, so
// corruption may go undetected.
//
// For larger amounts of data, consider the streaming interface, which
// provides all of these features.
func Encode(dst, src []byte) []byte {
	bound := MaxEncodedLen(len(src))
	if bound < 0 {
		panic(ErrTooLarge)
	}
	if cap(dst) >= bound {
		dst = dst[:bound]
	} else {
		dst = make([]byte, bound)
	}

	// Header: uvarint-encoded decompressed length.
	p := binary.PutUvarint(dst, uint64(len(src)))
	if len(src) == 0 {
		return dst[:p]
	}
	if len(src) < minNonLiteralBlockSize {
		// Too small for match searching; store as literals.
		return dst[:p+emitLiteral(dst[p:], src)]
	}
	if n := encodeBlock(dst[p:], src); n > 0 {
		return dst[:p+n]
	}
	// Incompressible: store as one literal run.
	return dst[:p+emitLiteral(dst[p:], src)]
}
// estblockPool holds scratch hash tables for EstimateBlockSize:
// index 0 for small inputs (<= 1024 bytes), index 1 for larger ones.
var estblockPool [2]sync.Pool

// EstimateBlockSize will perform a very fast compression
// without outputting the result and return the compressed output size.
// The function returns -1 if no improvement could be achieved.
// Using actual compression will most often produce better compression than the estimate.
func EstimateBlockSize(src []byte) (d int) {
	if len(src) <= inputMargin || int64(len(src)) > 0xffffffff {
		return -1
	}
	if len(src) <= 1024 {
		const sz, pool = 2048, 0
		// Reuse a pooled scratch table when available; allocate otherwise.
		tmp, ok := estblockPool[pool].Get().(*[sz]byte)
		if !ok {
			tmp = &[sz]byte{}
		}
		// Race-detector annotation for the shared scratch buffer.
		race.WriteSlice(tmp[:])
		defer estblockPool[pool].Put(tmp)
		d = calcBlockSizeSmall(src, tmp)
	} else {
		const sz, pool = 32768, 1
		// Reuse a pooled scratch table when available; allocate otherwise.
		tmp, ok := estblockPool[pool].Get().(*[sz]byte)
		if !ok {
			tmp = &[sz]byte{}
		}
		// Race-detector annotation for the shared scratch buffer.
		race.WriteSlice(tmp[:])
		defer estblockPool[pool].Put(tmp)
		d = calcBlockSize(src, tmp)
	}
	if d == 0 {
		return -1
	}
	// Size of the varint encoded block size.
	d += (bits.Len64(uint64(len(src))) + 7) / 7
	if d >= len(src) {
		return -1
	}
	return d
}
// EncodeBetter returns the encoded form of src. The returned slice may be a sub-
// slice of dst if dst was large enough to hold the entire encoded block.
// Otherwise, a newly allocated slice will be returned.
//
// EncodeBetter compresses better than Encode but typically with a
// 10-40% speed decrease on both compression and decompression.
//
// The dst and src must not overlap. It is valid to pass a nil dst.
//
// The blocks will require the same amount of memory to decode as encoding,
// and does not make for concurrent decoding.
// Also note that blocks do not contain CRC information, so corruption may be undetected.
//
// If you need to encode larger amounts of data, consider using
// the streaming interface which gives all of these features.
func EncodeBetter(dst, src []byte) []byte {
	if n := MaxEncodedLen(len(src)); n < 0 {
		panic(ErrTooLarge)
	} else if cap(dst) < n {
		dst = make([]byte, n)
	} else {
		// Reuse the caller's buffer. Checking capacity (not length), like
		// Encode does, honors the documented promise that a large-enough
		// dst is returned as a sub-slice.
		dst = dst[:n]
	}

	// The block starts with the varint-encoded length of the decompressed bytes.
	d := binary.PutUvarint(dst, uint64(len(src)))

	if len(src) == 0 {
		return dst[:d]
	}
	if len(src) < minNonLiteralBlockSize {
		// Too small for match searching; emit everything as literals.
		d += emitLiteral(dst[d:], src)
		return dst[:d]
	}
	n := encodeBlockBetter(dst[d:], src)
	if n > 0 {
		d += n
		return dst[:d]
	}
	// Not compressible
	d += emitLiteral(dst[d:], src)
	return dst[:d]
}
// EncodeBest returns the encoded form of src. The returned slice may be a sub-
// slice of dst if dst was large enough to hold the entire encoded block.
// Otherwise, a newly allocated slice will be returned.
//
// EncodeBest compresses as good as reasonably possible but with a
// big speed decrease.
//
// The dst and src must not overlap. It is valid to pass a nil dst.
//
// The blocks will require the same amount of memory to decode as encoding,
// and does not make for concurrent decoding.
// Also note that blocks do not contain CRC information, so corruption may be undetected.
//
// If you need to encode larger amounts of data, consider using
// the streaming interface which gives all of these features.
func EncodeBest(dst, src []byte) []byte {
	if n := MaxEncodedLen(len(src)); n < 0 {
		panic(ErrTooLarge)
	} else if cap(dst) < n {
		dst = make([]byte, n)
	} else {
		// Reuse the caller's buffer. Checking capacity (not length), like
		// Encode does, honors the documented promise that a large-enough
		// dst is returned as a sub-slice.
		dst = dst[:n]
	}

	// The block starts with the varint-encoded length of the decompressed bytes.
	d := binary.PutUvarint(dst, uint64(len(src)))

	if len(src) == 0 {
		return dst[:d]
	}
	if len(src) < minNonLiteralBlockSize {
		// Too small for match searching; emit everything as literals.
		d += emitLiteral(dst[d:], src)
		return dst[:d]
	}
	n := encodeBlockBest(dst[d:], src, nil)
	if n > 0 {
		d += n
		return dst[:d]
	}
	// Not compressible
	d += emitLiteral(dst[d:], src)
	return dst[:d]
}
// EncodeSnappy returns src compressed as a single Snappy-compatible
// block, which will likely decompress faster. When dst has enough
// capacity the result is written into it and a sub-slice is returned;
// otherwise a new buffer is allocated.
//
// dst and src must not overlap. A nil dst is valid.
//
// Decoding a block requires the same amount of memory as encoding it,
// and blocks cannot be decoded concurrently. Blocks carry no CRC, so
// corruption may go undetected.
//
// For larger amounts of data, consider the streaming interface, which
// provides all of these features.
func EncodeSnappy(dst, src []byte) []byte {
	bound := MaxEncodedLen(len(src))
	if bound < 0 {
		panic(ErrTooLarge)
	}
	if cap(dst) >= bound {
		dst = dst[:bound]
	} else {
		dst = make([]byte, bound)
	}

	// Header: uvarint-encoded decompressed length.
	p := binary.PutUvarint(dst, uint64(len(src)))
	if len(src) == 0 {
		return dst[:p]
	}
	if len(src) < minNonLiteralBlockSize {
		// Too small for match searching; store as literals.
		return dst[:p+emitLiteral(dst[p:], src)]
	}
	if n := encodeBlockSnappy(dst[p:], src); n > 0 {
		return dst[:p+n]
	}
	// Incompressible: store as one literal run.
	return dst[:p+emitLiteral(dst[p:], src)]
}
// EncodeSnappyBetter returns src compressed as a single Snappy-compatible
// block, trading encode speed for a better ratio than EncodeSnappy.
// When dst has enough capacity the result is written into it and a
// sub-slice is returned; otherwise a new buffer is allocated.
//
// dst and src must not overlap. A nil dst is valid.
//
// Decoding a block requires the same amount of memory as encoding it,
// and blocks cannot be decoded concurrently. Blocks carry no CRC, so
// corruption may go undetected.
//
// For larger amounts of data, consider the streaming interface, which
// provides all of these features.
func EncodeSnappyBetter(dst, src []byte) []byte {
	bound := MaxEncodedLen(len(src))
	if bound < 0 {
		panic(ErrTooLarge)
	}
	if cap(dst) >= bound {
		dst = dst[:bound]
	} else {
		dst = make([]byte, bound)
	}

	// Header: uvarint-encoded decompressed length.
	p := binary.PutUvarint(dst, uint64(len(src)))
	if len(src) == 0 {
		return dst[:p]
	}
	if len(src) < minNonLiteralBlockSize {
		// Too small for match searching; store as literals.
		return dst[:p+emitLiteral(dst[p:], src)]
	}
	if n := encodeBlockBetterSnappy(dst[p:], src); n > 0 {
		return dst[:p+n]
	}
	// Incompressible: store as one literal run.
	return dst[:p+emitLiteral(dst[p:], src)]
}
// EncodeSnappyBest returns src compressed as a single Snappy-compatible
// block, compressing as well as reasonably possible at a large speed
// cost. When dst has enough capacity the result is written into it and
// a sub-slice is returned; otherwise a new buffer is allocated.
//
// dst and src must not overlap. A nil dst is valid.
//
// Decoding a block requires the same amount of memory as encoding it,
// and blocks cannot be decoded concurrently. Blocks carry no CRC, so
// corruption may go undetected.
//
// For larger amounts of data, consider the streaming interface, which
// provides all of these features.
func EncodeSnappyBest(dst, src []byte) []byte {
	bound := MaxEncodedLen(len(src))
	if bound < 0 {
		panic(ErrTooLarge)
	}
	if cap(dst) >= bound {
		dst = dst[:bound]
	} else {
		dst = make([]byte, bound)
	}

	// Header: uvarint-encoded decompressed length.
	p := binary.PutUvarint(dst, uint64(len(src)))
	if len(src) == 0 {
		return dst[:p]
	}
	if len(src) < minNonLiteralBlockSize {
		// Too small for match searching; store as literals.
		return dst[:p+emitLiteral(dst[p:], src)]
	}
	if n := encodeBlockBestSnappy(dst[p:], src); n > 0 {
		return dst[:p+n]
	}
	// Incompressible: store as one literal run.
	return dst[:p+emitLiteral(dst[p:], src)]
}
// ConcatBlocks concatenates the supplied blocks and appends the result to dst.
// If dst is nil or lacks capacity, a new buffer is allocated.
// The blocks are not validated, so garbage in = garbage out.
// dst may not overlap any block data.
// Existing bytes in dst are preserved as-is and are not treated as a block.
func ConcatBlocks(dst []byte, blocks ...[]byte) ([]byte, error) {
	var decoded uint64
	payload := 0
	for _, blk := range blocks {
		n, hdr, err := decodedLen(blk)
		if err != nil {
			return nil, err
		}
		decoded += uint64(n)
		payload += len(blk) - hdr
	}
	if decoded == 0 {
		// Everything decodes to nothing: a single zero byte is the
		// header of an empty block.
		return append(dst, 0), nil
	}
	if decoded > math.MaxUint32 {
		return nil, ErrTooLarge
	}

	// New combined header: uvarint of the total decoded size.
	var hdrBuf [binary.MaxVarintLen32]byte
	hdrLen := binary.PutUvarint(hdrBuf[:], decoded)

	// Grow dst once, up front, if the spare capacity is insufficient.
	need := hdrLen + payload
	if cap(dst)-len(dst) < need {
		grown := make([]byte, 0, need+len(dst))
		dst = append(grown, dst...)
	}
	dst = append(dst, hdrBuf[:hdrLen]...)
	for _, blk := range blocks {
		_, hdr, err := decodedLen(blk)
		if err != nil {
			return nil, err
		}
		// Append the payload, skipping each block's own length header.
		dst = append(dst, blk[hdr:]...)
	}
	return dst, nil
}
// inputMargin is the minimum number of extra input bytes to keep, inside
// encodeBlock's inner loop. On some architectures, this margin lets us
// implement a fast path for emitLiteral, where the copy of short (<= 16 byte)
// literals can be implemented as a single load to and store from a 16-byte
// register. That literal's actual length can be as short as 1 byte, so this
// can copy up to 15 bytes too much, but that's OK as subsequent iterations of
// the encoding loop will fix up the copy overrun, and this inputMargin ensures
// that we don't overrun the dst and src buffers.
const inputMargin = 8

// minNonLiteralBlockSize is the minimum size of the input to encodeBlock that
// will be accepted by the encoder.
// Smaller inputs are emitted as a single literal run instead.
const minNonLiteralBlockSize = 32

// intReduction is 1 on 32-bit platforms and 0 on 64-bit platforms:
// (^uint(0) >> 63) evaluates to 1 only when uint is 64 bits wide.
const intReduction = 2 - (1 << (^uint(0) >> 63)) // 1 (32 bits) or 0 (64 bits)

// MaxBlockSize is the maximum value where MaxEncodedLen will return a valid block size.
// Blocks this big are highly discouraged, though.
// Half the size on 32 bit systems.
const MaxBlockSize = (1<<(32-intReduction) - 1) - binary.MaxVarintLen32 - 5
// MaxEncodedLen returns the maximum length of a snappy block, given its
// uncompressed length.
//
// It returns a negative value if srcLen is too large to encode.
// 32 bit platforms have lower thresholds for rejecting big content.
func MaxEncodedLen(srcLen int) int {
	n := uint64(srcLen)
	// Reject inputs beyond the platform limit; a negative srcLen wraps
	// to a huge uint64 and is rejected here too.
	if intReduction == 1 {
		// 32 bits
		if n > math.MaxInt32 {
			return -1
		}
	} else if n > 0xffffffff {
		// 64 bits
		return -1
	}
	// Add the size of the uvarint length header...
	n += uint64((bits.Len64(n) + 7) / 7)
	// ...and the worst-case overhead of encoding the block as literals.
	n += uint64(literalExtraSize(int64(srcLen)))
	// Re-check the platform limit now that overhead is included.
	if intReduction == 1 {
		// 32 bits
		if n > math.MaxInt32 {
			return -1
		}
	} else if n > 0xffffffff {
		// 64 bits
		return -1
	}
	return int(n)
}
// Copyright 2016 The Snappy-Go Authors. All rights reserved.
// Copyright (c) 2019 Klaus Post. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package s2
import (
"bytes"
"encoding/binary"
"fmt"
"math/bits"
"github.com/klauspost/compress/internal/le"
)
// load32 returns the 4 bytes at b[i:] decoded via le.Load32 (little-endian).
func load32(b []byte, i int) uint32 {
	return le.Load32(b, i)
}
// load64 returns the 8 bytes at b[i:] decoded via le.Load64 (little-endian).
func load64(b []byte, i int) uint64 {
	return le.Load64(b, i)
}
// hash6 returns the hash of the lowest 6 bytes of u to fit in a hash table with h bits.
// Preferably h should be a constant and should always be <64.
func hash6(u uint64, h uint8) uint32 {
	const prime6bytes = 227718039650203
	// Shift the two high bytes out, multiply, then keep the top h bits.
	v := (u << 16) * prime6bytes
	return uint32(v >> ((64 - h) & 63))
}
// encodeGo compresses src into dst using the pure Go block encoders,
// picking the 64KB specialization when the input fits in it.
// The returned slice may alias dst.
func encodeGo(dst, src []byte) []byte {
	maxLen := MaxEncodedLen(len(src))
	if maxLen < 0 {
		panic(ErrTooLarge)
	}
	if len(dst) < maxLen {
		dst = make([]byte, maxLen)
	}

	// Header: uvarint-encoded decompressed length.
	p := binary.PutUvarint(dst, uint64(len(src)))
	if len(src) == 0 {
		return dst[:p]
	}
	if len(src) < minNonLiteralBlockSize {
		// Too small for match searching; store as literals.
		return dst[:p+emitLiteral(dst[p:], src)]
	}
	var written int
	if len(src) < 64<<10 {
		written = encodeBlockGo64K(dst[p:], src)
	} else {
		written = encodeBlockGo(dst[p:], src)
	}
	if written > 0 {
		return dst[:p+written]
	}
	// Incompressible: store as one literal run.
	return dst[:p+emitLiteral(dst[p:], src)]
}
// encodeBlockGo encodes a non-empty src to a guaranteed-large-enough dst. It
// assumes that the varint-encoded length of the decompressed bytes has already
// been written.
//
// It also assumes that:
//
//	len(dst) >= MaxEncodedLen(len(src)) &&
//	minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
//
// Returns 0 when the output would not fit under dstLimit (input deemed
// incompressible); the caller then emits the input as literals instead.
func encodeBlockGo(dst, src []byte) (d int) {
	// Initialize the hash table.
	const (
		tableBits    = 14
		maxTableSize = 1 << tableBits
		debug        = false
	)

	var table [maxTableSize]uint32

	// sLimit is when to stop looking for offset/length copies. The inputMargin
	// lets us use a fast path for emitLiteral in the main loop, while we are
	// looking for copies.
	sLimit := len(src) - inputMargin

	// Bail if we can't compress to at least this.
	dstLimit := len(src) - len(src)>>5 - 5

	// nextEmit is where in src the next emitLiteral should start from.
	nextEmit := 0

	// The encoded form must start with a literal, as there are no previous
	// bytes to copy, so we start looking for hash matches at s == 1.
	s := 1
	cv := load64(src, s)

	// We search for a repeat at -1, but don't output repeats when nextEmit == 0
	repeat := 1

	for {
		candidate := 0
		for {
			// Next src position to check.
			// Skip ahead faster the longer we go without finding a match.
			nextS := s + (s-nextEmit)>>6 + 4
			if nextS > sLimit {
				goto emitRemainder
			}
			// Hash the current position at three adjacent offsets.
			hash0 := hash6(cv, tableBits)
			hash1 := hash6(cv>>8, tableBits)
			candidate = int(table[hash0])
			candidate2 := int(table[hash1])
			table[hash0] = uint32(s)
			table[hash1] = uint32(s + 1)
			hash2 := hash6(cv>>16, tableBits)

			// Check repeat at offset checkRep.
			const checkRep = 1
			if uint32(cv>>(checkRep*8)) == load32(src, s-repeat+checkRep) {
				base := s + checkRep

				// Extend back
				for i := base - repeat; base > nextEmit && i > 0 && src[i-1] == src[base-1]; {
					i--
					base--
				}
				// Bail if we exceed the maximum size.
				if d+(base-nextEmit) > dstLimit {
					return 0
				}
				d += emitLiteral(dst[d:], src[nextEmit:base])

				// Extend forward
				candidate := s - repeat + 4 + checkRep
				s += 4 + checkRep
				for s <= sLimit {
					// Compare 8 bytes at a time; the xor pinpoints the first
					// differing byte via its trailing zero count.
					if diff := load64(src, s) ^ load64(src, candidate); diff != 0 {
						s += bits.TrailingZeros64(diff) >> 3
						break
					}
					s += 8
					candidate += 8
				}
				if debug {
					// Validate match.
					if s <= candidate {
						panic("s <= candidate")
					}
					a := src[base:s]
					b := src[base-repeat : base-repeat+(s-base)]
					if !bytes.Equal(a, b) {
						panic("mismatch")
					}
				}
				if nextEmit > 0 {
					// same as `add := emitCopy(dst[d:], repeat, s-base)` but skips storing offset.
					d += emitRepeat(dst[d:], repeat, s-base)
				} else {
					// First match, cannot be repeat.
					d += emitCopy(dst[d:], repeat, s-base)
				}
				nextEmit = s
				if s >= sLimit {
					goto emitRemainder
				}

				cv = load64(src, s)
				continue
			}

			// Try the three hashed candidates in order.
			if uint32(cv) == load32(src, candidate) {
				break
			}
			candidate = int(table[hash2])
			if uint32(cv>>8) == load32(src, candidate2) {
				table[hash2] = uint32(s + 2)
				candidate = candidate2
				s++
				break
			}
			table[hash2] = uint32(s + 2)
			if uint32(cv>>16) == load32(src, candidate) {
				s += 2
				break
			}

			cv = load64(src, nextS)
			s = nextS
		}

		// Extend backwards.
		// The top bytes will be rechecked to get the full match.
		for candidate > 0 && s > nextEmit && src[candidate-1] == src[s-1] {
			candidate--
			s--
		}

		// Bail if we exceed the maximum size.
		if d+(s-nextEmit) > dstLimit {
			return 0
		}

		// A 4-byte match has been found. We'll later see if more than 4 bytes
		// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
		// them as literal bytes.
		d += emitLiteral(dst[d:], src[nextEmit:s])

		// Call emitCopy, and then see if another emitCopy could be our next
		// move. Repeat until we find no match for the input immediately after
		// what was consumed by the last emitCopy call.
		//
		// If we exit this loop normally then we need to call emitLiteral next,
		// though we don't yet know how big the literal will be. We handle that
		// by proceeding to the next iteration of the main loop. We also can
		// exit this loop via goto if we get close to exhausting the input.
		for {
			// Invariant: we have a 4-byte match at s, and no need to emit any
			// literal bytes prior to s.
			base := s
			repeat = base - candidate

			// Extend the 4-byte match as long as possible.
			s += 4
			candidate += 4
			for s <= len(src)-8 {
				if diff := load64(src, s) ^ load64(src, candidate); diff != 0 {
					s += bits.TrailingZeros64(diff) >> 3
					break
				}
				s += 8
				candidate += 8
			}

			d += emitCopy(dst[d:], repeat, s-base)
			if debug {
				// Validate match.
				if s <= candidate {
					panic("s <= candidate")
				}
				a := src[base:s]
				b := src[base-repeat : base-repeat+(s-base)]
				if !bytes.Equal(a, b) {
					panic("mismatch")
				}
			}

			nextEmit = s
			if s >= sLimit {
				goto emitRemainder
			}

			if d > dstLimit {
				// Do we have space for more, if not bail.
				return 0
			}
			// Check for an immediate match, otherwise start search at s+1
			x := load64(src, s-2)
			m2Hash := hash6(x, tableBits)
			currHash := hash6(x>>16, tableBits)
			candidate = int(table[currHash])
			table[m2Hash] = uint32(s - 2)
			table[currHash] = uint32(s)
			if debug && s == candidate {
				panic("s == candidate")
			}
			if uint32(x>>16) != load32(src, candidate) {
				cv = load64(src, s+1)
				s++
				break
			}
		}
	}

emitRemainder:
	if nextEmit < len(src) {
		// Bail if we exceed the maximum size.
		if d+len(src)-nextEmit > dstLimit {
			return 0
		}
		d += emitLiteral(dst[d:], src[nextEmit:])
	}
	return d
}
// encodeBlockGo64K is a specialized version for compressing blocks <= 64KB
// It stores positions in a uint16 hash table (they fit, as len(src) <= 64KB)
// and skips ahead more aggressively than encodeBlockGo (>>5 instead of >>6).
// Same contract as encodeBlockGo: returns 0 when the input is incompressible.
func encodeBlockGo64K(dst, src []byte) (d int) {
	// Initialize the hash table.
	const (
		tableBits    = 14
		maxTableSize = 1 << tableBits
		debug        = false
	)

	var table [maxTableSize]uint16

	// sLimit is when to stop looking for offset/length copies. The inputMargin
	// lets us use a fast path for emitLiteral in the main loop, while we are
	// looking for copies.
	sLimit := len(src) - inputMargin

	// Bail if we can't compress to at least this.
	dstLimit := len(src) - len(src)>>5 - 5

	// nextEmit is where in src the next emitLiteral should start from.
	nextEmit := 0

	// The encoded form must start with a literal, as there are no previous
	// bytes to copy, so we start looking for hash matches at s == 1.
	s := 1
	cv := load64(src, s)

	// We search for a repeat at -1, but don't output repeats when nextEmit == 0
	repeat := 1

	for {
		candidate := 0
		for {
			// Next src position to check.
			// Skip ahead faster the longer we go without finding a match.
			nextS := s + (s-nextEmit)>>5 + 4
			if nextS > sLimit {
				goto emitRemainder
			}
			// Hash the current position at three adjacent offsets.
			hash0 := hash6(cv, tableBits)
			hash1 := hash6(cv>>8, tableBits)
			candidate = int(table[hash0])
			candidate2 := int(table[hash1])
			table[hash0] = uint16(s)
			table[hash1] = uint16(s + 1)
			hash2 := hash6(cv>>16, tableBits)

			// Check repeat at offset checkRep.
			const checkRep = 1
			if uint32(cv>>(checkRep*8)) == load32(src, s-repeat+checkRep) {
				base := s + checkRep

				// Extend back
				for i := base - repeat; base > nextEmit && i > 0 && src[i-1] == src[base-1]; {
					i--
					base--
				}
				// Bail if we exceed the maximum size.
				if d+(base-nextEmit) > dstLimit {
					return 0
				}
				d += emitLiteral(dst[d:], src[nextEmit:base])

				// Extend forward
				candidate := s - repeat + 4 + checkRep
				s += 4 + checkRep
				for s <= sLimit {
					// Compare 8 bytes at a time; the xor pinpoints the first
					// differing byte via its trailing zero count.
					if diff := load64(src, s) ^ load64(src, candidate); diff != 0 {
						s += bits.TrailingZeros64(diff) >> 3
						break
					}
					s += 8
					candidate += 8
				}
				if debug {
					// Validate match.
					if s <= candidate {
						panic("s <= candidate")
					}
					a := src[base:s]
					b := src[base-repeat : base-repeat+(s-base)]
					if !bytes.Equal(a, b) {
						panic("mismatch")
					}
				}
				if nextEmit > 0 {
					// same as `add := emitCopy(dst[d:], repeat, s-base)` but skips storing offset.
					d += emitRepeat(dst[d:], repeat, s-base)
				} else {
					// First match, cannot be repeat.
					d += emitCopy(dst[d:], repeat, s-base)
				}
				nextEmit = s
				if s >= sLimit {
					goto emitRemainder
				}

				cv = load64(src, s)
				continue
			}

			// Try the three hashed candidates in order.
			if uint32(cv) == load32(src, candidate) {
				break
			}
			candidate = int(table[hash2])
			if uint32(cv>>8) == load32(src, candidate2) {
				table[hash2] = uint16(s + 2)
				candidate = candidate2
				s++
				break
			}
			table[hash2] = uint16(s + 2)
			if uint32(cv>>16) == load32(src, candidate) {
				s += 2
				break
			}

			cv = load64(src, nextS)
			s = nextS
		}

		// Extend backwards.
		// The top bytes will be rechecked to get the full match.
		for candidate > 0 && s > nextEmit && src[candidate-1] == src[s-1] {
			candidate--
			s--
		}

		// Bail if we exceed the maximum size.
		if d+(s-nextEmit) > dstLimit {
			return 0
		}

		// A 4-byte match has been found. We'll later see if more than 4 bytes
		// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
		// them as literal bytes.
		d += emitLiteral(dst[d:], src[nextEmit:s])

		// Call emitCopy, and then see if another emitCopy could be our next
		// move. Repeat until we find no match for the input immediately after
		// what was consumed by the last emitCopy call.
		//
		// If we exit this loop normally then we need to call emitLiteral next,
		// though we don't yet know how big the literal will be. We handle that
		// by proceeding to the next iteration of the main loop. We also can
		// exit this loop via goto if we get close to exhausting the input.
		for {
			// Invariant: we have a 4-byte match at s, and no need to emit any
			// literal bytes prior to s.
			base := s
			repeat = base - candidate

			// Extend the 4-byte match as long as possible.
			s += 4
			candidate += 4
			for s <= len(src)-8 {
				if diff := load64(src, s) ^ load64(src, candidate); diff != 0 {
					s += bits.TrailingZeros64(diff) >> 3
					break
				}
				s += 8
				candidate += 8
			}

			d += emitCopy(dst[d:], repeat, s-base)
			if debug {
				// Validate match.
				if s <= candidate {
					panic("s <= candidate")
				}
				a := src[base:s]
				b := src[base-repeat : base-repeat+(s-base)]
				if !bytes.Equal(a, b) {
					panic("mismatch")
				}
			}

			nextEmit = s
			if s >= sLimit {
				goto emitRemainder
			}

			if d > dstLimit {
				// Do we have space for more, if not bail.
				return 0
			}
			// Check for an immediate match, otherwise start search at s+1
			x := load64(src, s-2)
			m2Hash := hash6(x, tableBits)
			currHash := hash6(x>>16, tableBits)
			candidate = int(table[currHash])
			table[m2Hash] = uint16(s - 2)
			table[currHash] = uint16(s)
			if debug && s == candidate {
				panic("s == candidate")
			}
			if uint32(x>>16) != load32(src, candidate) {
				cv = load64(src, s+1)
				s++
				break
			}
		}
	}

emitRemainder:
	if nextEmit < len(src) {
		// Bail if we exceed the maximum size.
		if d+len(src)-nextEmit > dstLimit {
			return 0
		}
		d += emitLiteral(dst[d:], src[nextEmit:])
	}
	return d
}
// encodeBlockSnappyGo encodes a non-empty src to a guaranteed-large-enough
// dst, emitting copies via emitCopyNoRepeat only (no repeat codes), which
// keeps the output Snappy-compatible. It assumes the varint-encoded length
// of the decompressed bytes has already been written.
// Returns 0 when the output would not fit under dstLimit (input deemed
// incompressible); the caller then emits the input as literals instead.
func encodeBlockSnappyGo(dst, src []byte) (d int) {
	// Initialize the hash table.
	const (
		tableBits    = 14
		maxTableSize = 1 << tableBits
	)

	var table [maxTableSize]uint32

	// sLimit is when to stop looking for offset/length copies. The inputMargin
	// lets us use a fast path for emitLiteral in the main loop, while we are
	// looking for copies.
	sLimit := len(src) - inputMargin

	// Bail if we can't compress to at least this.
	dstLimit := len(src) - len(src)>>5 - 5

	// nextEmit is where in src the next emitLiteral should start from.
	nextEmit := 0

	// The encoded form must start with a literal, as there are no previous
	// bytes to copy, so we start looking for hash matches at s == 1.
	s := 1
	cv := load64(src, s)

	// We search for a repeat at -1, but don't output repeats when nextEmit == 0
	repeat := 1

	for {
		candidate := 0
		for {
			// Next src position to check.
			// Skip ahead faster the longer we go without finding a match.
			nextS := s + (s-nextEmit)>>6 + 4
			if nextS > sLimit {
				goto emitRemainder
			}
			// Hash the current position at three adjacent offsets.
			hash0 := hash6(cv, tableBits)
			hash1 := hash6(cv>>8, tableBits)
			candidate = int(table[hash0])
			candidate2 := int(table[hash1])
			table[hash0] = uint32(s)
			table[hash1] = uint32(s + 1)
			hash2 := hash6(cv>>16, tableBits)

			// Check repeat at offset checkRep.
			const checkRep = 1
			if uint32(cv>>(checkRep*8)) == load32(src, s-repeat+checkRep) {
				base := s + checkRep

				// Extend back
				for i := base - repeat; base > nextEmit && i > 0 && src[i-1] == src[base-1]; {
					i--
					base--
				}
				// Bail if we exceed the maximum size.
				if d+(base-nextEmit) > dstLimit {
					return 0
				}
				d += emitLiteral(dst[d:], src[nextEmit:base])

				// Extend forward
				candidate := s - repeat + 4 + checkRep
				s += 4 + checkRep
				for s <= sLimit {
					// Compare 8 bytes at a time; the xor pinpoints the first
					// differing byte via its trailing zero count.
					if diff := load64(src, s) ^ load64(src, candidate); diff != 0 {
						s += bits.TrailingZeros64(diff) >> 3
						break
					}
					s += 8
					candidate += 8
				}

				d += emitCopyNoRepeat(dst[d:], repeat, s-base)
				nextEmit = s
				if s >= sLimit {
					goto emitRemainder
				}

				cv = load64(src, s)
				continue
			}

			// Try the three hashed candidates in order.
			if uint32(cv) == load32(src, candidate) {
				break
			}
			candidate = int(table[hash2])
			if uint32(cv>>8) == load32(src, candidate2) {
				table[hash2] = uint32(s + 2)
				candidate = candidate2
				s++
				break
			}
			table[hash2] = uint32(s + 2)
			if uint32(cv>>16) == load32(src, candidate) {
				s += 2
				break
			}

			cv = load64(src, nextS)
			s = nextS
		}

		// Extend backwards
		for candidate > 0 && s > nextEmit && src[candidate-1] == src[s-1] {
			candidate--
			s--
		}

		// Bail if we exceed the maximum size.
		if d+(s-nextEmit) > dstLimit {
			return 0
		}

		// A 4-byte match has been found. We'll later see if more than 4 bytes
		// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
		// them as literal bytes.
		d += emitLiteral(dst[d:], src[nextEmit:s])

		// Call emitCopy, and then see if another emitCopy could be our next
		// move. Repeat until we find no match for the input immediately after
		// what was consumed by the last emitCopy call.
		//
		// If we exit this loop normally then we need to call emitLiteral next,
		// though we don't yet know how big the literal will be. We handle that
		// by proceeding to the next iteration of the main loop. We also can
		// exit this loop via goto if we get close to exhausting the input.
		for {
			// Invariant: we have a 4-byte match at s, and no need to emit any
			// literal bytes prior to s.
			base := s
			repeat = base - candidate

			// Extend the 4-byte match as long as possible.
			s += 4
			candidate += 4
			for s <= len(src)-8 {
				if diff := load64(src, s) ^ load64(src, candidate); diff != 0 {
					s += bits.TrailingZeros64(diff) >> 3
					break
				}
				s += 8
				candidate += 8
			}

			d += emitCopyNoRepeat(dst[d:], repeat, s-base)
			if false {
				// Validate match.
				a := src[base:s]
				b := src[base-repeat : base-repeat+(s-base)]
				if !bytes.Equal(a, b) {
					panic("mismatch")
				}
			}

			nextEmit = s
			if s >= sLimit {
				goto emitRemainder
			}

			if d > dstLimit {
				// Do we have space for more, if not bail.
				return 0
			}
			// Check for an immediate match, otherwise start search at s+1
			x := load64(src, s-2)
			m2Hash := hash6(x, tableBits)
			currHash := hash6(x>>16, tableBits)
			candidate = int(table[currHash])
			table[m2Hash] = uint32(s - 2)
			table[currHash] = uint32(s)
			if uint32(x>>16) != load32(src, candidate) {
				cv = load64(src, s+1)
				s++
				break
			}
		}
	}

emitRemainder:
	if nextEmit < len(src) {
		// Bail if we exceed the maximum size.
		if d+len(src)-nextEmit > dstLimit {
			return 0
		}
		d += emitLiteral(dst[d:], src[nextEmit:])
	}
	return d
}
// encodeBlockSnappyGo64K is a special version of encodeBlockSnappyGo for sizes <64KB.
// Because every position fits in 16 bits, the hash table stores uint16 entries,
// halving its memory footprint compared to the uint32 table of the generic version.
func encodeBlockSnappyGo64K(dst, src []byte) (d int) {
	// Initialize the hash table.
	const (
		tableBits    = 14
		maxTableSize = 1 << tableBits
	)

	// uint16 positions are sufficient since all offsets are < 64K.
	var table [maxTableSize]uint16

	// sLimit is when to stop looking for offset/length copies. The inputMargin
	// lets us use a fast path for emitLiteral in the main loop, while we are
	// looking for copies.
	sLimit := len(src) - inputMargin

	// Bail if we can't compress to at least this.
	dstLimit := len(src) - len(src)>>5 - 5

	// nextEmit is where in src the next emitLiteral should start from.
	nextEmit := 0

	// The encoded form must start with a literal, as there are no previous
	// bytes to copy, so we start looking for hash matches at s == 1.
	s := 1
	cv := load64(src, s)

	// We search for a repeat at -1, but don't output repeats when nextEmit == 0
	repeat := 1

	for {
		candidate := 0
		for {
			// Next src position to check
			nextS := s + (s-nextEmit)>>5 + 4
			if nextS > sLimit {
				goto emitRemainder
			}
			hash0 := hash6(cv, tableBits)
			hash1 := hash6(cv>>8, tableBits)
			candidate = int(table[hash0])
			candidate2 := int(table[hash1])
			table[hash0] = uint16(s)
			table[hash1] = uint16(s + 1)
			hash2 := hash6(cv>>16, tableBits)

			// Check repeat at offset checkRep.
			const checkRep = 1
			if uint32(cv>>(checkRep*8)) == load32(src, s-repeat+checkRep) {
				base := s + checkRep
				// Extend back
				for i := base - repeat; base > nextEmit && i > 0 && src[i-1] == src[base-1]; {
					i--
					base--
				}
				// Bail if we exceed the maximum size.
				if d+(base-nextEmit) > dstLimit {
					return 0
				}
				d += emitLiteral(dst[d:], src[nextEmit:base])

				// Extend forward
				candidate := s - repeat + 4 + checkRep
				s += 4 + checkRep
				for s <= sLimit {
					if diff := load64(src, s) ^ load64(src, candidate); diff != 0 {
						s += bits.TrailingZeros64(diff) >> 3
						break
					}
					s += 8
					candidate += 8
				}

				// Snappy-compatible output has no repeat codes, so a full copy
				// is emitted even for a repeated offset.
				d += emitCopyNoRepeat(dst[d:], repeat, s-base)
				nextEmit = s
				if s >= sLimit {
					goto emitRemainder
				}

				cv = load64(src, s)
				continue
			}

			if uint32(cv) == load32(src, candidate) {
				break
			}
			candidate = int(table[hash2])
			if uint32(cv>>8) == load32(src, candidate2) {
				table[hash2] = uint16(s + 2)
				candidate = candidate2
				s++
				break
			}
			table[hash2] = uint16(s + 2)
			if uint32(cv>>16) == load32(src, candidate) {
				s += 2
				break
			}

			cv = load64(src, nextS)
			s = nextS
		}

		// Extend backwards
		for candidate > 0 && s > nextEmit && src[candidate-1] == src[s-1] {
			candidate--
			s--
		}

		// Bail if we exceed the maximum size.
		if d+(s-nextEmit) > dstLimit {
			return 0
		}

		// A 4-byte match has been found. We'll later see if more than 4 bytes
		// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
		// them as literal bytes.
		d += emitLiteral(dst[d:], src[nextEmit:s])

		// Call emitCopy, and then see if another emitCopy could be our next
		// move. Repeat until we find no match for the input immediately after
		// what was consumed by the last emitCopy call.
		//
		// If we exit this loop normally then we need to call emitLiteral next,
		// though we don't yet know how big the literal will be. We handle that
		// by proceeding to the next iteration of the main loop. We also can
		// exit this loop via goto if we get close to exhausting the input.
		for {
			// Invariant: we have a 4-byte match at s, and no need to emit any
			// literal bytes prior to s.
			base := s
			repeat = base - candidate

			// Extend the 4-byte match as long as possible.
			s += 4
			candidate += 4
			for s <= len(src)-8 {
				if diff := load64(src, s) ^ load64(src, candidate); diff != 0 {
					s += bits.TrailingZeros64(diff) >> 3
					break
				}
				s += 8
				candidate += 8
			}

			d += emitCopyNoRepeat(dst[d:], repeat, s-base)
			if false {
				// Validate match.
				a := src[base:s]
				b := src[base-repeat : base-repeat+(s-base)]
				if !bytes.Equal(a, b) {
					panic("mismatch")
				}
			}

			nextEmit = s
			if s >= sLimit {
				goto emitRemainder
			}

			if d > dstLimit {
				// Do we have space for more, if not bail.
				return 0
			}
			// Check for an immediate match, otherwise start search at s+1
			x := load64(src, s-2)
			m2Hash := hash6(x, tableBits)
			currHash := hash6(x>>16, tableBits)
			candidate = int(table[currHash])
			table[m2Hash] = uint16(s - 2)
			table[currHash] = uint16(s)
			if uint32(x>>16) != load32(src, candidate) {
				cv = load64(src, s+1)
				s++
				break
			}
		}
	}

emitRemainder:
	if nextEmit < len(src) {
		// Bail if we exceed the maximum size.
		if d+len(src)-nextEmit > dstLimit {
			return 0
		}
		d += emitLiteral(dst[d:], src[nextEmit:])
	}
	return d
}
// encodeBlockDictGo encodes a non-empty src to a guaranteed-large-enough dst,
// using dict as the starting history window. It assumes that the
// varint-encoded length of the decompressed bytes has already been written.
//
// It also assumes that:
//
//	len(dst) >= MaxEncodedLen(len(src)) &&
//	minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
func encodeBlockDictGo(dst, src []byte, dict *Dict) (d int) {
	// Initialize the hash table.
	const (
		tableBits    = 14
		maxTableSize = 1 << tableBits
		maxAhead     = 8 // maximum bytes ahead without checking sLimit
		debug        = false
	)
	dict.initFast()

	var table [maxTableSize]uint32

	// sLimit is when to stop looking for offset/length copies. The inputMargin
	// lets us use a fast path for emitLiteral in the main loop, while we are
	// looking for copies.
	sLimit := len(src) - inputMargin
	if sLimit > MaxDictSrcOffset-maxAhead {
		// Beyond MaxDictSrcOffset dict references are no longer representable,
		// so cap the first (dict-aware) search phase there.
		sLimit = MaxDictSrcOffset - maxAhead
	}

	// Bail if we can't compress to at least this.
	dstLimit := len(src) - len(src)>>5 - 5

	// nextEmit is where in src the next emitLiteral should start from.
	nextEmit := 0

	// The encoded form can start with a dict entry (copy or repeat).
	s := 0

	// Convert dict repeat to offset
	repeat := len(dict.dict) - dict.repeat
	cv := load64(src, 0)

	// While in dict
searchDict:
	for {
		// Next src position to check
		nextS := s + (s-nextEmit)>>6 + 4
		hash0 := hash6(cv, tableBits)
		hash1 := hash6(cv>>8, tableBits)
		if nextS > sLimit {
			if debug {
				fmt.Println("slimit reached", s, nextS)
			}
			break searchDict
		}
		candidateDict := int(dict.fastTable[hash0])
		candidateDict2 := int(dict.fastTable[hash1])
		candidate2 := int(table[hash1])
		candidate := int(table[hash0])
		table[hash0] = uint32(s)
		table[hash1] = uint32(s + 1)
		hash2 := hash6(cv>>16, tableBits)

		// Check repeat at offset checkRep.
		const checkRep = 1
		if repeat > s {
			// Repeat offset still points into the dictionary.
			candidate := len(dict.dict) - repeat + s
			if repeat-s >= 4 && uint32(cv) == load32(dict.dict, candidate) {
				// Extend back
				base := s
				for i := candidate; base > nextEmit && i > 0 && dict.dict[i-1] == src[base-1]; {
					i--
					base--
				}
				// Bail if we exceed the maximum size.
				if d+(base-nextEmit) > dstLimit {
					return 0
				}
				d += emitLiteral(dst[d:], src[nextEmit:base])
				if debug && nextEmit != base {
					fmt.Println("emitted ", base-nextEmit, "literals")
				}
				s += 4
				candidate += 4
				for candidate < len(dict.dict)-8 && s <= len(src)-8 {
					if diff := load64(src, s) ^ load64(dict.dict, candidate); diff != 0 {
						s += bits.TrailingZeros64(diff) >> 3
						break
					}
					s += 8
					candidate += 8
				}
				d += emitRepeat(dst[d:], repeat, s-base)
				if debug {
					fmt.Println("emitted dict repeat length", s-base, "offset:", repeat, "s:", s)
				}
				nextEmit = s
				if s >= sLimit {
					break searchDict
				}
				cv = load64(src, s)
				continue
			}
		} else if uint32(cv>>(checkRep*8)) == load32(src, s-repeat+checkRep) {
			// Repeat offset points into src itself.
			base := s + checkRep
			// Extend back
			for i := base - repeat; base > nextEmit && i > 0 && src[i-1] == src[base-1]; {
				i--
				base--
			}
			d += emitLiteral(dst[d:], src[nextEmit:base])
			if debug && nextEmit != base {
				fmt.Println("emitted ", base-nextEmit, "literals")
			}

			// Extend forward
			candidate := s - repeat + 4 + checkRep
			s += 4 + checkRep
			for s <= sLimit {
				if diff := load64(src, s) ^ load64(src, candidate); diff != 0 {
					s += bits.TrailingZeros64(diff) >> 3
					break
				}
				s += 8
				candidate += 8
			}
			if debug {
				// Validate match.
				if s <= candidate {
					panic("s <= candidate")
				}
				a := src[base:s]
				b := src[base-repeat : base-repeat+(s-base)]
				if !bytes.Equal(a, b) {
					panic("mismatch")
				}
			}

			if nextEmit > 0 {
				// same as `add := emitCopy(dst[d:], repeat, s-base)` but skips storing offset.
				d += emitRepeat(dst[d:], repeat, s-base)
			} else {
				// First match, cannot be repeat.
				d += emitCopy(dst[d:], repeat, s-base)
			}

			nextEmit = s
			if s >= sLimit {
				break searchDict
			}
			if debug {
				fmt.Println("emitted reg repeat", s-base, "s:", s)
			}
			cv = load64(src, s)
			continue searchDict
		}
		if s == 0 {
			cv = load64(src, nextS)
			s = nextS
			continue searchDict
		}
		// Start with table. These matches will always be closer.
		if uint32(cv) == load32(src, candidate) {
			goto emitMatch
		}
		candidate = int(table[hash2])
		if uint32(cv>>8) == load32(src, candidate2) {
			table[hash2] = uint32(s + 2)
			candidate = candidate2
			s++
			goto emitMatch
		}

		// Check dict. Dicts have longer offsets, so we want longer matches.
		if cv == load64(dict.dict, candidateDict) {
			table[hash2] = uint32(s + 2)
			goto emitDict
		}

		candidateDict = int(dict.fastTable[hash2])
		// Check if upper 7 bytes match
		if candidateDict2 >= 1 {
			if cv^load64(dict.dict, candidateDict2-1) < (1 << 8) {
				table[hash2] = uint32(s + 2)
				candidateDict = candidateDict2
				s++
				goto emitDict
			}
		}

		table[hash2] = uint32(s + 2)
		if uint32(cv>>16) == load32(src, candidate) {
			s += 2
			goto emitMatch
		}
		if candidateDict >= 2 {
			// Check if upper 6 bytes match
			if cv^load64(dict.dict, candidateDict-2) < (1 << 16) {
				s += 2
				goto emitDict
			}
		}

		cv = load64(src, nextS)
		s = nextS
		continue searchDict

	emitDict:
		// A match against the dictionary has been found.
		{
			if debug {
				if load32(dict.dict, candidateDict) != load32(src, s) {
					panic("dict emit mismatch")
				}
			}
			// Extend backwards.
			// The top bytes will be rechecked to get the full match.
			for candidateDict > 0 && s > nextEmit && dict.dict[candidateDict-1] == src[s-1] {
				candidateDict--
				s--
			}

			// Bail if we exceed the maximum size.
			if d+(s-nextEmit) > dstLimit {
				return 0
			}

			// A 4-byte match has been found. We'll later see if more than 4 bytes
			// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
			// them as literal bytes.
			d += emitLiteral(dst[d:], src[nextEmit:s])
			if debug && nextEmit != s {
				fmt.Println("emitted ", s-nextEmit, "literals")
			}
			{
				// Invariant: we have a 4-byte match at s, and no need to emit any
				// literal bytes prior to s.
				base := s
				repeat = s + (len(dict.dict)) - candidateDict

				// Extend the 4-byte match as long as possible.
				s += 4
				candidateDict += 4
				for s <= len(src)-8 && len(dict.dict)-candidateDict >= 8 {
					if diff := load64(src, s) ^ load64(dict.dict, candidateDict); diff != 0 {
						s += bits.TrailingZeros64(diff) >> 3
						break
					}
					s += 8
					candidateDict += 8
				}

				// Matches longer than 64 are split.
				if s <= sLimit || s-base < 8 {
					d += emitCopy(dst[d:], repeat, s-base)
				} else {
					// Split to ensure we don't start a copy within next block
					d += emitCopy(dst[d:], repeat, 4)
					d += emitRepeat(dst[d:], repeat, s-base-4)
				}
				if false {
					// Validate match.
					if s <= candidate {
						panic("s <= candidate")
					}
					a := src[base:s]
					b := dict.dict[base-repeat : base-repeat+(s-base)]
					if !bytes.Equal(a, b) {
						panic("mismatch")
					}
				}
				if debug {
					fmt.Println("emitted dict copy, length", s-base, "offset:", repeat, "s:", s)
				}
				nextEmit = s
				if s >= sLimit {
					break searchDict
				}

				if d > dstLimit {
					// Do we have space for more, if not bail.
					return 0
				}

				// Index and continue loop to try new candidate.
				x := load64(src, s-2)
				m2Hash := hash6(x, tableBits)
				currHash := hash6(x>>8, tableBits)
				table[m2Hash] = uint32(s - 2)
				table[currHash] = uint32(s - 1)
				cv = load64(src, s)
			}
			continue
		}
	emitMatch:
		// A match within src has been found.

		// Extend backwards.
		// The top bytes will be rechecked to get the full match.
		for candidate > 0 && s > nextEmit && src[candidate-1] == src[s-1] {
			candidate--
			s--
		}

		// Bail if we exceed the maximum size.
		if d+(s-nextEmit) > dstLimit {
			return 0
		}

		// A 4-byte match has been found. We'll later see if more than 4 bytes
		// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
		// them as literal bytes.
		d += emitLiteral(dst[d:], src[nextEmit:s])
		if debug && nextEmit != s {
			fmt.Println("emitted ", s-nextEmit, "literals")
		}
		// Call emitCopy, and then see if another emitCopy could be our next
		// move. Repeat until we find no match for the input immediately after
		// what was consumed by the last emitCopy call.
		//
		// If we exit this loop normally then we need to call emitLiteral next,
		// though we don't yet know how big the literal will be. We handle that
		// by proceeding to the next iteration of the main loop. We also can
		// exit this loop via goto if we get close to exhausting the input.
		for {
			// Invariant: we have a 4-byte match at s, and no need to emit any
			// literal bytes prior to s.
			base := s
			repeat = base - candidate

			// Extend the 4-byte match as long as possible.
			s += 4
			candidate += 4
			for s <= len(src)-8 {
				if diff := load64(src, s) ^ load64(src, candidate); diff != 0 {
					s += bits.TrailingZeros64(diff) >> 3
					break
				}
				s += 8
				candidate += 8
			}

			d += emitCopy(dst[d:], repeat, s-base)
			if debug {
				// Validate match.
				if s <= candidate {
					panic("s <= candidate")
				}
				a := src[base:s]
				b := src[base-repeat : base-repeat+(s-base)]
				if !bytes.Equal(a, b) {
					panic("mismatch")
				}
			}
			if debug {
				fmt.Println("emitted src copy, length", s-base, "offset:", repeat, "s:", s)
			}
			nextEmit = s
			if s >= sLimit {
				break searchDict
			}

			if d > dstLimit {
				// Do we have space for more, if not bail.
				return 0
			}

			// Check for an immediate match, otherwise start search at s+1
			x := load64(src, s-2)
			m2Hash := hash6(x, tableBits)
			currHash := hash6(x>>16, tableBits)
			candidate = int(table[currHash])
			table[m2Hash] = uint32(s - 2)
			table[currHash] = uint32(s)
			if debug && s == candidate {
				panic("s == candidate")
			}
			if uint32(x>>16) != load32(src, candidate) {
				cv = load64(src, s+1)
				s++
				break
			}
		}
	}

	// Search without dict:
	if repeat > s {
		// Repeat offset no longer representable without the dictionary.
		repeat = 0
	}

	// No more dict
	sLimit = len(src) - inputMargin
	if s >= sLimit {
		goto emitRemainder
	}
	if debug {
		fmt.Println("non-dict matching at", s, "repeat:", repeat)
	}
	cv = load64(src, s)
	if debug {
		fmt.Println("now", s, "->", sLimit, "out:", d, "left:", len(src)-s, "nextemit:", nextEmit, "dstLimit:", dstLimit, "s:", s)
	}
	for {
		candidate := 0
		for {
			// Next src position to check
			nextS := s + (s-nextEmit)>>6 + 4
			if nextS > sLimit {
				goto emitRemainder
			}
			hash0 := hash6(cv, tableBits)
			hash1 := hash6(cv>>8, tableBits)
			candidate = int(table[hash0])
			candidate2 := int(table[hash1])
			table[hash0] = uint32(s)
			table[hash1] = uint32(s + 1)
			hash2 := hash6(cv>>16, tableBits)

			// Check repeat at offset checkRep.
			const checkRep = 1
			if repeat > 0 && uint32(cv>>(checkRep*8)) == load32(src, s-repeat+checkRep) {
				base := s + checkRep
				// Extend back
				for i := base - repeat; base > nextEmit && i > 0 && src[i-1] == src[base-1]; {
					i--
					base--
				}
				// Bail if we exceed the maximum size.
				if d+(base-nextEmit) > dstLimit {
					return 0
				}
				d += emitLiteral(dst[d:], src[nextEmit:base])
				if debug && nextEmit != base {
					fmt.Println("emitted ", base-nextEmit, "literals")
				}
				// Extend forward
				candidate := s - repeat + 4 + checkRep
				s += 4 + checkRep
				for s <= sLimit {
					if diff := load64(src, s) ^ load64(src, candidate); diff != 0 {
						s += bits.TrailingZeros64(diff) >> 3
						break
					}
					s += 8
					candidate += 8
				}
				if debug {
					// Validate match.
					if s <= candidate {
						panic("s <= candidate")
					}
					a := src[base:s]
					b := src[base-repeat : base-repeat+(s-base)]
					if !bytes.Equal(a, b) {
						panic("mismatch")
					}
				}
				if nextEmit > 0 {
					// same as `add := emitCopy(dst[d:], repeat, s-base)` but skips storing offset.
					d += emitRepeat(dst[d:], repeat, s-base)
				} else {
					// First match, cannot be repeat.
					d += emitCopy(dst[d:], repeat, s-base)
				}
				if debug {
					fmt.Println("emitted src repeat length", s-base, "offset:", repeat, "s:", s)
				}
				nextEmit = s
				if s >= sLimit {
					goto emitRemainder
				}
				cv = load64(src, s)
				continue
			}

			if uint32(cv) == load32(src, candidate) {
				break
			}
			candidate = int(table[hash2])
			if uint32(cv>>8) == load32(src, candidate2) {
				table[hash2] = uint32(s + 2)
				candidate = candidate2
				s++
				break
			}
			table[hash2] = uint32(s + 2)
			if uint32(cv>>16) == load32(src, candidate) {
				s += 2
				break
			}
			cv = load64(src, nextS)
			s = nextS
		}

		// Extend backwards.
		// The top bytes will be rechecked to get the full match.
		for candidate > 0 && s > nextEmit && src[candidate-1] == src[s-1] {
			candidate--
			s--
		}

		// Bail if we exceed the maximum size.
		if d+(s-nextEmit) > dstLimit {
			return 0
		}

		// A 4-byte match has been found. We'll later see if more than 4 bytes
		// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
		// them as literal bytes.
		d += emitLiteral(dst[d:], src[nextEmit:s])
		if debug && nextEmit != s {
			fmt.Println("emitted ", s-nextEmit, "literals")
		}
		// Call emitCopy, and then see if another emitCopy could be our next
		// move. Repeat until we find no match for the input immediately after
		// what was consumed by the last emitCopy call.
		//
		// If we exit this loop normally then we need to call emitLiteral next,
		// though we don't yet know how big the literal will be. We handle that
		// by proceeding to the next iteration of the main loop. We also can
		// exit this loop via goto if we get close to exhausting the input.
		for {
			// Invariant: we have a 4-byte match at s, and no need to emit any
			// literal bytes prior to s.
			base := s
			repeat = base - candidate

			// Extend the 4-byte match as long as possible.
			s += 4
			candidate += 4
			for s <= len(src)-8 {
				if diff := load64(src, s) ^ load64(src, candidate); diff != 0 {
					s += bits.TrailingZeros64(diff) >> 3
					break
				}
				s += 8
				candidate += 8
			}

			d += emitCopy(dst[d:], repeat, s-base)
			if debug {
				// Validate match.
				if s <= candidate {
					panic("s <= candidate")
				}
				a := src[base:s]
				b := src[base-repeat : base-repeat+(s-base)]
				if !bytes.Equal(a, b) {
					panic("mismatch")
				}
			}
			if debug {
				fmt.Println("emitted src copy, length", s-base, "offset:", repeat, "s:", s)
			}
			nextEmit = s
			if s >= sLimit {
				goto emitRemainder
			}

			if d > dstLimit {
				// Do we have space for more, if not bail.
				return 0
			}
			// Check for an immediate match, otherwise start search at s+1
			x := load64(src, s-2)
			m2Hash := hash6(x, tableBits)
			currHash := hash6(x>>16, tableBits)
			candidate = int(table[currHash])
			table[m2Hash] = uint32(s - 2)
			table[currHash] = uint32(s)
			if debug && s == candidate {
				panic("s == candidate")
			}
			if uint32(x>>16) != load32(src, candidate) {
				cv = load64(src, s+1)
				s++
				break
			}
		}
	}

emitRemainder:
	if nextEmit < len(src) {
		// Bail if we exceed the maximum size.
		if d+len(src)-nextEmit > dstLimit {
			return 0
		}
		d += emitLiteral(dst[d:], src[nextEmit:])
		if debug && nextEmit != s {
			fmt.Println("emitted ", len(src)-nextEmit, "literals")
		}
	}
	return d
}
//go:build !appengine && !noasm && gc
// +build !appengine,!noasm,gc
package s2
import (
"sync"
"github.com/klauspost/compress/internal/race"
)
// hasAmd64Asm reports that the assembler implementations are compiled into
// this build (see the build constraints above).
const hasAmd64Asm = true

// encPools recycles the scratch buffers for the regular and Snappy block
// encoders. Each index is one buffer size class:
//
//	0: *[65536]byte, 1: *[16384]byte, 2: *[4096]byte, 3: *[1024]byte
var encPools [4]sync.Pool
// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It
// assumes that the varint-encoded length of the decompressed bytes has already
// been written.
//
// It also assumes that:
//
//	len(dst) >= MaxEncodedLen(len(src)) &&
//	minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
func encodeBlock(dst, src []byte) (d int) {
	race.ReadSlice(src)
	race.WriteSlice(dst)

	// Input-size thresholds below which smaller hash tables are used.
	const (
		limit12B = 16 << 10 // use 12 bit table when less than this
		limit10B = 4 << 10  // use 10 bit table when less than this
		limit8B  = 512      // use 8 bit table when less than this
	)

	// Each branch borrows a correctly-sized scratch buffer from its pool
	// slot and hands it back once the assembler returns.
	switch {
	case len(src) >= 4<<20:
		const sz, pool = 65536, 0
		buf, _ := encPools[pool].Get().(*[sz]byte)
		if buf == nil {
			buf = new([sz]byte)
		}
		race.WriteSlice(buf[:])
		defer encPools[pool].Put(buf)
		return encodeBlockAsm(dst, src, buf)
	case len(src) >= limit12B:
		const sz, pool = 65536, 0
		buf, _ := encPools[pool].Get().(*[sz]byte)
		if buf == nil {
			buf = new([sz]byte)
		}
		race.WriteSlice(buf[:])
		defer encPools[pool].Put(buf)
		return encodeBlockAsm4MB(dst, src, buf)
	case len(src) >= limit10B:
		const sz, pool = 16384, 1
		buf, _ := encPools[pool].Get().(*[sz]byte)
		if buf == nil {
			buf = new([sz]byte)
		}
		race.WriteSlice(buf[:])
		defer encPools[pool].Put(buf)
		return encodeBlockAsm12B(dst, src, buf)
	case len(src) >= limit8B:
		const sz, pool = 4096, 2
		buf, _ := encPools[pool].Get().(*[sz]byte)
		if buf == nil {
			buf = new([sz]byte)
		}
		race.WriteSlice(buf[:])
		defer encPools[pool].Put(buf)
		return encodeBlockAsm10B(dst, src, buf)
	case len(src) < minNonLiteralBlockSize:
		// Too small to encode as anything but literals; caller handles this.
		return 0
	}

	// Smallest non-literal blocks: 8 bit table.
	const sz, pool = 1024, 3
	buf, _ := encPools[pool].Get().(*[sz]byte)
	if buf == nil {
		buf = new([sz]byte)
	}
	race.WriteSlice(buf[:])
	defer encPools[pool].Put(buf)
	return encodeBlockAsm8B(dst, src, buf)
}
// encBetterPools recycles the scratch buffers for the "better" block
// encoders. Each slot should hold buffers of a single concrete size so the
// Get type assertions in the encoders can succeed.
var encBetterPools [5]sync.Pool
// encodeBlockBetter encodes a non-empty src to a guaranteed-large-enough dst. It
// assumes that the varint-encoded length of the decompressed bytes has already
// been written.
//
// It also assumes that:
//
//	len(dst) >= MaxEncodedLen(len(src)) &&
//	minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
func encodeBlockBetter(dst, src []byte) (d int) {
	race.ReadSlice(src)
	race.WriteSlice(dst)

	const (
		// Use 12 bit table when less than...
		limit12B = 16 << 10
		// Use 10 bit table when less than...
		limit10B = 4 << 10
		// Use 8 bit table when less than...
		limit8B = 512
	)

	if len(src) > 4<<20 {
		const sz, pool = 589824, 0
		tmp, ok := encBetterPools[pool].Get().(*[sz]byte)
		if !ok {
			tmp = &[sz]byte{}
		}
		race.WriteSlice(tmp[:])
		defer encBetterPools[pool].Put(tmp)
		return encodeBetterBlockAsm(dst, src, tmp)
	}
	if len(src) >= limit12B {
		const sz, pool = 589824, 0
		tmp, ok := encBetterPools[pool].Get().(*[sz]byte)
		if !ok {
			tmp = &[sz]byte{}
		}
		race.WriteSlice(tmp[:])
		defer encBetterPools[pool].Put(tmp)
		return encodeBetterBlockAsm4MB(dst, src, tmp)
	}
	if len(src) >= limit10B {
		// Keep the *[81920]byte buffers in their own pool slot (3). A sync.Pool
		// slot must hold a single concrete type: slot 0 stores *[589824]byte,
		// so putting *[81920]byte values there made every Get type assertion
		// miss and forced a fresh allocation for both sizes.
		const sz, pool = 81920, 3
		tmp, ok := encBetterPools[pool].Get().(*[sz]byte)
		if !ok {
			tmp = &[sz]byte{}
		}
		race.WriteSlice(tmp[:])
		defer encBetterPools[pool].Put(tmp)
		return encodeBetterBlockAsm12B(dst, src, tmp)
	}
	if len(src) >= limit8B {
		const sz, pool = 20480, 1
		tmp, ok := encBetterPools[pool].Get().(*[sz]byte)
		if !ok {
			tmp = &[sz]byte{}
		}
		race.WriteSlice(tmp[:])
		defer encBetterPools[pool].Put(tmp)
		return encodeBetterBlockAsm10B(dst, src, tmp)
	}
	if len(src) < minNonLiteralBlockSize {
		// Too small to encode as anything but literals; caller handles this.
		return 0
	}
	const sz, pool = 5120, 2
	tmp, ok := encBetterPools[pool].Get().(*[sz]byte)
	if !ok {
		tmp = &[sz]byte{}
	}
	race.WriteSlice(tmp[:])
	defer encBetterPools[pool].Put(tmp)
	return encodeBetterBlockAsm8B(dst, src, tmp)
}
// encodeBlockSnappy encodes a non-empty src to a guaranteed-large-enough dst. It
// assumes that the varint-encoded length of the decompressed bytes has already
// been written.
//
// It also assumes that:
//
//	len(dst) >= MaxEncodedLen(len(src)) &&
//	minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
func encodeBlockSnappy(dst, src []byte) (d int) {
	race.ReadSlice(src)
	race.WriteSlice(dst)

	// Input-size thresholds below which smaller hash tables are used.
	const (
		limit12B = 16 << 10 // use 12 bit table when less than this
		limit10B = 4 << 10  // use 10 bit table when less than this
		limit8B  = 512      // use 8 bit table when less than this
	)

	// Each branch borrows a correctly-sized scratch buffer from its pool
	// slot and hands it back once the assembler returns.
	switch {
	case len(src) > 65536:
		const sz, pool = 65536, 0
		buf, _ := encPools[pool].Get().(*[sz]byte)
		if buf == nil {
			buf = new([sz]byte)
		}
		race.WriteSlice(buf[:])
		defer encPools[pool].Put(buf)
		return encodeSnappyBlockAsm(dst, src, buf)
	case len(src) >= limit12B:
		const sz, pool = 65536, 0
		buf, _ := encPools[pool].Get().(*[sz]byte)
		if buf == nil {
			buf = new([sz]byte)
		}
		race.WriteSlice(buf[:])
		defer encPools[pool].Put(buf)
		return encodeSnappyBlockAsm64K(dst, src, buf)
	case len(src) >= limit10B:
		const sz, pool = 16384, 1
		buf, _ := encPools[pool].Get().(*[sz]byte)
		if buf == nil {
			buf = new([sz]byte)
		}
		race.WriteSlice(buf[:])
		defer encPools[pool].Put(buf)
		return encodeSnappyBlockAsm12B(dst, src, buf)
	case len(src) >= limit8B:
		const sz, pool = 4096, 2
		buf, _ := encPools[pool].Get().(*[sz]byte)
		if buf == nil {
			buf = new([sz]byte)
		}
		race.WriteSlice(buf[:])
		defer encPools[pool].Put(buf)
		return encodeSnappyBlockAsm10B(dst, src, buf)
	case len(src) < minNonLiteralBlockSize:
		// Too small to encode as anything but literals; caller handles this.
		return 0
	}

	// Smallest non-literal blocks: 8 bit table.
	const sz, pool = 1024, 3
	buf, _ := encPools[pool].Get().(*[sz]byte)
	if buf == nil {
		buf = new([sz]byte)
	}
	race.WriteSlice(buf[:])
	defer encPools[pool].Put(buf)
	return encodeSnappyBlockAsm8B(dst, src, buf)
}
// encodeBlockBetterSnappy encodes a non-empty src to a guaranteed-large-enough
// dst, producing Snappy-compatible output. It assumes that the varint-encoded
// length of the decompressed bytes has already been written.
// (The doc comment previously misnamed this function "encodeBlockSnappy".)
//
// It also assumes that:
//
//	len(dst) >= MaxEncodedLen(len(src)) &&
//	minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
func encodeBlockBetterSnappy(dst, src []byte) (d int) {
	race.ReadSlice(src)
	race.WriteSlice(dst)

	const (
		// Use 12 bit table when less than...
		limit12B = 16 << 10
		// Use 10 bit table when less than...
		limit10B = 4 << 10
		// Use 8 bit table when less than...
		limit8B = 512
	)

	if len(src) > 65536 {
		const sz, pool = 589824, 0
		tmp, ok := encBetterPools[pool].Get().(*[sz]byte)
		if !ok {
			tmp = &[sz]byte{}
		}
		race.WriteSlice(tmp[:])
		defer encBetterPools[pool].Put(tmp)
		return encodeSnappyBetterBlockAsm(dst, src, tmp)
	}
	if len(src) >= limit12B {
		const sz, pool = 294912, 4
		tmp, ok := encBetterPools[pool].Get().(*[sz]byte)
		if !ok {
			tmp = &[sz]byte{}
		}
		race.WriteSlice(tmp[:])
		defer encBetterPools[pool].Put(tmp)
		return encodeSnappyBetterBlockAsm64K(dst, src, tmp)
	}
	if len(src) >= limit10B {
		// Keep the *[81920]byte buffers in their own pool slot (3). A sync.Pool
		// slot must hold a single concrete type: slot 0 stores *[589824]byte,
		// so putting *[81920]byte values there made every Get type assertion
		// miss and forced a fresh allocation for both sizes.
		const sz, pool = 81920, 3
		tmp, ok := encBetterPools[pool].Get().(*[sz]byte)
		if !ok {
			tmp = &[sz]byte{}
		}
		race.WriteSlice(tmp[:])
		defer encBetterPools[pool].Put(tmp)
		return encodeSnappyBetterBlockAsm12B(dst, src, tmp)
	}
	if len(src) >= limit8B {
		const sz, pool = 20480, 1
		tmp, ok := encBetterPools[pool].Get().(*[sz]byte)
		if !ok {
			tmp = &[sz]byte{}
		}
		race.WriteSlice(tmp[:])
		defer encBetterPools[pool].Put(tmp)
		return encodeSnappyBetterBlockAsm10B(dst, src, tmp)
	}
	if len(src) < minNonLiteralBlockSize {
		// Too small to encode as anything but literals; caller handles this.
		return 0
	}
	const sz, pool = 5120, 2
	tmp, ok := encBetterPools[pool].Get().(*[sz]byte)
	if !ok {
		tmp = &[sz]byte{}
	}
	race.WriteSlice(tmp[:])
	defer encBetterPools[pool].Put(tmp)
	return encodeSnappyBetterBlockAsm8B(dst, src, tmp)
}
// Copyright 2016 The Snappy-Go Authors. All rights reserved.
// Copyright (c) 2019 Klaus Post. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package s2
import (
"fmt"
"math"
"math/bits"
)
// encodeBlockBest encodes a non-empty src to a guaranteed-large-enough dst. It
// assumes that the varint-encoded length of the decompressed bytes has already
// been written.
//
// It also assumes that:
//
// len(dst) >= MaxEncodedLen(len(src)) &&
// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
func encodeBlockBest(dst, src []byte, dict *Dict) (d int) {
// Initialize the hash tables.
const (
// Long hash matches.
lTableBits = 19
maxLTableSize = 1 << lTableBits
// Short hash matches.
sTableBits = 16
maxSTableSize = 1 << sTableBits
inputMargin = 8 + 2
debug = false
)
// sLimit is when to stop looking for offset/length copies. The inputMargin
// lets us use a fast path for emitLiteral in the main loop, while we are
// looking for copies.
sLimit := len(src) - inputMargin
if len(src) < minNonLiteralBlockSize {
return 0
}
sLimitDict := len(src) - inputMargin
if sLimitDict > MaxDictSrcOffset-inputMargin {
sLimitDict = MaxDictSrcOffset - inputMargin
}
var lTable [maxLTableSize]uint64
var sTable [maxSTableSize]uint64
// Bail if we can't compress to at least this.
dstLimit := len(src) - 5
// nextEmit is where in src the next emitLiteral should start from.
nextEmit := 0
// The encoded form must start with a literal, as there are no previous
// bytes to copy, so we start looking for hash matches at s == 1.
s := 1
repeat := 1
if dict != nil {
dict.initBest()
s = 0
repeat = len(dict.dict) - dict.repeat
}
cv := load64(src, s)
// We search for a repeat at -1, but don't output repeats when nextEmit == 0
const lowbitMask = 0xffffffff
getCur := func(x uint64) int {
return int(x & lowbitMask)
}
getPrev := func(x uint64) int {
return int(x >> 32)
}
const maxSkip = 64
for {
type match struct {
offset int
s int
length int
score int
rep, dict bool
}
var best match
for {
// Next src position to check
nextS := (s-nextEmit)>>8 + 1
if nextS > maxSkip {
nextS = s + maxSkip
} else {
nextS += s
}
if nextS > sLimit {
goto emitRemainder
}
if dict != nil && s >= MaxDictSrcOffset {
dict = nil
if repeat > s {
repeat = math.MinInt32
}
}
hashL := hash8(cv, lTableBits)
hashS := hash4(cv, sTableBits)
candidateL := lTable[hashL]
candidateS := sTable[hashS]
score := func(m match) int {
// Matches that are longer forward are penalized since we must emit it as a literal.
score := m.length - m.s
if nextEmit == m.s {
// If we do not have to emit literals, we save 1 byte
score++
}
offset := m.s - m.offset
if m.rep {
return score - emitRepeatSize(offset, m.length)
}
return score - emitCopySize(offset, m.length)
}
matchAt := func(offset, s int, first uint32, rep bool) match {
if best.length != 0 && best.s-best.offset == s-offset {
// Don't retest if we have the same offset.
return match{offset: offset, s: s}
}
if load32(src, offset) != first {
return match{offset: offset, s: s}
}
m := match{offset: offset, s: s, length: 4 + offset, rep: rep}
s += 4
for s < len(src) {
if len(src)-s < 8 {
if src[s] == src[m.length] {
m.length++
s++
continue
}
break
}
if diff := load64(src, s) ^ load64(src, m.length); diff != 0 {
m.length += bits.TrailingZeros64(diff) >> 3
break
}
s += 8
m.length += 8
}
m.length -= offset
m.score = score(m)
if m.score <= -m.s {
// Eliminate if no savings, we might find a better one.
m.length = 0
}
return m
}
matchDict := func(candidate, s int, first uint32, rep bool) match {
if s >= MaxDictSrcOffset {
return match{offset: candidate, s: s}
}
// Calculate offset as if in continuous array with s
offset := -len(dict.dict) + candidate
if best.length != 0 && best.s-best.offset == s-offset && !rep {
// Don't retest if we have the same offset.
return match{offset: offset, s: s}
}
if load32(dict.dict, candidate) != first {
return match{offset: offset, s: s}
}
m := match{offset: offset, s: s, length: 4 + candidate, rep: rep, dict: true}
s += 4
if !rep {
for s < sLimitDict && m.length < len(dict.dict) {
if len(src)-s < 8 || len(dict.dict)-m.length < 8 {
if src[s] == dict.dict[m.length] {
m.length++
s++
continue
}
break
}
if diff := load64(src, s) ^ load64(dict.dict, m.length); diff != 0 {
m.length += bits.TrailingZeros64(diff) >> 3
break
}
s += 8
m.length += 8
}
} else {
for s < len(src) && m.length < len(dict.dict) {
if len(src)-s < 8 || len(dict.dict)-m.length < 8 {
if src[s] == dict.dict[m.length] {
m.length++
s++
continue
}
break
}
if diff := load64(src, s) ^ load64(dict.dict, m.length); diff != 0 {
m.length += bits.TrailingZeros64(diff) >> 3
break
}
s += 8
m.length += 8
}
}
m.length -= candidate
m.score = score(m)
if m.score <= -m.s {
// Eliminate if no savings, we might find a better one.
m.length = 0
}
return m
}
bestOf := func(a, b match) match {
if b.length == 0 {
return a
}
if a.length == 0 {
return b
}
as := a.score + b.s
bs := b.score + a.s
if as >= bs {
return a
}
return b
}
if s > 0 {
best = bestOf(matchAt(getCur(candidateL), s, uint32(cv), false), matchAt(getPrev(candidateL), s, uint32(cv), false))
best = bestOf(best, matchAt(getCur(candidateS), s, uint32(cv), false))
best = bestOf(best, matchAt(getPrev(candidateS), s, uint32(cv), false))
}
if dict != nil {
candidateL := dict.bestTableLong[hashL]
candidateS := dict.bestTableShort[hashS]
best = bestOf(best, matchDict(int(candidateL&0xffff), s, uint32(cv), false))
best = bestOf(best, matchDict(int(candidateL>>16), s, uint32(cv), false))
best = bestOf(best, matchDict(int(candidateS&0xffff), s, uint32(cv), false))
best = bestOf(best, matchDict(int(candidateS>>16), s, uint32(cv), false))
}
{
if (dict == nil || repeat <= s) && repeat > 0 {
best = bestOf(best, matchAt(s-repeat+1, s+1, uint32(cv>>8), true))
} else if s-repeat < -4 && dict != nil {
candidate := len(dict.dict) - (repeat - s)
best = bestOf(best, matchDict(candidate, s, uint32(cv), true))
candidate++
best = bestOf(best, matchDict(candidate, s+1, uint32(cv>>8), true))
}
if best.length > 0 {
hashS := hash4(cv>>8, sTableBits)
// s+1
nextShort := sTable[hashS]
s := s + 1
cv := load64(src, s)
hashL := hash8(cv, lTableBits)
nextLong := lTable[hashL]
best = bestOf(best, matchAt(getCur(nextShort), s, uint32(cv), false))
best = bestOf(best, matchAt(getPrev(nextShort), s, uint32(cv), false))
best = bestOf(best, matchAt(getCur(nextLong), s, uint32(cv), false))
best = bestOf(best, matchAt(getPrev(nextLong), s, uint32(cv), false))
// Dict at + 1
if dict != nil {
candidateL := dict.bestTableLong[hashL]
candidateS := dict.bestTableShort[hashS]
best = bestOf(best, matchDict(int(candidateL&0xffff), s, uint32(cv), false))
best = bestOf(best, matchDict(int(candidateS&0xffff), s, uint32(cv), false))
}
// s+2
if true {
hashS := hash4(cv>>8, sTableBits)
nextShort = sTable[hashS]
s++
cv = load64(src, s)
hashL := hash8(cv, lTableBits)
nextLong = lTable[hashL]
if (dict == nil || repeat <= s) && repeat > 0 {
// Repeat at + 2
best = bestOf(best, matchAt(s-repeat, s, uint32(cv), true))
} else if repeat-s > 4 && dict != nil {
candidate := len(dict.dict) - (repeat - s)
best = bestOf(best, matchDict(candidate, s, uint32(cv), true))
}
best = bestOf(best, matchAt(getCur(nextShort), s, uint32(cv), false))
best = bestOf(best, matchAt(getPrev(nextShort), s, uint32(cv), false))
best = bestOf(best, matchAt(getCur(nextLong), s, uint32(cv), false))
best = bestOf(best, matchAt(getPrev(nextLong), s, uint32(cv), false))
// Dict at +2
// Very small gain
if dict != nil {
candidateL := dict.bestTableLong[hashL]
candidateS := dict.bestTableShort[hashS]
best = bestOf(best, matchDict(int(candidateL&0xffff), s, uint32(cv), false))
best = bestOf(best, matchDict(int(candidateS&0xffff), s, uint32(cv), false))
}
}
// Search for a match at best match end, see if that is better.
// Allow some bytes at the beginning to mismatch.
// Sweet spot is around 1-2 bytes, but depends on input.
// The skipped bytes are tested in Extend backwards,
// and still picked up as part of the match if they do.
const skipBeginning = 2
const skipEnd = 1
if sAt := best.s + best.length - skipEnd; sAt < sLimit {
sBack := best.s + skipBeginning - skipEnd
backL := best.length - skipBeginning
// Load initial values
cv = load64(src, sBack)
// Grab candidates...
next := lTable[hash8(load64(src, sAt), lTableBits)]
if checkAt := getCur(next) - backL; checkAt > 0 {
best = bestOf(best, matchAt(checkAt, sBack, uint32(cv), false))
}
if checkAt := getPrev(next) - backL; checkAt > 0 {
best = bestOf(best, matchAt(checkAt, sBack, uint32(cv), false))
}
// Disabled: Extremely small gain
if false {
next = sTable[hash4(load64(src, sAt), sTableBits)]
if checkAt := getCur(next) - backL; checkAt > 0 {
best = bestOf(best, matchAt(checkAt, sBack, uint32(cv), false))
}
if checkAt := getPrev(next) - backL; checkAt > 0 {
best = bestOf(best, matchAt(checkAt, sBack, uint32(cv), false))
}
}
}
}
}
// Update table
lTable[hashL] = uint64(s) | candidateL<<32
sTable[hashS] = uint64(s) | candidateS<<32
if best.length > 0 {
break
}
cv = load64(src, nextS)
s = nextS
}
// Extend backwards, not needed for repeats...
s = best.s
if !best.rep && !best.dict {
for best.offset > 0 && s > nextEmit && src[best.offset-1] == src[s-1] {
best.offset--
best.length++
s--
}
}
if false && best.offset >= s {
panic(fmt.Errorf("t %d >= s %d", best.offset, s))
}
// Bail if we exceed the maximum size.
if d+(s-nextEmit) > dstLimit {
return 0
}
base := s
offset := s - best.offset
s += best.length
if offset > 65535 && s-base <= 5 && !best.rep {
// Bail if the match is equal or worse to the encoding.
s = best.s + 1
if s >= sLimit {
goto emitRemainder
}
cv = load64(src, s)
continue
}
if debug && nextEmit != base {
fmt.Println("EMIT", base-nextEmit, "literals. base-after:", base)
}
d += emitLiteral(dst[d:], src[nextEmit:base])
if best.rep {
if nextEmit > 0 || best.dict {
if debug {
fmt.Println("REPEAT, length", best.length, "offset:", offset, "s-after:", s, "dict:", best.dict, "best:", best)
}
// same as `add := emitCopy(dst[d:], repeat, s-base)` but skips storing offset.
d += emitRepeat(dst[d:], offset, best.length)
} else {
// First match without dict cannot be a repeat.
if debug {
fmt.Println("COPY, length", best.length, "offset:", offset, "s-after:", s, "dict:", best.dict, "best:", best)
}
d += emitCopy(dst[d:], offset, best.length)
}
} else {
if debug {
fmt.Println("COPY, length", best.length, "offset:", offset, "s-after:", s, "dict:", best.dict, "best:", best)
}
d += emitCopy(dst[d:], offset, best.length)
}
repeat = offset
nextEmit = s
if s >= sLimit {
goto emitRemainder
}
if d > dstLimit {
// Do we have space for more, if not bail.
return 0
}
// Fill tables...
for i := best.s + 1; i < s; i++ {
cv0 := load64(src, i)
long0 := hash8(cv0, lTableBits)
short0 := hash4(cv0, sTableBits)
lTable[long0] = uint64(i) | lTable[long0]<<32
sTable[short0] = uint64(i) | sTable[short0]<<32
}
cv = load64(src, s)
}
emitRemainder:
if nextEmit < len(src) {
// Bail if we exceed the maximum size.
if d+len(src)-nextEmit > dstLimit {
return 0
}
if debug && nextEmit != s {
fmt.Println("emitted ", len(src)-nextEmit, "literals")
}
d += emitLiteral(dst[d:], src[nextEmit:])
}
return d
}
// encodeBlockBestSnappy encodes a non-empty src to a guaranteed-large-enough dst. It
// assumes that the varint-encoded length of the decompressed bytes has already
// been written.
//
// Unlike the S2 encoders this emits only Snappy-compatible output
// (literals and plain copies via emitCopyNoRepeat), so any Snappy
// decoder can decompress the result.
//
// It also assumes that:
//
//	len(dst) >= MaxEncodedLen(len(src)) &&
//	minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
func encodeBlockBestSnappy(dst, src []byte) (d int) {
	// Initialize the hash tables.
	const (
		// Long hash matches.
		lTableBits    = 19
		maxLTableSize = 1 << lTableBits

		// Short hash matches.
		sTableBits    = 16
		maxSTableSize = 1 << sTableBits

		inputMargin = 8 + 2
	)

	// sLimit is when to stop looking for offset/length copies. The inputMargin
	// lets us use a fast path for emitLiteral in the main loop, while we are
	// looking for copies.
	sLimit := len(src) - inputMargin
	if len(src) < minNonLiteralBlockSize {
		return 0
	}

	// Each table entry packs two candidate positions for the same hash:
	// the most recent in the low 32 bits and the previous one in the
	// high 32 bits (see getCur/getPrev and the shifted update below).
	var lTable [maxLTableSize]uint64
	var sTable [maxSTableSize]uint64

	// Bail if we can't compress to at least this.
	dstLimit := len(src) - 5

	// nextEmit is where in src the next emitLiteral should start from.
	nextEmit := 0

	// The encoded form must start with a literal, as there are no previous
	// bytes to copy, so we start looking for hash matches at s == 1.
	s := 1
	cv := load64(src, s)

	// We search for a repeat at -1, but don't output repeats when nextEmit == 0
	repeat := 1

	const lowbitMask = 0xffffffff
	// getCur/getPrev unpack the two candidate positions stored per table entry.
	getCur := func(x uint64) int {
		return int(x & lowbitMask)
	}
	getPrev := func(x uint64) int {
		return int(x >> 32)
	}
	const maxSkip = 64

	for {
		type match struct {
			offset int
			s      int
			length int
			score  int
		}
		var best match
		for {
			// Next src position to check
			nextS := (s-nextEmit)>>8 + 1
			if nextS > maxSkip {
				nextS = s + maxSkip
			} else {
				nextS += s
			}

			if nextS > sLimit {
				goto emitRemainder
			}
			hashL := hash8(cv, lTableBits)
			hashS := hash4(cv, sTableBits)
			candidateL := lTable[hashL]
			candidateS := sTable[hashS]

			// score estimates bytes saved by emitting m as a copy:
			// matched length minus the encoding cost, with a 1-byte bonus
			// when the match starts exactly at nextEmit (no literal needed).
			score := func(m match) int {
				// Matches that are longer forward are penalized since we must emit it as a literal.
				score := m.length - m.s
				if nextEmit == m.s {
					// If we do not have to emit literals, we save 1 byte
					score++
				}
				offset := m.s - m.offset

				return score - emitCopyNoRepeatSize(offset, m.length)
			}

			// matchAt verifies a 4-byte match of `first` at (offset, s) and
			// extends it forward. While extending, m.length temporarily holds
			// the absolute end position on the candidate side; the offset is
			// subtracted afterwards to yield the true length.
			matchAt := func(offset, s int, first uint32) match {
				if best.length != 0 && best.s-best.offset == s-offset {
					// Don't retest if we have the same offset.
					return match{offset: offset, s: s}
				}
				if load32(src, offset) != first {
					return match{offset: offset, s: s}
				}
				m := match{offset: offset, s: s, length: 4 + offset}
				s += 4
				for s <= sLimit {
					if diff := load64(src, s) ^ load64(src, m.length); diff != 0 {
						m.length += bits.TrailingZeros64(diff) >> 3
						break
					}
					s += 8
					m.length += 8
				}
				m.length -= offset
				m.score = score(m)
				if m.score <= -m.s {
					// Eliminate if no savings, we might find a better one.
					m.length = 0
				}
				return m
			}

			// bestOf picks the higher-scoring match; the cross-added start
			// positions normalize scores taken at different values of s.
			bestOf := func(a, b match) match {
				if b.length == 0 {
					return a
				}
				if a.length == 0 {
					return b
				}
				as := a.score + b.s
				bs := b.score + a.s
				if as >= bs {
					return a
				}
				return b
			}

			best = bestOf(matchAt(getCur(candidateL), s, uint32(cv)), matchAt(getPrev(candidateL), s, uint32(cv)))
			best = bestOf(best, matchAt(getCur(candidateS), s, uint32(cv)))
			best = bestOf(best, matchAt(getPrev(candidateS), s, uint32(cv)))

			{
				// Try the previous offset one byte ahead (repeat-style candidate).
				best = bestOf(best, matchAt(s-repeat+1, s+1, uint32(cv>>8)))
				if best.length > 0 {
					// s+1
					nextShort := sTable[hash4(cv>>8, sTableBits)]
					s := s + 1
					cv := load64(src, s)
					nextLong := lTable[hash8(cv, lTableBits)]
					best = bestOf(best, matchAt(getCur(nextShort), s, uint32(cv)))
					best = bestOf(best, matchAt(getPrev(nextShort), s, uint32(cv)))
					best = bestOf(best, matchAt(getCur(nextLong), s, uint32(cv)))
					best = bestOf(best, matchAt(getPrev(nextLong), s, uint32(cv)))
					// Repeat at + 2
					best = bestOf(best, matchAt(s-repeat+1, s+1, uint32(cv>>8)))

					// s+2
					if true {
						nextShort = sTable[hash4(cv>>8, sTableBits)]
						s++
						cv = load64(src, s)
						nextLong = lTable[hash8(cv, lTableBits)]
						best = bestOf(best, matchAt(getCur(nextShort), s, uint32(cv)))
						best = bestOf(best, matchAt(getPrev(nextShort), s, uint32(cv)))
						best = bestOf(best, matchAt(getCur(nextLong), s, uint32(cv)))
						best = bestOf(best, matchAt(getPrev(nextLong), s, uint32(cv)))
					}
					// Search for a match at best match end, see if that is better.
					if sAt := best.s + best.length; sAt < sLimit {
						sBack := best.s
						backL := best.length
						// Load initial values
						cv = load64(src, sBack)
						// Search for mismatch
						next := lTable[hash8(load64(src, sAt), lTableBits)]
						//next := sTable[hash4(load64(src, sAt), sTableBits)]

						if checkAt := getCur(next) - backL; checkAt > 0 {
							best = bestOf(best, matchAt(checkAt, sBack, uint32(cv)))
						}
						if checkAt := getPrev(next) - backL; checkAt > 0 {
							best = bestOf(best, matchAt(checkAt, sBack, uint32(cv)))
						}
					}
				}
			}

			// Update table: demote the current candidates to the "previous" slot.
			lTable[hashL] = uint64(s) | candidateL<<32
			sTable[hashS] = uint64(s) | candidateS<<32

			if best.length > 0 {
				break
			}

			cv = load64(src, nextS)
			s = nextS
		}

		// Extend backwards, not needed for repeats...
		s = best.s
		if true {
			for best.offset > 0 && s > nextEmit && src[best.offset-1] == src[s-1] {
				best.offset--
				best.length++
				s--
			}
		}
		// Debug-only sanity check; intentionally disabled.
		if false && best.offset >= s {
			panic(fmt.Errorf("t %d >= s %d", best.offset, s))
		}
		// Bail if we exceed the maximum size.
		if d+(s-nextEmit) > dstLimit {
			return 0
		}

		base := s
		offset := s - best.offset

		s += best.length

		// A 5-byte match with a >2-byte offset costs as much as it saves; skip it.
		if offset > 65535 && s-base <= 5 {
			// Bail if the match is equal or worse to the encoding.
			s = best.s + 1
			if s >= sLimit {
				goto emitRemainder
			}
			cv = load64(src, s)
			continue
		}
		d += emitLiteral(dst[d:], src[nextEmit:base])
		d += emitCopyNoRepeat(dst[d:], offset, best.length)
		repeat = offset

		nextEmit = s
		if s >= sLimit {
			goto emitRemainder
		}

		if d > dstLimit {
			// Do we have space for more, if not bail.
			return 0
		}
		// Fill tables...
		for i := best.s + 1; i < s; i++ {
			cv0 := load64(src, i)
			long0 := hash8(cv0, lTableBits)
			short0 := hash4(cv0, sTableBits)
			lTable[long0] = uint64(i) | lTable[long0]<<32
			sTable[short0] = uint64(i) | sTable[short0]<<32
		}
		cv = load64(src, s)
	}

emitRemainder:
	if nextEmit < len(src) {
		// Bail if we exceed the maximum size.
		if d+len(src)-nextEmit > dstLimit {
			return 0
		}
		d += emitLiteral(dst[d:], src[nextEmit:])
	}
	return d
}
// emitCopySize returns the size to encode the offset+length
//
// It assumes that:
//
//	1 <= offset && offset <= math.MaxUint32
//	4 <= length && length <= 1 << 24
func emitCopySize(offset, length int) int {
	if offset >= 65536 {
		// Offsets this large need the 5-byte copy form.
		if length > 64 {
			rest := length - 64
			if rest >= 4 {
				// One maximal 5-byte copy, remainder encoded as repeats.
				return 5 + emitRepeatSize(offset, rest)
			}
			if rest == 0 {
				return 5
			}
			// 1-3 trailing bytes force a second 5-byte copy.
			return 10
		}
		if length == 0 {
			return 0
		}
		return 5
	}

	// Offset fits in two bytes.
	if length > 64 {
		if offset < 2048 {
			// Emit 8 bytes, then rest as repeats...
			return 2 + emitRepeatSize(offset, length-8)
		}
		// Emit remaining as repeats, at least 4 bytes remain.
		return 3 + emitRepeatSize(offset, length-60)
	}
	if length >= 12 || offset >= 2048 {
		// Copy-2 form.
		return 3
	}
	// Emit the remaining copy, encoded as 2 bytes (copy-1 form).
	return 2
}
// emitCopyNoRepeatSize returns the size to encode the offset+length
//
// It assumes that:
//
//	1 <= offset && offset <= math.MaxUint32
//	4 <= length && length <= 1 << 24
func emitCopyNoRepeatSize(offset, length int) int {
	// Without repeat codes, a 4-byte offset always needs the 5-byte copy
	// form, and every tag covers at most 64 bytes.
	if offset >= 65536 {
		return 5 + 5*(length/64)
	}

	switch {
	case length > 64:
		// Each extra chunk of up to 60 bytes costs another 3-byte copy.
		return 3 + 3*(length/60)
	case length >= 12 || offset >= 2048:
		// Copy-2 form: 3 bytes.
		return 3
	default:
		// Copy-1 form: 2 bytes.
		return 2
	}
}
// emitRepeatSize returns the number of bytes required to encode a repeat.
// Length must be at least 4 and < 1<<24
func emitRepeatSize(offset, length int) int {
	// Repeat offset, make length cheaper
	switch {
	case length <= 4+4, length < 8+4 && offset < 2048:
		// Fits the shortest (2-byte) repeat form.
		return 2
	case length < (1<<8)+4+4:
		return 3
	case length < (1<<16)+(1<<8)+4:
		return 4
	}
	const maxRepeat = (1 << 24) - 1
	remain := length - ((1 << 16) - 4)
	if remain > maxRepeat {
		// Too long for a single 5-byte tag: emit the maximum repeat
		// and recurse for what is left over.
		return 5 + emitRepeatSize(offset, remain-maxRepeat+4)
	}
	return 5
}
// Copyright 2016 The Snappy-Go Authors. All rights reserved.
// Copyright (c) 2019 Klaus Post. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package s2
import (
"bytes"
"fmt"
"math/bits"
)
// hash4 returns the hash of the lowest 4 bytes of u to fit in a hash table with h bits.
// Preferably h should be a constant and should always be <32.
func hash4(u uint64, h uint8) uint32 {
	// Knuth-style multiplicative hash over the low 32 bits of u;
	// the top h bits of the 32-bit product become the table index.
	const prime4bytes = 2654435761
	low := uint32(u)
	return (low * prime4bytes) >> ((32 - h) & 31)
}
// hash5 returns the hash of the lowest 5 bytes of u to fit in a hash table with h bits.
// Preferably h should be a constant and should always be <64.
func hash5(u uint64, h uint8) uint32 {
	const prime5bytes = 889523592379
	// Shift the 5 low bytes to the top of the word, multiply, and
	// keep the top h bits of the 64-bit product.
	v := (u << (64 - 40)) * prime5bytes
	return uint32(v >> ((64 - h) & 63))
}
// hash7 returns the hash of the lowest 7 bytes of u to fit in a hash table with h bits.
// Preferably h should be a constant and should always be <64.
func hash7(u uint64, h uint8) uint32 {
	const prime7bytes = 58295818150454627
	// Shift the 7 low bytes to the top of the word, multiply, and
	// keep the top h bits of the 64-bit product.
	v := (u << (64 - 56)) * prime7bytes
	return uint32(v >> ((64 - h) & 63))
}
// hash8 returns the hash of u to fit in a hash table with h bits.
// Preferably h should be a constant and should always be <64.
func hash8(u uint64, h uint8) uint32 {
	// Multiplicative hash over all 8 bytes; the top h bits of the
	// 64-bit product become the table index.
	const prime8bytes = 0xcf1bbcdcb7a56463
	product := u * prime8bytes
	return uint32(product >> ((64 - h) & 63))
}
// encodeBlockBetterGo encodes a non-empty src to a guaranteed-large-enough dst. It
// assumes that the varint-encoded length of the decompressed bytes has already
// been written.
//
// "Better" trades some speed for compression by keeping two hash tables:
// a long (7-byte) hash for selective matches and a short (4-byte) hash
// as a fallback.
//
// It also assumes that:
//
//	len(dst) >= MaxEncodedLen(len(src)) &&
//	minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
func encodeBlockBetterGo(dst, src []byte) (d int) {
	// sLimit is when to stop looking for offset/length copies. The inputMargin
	// lets us use a fast path for emitLiteral in the main loop, while we are
	// looking for copies.
	sLimit := len(src) - inputMargin
	if len(src) < minNonLiteralBlockSize {
		return 0
	}

	// Initialize the hash tables.
	const (
		// Long hash matches.
		lTableBits    = 17
		maxLTableSize = 1 << lTableBits

		// Short hash matches.
		sTableBits    = 14
		maxSTableSize = 1 << sTableBits
	)

	var lTable [maxLTableSize]uint32
	var sTable [maxSTableSize]uint32

	// Bail if we can't compress to at least this.
	dstLimit := len(src) - len(src)>>5 - 6

	// nextEmit is where in src the next emitLiteral should start from.
	nextEmit := 0

	// The encoded form must start with a literal, as there are no previous
	// bytes to copy, so we start looking for hash matches at s == 1.
	s := 1
	cv := load64(src, s)

	// We initialize repeat to 0, so we never match on first attempt
	repeat := 0

	for {
		candidateL := 0
		nextS := 0
		for {
			// Next src position to check
			nextS = s + (s-nextEmit)>>7 + 1
			if nextS > sLimit {
				goto emitRemainder
			}
			hashL := hash7(cv, lTableBits)
			hashS := hash4(cv, sTableBits)
			candidateL = int(lTable[hashL])
			candidateS := int(sTable[hashS])
			lTable[hashL] = uint32(s)
			sTable[hashS] = uint32(s)

			valLong := load64(src, candidateL)
			valShort := load64(src, candidateS)

			// If long matches at least 8 bytes, use that.
			if cv == valLong {
				break
			}
			if cv == valShort {
				candidateL = candidateS
				break
			}

			// Check repeat at offset checkRep.
			const checkRep = 1
			// Minimum length of a repeat. Tested with various values.
			// While 4-5 offers improvements in some, 6 reduces
			// regressions significantly.
			const wantRepeatBytes = 6
			const repeatMask = ((1 << (wantRepeatBytes * 8)) - 1) << (8 * checkRep)
			// NOTE: in-scan repeat detection is intentionally disabled here
			// (the `false &&`); repeats are still exploited when a found
			// match continues the previous offset (see the emit below).
			if false && repeat > 0 && cv&repeatMask == load64(src, s-repeat)&repeatMask {
				base := s + checkRep
				// Extend back
				for i := base - repeat; base > nextEmit && i > 0 && src[i-1] == src[base-1]; {
					i--
					base--
				}
				d += emitLiteral(dst[d:], src[nextEmit:base])

				// Extend forward
				candidate := s - repeat + wantRepeatBytes + checkRep
				s += wantRepeatBytes + checkRep
				for s < len(src) {
					if len(src)-s < 8 {
						// Byte-by-byte tail compare once fewer than 8 bytes remain.
						if src[s] == src[candidate] {
							s++
							candidate++
							continue
						}
						break
					}
					if diff := load64(src, s) ^ load64(src, candidate); diff != 0 {
						s += bits.TrailingZeros64(diff) >> 3
						break
					}
					s += 8
					candidate += 8
				}
				// same as `add := emitCopy(dst[d:], repeat, s-base)` but skips storing offset.
				d += emitRepeat(dst[d:], repeat, s-base)
				nextEmit = s
				if s >= sLimit {
					goto emitRemainder
				}
				// Index in-between
				index0 := base + 1
				index1 := s - 2

				for index0 < index1 {
					cv0 := load64(src, index0)
					cv1 := load64(src, index1)
					lTable[hash7(cv0, lTableBits)] = uint32(index0)
					sTable[hash4(cv0>>8, sTableBits)] = uint32(index0 + 1)

					lTable[hash7(cv1, lTableBits)] = uint32(index1)
					sTable[hash4(cv1>>8, sTableBits)] = uint32(index1 + 1)
					index0 += 2
					index1 -= 2
				}

				cv = load64(src, s)
				continue
			}

			// Long likely matches 7, so take that.
			if uint32(cv) == uint32(valLong) {
				break
			}

			// Check our short candidate
			if uint32(cv) == uint32(valShort) {
				// Try a long candidate at s+1
				hashL = hash7(cv>>8, lTableBits)
				candidateL = int(lTable[hashL])
				lTable[hashL] = uint32(s + 1)
				if uint32(cv>>8) == load32(src, candidateL) {
					s++
					break
				}
				// Use our short candidate.
				candidateL = candidateS
				break
			}

			cv = load64(src, nextS)
			s = nextS
		}

		// Extend backwards
		for candidateL > 0 && s > nextEmit && src[candidateL-1] == src[s-1] {
			candidateL--
			s--
		}

		// Bail if we exceed the maximum size.
		if d+(s-nextEmit) > dstLimit {
			return 0
		}

		base := s
		offset := base - candidateL

		// Extend the 4-byte match as long as possible.
		s += 4
		candidateL += 4
		for s < len(src) {
			if len(src)-s < 8 {
				if src[s] == src[candidateL] {
					s++
					candidateL++
					continue
				}
				break
			}
			if diff := load64(src, s) ^ load64(src, candidateL); diff != 0 {
				s += bits.TrailingZeros64(diff) >> 3
				break
			}
			s += 8
			candidateL += 8
		}

		// A short match with a 4-byte offset doesn't pay for its encoding,
		// unless it can be emitted as a cheap repeat.
		if offset > 65535 && s-base <= 5 && repeat != offset {
			// Bail if the match is equal or worse to the encoding.
			s = nextS + 1
			if s >= sLimit {
				goto emitRemainder
			}
			cv = load64(src, s)
			continue
		}

		d += emitLiteral(dst[d:], src[nextEmit:base])
		if repeat == offset {
			d += emitRepeat(dst[d:], offset, s-base)
		} else {
			d += emitCopy(dst[d:], offset, s-base)
			repeat = offset
		}

		nextEmit = s
		if s >= sLimit {
			goto emitRemainder
		}

		if d > dstLimit {
			// Do we have space for more, if not bail.
			return 0
		}

		// Index short & long
		index0 := base + 1
		index1 := s - 2

		cv0 := load64(src, index0)
		cv1 := load64(src, index1)
		lTable[hash7(cv0, lTableBits)] = uint32(index0)
		sTable[hash4(cv0>>8, sTableBits)] = uint32(index0 + 1)

		// lTable could be postponed, but very minor difference.
		lTable[hash7(cv1, lTableBits)] = uint32(index1)
		sTable[hash4(cv1>>8, sTableBits)] = uint32(index1 + 1)
		index0 += 1
		index1 -= 1
		cv = load64(src, s)

		// Index large values sparsely in between.
		// We do two starting from different offsets for speed.
		index2 := (index0 + index1 + 1) >> 1
		for index2 < index1 {
			lTable[hash7(load64(src, index0), lTableBits)] = uint32(index0)
			lTable[hash7(load64(src, index2), lTableBits)] = uint32(index2)
			index0 += 2
			index2 += 2
		}
	}

emitRemainder:
	if nextEmit < len(src) {
		// Bail if we exceed the maximum size.
		if d+len(src)-nextEmit > dstLimit {
			return 0
		}
		d += emitLiteral(dst[d:], src[nextEmit:])
	}
	return d
}
// encodeBlockBetterSnappyGo encodes a non-empty src to a guaranteed-large-enough dst. It
// assumes that the varint-encoded length of the decompressed bytes has already
// been written.
//
// Unlike encodeBlockBetterGo this emits only Snappy-compatible output
// (no repeat codes), via emitCopyNoRepeat.
//
// It also assumes that:
//
//	len(dst) >= MaxEncodedLen(len(src)) &&
//	minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
func encodeBlockBetterSnappyGo(dst, src []byte) (d int) {
	// sLimit is when to stop looking for offset/length copies. The inputMargin
	// lets us use a fast path for emitLiteral in the main loop, while we are
	// looking for copies.
	sLimit := len(src) - inputMargin
	if len(src) < minNonLiteralBlockSize {
		return 0
	}

	// Initialize the hash tables.
	const (
		// Long hash matches.
		lTableBits    = 16
		maxLTableSize = 1 << lTableBits

		// Short hash matches.
		sTableBits    = 14
		maxSTableSize = 1 << sTableBits
	)

	var lTable [maxLTableSize]uint32
	var sTable [maxSTableSize]uint32

	// Bail if we can't compress to at least this.
	dstLimit := len(src) - len(src)>>5 - 6

	// nextEmit is where in src the next emitLiteral should start from.
	nextEmit := 0

	// The encoded form must start with a literal, as there are no previous
	// bytes to copy, so we start looking for hash matches at s == 1.
	s := 1
	cv := load64(src, s)

	// We initialize repeat to 0, so we never match on first attempt
	repeat := 0

	const maxSkip = 100

	for {
		candidateL := 0
		nextS := 0
		for {
			// Next src position to check, capped at maxSkip bytes ahead.
			nextS = min(s+(s-nextEmit)>>7+1, s+maxSkip)

			if nextS > sLimit {
				goto emitRemainder
			}
			hashL := hash7(cv, lTableBits)
			hashS := hash4(cv, sTableBits)
			candidateL = int(lTable[hashL])
			candidateS := int(sTable[hashS])
			lTable[hashL] = uint32(s)
			sTable[hashS] = uint32(s)

			// Prefer the long-hash candidate when it matches 4 bytes.
			if uint32(cv) == load32(src, candidateL) {
				break
			}

			// Check our short candidate
			if uint32(cv) == load32(src, candidateS) {
				// Try a long candidate at s+1
				hashL = hash7(cv>>8, lTableBits)
				candidateL = int(lTable[hashL])
				lTable[hashL] = uint32(s + 1)
				if uint32(cv>>8) == load32(src, candidateL) {
					s++
					break
				}
				// Use our short candidate.
				candidateL = candidateS
				break
			}

			cv = load64(src, nextS)
			s = nextS
		}

		// Extend backwards
		for candidateL > 0 && s > nextEmit && src[candidateL-1] == src[s-1] {
			candidateL--
			s--
		}

		// Bail if we exceed the maximum size.
		if d+(s-nextEmit) > dstLimit {
			return 0
		}

		base := s
		offset := base - candidateL

		// Extend the 4-byte match as long as possible.
		s += 4
		candidateL += 4
		for s < len(src) {
			if len(src)-s < 8 {
				// Byte-by-byte tail compare once fewer than 8 bytes remain.
				if src[s] == src[candidateL] {
					s++
					candidateL++
					continue
				}
				break
			}
			if diff := load64(src, s) ^ load64(src, candidateL); diff != 0 {
				s += bits.TrailingZeros64(diff) >> 3
				break
			}
			s += 8
			candidateL += 8
		}

		// A short match with a 4-byte offset doesn't pay for its encoding.
		if offset > 65535 && s-base <= 5 && repeat != offset {
			// Bail if the match is equal or worse to the encoding.
			s = nextS + 1
			if s >= sLimit {
				goto emitRemainder
			}
			cv = load64(src, s)
			continue
		}

		d += emitLiteral(dst[d:], src[nextEmit:base])
		d += emitCopyNoRepeat(dst[d:], offset, s-base)
		repeat = offset

		nextEmit = s
		if s >= sLimit {
			goto emitRemainder
		}

		if d > dstLimit {
			// Do we have space for more, if not bail.
			return 0
		}

		// Index short & long
		index0 := base + 1
		index1 := s - 2

		cv0 := load64(src, index0)
		cv1 := load64(src, index1)
		lTable[hash7(cv0, lTableBits)] = uint32(index0)
		sTable[hash4(cv0>>8, sTableBits)] = uint32(index0 + 1)

		lTable[hash7(cv1, lTableBits)] = uint32(index1)
		sTable[hash4(cv1>>8, sTableBits)] = uint32(index1 + 1)
		index0 += 1
		index1 -= 1
		cv = load64(src, s)

		// Index large values sparsely in between.
		// We do two starting from different offsets for speed.
		index2 := (index0 + index1 + 1) >> 1
		for index2 < index1 {
			lTable[hash7(load64(src, index0), lTableBits)] = uint32(index0)
			lTable[hash7(load64(src, index2), lTableBits)] = uint32(index2)
			index0 += 2
			index2 += 2
		}
	}

emitRemainder:
	if nextEmit < len(src) {
		// Bail if we exceed the maximum size.
		if d+len(src)-nextEmit > dstLimit {
			return 0
		}
		d += emitLiteral(dst[d:], src[nextEmit:])
	}
	return d
}
// encodeBlockBetterGo64K encodes a non-empty src to a guaranteed-large-enough dst.
// It is the variant of encodeBlockBetterGo for inputs that fit in 64KB, which
// allows smaller hash tables storing positions as uint16.
// It assumes that the varint-encoded length of the decompressed bytes has
// already been written.
//
// It also assumes that:
//
//	len(dst) >= MaxEncodedLen(len(src)) &&
//	minNonLiteralBlockSize <= len(src) && len(src) <= 64<<10
func encodeBlockBetterGo64K(dst, src []byte) (d int) {
	// sLimit is when to stop looking for offset/length copies. The inputMargin
	// lets us use a fast path for emitLiteral in the main loop, while we are
	// looking for copies.
	sLimit := len(src) - inputMargin
	if len(src) < minNonLiteralBlockSize {
		return 0
	}

	// Initialize the hash tables.
	// Use smaller tables for smaller blocks
	const (
		// Long hash matches.
		lTableBits    = 16
		maxLTableSize = 1 << lTableBits

		// Short hash matches.
		sTableBits    = 13
		maxSTableSize = 1 << sTableBits
	)

	// Positions fit in uint16 since len(src) <= 64KB.
	var lTable [maxLTableSize]uint16
	var sTable [maxSTableSize]uint16

	// Bail if we can't compress to at least this.
	dstLimit := len(src) - len(src)>>5 - 6

	// nextEmit is where in src the next emitLiteral should start from.
	nextEmit := 0

	// The encoded form must start with a literal, as there are no previous
	// bytes to copy, so we start looking for hash matches at s == 1.
	s := 1
	cv := load64(src, s)

	// We initialize repeat to 0, so we never match on first attempt
	repeat := 0

	for {
		candidateL := 0
		nextS := 0
		for {
			// Next src position to check
			nextS = s + (s-nextEmit)>>6 + 1
			if nextS > sLimit {
				goto emitRemainder
			}
			hashL := hash7(cv, lTableBits)
			hashS := hash4(cv, sTableBits)
			candidateL = int(lTable[hashL])
			candidateS := int(sTable[hashS])
			lTable[hashL] = uint16(s)
			sTable[hashS] = uint16(s)

			valLong := load64(src, candidateL)
			valShort := load64(src, candidateS)

			// If long matches at least 8 bytes, use that.
			if cv == valLong {
				break
			}
			if cv == valShort {
				candidateL = candidateS
				break
			}

			// Check repeat at offset checkRep.
			const checkRep = 1
			// Minimum length of a repeat. Tested with various values.
			// While 4-5 offers improvements in some, 6 reduces
			// regressions significantly.
			const wantRepeatBytes = 6
			const repeatMask = ((1 << (wantRepeatBytes * 8)) - 1) << (8 * checkRep)
			// NOTE: in-scan repeat detection is intentionally disabled here
			// (the `false &&`); repeats are still exploited when a found
			// match continues the previous offset (see the emit below).
			if false && repeat > 0 && cv&repeatMask == load64(src, s-repeat)&repeatMask {
				base := s + checkRep
				// Extend back
				for i := base - repeat; base > nextEmit && i > 0 && src[i-1] == src[base-1]; {
					i--
					base--
				}
				d += emitLiteral(dst[d:], src[nextEmit:base])

				// Extend forward
				candidate := s - repeat + wantRepeatBytes + checkRep
				s += wantRepeatBytes + checkRep
				for s < len(src) {
					if len(src)-s < 8 {
						// Byte-by-byte tail compare once fewer than 8 bytes remain.
						if src[s] == src[candidate] {
							s++
							candidate++
							continue
						}
						break
					}
					if diff := load64(src, s) ^ load64(src, candidate); diff != 0 {
						s += bits.TrailingZeros64(diff) >> 3
						break
					}
					s += 8
					candidate += 8
				}
				// same as `add := emitCopy(dst[d:], repeat, s-base)` but skips storing offset.
				d += emitRepeat(dst[d:], repeat, s-base)
				nextEmit = s
				if s >= sLimit {
					goto emitRemainder
				}
				// Index in-between
				index0 := base + 1
				index1 := s - 2

				for index0 < index1 {
					cv0 := load64(src, index0)
					cv1 := load64(src, index1)
					lTable[hash7(cv0, lTableBits)] = uint16(index0)
					sTable[hash4(cv0>>8, sTableBits)] = uint16(index0 + 1)

					lTable[hash7(cv1, lTableBits)] = uint16(index1)
					sTable[hash4(cv1>>8, sTableBits)] = uint16(index1 + 1)
					index0 += 2
					index1 -= 2
				}

				cv = load64(src, s)
				continue
			}

			// Long likely matches 7, so take that.
			if uint32(cv) == uint32(valLong) {
				break
			}

			// Check our short candidate
			if uint32(cv) == uint32(valShort) {
				// Try a long candidate at s+1
				hashL = hash7(cv>>8, lTableBits)
				candidateL = int(lTable[hashL])
				lTable[hashL] = uint16(s + 1)
				if uint32(cv>>8) == load32(src, candidateL) {
					s++
					break
				}
				// Use our short candidate.
				candidateL = candidateS
				break
			}

			cv = load64(src, nextS)
			s = nextS
		}

		// Extend backwards
		for candidateL > 0 && s > nextEmit && src[candidateL-1] == src[s-1] {
			candidateL--
			s--
		}

		// Bail if we exceed the maximum size.
		if d+(s-nextEmit) > dstLimit {
			return 0
		}

		base := s
		offset := base - candidateL

		// Extend the 4-byte match as long as possible.
		// No "offset too large" bail here: offsets always fit in 16 bits
		// for <=64KB input.
		s += 4
		candidateL += 4
		for s < len(src) {
			if len(src)-s < 8 {
				if src[s] == src[candidateL] {
					s++
					candidateL++
					continue
				}
				break
			}
			if diff := load64(src, s) ^ load64(src, candidateL); diff != 0 {
				s += bits.TrailingZeros64(diff) >> 3
				break
			}
			s += 8
			candidateL += 8
		}

		d += emitLiteral(dst[d:], src[nextEmit:base])
		if repeat == offset {
			d += emitRepeat(dst[d:], offset, s-base)
		} else {
			d += emitCopy(dst[d:], offset, s-base)
			repeat = offset
		}

		nextEmit = s
		if s >= sLimit {
			goto emitRemainder
		}

		if d > dstLimit {
			// Do we have space for more, if not bail.
			return 0
		}

		// Index short & long
		index0 := base + 1
		index1 := s - 2

		cv0 := load64(src, index0)
		cv1 := load64(src, index1)
		lTable[hash7(cv0, lTableBits)] = uint16(index0)
		sTable[hash4(cv0>>8, sTableBits)] = uint16(index0 + 1)

		// lTable could be postponed, but very minor difference.
		lTable[hash7(cv1, lTableBits)] = uint16(index1)
		sTable[hash4(cv1>>8, sTableBits)] = uint16(index1 + 1)
		index0 += 1
		index1 -= 1
		cv = load64(src, s)

		// Index large values sparsely in between.
		// We do two starting from different offsets for speed.
		index2 := (index0 + index1 + 1) >> 1
		for index2 < index1 {
			lTable[hash7(load64(src, index0), lTableBits)] = uint16(index0)
			lTable[hash7(load64(src, index2), lTableBits)] = uint16(index2)
			index0 += 2
			index2 += 2
		}
	}

emitRemainder:
	if nextEmit < len(src) {
		// Bail if we exceed the maximum size.
		if d+len(src)-nextEmit > dstLimit {
			return 0
		}
		d += emitLiteral(dst[d:], src[nextEmit:])
	}
	return d
}
// encodeBlockBetterSnappyGo64K encodes a non-empty src to a guaranteed-large-enough dst. It
// assumes that the varint-encoded length of the decompressed bytes has already
// been written.
//
// It is the <=64KB variant of encodeBlockBetterSnappyGo, using uint16 hash
// tables, and emits only Snappy-compatible output (no repeat codes).
//
// It also assumes that:
//
//	len(dst) >= MaxEncodedLen(len(src)) &&
//	minNonLiteralBlockSize <= len(src) && len(src) <= 64<<10
func encodeBlockBetterSnappyGo64K(dst, src []byte) (d int) {
	// sLimit is when to stop looking for offset/length copies. The inputMargin
	// lets us use a fast path for emitLiteral in the main loop, while we are
	// looking for copies.
	sLimit := len(src) - inputMargin
	if len(src) < minNonLiteralBlockSize {
		return 0
	}

	// Initialize the hash tables.
	// Use smaller tables for smaller blocks
	const (
		// Long hash matches.
		lTableBits    = 15
		maxLTableSize = 1 << lTableBits

		// Short hash matches.
		sTableBits    = 13
		maxSTableSize = 1 << sTableBits
	)

	// Positions fit in uint16 since len(src) <= 64KB.
	var lTable [maxLTableSize]uint16
	var sTable [maxSTableSize]uint16

	// Bail if we can't compress to at least this.
	dstLimit := len(src) - len(src)>>5 - 6

	// nextEmit is where in src the next emitLiteral should start from.
	nextEmit := 0

	// The encoded form must start with a literal, as there are no previous
	// bytes to copy, so we start looking for hash matches at s == 1.
	s := 1
	cv := load64(src, s)

	const maxSkip = 100

	for {
		candidateL := 0
		nextS := 0
		for {
			// Next src position to check, capped at maxSkip bytes ahead.
			nextS = min(s+(s-nextEmit)>>6+1, s+maxSkip)

			if nextS > sLimit {
				goto emitRemainder
			}
			hashL := hash7(cv, lTableBits)
			hashS := hash4(cv, sTableBits)
			candidateL = int(lTable[hashL])
			candidateS := int(sTable[hashS])
			lTable[hashL] = uint16(s)
			sTable[hashS] = uint16(s)

			// Prefer the long-hash candidate when it matches 4 bytes.
			if uint32(cv) == load32(src, candidateL) {
				break
			}

			// Check our short candidate
			if uint32(cv) == load32(src, candidateS) {
				// Try a long candidate at s+1
				hashL = hash7(cv>>8, lTableBits)
				candidateL = int(lTable[hashL])
				lTable[hashL] = uint16(s + 1)
				if uint32(cv>>8) == load32(src, candidateL) {
					s++
					break
				}
				// Use our short candidate.
				candidateL = candidateS
				break
			}

			cv = load64(src, nextS)
			s = nextS
		}

		// Extend backwards
		for candidateL > 0 && s > nextEmit && src[candidateL-1] == src[s-1] {
			candidateL--
			s--
		}

		// Bail if we exceed the maximum size.
		if d+(s-nextEmit) > dstLimit {
			return 0
		}

		base := s
		offset := base - candidateL

		// Extend the 4-byte match as long as possible.
		// No "offset too large" bail here: offsets always fit in 16 bits
		// for <=64KB input.
		s += 4
		candidateL += 4
		for s < len(src) {
			if len(src)-s < 8 {
				// Byte-by-byte tail compare once fewer than 8 bytes remain.
				if src[s] == src[candidateL] {
					s++
					candidateL++
					continue
				}
				break
			}
			if diff := load64(src, s) ^ load64(src, candidateL); diff != 0 {
				s += bits.TrailingZeros64(diff) >> 3
				break
			}
			s += 8
			candidateL += 8
		}

		d += emitLiteral(dst[d:], src[nextEmit:base])
		d += emitCopyNoRepeat(dst[d:], offset, s-base)
		nextEmit = s
		if s >= sLimit {
			goto emitRemainder
		}

		if d > dstLimit {
			// Do we have space for more, if not bail.
			return 0
		}

		// Index short & long
		index0 := base + 1
		index1 := s - 2

		cv0 := load64(src, index0)
		cv1 := load64(src, index1)
		lTable[hash7(cv0, lTableBits)] = uint16(index0)
		sTable[hash4(cv0>>8, sTableBits)] = uint16(index0 + 1)

		lTable[hash7(cv1, lTableBits)] = uint16(index1)
		sTable[hash4(cv1>>8, sTableBits)] = uint16(index1 + 1)
		index0 += 1
		index1 -= 1
		cv = load64(src, s)

		// Index large values sparsely in between.
		// We do two starting from different offsets for speed.
		index2 := (index0 + index1 + 1) >> 1
		for index2 < index1 {
			lTable[hash7(load64(src, index0), lTableBits)] = uint16(index0)
			lTable[hash7(load64(src, index2), lTableBits)] = uint16(index2)
			index0 += 2
			index2 += 2
		}
	}

emitRemainder:
	if nextEmit < len(src) {
		// Bail if we exceed the maximum size.
		if d+len(src)-nextEmit > dstLimit {
			return 0
		}
		d += emitLiteral(dst[d:], src[nextEmit:])
	}
	return d
}
// encodeBlockBetterDict encodes a non-empty src to a guaranteed-large-enough dst. It
// assumes that the varint-encoded length of the decompressed bytes has already
// been written.
//
// It also assumes that:
//
//	len(dst) >= MaxEncodedLen(len(src)) &&
//	minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
func encodeBlockBetterDict(dst, src []byte, dict *Dict) (d int) {
	// sLimit is when to stop looking for offset/length copies. The inputMargin
	// lets us use a fast path for emitLiteral in the main loop, while we are
	// looking for copies.
	// Initialize the hash tables.
	const (
		// Long hash matches.
		lTableBits    = 17
		maxLTableSize = 1 << lTableBits
		// Short hash matches.
		sTableBits    = 14
		maxSTableSize = 1 << sTableBits
		maxAhead      = 8 // maximum bytes ahead without checking sLimit
		debug         = false
	)
	sLimit := len(src) - inputMargin
	// While searching the dictionary, never advance past the range the
	// dictionary can address.
	if sLimit > MaxDictSrcOffset-maxAhead {
		sLimit = MaxDictSrcOffset - maxAhead
	}
	if len(src) < minNonLiteralBlockSize {
		return 0
	}
	dict.initBetter()
	var lTable [maxLTableSize]uint32
	var sTable [maxSTableSize]uint32
	// Bail if we can't compress to at least this.
	dstLimit := len(src) - len(src)>>5 - 6
	// nextEmit is where in src the next emitLiteral should start from.
	nextEmit := 0
	// s starts at 0; matching at s == 0 is skipped below
	// (see "Don't try to find match at s==0").
	s := 0
	cv := load64(src, s)
	// Seed repeat from the dictionary's stored repeat distance so the
	// dict-repeat check below can hit on the first attempt.
	// NOTE(review): an earlier comment claimed repeat starts at 0; the code
	// actually seeds it from dict.repeat.
	repeat := len(dict.dict) - dict.repeat
	// While in dict: match against both the local tables and the
	// dictionary's precomputed long/short tables.
searchDict:
	for {
		candidateL := 0
		nextS := 0
		for {
			// Next src position to check
			nextS = s + (s-nextEmit)>>7 + 1
			if nextS > sLimit {
				break searchDict
			}
			hashL := hash7(cv, lTableBits)
			hashS := hash4(cv, sTableBits)
			candidateL = int(lTable[hashL])
			candidateS := int(sTable[hashS])
			dictL := int(dict.betterTableLong[hashL])
			dictS := int(dict.betterTableShort[hashS])
			lTable[hashL] = uint32(s)
			sTable[hashS] = uint32(s)
			valLong := load64(src, candidateL)
			valShort := load64(src, candidateS)
			// If long matches at least 8 bytes, use that.
			if s != 0 {
				if cv == valLong {
					goto emitMatch
				}
				if cv == valShort {
					candidateL = candidateS
					goto emitMatch
				}
			}
			// Check dict repeat: the repeat distance reaches back into the
			// dictionary bytes.
			if repeat >= s+4 {
				candidate := len(dict.dict) - repeat + s
				if candidate > 0 && uint32(cv) == load32(dict.dict, candidate) {
					// Extend back
					base := s
					for i := candidate; base > nextEmit && i > 0 && dict.dict[i-1] == src[base-1]; {
						i--
						base--
					}
					d += emitLiteral(dst[d:], src[nextEmit:base])
					if debug && nextEmit != base {
						fmt.Println("emitted ", base-nextEmit, "literals")
					}
					// Extend the match forward inside the dictionary.
					s += 4
					candidate += 4
					for candidate < len(dict.dict)-8 && s <= len(src)-8 {
						if diff := load64(src, s) ^ load64(dict.dict, candidate); diff != 0 {
							s += bits.TrailingZeros64(diff) >> 3
							break
						}
						s += 8
						candidate += 8
					}
					d += emitRepeat(dst[d:], repeat, s-base)
					if debug {
						fmt.Println("emitted dict repeat length", s-base, "offset:", repeat, "s:", s)
					}
					nextEmit = s
					if s >= sLimit {
						break searchDict
					}
					// Index in-between positions from both ends.
					index0 := base + 1
					index1 := s - 2
					cv = load64(src, s)
					for index0 < index1 {
						cv0 := load64(src, index0)
						cv1 := load64(src, index1)
						lTable[hash7(cv0, lTableBits)] = uint32(index0)
						sTable[hash4(cv0>>8, sTableBits)] = uint32(index0 + 1)
						lTable[hash7(cv1, lTableBits)] = uint32(index1)
						sTable[hash4(cv1>>8, sTableBits)] = uint32(index1 + 1)
						index0 += 2
						index1 -= 2
					}
					continue
				}
			}
			// Don't try to find match at s==0
			if s == 0 {
				cv = load64(src, nextS)
				s = nextS
				continue
			}
			// Long likely matches 7, so take that.
			if uint32(cv) == uint32(valLong) {
				goto emitMatch
			}
			// Long dict...
			if uint32(cv) == load32(dict.dict, dictL) {
				candidateL = dictL
				goto emitDict
			}
			// Check our short candidate
			if uint32(cv) == uint32(valShort) {
				// Try a long candidate at s+1
				hashL = hash7(cv>>8, lTableBits)
				candidateL = int(lTable[hashL])
				lTable[hashL] = uint32(s + 1)
				if uint32(cv>>8) == load32(src, candidateL) {
					s++
					goto emitMatch
				}
				// Use our short candidate.
				candidateL = candidateS
				goto emitMatch
			}
			// Short dict candidate; prefer a long candidate at s+1 if it hits.
			if uint32(cv) == load32(dict.dict, dictS) {
				// Try a long candidate at s+1
				hashL = hash7(cv>>8, lTableBits)
				candidateL = int(lTable[hashL])
				lTable[hashL] = uint32(s + 1)
				if uint32(cv>>8) == load32(src, candidateL) {
					s++
					goto emitMatch
				}
				candidateL = dictS
				goto emitDict
			}
			cv = load64(src, nextS)
			s = nextS
		}
		// emitDict: the match target lives in dict.dict, not src.
	emitDict:
		{
			if debug {
				if load32(dict.dict, candidateL) != load32(src, s) {
					panic("dict emit mismatch")
				}
			}
			// Extend backwards.
			// The top bytes will be rechecked to get the full match.
			for candidateL > 0 && s > nextEmit && dict.dict[candidateL-1] == src[s-1] {
				candidateL--
				s--
			}
			// Bail if we exceed the maximum size.
			if d+(s-nextEmit) > dstLimit {
				return 0
			}
			// A 4-byte match has been found. We'll later see if more than 4 bytes
			// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
			// them as literal bytes.
			d += emitLiteral(dst[d:], src[nextEmit:s])
			if debug && nextEmit != s {
				fmt.Println("emitted ", s-nextEmit, "literals")
			}
			{
				// Invariant: we have a 4-byte match at s, and no need to emit any
				// literal bytes prior to s.
				base := s
				// Offset counts from s through the end of the dictionary.
				offset := s + (len(dict.dict)) - candidateL
				// Extend the 4-byte match as long as possible.
				s += 4
				candidateL += 4
				for s <= len(src)-8 && len(dict.dict)-candidateL >= 8 {
					if diff := load64(src, s) ^ load64(dict.dict, candidateL); diff != 0 {
						s += bits.TrailingZeros64(diff) >> 3
						break
					}
					s += 8
					candidateL += 8
				}
				if repeat == offset {
					if debug {
						fmt.Println("emitted dict repeat, length", s-base, "offset:", offset, "s:", s, "dict offset:", candidateL)
					}
					d += emitRepeat(dst[d:], offset, s-base)
				} else {
					if debug {
						fmt.Println("emitted dict copy, length", s-base, "offset:", offset, "s:", s, "dict offset:", candidateL)
					}
					// Matches longer than 64 are split.
					if s <= sLimit || s-base < 8 {
						d += emitCopy(dst[d:], offset, s-base)
					} else {
						// Split to ensure we don't start a copy within next block.
						d += emitCopy(dst[d:], offset, 4)
						d += emitRepeat(dst[d:], offset, s-base-4)
					}
					repeat = offset
				}
				// Disabled validation block, kept for debugging.
				if false {
					// Validate match.
					if s <= candidateL {
						panic("s <= candidate")
					}
					a := src[base:s]
					b := dict.dict[base-repeat : base-repeat+(s-base)]
					if !bytes.Equal(a, b) {
						panic("mismatch")
					}
				}
				nextEmit = s
				if s >= sLimit {
					break searchDict
				}
				if d > dstLimit {
					// Do we have space for more, if not bail.
					return 0
				}
				// Index short & long
				index0 := base + 1
				index1 := s - 2
				cv0 := load64(src, index0)
				cv1 := load64(src, index1)
				lTable[hash7(cv0, lTableBits)] = uint32(index0)
				sTable[hash4(cv0>>8, sTableBits)] = uint32(index0 + 1)
				lTable[hash7(cv1, lTableBits)] = uint32(index1)
				sTable[hash4(cv1>>8, sTableBits)] = uint32(index1 + 1)
				index0 += 1
				index1 -= 1
				cv = load64(src, s)
				// index every second long in between.
				for index0 < index1 {
					lTable[hash7(load64(src, index0), lTableBits)] = uint32(index0)
					lTable[hash7(load64(src, index1), lTableBits)] = uint32(index1)
					index0 += 2
					index1 -= 2
				}
			}
			continue
		}
		// emitMatch: the match target lives in src.
	emitMatch:
		// Extend backwards
		for candidateL > 0 && s > nextEmit && src[candidateL-1] == src[s-1] {
			candidateL--
			s--
		}
		// Bail if we exceed the maximum size.
		if d+(s-nextEmit) > dstLimit {
			return 0
		}
		base := s
		offset := base - candidateL
		// Extend the 4-byte match as long as possible.
		s += 4
		candidateL += 4
		for s < len(src) {
			if len(src)-s < 8 {
				// Tail: byte-by-byte compare.
				if src[s] == src[candidateL] {
					s++
					candidateL++
					continue
				}
				break
			}
			if diff := load64(src, s) ^ load64(src, candidateL); diff != 0 {
				s += bits.TrailingZeros64(diff) >> 3
				break
			}
			s += 8
			candidateL += 8
		}
		// Skip matches that would encode no better than literals.
		if offset > 65535 && s-base <= 5 && repeat != offset {
			// Bail if the match is equal or worse to the encoding.
			s = nextS + 1
			if s >= sLimit {
				goto emitRemainder
			}
			cv = load64(src, s)
			continue
		}
		d += emitLiteral(dst[d:], src[nextEmit:base])
		if debug && nextEmit != s {
			fmt.Println("emitted ", s-nextEmit, "literals")
		}
		if repeat == offset {
			if debug {
				fmt.Println("emitted match repeat, length", s-base, "offset:", offset, "s:", s)
			}
			d += emitRepeat(dst[d:], offset, s-base)
		} else {
			if debug {
				fmt.Println("emitted match copy, length", s-base, "offset:", offset, "s:", s)
			}
			d += emitCopy(dst[d:], offset, s-base)
			repeat = offset
		}
		nextEmit = s
		if s >= sLimit {
			goto emitRemainder
		}
		if d > dstLimit {
			// Do we have space for more, if not bail.
			return 0
		}
		// Index short & long
		index0 := base + 1
		index1 := s - 2
		cv0 := load64(src, index0)
		cv1 := load64(src, index1)
		lTable[hash7(cv0, lTableBits)] = uint32(index0)
		sTable[hash4(cv0>>8, sTableBits)] = uint32(index0 + 1)
		lTable[hash7(cv1, lTableBits)] = uint32(index1)
		sTable[hash4(cv1>>8, sTableBits)] = uint32(index1 + 1)
		index0 += 1
		index1 -= 1
		cv = load64(src, s)
		// Index large values sparsely in between.
		// We do two starting from different offsets for speed.
		index2 := (index0 + index1 + 1) >> 1
		for index2 < index1 {
			lTable[hash7(load64(src, index0), lTableBits)] = uint32(index0)
			lTable[hash7(load64(src, index2), lTableBits)] = uint32(index2)
			index0 += 2
			index2 += 2
		}
	}
	// Search without dict:
	// Drop a repeat distance that would reach back into the (now
	// unavailable) dictionary range.
	if repeat > s {
		repeat = 0
	}
	// No more dict
	sLimit = len(src) - inputMargin
	if s >= sLimit {
		goto emitRemainder
	}
	cv = load64(src, s)
	if debug {
		fmt.Println("now", s, "->", sLimit, "out:", d, "left:", len(src)-s, "nextemit:", nextEmit, "dstLimit:", dstLimit, "s:", s)
	}
	// Main (dict-free) loop; mirrors encodeBlockBetter.
	for {
		candidateL := 0
		nextS := 0
		for {
			// Next src position to check
			nextS = s + (s-nextEmit)>>7 + 1
			if nextS > sLimit {
				goto emitRemainder
			}
			hashL := hash7(cv, lTableBits)
			hashS := hash4(cv, sTableBits)
			candidateL = int(lTable[hashL])
			candidateS := int(sTable[hashS])
			lTable[hashL] = uint32(s)
			sTable[hashS] = uint32(s)
			valLong := load64(src, candidateL)
			valShort := load64(src, candidateS)
			// If long matches at least 8 bytes, use that.
			if cv == valLong {
				break
			}
			if cv == valShort {
				candidateL = candidateS
				break
			}
			// Check repeat at offset checkRep.
			const checkRep = 1
			// Minimum length of a repeat. Tested with various values.
			// While 4-5 offers improvements in some, 6 reduces
			// regressions significantly.
			const wantRepeatBytes = 6
			const repeatMask = ((1 << (wantRepeatBytes * 8)) - 1) << (8 * checkRep)
			// NOTE(review): this repeat check is disabled ("false &&") —
			// the branch below is dead code kept for reference.
			if false && repeat > 0 && cv&repeatMask == load64(src, s-repeat)&repeatMask {
				base := s + checkRep
				// Extend back
				for i := base - repeat; base > nextEmit && i > 0 && src[i-1] == src[base-1]; {
					i--
					base--
				}
				d += emitLiteral(dst[d:], src[nextEmit:base])
				// Extend forward
				candidate := s - repeat + wantRepeatBytes + checkRep
				s += wantRepeatBytes + checkRep
				for s < len(src) {
					if len(src)-s < 8 {
						if src[s] == src[candidate] {
							s++
							candidate++
							continue
						}
						break
					}
					if diff := load64(src, s) ^ load64(src, candidate); diff != 0 {
						s += bits.TrailingZeros64(diff) >> 3
						break
					}
					s += 8
					candidate += 8
				}
				// same as `add := emitCopy(dst[d:], repeat, s-base)` but skips storing offset.
				d += emitRepeat(dst[d:], repeat, s-base)
				nextEmit = s
				if s >= sLimit {
					goto emitRemainder
				}
				// Index in-between
				index0 := base + 1
				index1 := s - 2
				for index0 < index1 {
					cv0 := load64(src, index0)
					cv1 := load64(src, index1)
					lTable[hash7(cv0, lTableBits)] = uint32(index0)
					sTable[hash4(cv0>>8, sTableBits)] = uint32(index0 + 1)
					lTable[hash7(cv1, lTableBits)] = uint32(index1)
					sTable[hash4(cv1>>8, sTableBits)] = uint32(index1 + 1)
					index0 += 2
					index1 -= 2
				}
				cv = load64(src, s)
				continue
			}
			// Long likely matches 7, so take that.
			if uint32(cv) == uint32(valLong) {
				break
			}
			// Check our short candidate
			if uint32(cv) == uint32(valShort) {
				// Try a long candidate at s+1
				hashL = hash7(cv>>8, lTableBits)
				candidateL = int(lTable[hashL])
				lTable[hashL] = uint32(s + 1)
				if uint32(cv>>8) == load32(src, candidateL) {
					s++
					break
				}
				// Use our short candidate.
				candidateL = candidateS
				break
			}
			cv = load64(src, nextS)
			s = nextS
		}
		// Extend backwards
		for candidateL > 0 && s > nextEmit && src[candidateL-1] == src[s-1] {
			candidateL--
			s--
		}
		// Bail if we exceed the maximum size.
		if d+(s-nextEmit) > dstLimit {
			return 0
		}
		base := s
		offset := base - candidateL
		// Extend the 4-byte match as long as possible.
		s += 4
		candidateL += 4
		for s < len(src) {
			if len(src)-s < 8 {
				if src[s] == src[candidateL] {
					s++
					candidateL++
					continue
				}
				break
			}
			if diff := load64(src, s) ^ load64(src, candidateL); diff != 0 {
				s += bits.TrailingZeros64(diff) >> 3
				break
			}
			s += 8
			candidateL += 8
		}
		if offset > 65535 && s-base <= 5 && repeat != offset {
			// Bail if the match is equal or worse to the encoding.
			s = nextS + 1
			if s >= sLimit {
				goto emitRemainder
			}
			cv = load64(src, s)
			continue
		}
		d += emitLiteral(dst[d:], src[nextEmit:base])
		if repeat == offset {
			d += emitRepeat(dst[d:], offset, s-base)
		} else {
			d += emitCopy(dst[d:], offset, s-base)
			repeat = offset
		}
		nextEmit = s
		if s >= sLimit {
			goto emitRemainder
		}
		if d > dstLimit {
			// Do we have space for more, if not bail.
			return 0
		}
		// Index short & long
		index0 := base + 1
		index1 := s - 2
		cv0 := load64(src, index0)
		cv1 := load64(src, index1)
		lTable[hash7(cv0, lTableBits)] = uint32(index0)
		sTable[hash4(cv0>>8, sTableBits)] = uint32(index0 + 1)
		lTable[hash7(cv1, lTableBits)] = uint32(index1)
		sTable[hash4(cv1>>8, sTableBits)] = uint32(index1 + 1)
		index0 += 1
		index1 -= 1
		cv = load64(src, s)
		// Index large values sparsely in between.
		// We do two starting from different offsets for speed.
		index2 := (index0 + index1 + 1) >> 1
		for index2 < index1 {
			lTable[hash7(load64(src, index0), lTableBits)] = uint32(index0)
			lTable[hash7(load64(src, index2), lTableBits)] = uint32(index2)
			index0 += 2
			index2 += 2
		}
	}
emitRemainder:
	// Flush any trailing bytes as literals.
	if nextEmit < len(src) {
		// Bail if we exceed the maximum size.
		if d+len(src)-nextEmit > dstLimit {
			return 0
		}
		d += emitLiteral(dst[d:], src[nextEmit:])
	}
	return d
}
// Copyright (c) 2022+ Klaus Post. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package s2
import (
"bytes"
"encoding/binary"
"encoding/json"
"fmt"
"io"
"sort"
)
const (
	// S2IndexHeader is the magic string that opens a serialized index
	// (immediately after the skippable-chunk header).
	S2IndexHeader = "s2idx\x00"
	// S2IndexTrailer is the magic string that closes a serialized index
	// (the header reversed).
	S2IndexTrailer = "\x00xdi2s"
	// maxIndexEntries is the maximum number of offset pairs kept in an index.
	maxIndexEntries = 1 << 16
	// If distance is less than this, we do not add the entry.
	minIndexDist = 1 << 20
)
// Index represents an S2/Snappy index.
type Index struct {
	TotalUncompressed int64 // Total Uncompressed size if known. Will be -1 if unknown.
	TotalCompressed   int64 // Total Compressed size if known. Will be -1 if unknown.
	// info holds the recorded (compressed, uncompressed) offset pairs,
	// in increasing order (see add).
	info []struct {
		compressedOffset   int64
		uncompressedOffset int64
	}
	// estBlockUncomp is the estimated uncompressed size per block,
	// seeded by reset/IndexStream and grown by reduce.
	estBlockUncomp int64
}
// reset prepares the index for reuse: totals are marked unknown (-1),
// recorded entries are discarded (capacity is kept), and the per-block
// uncompressed size estimate is seeded from maxBlock.
func (i *Index) reset(maxBlock int) {
	i.TotalUncompressed = -1
	i.TotalCompressed = -1
	i.estBlockUncomp = int64(maxBlock)
	if len(i.info) != 0 {
		i.info = i.info[:0]
	}
}
// allocInfos replaces i.info with an empty slice that has capacity for
// count entries. It panics if count exceeds maxIndexEntries.
func (i *Index) allocInfos(count int) {
	if count > maxIndexEntries {
		panic("n > maxIndexEntries")
	}
	entries := make([]struct {
		compressedOffset   int64
		uncompressedOffset int64
	}, 0, count)
	i.info = entries
}
// add records an uncompressed/compressed offset pair.
// Entries must be sent in increasing order.
// If the uncompressed offset equals the previous entry's, the previous
// entry's compressed offset is updated instead of adding a new entry.
// Pairs less than minIndexDist (uncompressed) past the previous entry
// are silently skipped. A nil receiver is a no-op.
func (i *Index) add(compressedOffset, uncompressedOffset int64) error {
	if i == nil {
		return nil
	}
	lastIdx := len(i.info) - 1
	if lastIdx >= 0 {
		latest := i.info[lastIdx]
		if latest.uncompressedOffset == uncompressedOffset {
			// Uncompressed didn't change, don't add entry,
			// but update start index.
			latest.compressedOffset = compressedOffset
			i.info[lastIdx] = latest
			return nil
		}
		if latest.uncompressedOffset > uncompressedOffset {
			return fmt.Errorf("internal error: Earlier uncompressed received (%d > %d)", latest.uncompressedOffset, uncompressedOffset)
		}
		if latest.compressedOffset > compressedOffset {
			// Bug fix: report the compressed offsets that actually failed
			// the check (previously this printed the uncompressed pair).
			return fmt.Errorf("internal error: Earlier compressed received (%d > %d)", latest.compressedOffset, compressedOffset)
		}
		if latest.uncompressedOffset+minIndexDist > uncompressedOffset {
			// Only add entry if distance is large enough.
			return nil
		}
	}
	i.info = append(i.info, struct {
		compressedOffset   int64
		uncompressedOffset int64
	}{compressedOffset: compressedOffset, uncompressedOffset: uncompressedOffset})
	return nil
}
// Find returns the entry at or before the wanted (uncompressed) offset.
// A zero or positive offset counts from the start of the file.
// A negative offset counts back from the end (-1 is the last byte);
// this requires the uncompressed size to be known.
// io.ErrUnexpectedEOF is returned for offsets outside the file.
func (i *Index) Find(offset int64) (compressedOff, uncompressedOff int64, err error) {
	if i.TotalUncompressed < 0 {
		return 0, 0, ErrCorrupt
	}
	if offset < 0 {
		// Interpret as distance from the end.
		offset += i.TotalUncompressed
		if offset < 0 {
			return 0, 0, io.ErrUnexpectedEOF
		}
	}
	if offset > i.TotalUncompressed {
		return 0, 0, io.ErrUnexpectedEOF
	}
	if len(i.info) > 200 {
		// Large index: binary search for the first entry past offset,
		// then return its predecessor.
		n := sort.Search(len(i.info), func(n int) bool {
			return i.info[n].uncompressedOffset > offset
		})
		if n == 0 {
			n = 1
		}
		found := i.info[n-1]
		return found.compressedOffset, found.uncompressedOffset, nil
	}
	// Small index: a linear scan is cheaper than the search overhead.
	for idx := range i.info {
		if i.info[idx].uncompressedOffset > offset {
			break
		}
		compressedOff = i.info[idx].compressedOffset
		uncompressedOff = i.info[idx].uncompressedOffset
	}
	return compressedOff, uncompressedOff, nil
}
// reduce thins the index so it stays below maxIndexEntries and so each
// remaining entry spans at least minIndexDist uncompressed bytes.
func (i *Index) reduce() {
	if len(i.info) < maxIndexEntries && i.estBlockUncomp >= minIndexDist {
		return
	}
	// Strategy: keep one entry, then drop the next removeN, and repeat.
	removeN := (len(i.info) + 1) / maxIndexEntries
	// Grow removeN until each kept block covers at least 1MB, but never
	// reduce below 1000 entries.
	for i.estBlockUncomp*(int64(removeN)+1) < minIndexDist && len(i.info)/(removeN+1) > 1000 {
		removeN++
	}
	// Compact in place with a stride of removeN+1.
	entries := i.info
	kept := 0
	for idx := 0; idx < len(entries); idx += removeN + 1 {
		entries[kept] = entries[idx]
		kept++
	}
	i.info = entries[:kept]
	// Each kept entry now represents removeN+1 original blocks.
	i.estBlockUncomp += i.estBlockUncomp * int64(removeN)
}
// appendTo serializes the index and appends it to b, returning the result.
// Layout: skippable chunk header (ChunkTypeIndex + 24-bit length),
// S2IndexHeader, varint totals and entry count, optional uncompressed
// offset deltas, compressed offset deltas, a fixed 4-byte total size,
// and S2IndexTrailer. Offsets are stored as deltas from a running
// prediction to keep varints small.
func (i *Index) appendTo(b []byte, uncompTotal, compTotal int64) []byte {
	i.reduce()
	var tmp [binary.MaxVarintLen64]byte
	initSize := len(b)
	// We make the start a skippable header+size.
	// Length bytes are patched in at the end once the size is known.
	b = append(b, ChunkTypeIndex, 0, 0, 0)
	b = append(b, []byte(S2IndexHeader)...)
	// Total Uncompressed size
	n := binary.PutVarint(tmp[:], uncompTotal)
	b = append(b, tmp[:n]...)
	// Total Compressed size
	n = binary.PutVarint(tmp[:], compTotal)
	b = append(b, tmp[:n]...)
	// Put EstBlockUncomp size
	n = binary.PutVarint(tmp[:], i.estBlockUncomp)
	b = append(b, tmp[:n]...)
	// Put length
	n = binary.PutVarint(tmp[:], int64(len(i.info)))
	b = append(b, tmp[:n]...)
	// Check if we should add uncompressed offsets: they are omitted when
	// every entry advances by exactly estBlockUncomp (the common case).
	var hasUncompressed byte
	for idx, info := range i.info {
		if idx == 0 {
			if info.uncompressedOffset != 0 {
				hasUncompressed = 1
				break
			}
			continue
		}
		if info.uncompressedOffset != i.info[idx-1].uncompressedOffset+i.estBlockUncomp {
			hasUncompressed = 1
			break
		}
	}
	b = append(b, hasUncompressed)
	// Add each entry, as a delta from the predicted offset.
	if hasUncompressed == 1 {
		for idx, info := range i.info {
			uOff := info.uncompressedOffset
			if idx > 0 {
				prev := i.info[idx-1]
				uOff -= prev.uncompressedOffset + (i.estBlockUncomp)
			}
			n = binary.PutVarint(tmp[:], uOff)
			b = append(b, tmp[:n]...)
		}
	}
	// Initial compressed size estimate.
	cPredict := i.estBlockUncomp / 2
	for idx, info := range i.info {
		cOff := info.compressedOffset
		if idx > 0 {
			prev := i.info[idx-1]
			cOff -= prev.compressedOffset + cPredict
			// Update compressed size prediction, with half the error.
			cPredict += cOff / 2
		}
		n = binary.PutVarint(tmp[:], cOff)
		b = append(b, tmp[:n]...)
	}
	// Add Total Size.
	// Stored as fixed size for easier reading.
	// The value includes the 4 size bytes and the trailer itself.
	binary.LittleEndian.PutUint32(tmp[:], uint32(len(b)-initSize+4+len(S2IndexTrailer)))
	b = append(b, tmp[:4]...)
	// Trailer
	b = append(b, []byte(S2IndexTrailer)...)
	// Update size in the skippable chunk header written above.
	chunkLen := len(b) - initSize - skippableFrameHeader
	b[initSize+1] = uint8(chunkLen >> 0)
	b[initSize+2] = uint8(chunkLen >> 8)
	b[initSize+3] = uint8(chunkLen >> 16)
	//fmt.Printf("chunklen: 0x%x Uncomp:%d, Comp:%d\n", chunkLen, uncompTotal, compTotal)
	return b
}
// Load a binary index.
// A zero value Index can be used or a previous one can be reused.
// It returns the remaining bytes after the index, or an error
// (io.ErrUnexpectedEOF for truncation, ErrCorrupt/ErrUnsupported for
// invalid content). The format is the one produced by appendTo.
func (i *Index) Load(b []byte) ([]byte, error) {
	if len(b) <= 4+len(S2IndexHeader)+len(S2IndexTrailer) {
		return b, io.ErrUnexpectedEOF
	}
	if b[0] != ChunkTypeIndex {
		return b, ErrCorrupt
	}
	// 24-bit little-endian chunk length from the skippable header.
	chunkLen := int(b[1]) | int(b[2])<<8 | int(b[3])<<16
	b = b[4:]
	// Validate we have enough...
	if len(b) < chunkLen {
		return b, io.ErrUnexpectedEOF
	}
	if !bytes.Equal(b[:len(S2IndexHeader)], []byte(S2IndexHeader)) {
		return b, ErrUnsupported
	}
	b = b[len(S2IndexHeader):]
	// Total Uncompressed (must be non-negative).
	if v, n := binary.Varint(b); n <= 0 || v < 0 {
		return b, ErrCorrupt
	} else {
		i.TotalUncompressed = v
		b = b[n:]
	}
	// Total Compressed (may be -1 for unknown).
	if v, n := binary.Varint(b); n <= 0 {
		return b, ErrCorrupt
	} else {
		i.TotalCompressed = v
		b = b[n:]
	}
	// Read EstBlockUncomp
	if v, n := binary.Varint(b); n <= 0 {
		return b, ErrCorrupt
	} else {
		if v < 0 {
			return b, ErrCorrupt
		}
		i.estBlockUncomp = v
		b = b[n:]
	}
	// Entry count, bounded by maxIndexEntries.
	var entries int
	if v, n := binary.Varint(b); n <= 0 {
		return b, ErrCorrupt
	} else {
		if v < 0 || v > maxIndexEntries {
			return b, ErrCorrupt
		}
		entries = int(v)
		b = b[n:]
	}
	if cap(i.info) < entries {
		i.allocInfos(entries)
	}
	i.info = i.info[:entries]
	if len(b) < 1 {
		return b, io.ErrUnexpectedEOF
	}
	// hasUncompressed must be exactly 0 or 1.
	hasUncompressed := b[0]
	b = b[1:]
	if hasUncompressed&1 != hasUncompressed {
		return b, ErrCorrupt
	}
	// Add each uncompressed entry. When hasUncompressed is 0 the offsets
	// follow the estBlockUncomp progression with zero deltas.
	for idx := range i.info {
		var uOff int64
		if hasUncompressed != 0 {
			// Load delta
			if v, n := binary.Varint(b); n <= 0 {
				return b, ErrCorrupt
			} else {
				uOff = v
				b = b[n:]
			}
		}
		if idx > 0 {
			// Reconstruct the absolute offset; must strictly increase.
			prev := i.info[idx-1].uncompressedOffset
			uOff += prev + (i.estBlockUncomp)
			if uOff <= prev {
				return b, ErrCorrupt
			}
		}
		if uOff < 0 {
			return b, ErrCorrupt
		}
		i.info[idx].uncompressedOffset = uOff
	}
	// Initial compressed size estimate.
	cPredict := i.estBlockUncomp / 2
	// Add each compressed entry (mirrors the prediction in appendTo).
	for idx := range i.info {
		var cOff int64
		if v, n := binary.Varint(b); n <= 0 {
			return b, ErrCorrupt
		} else {
			cOff = v
			b = b[n:]
		}
		if idx > 0 {
			// Update compressed size prediction, with half the error.
			cPredictNew := cPredict + cOff/2
			prev := i.info[idx-1].compressedOffset
			cOff += prev + cPredict
			if cOff <= prev {
				return b, ErrCorrupt
			}
			cPredict = cPredictNew
		}
		if cOff < 0 {
			return b, ErrCorrupt
		}
		i.info[idx].compressedOffset = cOff
	}
	if len(b) < 4+len(S2IndexTrailer) {
		return b, io.ErrUnexpectedEOF
	}
	// Skip size...
	b = b[4:]
	// Check trailer...
	if !bytes.Equal(b[:len(S2IndexTrailer)], []byte(S2IndexTrailer)) {
		return b, ErrCorrupt
	}
	return b[len(S2IndexTrailer):], nil
}
// LoadStream will load an index from the end of the supplied stream.
// ErrUnsupported will be returned if the signature cannot be found.
// ErrCorrupt will be returned if unexpected values are found.
// io.ErrUnexpectedEOF is returned if there are too few bytes.
// IO errors are returned as-is.
func (i *Index) LoadStream(rs io.ReadSeeker) error {
	// The stream ends with a 4-byte total size followed by the 6-byte
	// trailer; read those last 10 bytes first.
	if _, err := rs.Seek(-10, io.SeekEnd); err != nil {
		return err
	}
	var tail [10]byte
	if _, err := io.ReadFull(rs, tail[:]); err != nil {
		return err
	}
	// Verify the trailer signature.
	if !bytes.Equal(tail[4:4+len(S2IndexTrailer)], []byte(S2IndexTrailer)) {
		return ErrUnsupported
	}
	// Total serialized size, including the size bytes and trailer.
	sz := binary.LittleEndian.Uint32(tail[:4])
	if sz > maxChunkSize+skippableFrameHeader {
		return ErrCorrupt
	}
	// Seek back to the start of the index and read it whole.
	if _, err := rs.Seek(-int64(sz), io.SeekEnd); err != nil {
		return err
	}
	buf := make([]byte, sz)
	if _, err := io.ReadFull(rs, buf); err != nil {
		return err
	}
	_, err := i.Load(buf)
	return err
}
// IndexStream will return an index for a stream.
// The stream structure will be checked, but
// data within blocks is not verified.
// The returned index can either be appended to the end of the stream
// or stored separately.
func IndexStream(r io.Reader) ([]byte, error) {
	var i Index
	var buf [maxChunkSize]byte
	var readHeader bool
	for {
		// Read the 4-byte chunk header (type + 24-bit length).
		_, err := io.ReadFull(r, buf[:4])
		if err != nil {
			if err == io.EOF {
				// Clean end of stream: serialize what we collected.
				return i.appendTo(nil, i.TotalUncompressed, i.TotalCompressed), nil
			}
			return nil, err
		}
		// Start of this chunk.
		startChunk := i.TotalCompressed
		i.TotalCompressed += 4
		chunkType := buf[0]
		// The very first chunk must be the stream identifier.
		if !readHeader {
			if chunkType != chunkTypeStreamIdentifier {
				return nil, ErrCorrupt
			}
			readHeader = true
		}
		chunkLen := int(buf[1]) | int(buf[2])<<8 | int(buf[3])<<16
		if chunkLen < checksumSize {
			return nil, ErrCorrupt
		}
		i.TotalCompressed += int64(chunkLen)
		_, err = io.ReadFull(r, buf[:chunkLen])
		if err != nil {
			return nil, io.ErrUnexpectedEOF
		}
		// The chunk types are specified at
		// https://github.com/google/snappy/blob/master/framing_format.txt
		switch chunkType {
		case chunkTypeCompressedData:
			// Section 4.2. Compressed data (chunk type 0x00).
			// Skip checksum.
			dLen, err := DecodedLen(buf[checksumSize:])
			if err != nil {
				return nil, err
			}
			if dLen > maxBlockSize {
				return nil, ErrCorrupt
			}
			if i.estBlockUncomp == 0 {
				// Use first block for estimate...
				i.estBlockUncomp = int64(dLen)
			}
			err = i.add(startChunk, i.TotalUncompressed)
			if err != nil {
				return nil, err
			}
			i.TotalUncompressed += int64(dLen)
			continue
		case chunkTypeUncompressedData:
			// Section 4.3. Uncompressed data (chunk type 0x01).
			n2 := chunkLen - checksumSize
			if n2 > maxBlockSize {
				return nil, ErrCorrupt
			}
			if i.estBlockUncomp == 0 {
				// Use first block for estimate...
				i.estBlockUncomp = int64(n2)
			}
			err = i.add(startChunk, i.TotalUncompressed)
			if err != nil {
				return nil, err
			}
			i.TotalUncompressed += int64(n2)
			continue
		case chunkTypeStreamIdentifier:
			// Section 4.1. Stream identifier (chunk type 0xff).
			if chunkLen != len(magicBody) {
				return nil, ErrCorrupt
			}
			// Accept both the S2 and the Snappy magic body.
			if string(buf[:len(magicBody)]) != magicBody {
				if string(buf[:len(magicBody)]) != magicBodySnappy {
					return nil, ErrCorrupt
				}
			}
			continue
		}
		if chunkType <= 0x7f {
			// Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f).
			return nil, ErrUnsupported
		}
		if chunkLen > maxChunkSize {
			return nil, ErrUnsupported
		}
		// Section 4.4 Padding (chunk type 0xfe).
		// Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd).
		// Already consumed above; nothing to record.
	}
}
// JSON returns the index as indented JSON text.
func (i *Index) JSON() []byte {
	type offset struct {
		CompressedOffset   int64 `json:"compressed"`
		UncompressedOffset int64 `json:"uncompressed"`
	}
	// Keep offsets nil when empty so it serializes the same as before.
	var offsets []offset
	for _, entry := range i.info {
		offsets = append(offsets, offset{
			CompressedOffset:   entry.compressedOffset,
			UncompressedOffset: entry.uncompressedOffset,
		})
	}
	doc := struct {
		TotalUncompressed int64    `json:"total_uncompressed"` // Total Uncompressed size if known. Will be -1 if unknown.
		TotalCompressed   int64    `json:"total_compressed"`   // Total Compressed size if known. Will be -1 if unknown.
		Offsets           []offset `json:"offsets"`
		EstBlockUncomp    int64    `json:"est_block_uncompressed"`
	}{
		TotalUncompressed: i.TotalUncompressed,
		TotalCompressed:   i.TotalCompressed,
		Offsets:           offsets,
		EstBlockUncomp:    i.estBlockUncomp,
	}
	b, _ := json.MarshalIndent(doc, "", "  ")
	return b
}
// RemoveIndexHeaders will trim all headers and trailers from a given index.
// This is expected to save 20 bytes.
// These can be restored using RestoreIndexHeaders.
// This removes a layer of security, but is the most compact representation.
// Returns nil if headers contains errors.
// The returned slice references the provided slice.
func RemoveIndexHeaders(b []byte) []byte {
	// Minimum size: chunk header + magic header + magic trailer + size field.
	const save = 4 + len(S2IndexHeader) + len(S2IndexTrailer) + 4
	if len(b) <= save {
		return nil
	}
	if b[0] != ChunkTypeIndex {
		return nil
	}
	// 24-bit little-endian chunk length.
	chunkLen := int(b[1]) | int(b[2])<<8 | int(b[3])<<16
	b = b[4:]
	if len(b) < chunkLen {
		return nil
	}
	b = b[:chunkLen]
	// Strip the magic header...
	header := []byte(S2IndexHeader)
	if !bytes.Equal(b[:len(header)], header) {
		return nil
	}
	b = b[len(header):]
	// ...and the magic trailer.
	trailer := []byte(S2IndexTrailer)
	if !bytes.HasSuffix(b, trailer) {
		return nil
	}
	b = b[:len(b)-len(trailer)]
	// Finally drop the 4-byte total size field.
	if len(b) < 4 {
		return nil
	}
	return b[:len(b)-4]
}
// RestoreIndexHeaders will restore the headers removed by RemoveIndexHeaders.
// No error checking is performed on the input.
// If a 0 length slice is sent, it is returned without modification.
func RestoreIndexHeaders(in []byte) []byte {
	if len(in) == 0 {
		return in
	}
	header := []byte(S2IndexHeader)
	trailer := []byte(S2IndexTrailer)
	// Rebuild: chunk header, magic header, payload, size, magic trailer.
	out := make([]byte, 0, 4+len(header)+len(in)+len(trailer)+4)
	out = append(out, ChunkTypeIndex, 0, 0, 0)
	out = append(out, header...)
	out = append(out, in...)
	// Total size includes the 4 size bytes and the trailer itself.
	var sz [4]byte
	binary.LittleEndian.PutUint32(sz[:], uint32(len(out)+4+len(trailer)))
	out = append(out, sz[:]...)
	out = append(out, trailer...)
	// Patch the 24-bit chunk length into the skippable header.
	chunkLen := len(out) - skippableFrameHeader
	out[1] = uint8(chunkLen)
	out[2] = uint8(chunkLen >> 8)
	out[3] = uint8(chunkLen >> 16)
	return out
}
// Copyright (c) 2022 Klaus Post. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package s2
import (
"encoding/binary"
"errors"
"fmt"
)
// LZ4Converter provides conversion from LZ4 blocks as defined here:
// https://github.com/lz4/lz4/blob/dev/doc/lz4_Block_format.md
// The zero value is ready to use.
type LZ4Converter struct {
}

// ErrDstTooSmall is returned when provided destination is too small.
var ErrDstTooSmall = errors.New("s2: destination too small")
// ConvertBlock will convert an LZ4 block and append it as an S2
// block without block length to dst.
// The uncompressed size is returned as well.
// dst must have capacity to contain the entire compressed block.
// Returns ErrCorrupt for malformed LZ4 input and ErrDstTooSmall if dst
// runs out of room.
func (l *LZ4Converter) ConvertBlock(dst, src []byte) ([]byte, int, error) {
	if len(src) == 0 {
		return dst, 0, nil
	}
	const debug = false
	const inline = true
	const lz4MinMatch = 4
	// s reads src; d appends at the end of the existing dst content.
	s, d := 0, len(dst)
	dst = dst[:cap(dst)]
	// Fast path: assembly converter, when available and not debugging.
	if !debug && hasAmd64Asm {
		res, sz := cvtLZ4BlockAsm(dst[d:], src)
		if res < 0 {
			const (
				errCorrupt     = -1
				errDstTooSmall = -2
			)
			switch res {
			case errCorrupt:
				return nil, 0, ErrCorrupt
			case errDstTooSmall:
				return nil, 0, ErrDstTooSmall
			default:
				return nil, 0, fmt.Errorf("unexpected result: %d", res)
			}
		}
		if d+sz > len(dst) {
			return nil, 0, ErrDstTooSmall
		}
		return dst[:d+sz], res, nil
	}
	// Leave headroom so emitters never have to bounds-check each byte.
	dLimit := len(dst) - 10
	// lastOffset tracks the previous match offset so equal offsets can be
	// emitted as cheaper S2 repeats.
	var lastOffset uint16
	var uncompressed int
	if debug {
		fmt.Printf("convert block start: len(src): %d, len(dst):%d \n", len(src), len(dst))
	}
	for {
		if s >= len(src) {
			return dst[:d], 0, ErrCorrupt
		}
		// Read literal info: upper nibble = literal length,
		// lower nibble = match length - lz4MinMatch.
		token := src[s]
		ll := int(token >> 4)
		ml := int(lz4MinMatch + (token & 0xf))
		// If upper nibble is 15, literal length is extended
		// with 255-bytes until a byte < 255.
		if token >= 0xf0 {
			for {
				s++
				if s >= len(src) {
					if debug {
						fmt.Printf("error reading ll: s (%d) >= len(src) (%d)\n", s, len(src))
					}
					return dst[:d], 0, ErrCorrupt
				}
				val := src[s]
				ll += int(val)
				if val != 255 {
					break
				}
			}
		}
		// Skip past token
		if s+ll >= len(src) {
			if debug {
				fmt.Printf("error literals: s+ll (%d+%d) >= len(src) (%d)\n", s, ll, len(src))
			}
			return nil, 0, ErrCorrupt
		}
		s++
		if ll > 0 {
			if d+ll > dLimit {
				return nil, 0, ErrDstTooSmall
			}
			if debug {
				fmt.Printf("emit %d literals\n", ll)
			}
			d += emitLiteralGo(dst[d:], src[s:s+ll])
			s += ll
			uncompressed += ll
		}
		// Check if we are done: a final token with minimum match length and
		// no trailing offset ends the block.
		if s == len(src) && ml == lz4MinMatch {
			break
		}
		// 2 byte offset
		if s >= len(src)-2 {
			if debug {
				fmt.Printf("s (%d) >= len(src)-2 (%d)", s, len(src)-2)
			}
			return nil, 0, ErrCorrupt
		}
		offset := binary.LittleEndian.Uint16(src[s:])
		s += 2
		if offset == 0 {
			if debug {
				fmt.Printf("error: offset 0, ml: %d, len(src)-s: %d\n", ml, len(src)-s)
			}
			return nil, 0, ErrCorrupt
		}
		// Offsets cannot point before the uncompressed output produced so far.
		if int(offset) > uncompressed {
			if debug {
				fmt.Printf("error: offset (%d)> uncompressed (%d)\n", offset, uncompressed)
			}
			return nil, 0, ErrCorrupt
		}
		// Lower nibble 15 means the match length is extended with 255-bytes.
		if ml == lz4MinMatch+15 {
			for {
				if s >= len(src) {
					if debug {
						fmt.Printf("error reading ml: s (%d) >= len(src) (%d)\n", s, len(src))
					}
					return nil, 0, ErrCorrupt
				}
				val := src[s]
				s++
				ml += int(val)
				if val != 255 {
					if s >= len(src) {
						if debug {
							fmt.Printf("error reading ml: s (%d) >= len(src) (%d)\n", s, len(src))
						}
						return nil, 0, ErrCorrupt
					}
					break
				}
			}
		}
		if offset == lastOffset {
			// Same offset as last match: emit as S2 repeat.
			if debug {
				fmt.Printf("emit repeat, length: %d, offset: %d\n", ml, offset)
			}
			if !inline {
				d += emitRepeat16(dst[d:], offset, ml)
			} else {
				// Inlined emitRepeat16; the loop runs at most once and the
				// breaks select the shortest encoding for the length.
				length := ml
				dst := dst[d:]
				for len(dst) > 5 {
					// Repeat offset, make length cheaper
					length -= 4
					if length <= 4 {
						dst[0] = uint8(length)<<2 | tagCopy1
						dst[1] = 0
						d += 2
						break
					}
					if length < 8 && offset < 2048 {
						// Encode WITH offset
						dst[1] = uint8(offset)
						dst[0] = uint8(offset>>8)<<5 | uint8(length)<<2 | tagCopy1
						d += 2
						break
					}
					if length < (1<<8)+4 {
						length -= 4
						dst[2] = uint8(length)
						dst[1] = 0
						dst[0] = 5<<2 | tagCopy1
						d += 3
						break
					}
					if length < (1<<16)+(1<<8) {
						length -= 1 << 8
						dst[3] = uint8(length >> 8)
						dst[2] = uint8(length >> 0)
						dst[1] = 0
						dst[0] = 6<<2 | tagCopy1
						d += 4
						break
					}
					const maxRepeat = (1 << 24) - 1
					length -= 1 << 16
					left := 0
					if length > maxRepeat {
						left = length - maxRepeat + 4
						length = maxRepeat - 4
					}
					dst[4] = uint8(length >> 16)
					dst[3] = uint8(length >> 8)
					dst[2] = uint8(length >> 0)
					dst[1] = 0
					dst[0] = 7<<2 | tagCopy1
					if left > 0 {
						d += 5 + emitRepeat16(dst[5:], offset, left)
						break
					}
					d += 5
					break
				}
			}
		} else {
			if debug {
				fmt.Printf("emit copy, length: %d, offset: %d\n", ml, offset)
			}
			if !inline {
				d += emitCopy16(dst[d:], offset, ml)
			} else {
				// Inlined emitCopy16; long matches are emitted as one copy
				// plus repeats.
				length := ml
				dst := dst[d:]
				for len(dst) > 5 {
					// Offset no more than 2 bytes.
					if length > 64 {
						off := 3
						if offset < 2048 {
							// emit 8 bytes as tagCopy1, rest as repeats.
							dst[1] = uint8(offset)
							dst[0] = uint8(offset>>8)<<5 | uint8(8-4)<<2 | tagCopy1
							length -= 8
							off = 2
						} else {
							// Emit a length 60 copy, encoded as 3 bytes.
							// Emit remaining as repeat value (minimum 4 bytes).
							dst[2] = uint8(offset >> 8)
							dst[1] = uint8(offset)
							dst[0] = 59<<2 | tagCopy2
							length -= 60
						}
						// Emit remaining as repeats, at least 4 bytes remain.
						d += off + emitRepeat16(dst[off:], offset, length)
						break
					}
					if length >= 12 || offset >= 2048 {
						// Emit the remaining copy, encoded as 3 bytes.
						dst[2] = uint8(offset >> 8)
						dst[1] = uint8(offset)
						dst[0] = uint8(length-1)<<2 | tagCopy2
						d += 3
						break
					}
					// Emit the remaining copy, encoded as 2 bytes.
					dst[1] = uint8(offset)
					dst[0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1
					d += 2
					break
				}
			}
			lastOffset = offset
		}
		uncompressed += ml
		if d > dLimit {
			return nil, 0, ErrDstTooSmall
		}
	}
	return dst[:d], uncompressed, nil
}
// ConvertBlockSnappy will convert an LZ4 block and append it
// as a Snappy block without block length to dst.
// The uncompressed size is returned as well.
// dst must have capacity to contain the entire compressed block.
//
// Returns ErrCorrupt if the LZ4 input is malformed and ErrDstTooSmall
// if dst does not have room for the converted output.
func (l *LZ4Converter) ConvertBlockSnappy(dst, src []byte) ([]byte, int, error) {
	if len(src) == 0 {
		return dst, 0, nil
	}
	const debug = false
	const lz4MinMatch = 4
	// s is the read position in src; d is the write position in dst.
	s, d := 0, len(dst)
	dst = dst[:cap(dst)]
	// Use assembly when possible
	if !debug && hasAmd64Asm {
		res, sz := cvtLZ4BlockSnappyAsm(dst[d:], src)
		if res < 0 {
			// Negative results from the assembler map to the error codes below.
			const (
				errCorrupt     = -1
				errDstTooSmall = -2
			)
			switch res {
			case errCorrupt:
				return nil, 0, ErrCorrupt
			case errDstTooSmall:
				return nil, 0, ErrDstTooSmall
			default:
				return nil, 0, fmt.Errorf("unexpected result: %d", res)
			}
		}
		if d+sz > len(dst) {
			return nil, 0, ErrDstTooSmall
		}
		return dst[:d+sz], res, nil
	}
	// Keep headroom so a single emit below cannot run past dst.
	dLimit := len(dst) - 10
	var uncompressed int
	if debug {
		fmt.Printf("convert block start: len(src): %d, len(dst):%d \n", len(src), len(dst))
	}
	for {
		if s >= len(src) {
			return nil, 0, ErrCorrupt
		}
		// Read literal info.
		// Each LZ4 sequence starts with a token: the upper nibble is the
		// literal length, the lower nibble is match length minus lz4MinMatch.
		token := src[s]
		ll := int(token >> 4)
		ml := int(lz4MinMatch + (token & 0xf))

		// If upper nibble is 15, literal length is extended
		// by additional bytes; 255 means "another byte follows".
		if token >= 0xf0 {
			for {
				s++
				if s >= len(src) {
					if debug {
						fmt.Printf("error reading ll: s (%d) >= len(src) (%d)\n", s, len(src))
					}
					return nil, 0, ErrCorrupt
				}
				val := src[s]
				ll += int(val)
				if val != 255 {
					break
				}
			}
		}

		// Skip past token
		if s+ll >= len(src) {
			if debug {
				fmt.Printf("error literals: s+ll (%d+%d) >= len(src) (%d)\n", s, ll, len(src))
			}
			return nil, 0, ErrCorrupt
		}
		s++
		if ll > 0 {
			if d+ll > dLimit {
				return nil, 0, ErrDstTooSmall
			}
			if debug {
				fmt.Printf("emit %d literals\n", ll)
			}
			d += emitLiteralGo(dst[d:], src[s:s+ll])
			s += ll
			uncompressed += ll
		}

		// Check if we are done...
		// A final sequence carries only literals (match nibble == 0).
		if s == len(src) && ml == lz4MinMatch {
			break
		}
		// 2 byte offset
		if s >= len(src)-2 {
			if debug {
				fmt.Printf("s (%d) >= len(src)-2 (%d)", s, len(src)-2)
			}
			return nil, 0, ErrCorrupt
		}
		offset := binary.LittleEndian.Uint16(src[s:])
		s += 2
		if offset == 0 {
			if debug {
				fmt.Printf("error: offset 0, ml: %d, len(src)-s: %d\n", ml, len(src)-s)
			}
			return nil, 0, ErrCorrupt
		}
		// A match may not reference data before the start of the block.
		if int(offset) > uncompressed {
			if debug {
				fmt.Printf("error: offset (%d)> uncompressed (%d)\n", offset, uncompressed)
			}
			return nil, 0, ErrCorrupt
		}

		// A match nibble of 15 (ml == lz4MinMatch+15) means the match
		// length is extended by additional bytes, same scheme as literals.
		if ml == lz4MinMatch+15 {
			for {
				if s >= len(src) {
					if debug {
						fmt.Printf("error reading ml: s (%d) >= len(src) (%d)\n", s, len(src))
					}
					return nil, 0, ErrCorrupt
				}
				val := src[s]
				s++
				ml += int(val)
				if val != 255 {
					if s >= len(src) {
						if debug {
							fmt.Printf("error reading ml: s (%d) >= len(src) (%d)\n", s, len(src))
						}
						return nil, 0, ErrCorrupt
					}
					break
				}
			}
		}
		if debug {
			fmt.Printf("emit copy, length: %d, offset: %d\n", ml, offset)
		}
		length := ml
		// d += emitCopyNoRepeat(dst[d:], int(offset), ml)
		// Snappy output: emit plain copies only (no repeat codes),
		// splitting long matches into max 64-byte copies.
		for length > 0 {
			if d >= dLimit {
				return nil, 0, ErrDstTooSmall
			}
			// Offset no more than 2 bytes.
			if length > 64 {
				// Emit a length 64 copy, encoded as 3 bytes.
				dst[d+2] = uint8(offset >> 8)
				dst[d+1] = uint8(offset)
				dst[d+0] = 63<<2 | tagCopy2
				length -= 64
				d += 3
				continue
			}
			// length < 4 can only occur for the tail after splitting above;
			// those short tails must use the 3-byte encoding.
			if length >= 12 || offset >= 2048 || length < 4 {
				// Emit the remaining copy, encoded as 3 bytes.
				dst[d+2] = uint8(offset >> 8)
				dst[d+1] = uint8(offset)
				dst[d+0] = uint8(length-1)<<2 | tagCopy2
				d += 3
				break
			}
			// Emit the remaining copy, encoded as 2 bytes.
			dst[d+1] = uint8(offset)
			dst[d+0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1
			d += 2
			break
		}
		uncompressed += ml
		if d > dLimit {
			return nil, 0, ErrDstTooSmall
		}
	}
	return dst[:d], uncompressed, nil
}
// emitRepeat16 writes a repeat chunk for the given 16-bit offset and
// returns the number of bytes written.
// Length must be at least 4 and < 1<<24.
func emitRepeat16(dst []byte, offset uint16, length int) int {
	// Repeat lengths are stored with a bias of 4.
	remain := length - 4
	switch {
	case remain <= 4:
		// 2-byte repeat, length stored directly.
		dst[0] = uint8(remain)<<2 | tagCopy1
		dst[1] = 0
		return 2
	case remain < 8 && offset < 2048:
		// 2-byte encoding WITH the offset folded in.
		dst[1] = uint8(offset)
		dst[0] = uint8(offset>>8)<<5 | uint8(remain)<<2 | tagCopy1
		return 2
	case remain < (1<<8)+4:
		// 3-byte repeat: one extra length byte.
		dst[0] = 5<<2 | tagCopy1
		dst[1] = 0
		dst[2] = uint8(remain - 4)
		return 3
	case remain < (1<<16)+(1<<8):
		// 4-byte repeat: two extra length bytes.
		v := remain - (1 << 8)
		dst[0] = 6<<2 | tagCopy1
		dst[1] = 0
		dst[2] = uint8(v)
		dst[3] = uint8(v >> 8)
		return 4
	}
	// 5-byte repeat: three extra length bytes, possibly chained.
	const maxRepeat = (1 << 24) - 1
	remain -= 1 << 16
	carry := 0
	if remain > maxRepeat {
		// Too long for one chunk; emit the max and recurse for the rest.
		carry = remain - maxRepeat + 4
		remain = maxRepeat - 4
	}
	dst[0] = 7<<2 | tagCopy1
	dst[1] = 0
	dst[2] = uint8(remain)
	dst[3] = uint8(remain >> 8)
	dst[4] = uint8(remain >> 16)
	if carry > 0 {
		return 5 + emitRepeat16(dst[5:], offset, carry)
	}
	return 5
}
// emitCopy16 writes a copy chunk with a 16-bit offset and returns the
// number of bytes written.
//
// It assumes that:
//
//	dst is long enough to hold the encoded bytes
//	1 <= offset && offset <= math.MaxUint16
//	4 <= length && length <= math.MaxUint32
func emitCopy16(dst []byte, offset uint16, length int) int {
	if length > 64 {
		// Long match: emit a short leading copy, then the rest as repeats.
		var hdr int
		if offset < 2048 {
			// 2-byte copy of length 8; remainder becomes repeats.
			dst[0] = uint8(offset>>8)<<5 | uint8(8-4)<<2 | tagCopy1
			dst[1] = uint8(offset)
			length -= 8
			hdr = 2
		} else {
			// 3-byte copy of length 60; remainder becomes repeats
			// (at least 4 bytes remain for the repeat).
			dst[0] = 59<<2 | tagCopy2
			dst[1] = uint8(offset)
			dst[2] = uint8(offset >> 8)
			length -= 60
			hdr = 3
		}
		return hdr + emitRepeat16(dst[hdr:], offset, length)
	}
	if length < 12 && offset < 2048 {
		// Short match close by: compact 2-byte encoding.
		dst[0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1
		dst[1] = uint8(offset)
		return 2
	}
	// Everything else: 3-byte encoding.
	dst[0] = uint8(length-1)<<2 | tagCopy2
	dst[1] = uint8(offset)
	dst[2] = uint8(offset >> 8)
	return 3
}
// emitLiteralGo writes a literal chunk and returns the number of bytes
// written (header plus copied literals).
//
// It assumes that:
//
//	dst is long enough to hold the encoded bytes
//	0 <= len(lit) && len(lit) <= math.MaxUint32
func emitLiteralGo(dst, lit []byte) int {
	if len(lit) == 0 {
		return 0
	}
	// The header stores length-1; short lengths fit in the tag byte,
	// longer ones use 1-4 trailing little-endian length bytes.
	n := uint(len(lit) - 1)
	var hdr int
	if n < 60 {
		dst[0] = uint8(n)<<2 | tagLiteral
		hdr = 1
	} else if n < 1<<8 {
		dst[0] = 60<<2 | tagLiteral
		dst[1] = uint8(n)
		hdr = 2
	} else if n < 1<<16 {
		dst[0] = 61<<2 | tagLiteral
		dst[1] = uint8(n)
		dst[2] = uint8(n >> 8)
		hdr = 3
	} else if n < 1<<24 {
		dst[0] = 62<<2 | tagLiteral
		dst[1] = uint8(n)
		dst[2] = uint8(n >> 8)
		dst[3] = uint8(n >> 16)
		hdr = 4
	} else {
		dst[0] = 63<<2 | tagLiteral
		dst[1] = uint8(n)
		dst[2] = uint8(n >> 8)
		dst[3] = uint8(n >> 16)
		dst[4] = uint8(n >> 24)
		hdr = 5
	}
	return hdr + copy(dst[hdr:], lit)
}
// Copyright (c) 2022 Klaus Post. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package s2
import (
"encoding/binary"
"fmt"
)
// LZ4sConverter provides conversion from LZ4s.
// (Intel modified LZ4 Blocks)
// https://cdrdv2-public.intel.com/743912/743912-qat-programmers-guide-v2.0.pdf
// LZ4s is a variant of LZ4 block format. LZ4s should be considered as an intermediate compressed block format.
// The LZ4s format is selected when the application sets the compType to CPA_DC_LZ4S in CpaDcSessionSetupData.
// The LZ4s block returned by the Intel® QAT hardware can be used by an external
// software post-processing to generate other compressed data formats.
// The following table lists the differences between LZ4 and LZ4s block format. LZ4s block format uses
// the same high-level formatting as LZ4 block format with the following encoding changes:
// For Min Match of 4 bytes, Copy length value 1-15 means length 4-18 with 18 bytes adding an extra byte.
// ONLY "Min match of 4 bytes" is supported.
//
// The converter holds no state; the zero value is ready to use.
type LZ4sConverter struct {
}
// ConvertBlock will convert an LZ4s block and append it as an S2
// block without block length to dst.
// The uncompressed size is returned as well.
// dst must have capacity to contain the entire compressed block.
//
// Returns ErrCorrupt if the LZ4s input is malformed and ErrDstTooSmall
// if dst does not have room for the converted output.
func (l *LZ4sConverter) ConvertBlock(dst, src []byte) ([]byte, int, error) {
	if len(src) == 0 {
		return dst, 0, nil
	}
	const debug = false
	// inline selects the hand-inlined emitters below over the
	// emitRepeat16/emitCopy16 helpers (both paths emit identical bytes).
	const inline = true
	// LZ4s uses a minimum match of 3 (plain LZ4 uses 4).
	const lz4MinMatch = 3

	// s is the read position in src; d is the write position in dst.
	s, d := 0, len(dst)
	dst = dst[:cap(dst)]
	if !debug && hasAmd64Asm {
		res, sz := cvtLZ4sBlockAsm(dst[d:], src)
		if res < 0 {
			// Negative results from the assembler map to the error codes below.
			const (
				errCorrupt     = -1
				errDstTooSmall = -2
			)
			switch res {
			case errCorrupt:
				return nil, 0, ErrCorrupt
			case errDstTooSmall:
				return nil, 0, ErrDstTooSmall
			default:
				return nil, 0, fmt.Errorf("unexpected result: %d", res)
			}
		}
		if d+sz > len(dst) {
			return nil, 0, ErrDstTooSmall
		}
		return dst[:d+sz], res, nil
	}
	// Keep headroom so a single emit below cannot run past dst.
	dLimit := len(dst) - 10
	// lastOffset enables S2 repeat codes for back-to-back same-offset matches.
	var lastOffset uint16
	var uncompressed int
	if debug {
		fmt.Printf("convert block start: len(src): %d, len(dst):%d \n", len(src), len(dst))
	}
	for {
		if s >= len(src) {
			// NOTE(review): corrupt paths in this function return a mix of
			// dst[:d] and nil for the first value — confirm intended.
			return dst[:d], 0, ErrCorrupt
		}
		// Read literal info.
		// Token: upper nibble literal length, lower nibble match length
		// minus lz4MinMatch.
		token := src[s]
		ll := int(token >> 4)
		ml := int(lz4MinMatch + (token & 0xf))

		// If upper nibble is 15, literal length is extended
		// by additional bytes; 255 means "another byte follows".
		if token >= 0xf0 {
			for {
				s++
				if s >= len(src) {
					if debug {
						fmt.Printf("error reading ll: s (%d) >= len(src) (%d)\n", s, len(src))
					}
					return dst[:d], 0, ErrCorrupt
				}
				val := src[s]
				ll += int(val)
				if val != 255 {
					break
				}
			}
		}
		// Skip past token
		if s+ll >= len(src) {
			if debug {
				fmt.Printf("error literals: s+ll (%d+%d) >= len(src) (%d)\n", s, ll, len(src))
			}
			return nil, 0, ErrCorrupt
		}
		s++
		if ll > 0 {
			if d+ll > dLimit {
				return nil, 0, ErrDstTooSmall
			}
			if debug {
				fmt.Printf("emit %d literals\n", ll)
			}
			d += emitLiteralGo(dst[d:], src[s:s+ll])
			s += ll
			uncompressed += ll
		}

		// Check if we are done...
		// LZ4s allows tokens with no match part (match nibble == 0):
		// at end of input that terminates the block, otherwise the
		// sequence carried literals only.
		if ml == lz4MinMatch {
			if s == len(src) {
				break
			}
			// 0 bytes.
			continue
		}
		// 2 byte offset
		if s >= len(src)-2 {
			if debug {
				fmt.Printf("s (%d) >= len(src)-2 (%d)", s, len(src)-2)
			}
			return nil, 0, ErrCorrupt
		}
		offset := binary.LittleEndian.Uint16(src[s:])
		s += 2
		if offset == 0 {
			if debug {
				fmt.Printf("error: offset 0, ml: %d, len(src)-s: %d\n", ml, len(src)-s)
			}
			return nil, 0, ErrCorrupt
		}
		// A match may not reference data before the start of the block.
		if int(offset) > uncompressed {
			if debug {
				fmt.Printf("error: offset (%d)> uncompressed (%d)\n", offset, uncompressed)
			}
			return nil, 0, ErrCorrupt
		}

		// A match nibble of 15 means the match length is extended
		// by additional bytes, same scheme as literals.
		if ml == lz4MinMatch+15 {
			for {
				if s >= len(src) {
					if debug {
						fmt.Printf("error reading ml: s (%d) >= len(src) (%d)\n", s, len(src))
					}
					return nil, 0, ErrCorrupt
				}
				val := src[s]
				s++
				ml += int(val)
				if val != 255 {
					if s >= len(src) {
						if debug {
							fmt.Printf("error reading ml: s (%d) >= len(src) (%d)\n", s, len(src))
						}
						return nil, 0, ErrCorrupt
					}
					break
				}
			}
		}
		if offset == lastOffset {
			// Same offset as previous match: use the cheaper S2 repeat code.
			if debug {
				fmt.Printf("emit repeat, length: %d, offset: %d\n", ml, offset)
			}
			if !inline {
				d += emitRepeat16(dst[d:], offset, ml)
			} else {
				// Inlined emitRepeat16. The loop runs at most once; the
				// breaks act as a "goto end" for each encoding size.
				length := ml
				dst := dst[d:]
				for len(dst) > 5 {
					// Repeat offset, make length cheaper
					length -= 4
					if length <= 4 {
						dst[0] = uint8(length)<<2 | tagCopy1
						dst[1] = 0
						d += 2
						break
					}
					if length < 8 && offset < 2048 {
						// Encode WITH offset
						dst[1] = uint8(offset)
						dst[0] = uint8(offset>>8)<<5 | uint8(length)<<2 | tagCopy1
						d += 2
						break
					}
					if length < (1<<8)+4 {
						length -= 4
						dst[2] = uint8(length)
						dst[1] = 0
						dst[0] = 5<<2 | tagCopy1
						d += 3
						break
					}
					if length < (1<<16)+(1<<8) {
						length -= 1 << 8
						dst[3] = uint8(length >> 8)
						dst[2] = uint8(length >> 0)
						dst[1] = 0
						dst[0] = 6<<2 | tagCopy1
						d += 4
						break
					}
					const maxRepeat = (1 << 24) - 1
					length -= 1 << 16
					left := 0
					if length > maxRepeat {
						left = length - maxRepeat + 4
						length = maxRepeat - 4
					}
					dst[4] = uint8(length >> 16)
					dst[3] = uint8(length >> 8)
					dst[2] = uint8(length >> 0)
					dst[1] = 0
					dst[0] = 7<<2 | tagCopy1
					if left > 0 {
						d += 5 + emitRepeat16(dst[5:], offset, left)
						break
					}
					d += 5
					break
				}
			}
		} else {
			if debug {
				fmt.Printf("emit copy, length: %d, offset: %d\n", ml, offset)
			}
			if !inline {
				d += emitCopy16(dst[d:], offset, ml)
			} else {
				// Inlined emitCopy16; same "loop as goto end" pattern.
				length := ml
				dst := dst[d:]
				for len(dst) > 5 {
					// Offset no more than 2 bytes.
					if length > 64 {
						off := 3
						if offset < 2048 {
							// emit 8 bytes as tagCopy1, rest as repeats.
							dst[1] = uint8(offset)
							dst[0] = uint8(offset>>8)<<5 | uint8(8-4)<<2 | tagCopy1
							length -= 8
							off = 2
						} else {
							// Emit a length 60 copy, encoded as 3 bytes.
							// Emit remaining as repeat value (minimum 4 bytes).
							dst[2] = uint8(offset >> 8)
							dst[1] = uint8(offset)
							dst[0] = 59<<2 | tagCopy2
							length -= 60
						}
						// Emit remaining as repeats, at least 4 bytes remain.
						d += off + emitRepeat16(dst[off:], offset, length)
						break
					}
					if length >= 12 || offset >= 2048 {
						// Emit the remaining copy, encoded as 3 bytes.
						dst[2] = uint8(offset >> 8)
						dst[1] = uint8(offset)
						dst[0] = uint8(length-1)<<2 | tagCopy2
						d += 3
						break
					}
					// Emit the remaining copy, encoded as 2 bytes.
					dst[1] = uint8(offset)
					dst[0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1
					d += 2
					break
				}
			}
			lastOffset = offset
		}
		uncompressed += ml
		if d > dLimit {
			return nil, 0, ErrDstTooSmall
		}
	}
	return dst[:d], uncompressed, nil
}
// ConvertBlockSnappy will convert an LZ4s block and append it
// as a Snappy block without block length to dst.
// The uncompressed size is returned as well.
// dst must have capacity to contain the entire compressed block.
//
// Returns ErrCorrupt if the LZ4s input is malformed and ErrDstTooSmall
// if dst does not have room for the converted output.
func (l *LZ4sConverter) ConvertBlockSnappy(dst, src []byte) ([]byte, int, error) {
	if len(src) == 0 {
		return dst, 0, nil
	}
	const debug = false
	// LZ4s uses a minimum match of 3 (plain LZ4 uses 4).
	const lz4MinMatch = 3
	// s is the read position in src; d is the write position in dst.
	s, d := 0, len(dst)
	dst = dst[:cap(dst)]
	// Use assembly when possible
	if !debug && hasAmd64Asm {
		res, sz := cvtLZ4sBlockSnappyAsm(dst[d:], src)
		if res < 0 {
			// Negative results from the assembler map to the error codes below.
			const (
				errCorrupt     = -1
				errDstTooSmall = -2
			)
			switch res {
			case errCorrupt:
				return nil, 0, ErrCorrupt
			case errDstTooSmall:
				return nil, 0, ErrDstTooSmall
			default:
				return nil, 0, fmt.Errorf("unexpected result: %d", res)
			}
		}
		if d+sz > len(dst) {
			return nil, 0, ErrDstTooSmall
		}
		return dst[:d+sz], res, nil
	}
	// Keep headroom so a single emit below cannot run past dst.
	dLimit := len(dst) - 10
	var uncompressed int
	if debug {
		fmt.Printf("convert block start: len(src): %d, len(dst):%d \n", len(src), len(dst))
	}
	for {
		if s >= len(src) {
			return nil, 0, ErrCorrupt
		}
		// Read literal info.
		// Token: upper nibble literal length, lower nibble match length
		// minus lz4MinMatch.
		token := src[s]
		ll := int(token >> 4)
		ml := int(lz4MinMatch + (token & 0xf))

		// If upper nibble is 15, literal length is extended
		// by additional bytes; 255 means "another byte follows".
		if token >= 0xf0 {
			for {
				s++
				if s >= len(src) {
					if debug {
						fmt.Printf("error reading ll: s (%d) >= len(src) (%d)\n", s, len(src))
					}
					return nil, 0, ErrCorrupt
				}
				val := src[s]
				ll += int(val)
				if val != 255 {
					break
				}
			}
		}

		// Skip past token
		if s+ll >= len(src) {
			if debug {
				fmt.Printf("error literals: s+ll (%d+%d) >= len(src) (%d)\n", s, ll, len(src))
			}
			return nil, 0, ErrCorrupt
		}
		s++
		if ll > 0 {
			if d+ll > dLimit {
				return nil, 0, ErrDstTooSmall
			}
			if debug {
				fmt.Printf("emit %d literals\n", ll)
			}
			d += emitLiteralGo(dst[d:], src[s:s+ll])
			s += ll
			uncompressed += ll
		}

		// Check if we are done...
		// LZ4s allows tokens with no match part (match nibble == 0):
		// at end of input that terminates the block, otherwise the
		// sequence carried literals only.
		if ml == lz4MinMatch {
			if s == len(src) {
				break
			}
			// 0 bytes.
			continue
		}
		// 2 byte offset
		if s >= len(src)-2 {
			if debug {
				fmt.Printf("s (%d) >= len(src)-2 (%d)", s, len(src)-2)
			}
			return nil, 0, ErrCorrupt
		}
		offset := binary.LittleEndian.Uint16(src[s:])
		s += 2
		if offset == 0 {
			if debug {
				fmt.Printf("error: offset 0, ml: %d, len(src)-s: %d\n", ml, len(src)-s)
			}
			return nil, 0, ErrCorrupt
		}
		// A match may not reference data before the start of the block.
		if int(offset) > uncompressed {
			if debug {
				fmt.Printf("error: offset (%d)> uncompressed (%d)\n", offset, uncompressed)
			}
			return nil, 0, ErrCorrupt
		}

		// A match nibble of 15 means the match length is extended
		// by additional bytes, same scheme as literals.
		if ml == lz4MinMatch+15 {
			for {
				if s >= len(src) {
					if debug {
						fmt.Printf("error reading ml: s (%d) >= len(src) (%d)\n", s, len(src))
					}
					return nil, 0, ErrCorrupt
				}
				val := src[s]
				s++
				ml += int(val)
				if val != 255 {
					if s >= len(src) {
						if debug {
							fmt.Printf("error reading ml: s (%d) >= len(src) (%d)\n", s, len(src))
						}
						return nil, 0, ErrCorrupt
					}
					break
				}
			}
		}
		if debug {
			fmt.Printf("emit copy, length: %d, offset: %d\n", ml, offset)
		}
		length := ml
		// d += emitCopyNoRepeat(dst[d:], int(offset), ml)
		// Snappy output: emit plain copies only (no repeat codes),
		// splitting long matches into max 64-byte copies.
		for length > 0 {
			if d >= dLimit {
				return nil, 0, ErrDstTooSmall
			}
			// Offset no more than 2 bytes.
			if length > 64 {
				// Emit a length 64 copy, encoded as 3 bytes.
				dst[d+2] = uint8(offset >> 8)
				dst[d+1] = uint8(offset)
				dst[d+0] = 63<<2 | tagCopy2
				length -= 64
				d += 3
				continue
			}
			// length < 4 can only occur for the tail after splitting above;
			// those short tails must use the 3-byte encoding.
			if length >= 12 || offset >= 2048 || length < 4 {
				// Emit the remaining copy, encoded as 3 bytes.
				dst[d+2] = uint8(offset >> 8)
				dst[d+1] = uint8(offset)
				dst[d+0] = uint8(length-1)<<2 | tagCopy2
				d += 3
				break
			}
			// Emit the remaining copy, encoded as 2 bytes.
			dst[d+1] = uint8(offset)
			dst[d+0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1
			d += 2
			break
		}
		uncompressed += ml
		if d > dLimit {
			return nil, 0, ErrDstTooSmall
		}
	}
	return dst[:d], uncompressed, nil
}
// Copyright 2011 The Snappy-Go Authors. All rights reserved.
// Copyright (c) 2019+ Klaus Post. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package s2
import (
"errors"
"fmt"
"io"
"io/ioutil"
"math"
"runtime"
"sync"
)
// ErrCantSeek is returned if the stream cannot be seeked.
// Reason describes why seeking is not possible.
type ErrCantSeek struct {
	Reason string
}

// Error implements the error interface, returning the reason
// seeking failed.
func (e ErrCantSeek) Error() string {
	const prefix = "s2: Can't seek because "
	return fmt.Sprintf("%s%s", prefix, e.Reason)
}
// NewReader returns a new Reader that decompresses from r, using the framing
// format described at
// https://github.com/google/snappy/blob/master/framing_format.txt with S2 changes.
// Behavior can be tuned with ReaderOption values; the first option to fail
// leaves its error on the returned Reader.
func NewReader(r io.Reader, opts ...ReaderOption) *Reader {
	rd := &Reader{
		r:        r,
		maxBlock: maxBlockSize,
	}
	for _, opt := range opts {
		if err := opt(rd); err != nil {
			rd.err = err
			return rd
		}
	}
	rd.maxBufSize = MaxEncodedLen(rd.maxBlock) + checksumSize
	// Pre-allocate the scratch buffer; honor a smaller lazy size if set.
	alloc := defaultBlockSize
	if rd.lazyBuf > 0 {
		alloc = rd.lazyBuf
	}
	rd.buf = make([]byte, MaxEncodedLen(alloc)+checksumSize)
	rd.readHeader = rd.ignoreStreamID
	rd.paramsOK = true
	return rd
}
// ReaderOption is an option for creating a decoder.
// Options are applied in order by NewReader; the first option returning
// a non-nil error aborts configuration.
type ReaderOption func(*Reader) error
// ReaderMaxBlockSize allows to control allocations if the stream
// has been compressed with a smaller WriterBlockSize, or with the default 1MB.
// Blocks must be this size or smaller to decompress,
// otherwise the decoder will return ErrUnsupported.
//
// For streams compressed with Snappy this can safely be set to 64KB (64 << 10).
//
// Default is the maximum limit of 4MB.
func ReaderMaxBlockSize(blockSize int) ReaderOption {
	return func(r *Reader) error {
		if blockSize <= 0 || blockSize > maxBlockSize {
			return errors.New("s2: block size too large. Must be <= 4MB and > 0")
		}
		// Smaller blocks also shrink the lazy allocation, unless set already.
		if blockSize < defaultBlockSize && r.lazyBuf == 0 {
			r.lazyBuf = blockSize
		}
		r.maxBlock = blockSize
		return nil
	}
}
// ReaderAllocBlock allows to control upfront stream allocations
// and not allocate for frames bigger than this initially.
// If frames bigger than this is seen a bigger buffer will be allocated.
//
// Default is 1MB, which is default output size.
func ReaderAllocBlock(blockSize int) ReaderOption {
	return func(r *Reader) error {
		if blockSize < 1024 || blockSize > maxBlockSize {
			return errors.New("s2: invalid ReaderAllocBlock. Must be <= 4MB and >= 1024")
		}
		r.lazyBuf = blockSize
		return nil
	}
}
// ReaderIgnoreStreamIdentifier will make the reader skip the expected
// stream identifier at the beginning of the stream.
// This can be used when serving a stream that has been forwarded to a specific point.
func ReaderIgnoreStreamIdentifier() ReaderOption {
	return func(rd *Reader) error {
		rd.ignoreStreamID = true
		return nil
	}
}
// ReaderSkippableCB will register a callback for chunks with the specified ID.
// ID must be a Reserved skippable chunks ID, 0x80-0xfd (inclusive).
// For each chunk with the ID, the callback is called with the content.
// Any returned non-nil error will abort decompression.
// Only one callback per ID is supported, latest sent will be used.
// Registering a nil function removes a previously set callback.
// You can peek the stream, triggering the callback, by doing a Read with a 0
// byte buffer.
func ReaderSkippableCB(id uint8, fn func(r io.Reader) error) ReaderOption {
	return func(r *Reader) error {
		// Only the reserved skippable range is valid; id is uint8 so the
		// upper bound 0xfd is the only real constraint besides >= 0x80.
		if id < 0x80 || id > 0xfd {
			return errors.New("ReaderSkippableCB: Invalid id provided, must be 0x80-0xfd (inclusive)")
		}
		r.skippableCB[id-0x80] = fn
		return nil
	}
}
// ReaderIgnoreCRC will make the reader skip CRC calculation and checks.
func ReaderIgnoreCRC() ReaderOption {
	return func(rd *Reader) error {
		rd.ignoreCRC = true
		return nil
	}
}
// Reader is an io.Reader that can read Snappy-compressed bytes.
// Create one with NewReader; the zero value is not usable (paramsOK guards this).
type Reader struct {
	r   io.Reader // source of compressed data
	err error     // sticky; once set, all operations return it

	decoded []byte // holds the current decompressed block
	buf     []byte // scratch buffer for reading compressed chunks

	// Callbacks for skippable chunks, indexed by chunk ID minus 0x80.
	skippableCB [0xff - 0x80]func(r io.Reader) error
	blockStart  int64 // Uncompressed offset at start of current.
	index       *Index // stream index, if any; cleared by Reset.

	// decoded[i:j] contains decoded bytes that have not yet been passed on.
	i, j int
	// maximum block size allowed.
	maxBlock int
	// maximum expected buffer size.
	maxBufSize int
	// alloc a buffer this size if > 0.
	lazyBuf        int
	readHeader     bool // stream identifier already consumed (or ignored)
	paramsOK       bool // set by NewReader after successful option handling
	snappyFrame    bool // current stream identified itself as Snappy, not S2
	ignoreStreamID bool // option: skip stream identifier validation
	ignoreCRC      bool // option: skip CRC checks
}
// GetBufferCapacity returns the capacity of the internal buffer.
// This might be useful to know when reusing the same reader in combination
// with the lazy buffer option.
func (r *Reader) GetBufferCapacity() int {
	c := cap(r.buf)
	return c
}
// ensureBufferSize will ensure that the buffer can take at least n bytes.
// If false is returned the buffer exceeds maximum allowed size and
// r.err has been set to ErrCorrupt.
func (r *Reader) ensureBufferSize(n int) bool {
	switch {
	case n > r.maxBufSize:
		// Requests beyond the frame limit indicate corrupt input.
		r.err = ErrCorrupt
		return false
	case cap(r.buf) < n:
		// Realloc buffer; previous contents are not preserved.
		r.buf = make([]byte, n)
	}
	return true
}
// Reset discards any buffered data, resets all state, and switches the Snappy
// reader to read from reader. This permits reusing a Reader rather than
// allocating a new one. A Reader not created by NewReader is left untouched.
func (r *Reader) Reset(reader io.Reader) {
	if !r.paramsOK {
		return
	}
	r.index = nil
	r.r, r.err = reader, nil
	r.i, r.j = 0, 0
	r.blockStart = 0
	r.readHeader = r.ignoreStreamID
}
// readFull reads exactly len(p) bytes from the underlying reader into p.
// On failure it stores the error in r.err (translating unexpected EOF, and
// plain EOF when allowEOF is false, into ErrCorrupt) and reports false.
func (r *Reader) readFull(p []byte, allowEOF bool) (ok bool) {
	_, r.err = io.ReadFull(r.r, p)
	if r.err == nil {
		return true
	}
	if r.err == io.ErrUnexpectedEOF || (!allowEOF && r.err == io.EOF) {
		r.err = ErrCorrupt
	}
	return false
}
// skippable will skip n bytes.
// If the supplied reader supports seeking that is used.
// tmp is used as a temporary buffer for reading.
// The supplied slice does not need to be the size of the read.
// On failure, r.err is set and false is returned.
func (r *Reader) skippable(tmp []byte, n int, allowEOF bool, id uint8) (ok bool) {
	if id < 0x80 {
		r.err = fmt.Errorf("internal error: skippable id < 0x80")
		return false
	}
	// If a callback is registered for this chunk ID, hand the content to it
	// and drain whatever it did not consume.
	if fn := r.skippableCB[id-0x80]; fn != nil {
		rd := io.LimitReader(r.r, int64(n))
		r.err = fn(rd)
		if r.err != nil {
			return false
		}
		_, r.err = io.CopyBuffer(ioutil.Discard, rd, tmp)
		return r.err == nil
	}
	// Prefer seeking past the chunk when the underlying reader supports it.
	if rs, ok := r.r.(io.ReadSeeker); ok {
		_, err := rs.Seek(int64(n), io.SeekCurrent)
		if err == nil {
			return true
		}
		// Fix: inspect the seek error itself; the previous code checked the
		// stale r.err, so EOF during a seek was never reported as corrupt.
		if err == io.ErrUnexpectedEOF || (err == io.EOF && !allowEOF) {
			r.err = ErrCorrupt
			return false
		}
		// Other seek errors: fall through and skip by reading instead.
	}
	for n > 0 {
		if n < len(tmp) {
			tmp = tmp[:n]
		}
		if _, r.err = io.ReadFull(r.r, tmp); r.err != nil {
			if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) {
				r.err = ErrCorrupt
			}
			return false
		}
		n -= len(tmp)
	}
	return true
}
// Read satisfies the io.Reader interface.
// It returns decompressed data from the stream, reading and decoding
// frame chunks as needed. Any error encountered is sticky: it is stored
// in r.err and returned on every subsequent call.
func (r *Reader) Read(p []byte) (int, error) {
	if r.err != nil {
		return 0, r.err
	}
	for {
		// Serve buffered decoded data first.
		if r.i < r.j {
			n := copy(p, r.decoded[r.i:r.j])
			r.i += n
			return n, nil
		}
		// Read the 4-byte chunk header: type byte + 24-bit little-endian length.
		if !r.readFull(r.buf[:4], true) {
			return 0, r.err
		}
		chunkType := r.buf[0]
		if !r.readHeader {
			// The stream must start with a stream identifier chunk.
			if chunkType != chunkTypeStreamIdentifier {
				r.err = ErrCorrupt
				return 0, r.err
			}
			r.readHeader = true
		}
		chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16

		// The chunk types are specified at
		// https://github.com/google/snappy/blob/master/framing_format.txt
		switch chunkType {
		case chunkTypeCompressedData:
			r.blockStart += int64(r.j)
			// Section 4.2. Compressed data (chunk type 0x00).
			if chunkLen < checksumSize {
				r.err = ErrCorrupt
				return 0, r.err
			}
			if !r.ensureBufferSize(chunkLen) {
				if r.err == nil {
					r.err = ErrUnsupported
				}
				return 0, r.err
			}
			buf := r.buf[:chunkLen]
			if !r.readFull(buf, false) {
				return 0, r.err
			}
			// First 4 bytes are the little-endian CRC of the decoded data.
			checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
			buf = buf[checksumSize:]

			n, err := DecodedLen(buf)
			if err != nil {
				r.err = err
				return 0, r.err
			}
			// Snappy frames are limited to smaller blocks than S2.
			if r.snappyFrame && n > maxSnappyBlockSize {
				r.err = ErrCorrupt
				return 0, r.err
			}

			if n > len(r.decoded) {
				if n > r.maxBlock {
					r.err = ErrCorrupt
					return 0, r.err
				}
				r.decoded = make([]byte, n)
			}
			if _, err := Decode(r.decoded, buf); err != nil {
				r.err = err
				return 0, r.err
			}
			if !r.ignoreCRC && crc(r.decoded[:n]) != checksum {
				r.err = ErrCRC
				return 0, r.err
			}
			r.i, r.j = 0, n
			continue

		case chunkTypeUncompressedData:
			r.blockStart += int64(r.j)
			// Section 4.3. Uncompressed data (chunk type 0x01).
			if chunkLen < checksumSize {
				r.err = ErrCorrupt
				return 0, r.err
			}
			if !r.ensureBufferSize(chunkLen) {
				if r.err == nil {
					r.err = ErrUnsupported
				}
				return 0, r.err
			}
			buf := r.buf[:checksumSize]
			if !r.readFull(buf, false) {
				return 0, r.err
			}
			checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
			// Read directly into r.decoded instead of via r.buf.
			n := chunkLen - checksumSize
			if r.snappyFrame && n > maxSnappyBlockSize {
				r.err = ErrCorrupt
				return 0, r.err
			}
			if n > len(r.decoded) {
				if n > r.maxBlock {
					r.err = ErrCorrupt
					return 0, r.err
				}
				r.decoded = make([]byte, n)
			}
			if !r.readFull(r.decoded[:n], false) {
				return 0, r.err
			}
			if !r.ignoreCRC && crc(r.decoded[:n]) != checksum {
				r.err = ErrCRC
				return 0, r.err
			}
			r.i, r.j = 0, n
			continue

		case chunkTypeStreamIdentifier:
			// Section 4.1. Stream identifier (chunk type 0xff).
			if chunkLen != len(magicBody) {
				r.err = ErrCorrupt
				return 0, r.err
			}
			if !r.readFull(r.buf[:len(magicBody)], false) {
				return 0, r.err
			}
			// Accept both the S2 and the Snappy magic; remember which,
			// since Snappy frames have a smaller maximum block size.
			if string(r.buf[:len(magicBody)]) != magicBody {
				if string(r.buf[:len(magicBody)]) != magicBodySnappy {
					r.err = ErrCorrupt
					return 0, r.err
				} else {
					r.snappyFrame = true
				}
			} else {
				r.snappyFrame = false
			}
			continue
		}

		if chunkType <= 0x7f {
			// Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f).
			// fmt.Printf("ERR chunktype: 0x%x\n", chunkType)
			r.err = ErrUnsupported
			return 0, r.err
		}
		// Section 4.4 Padding (chunk type 0xfe).
		// Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd).
		if chunkLen > maxChunkSize {
			// fmt.Printf("ERR chunkLen: 0x%x\n", chunkLen)
			r.err = ErrUnsupported
			return 0, r.err
		}
		// fmt.Printf("skippable: ID: 0x%x, len: 0x%x\n", chunkType, chunkLen)
		if !r.skippable(r.buf, chunkLen, false, chunkType) {
			return 0, r.err
		}
	}
}
// DecodeConcurrent will decode the full stream to w.
// This function should not be combined with reading, seeking or other operations.
// Up to 'concurrent' goroutines will be used.
// If <= 0, runtime.NumCPU will be used.
// On success the number of bytes decompressed and a nil error is returned.
// This is mainly intended for bigger streams.
func (r *Reader) DecodeConcurrent(w io.Writer, concurrent int) (written int64, err error) {
	// Reject use on a reader that has already produced output.
	if r.i > 0 || r.j > 0 || r.blockStart > 0 {
		// NOTE(review): the message below looks truncated — confirm the
		// intended wording (cannot change runtime strings in this pass).
		return 0, errors.New("DecodeConcurrent called after ")
	}
	if concurrent <= 0 {
		concurrent = runtime.NumCPU()
	}

	// Write to output
	// First error wins; setErr/hasErr serialize access to aErr.
	var errMu sync.Mutex
	var aErr error
	setErr := func(e error) (ok bool) {
		errMu.Lock()
		defer errMu.Unlock()
		if e == nil {
			return aErr == nil
		}
		if aErr == nil {
			aErr = e
		}
		return false
	}
	hasErr := func() (ok bool) {
		errMu.Lock()
		v := aErr != nil
		errMu.Unlock()
		return v
	}

	// Pipeline buffers:
	//  - toRead / writtenBlocks recycle byte slices for reading / decoding.
	//  - queue carries one single-slot channel per block, in stream order,
	//    so the writer goroutine emits blocks in the order they were read.
	//  - reUse recycles those per-block channels.
	var aWritten int64
	toRead := make(chan []byte, concurrent)
	writtenBlocks := make(chan []byte, concurrent)
	queue := make(chan chan []byte, concurrent)
	reUse := make(chan chan []byte, concurrent)
	for i := 0; i < concurrent; i++ {
		toRead <- make([]byte, 0, r.maxBufSize)
		writtenBlocks <- make([]byte, 0, r.maxBufSize)
		reUse <- make(chan []byte, 1)
	}
	// Writer
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		for toWrite := range queue {
			entry := <-toWrite
			reUse <- toWrite
			// A nil entry signals a failed block; recycle and skip.
			if hasErr() || entry == nil {
				if entry != nil {
					writtenBlocks <- entry
				}
				continue
			}
			if hasErr() {
				writtenBlocks <- entry
				continue
			}
			n, err := w.Write(entry)
			want := len(entry)
			writtenBlocks <- entry
			if err != nil {
				setErr(err)
				continue
			}
			if n != want {
				setErr(io.ErrShortWrite)
				continue
			}
			aWritten += int64(n)
		}
	}()

	// On exit: record any pending error, stop the writer, and expose the
	// byte count via the named result.
	defer func() {
		if r.err != nil {
			setErr(r.err)
		} else if err != nil {
			setErr(err)
		}
		close(queue)
		wg.Wait()
		if err == nil {
			err = aErr
		}
		written = aWritten
	}()

	// Reader
	for !hasErr() {
		// Read the 4-byte chunk header: type byte + 24-bit LE length.
		if !r.readFull(r.buf[:4], true) {
			if r.err == io.EOF {
				r.err = nil
			}
			return 0, r.err
		}
		chunkType := r.buf[0]
		if !r.readHeader {
			if chunkType != chunkTypeStreamIdentifier {
				r.err = ErrCorrupt
				return 0, r.err
			}
			r.readHeader = true
		}
		chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16

		// The chunk types are specified at
		// https://github.com/google/snappy/blob/master/framing_format.txt
		switch chunkType {
		case chunkTypeCompressedData:
			r.blockStart += int64(r.j)
			// Section 4.2. Compressed data (chunk type 0x00).
			if chunkLen < checksumSize {
				r.err = ErrCorrupt
				return 0, r.err
			}
			if chunkLen > r.maxBufSize {
				r.err = ErrCorrupt
				return 0, r.err
			}
			orgBuf := <-toRead
			buf := orgBuf[:chunkLen]
			if !r.readFull(buf, false) {
				return 0, r.err
			}
			checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
			buf = buf[checksumSize:]

			n, err := DecodedLen(buf)
			if err != nil {
				r.err = err
				return 0, r.err
			}
			if r.snappyFrame && n > maxSnappyBlockSize {
				r.err = ErrCorrupt
				return 0, r.err
			}
			if n > r.maxBlock {
				r.err = ErrCorrupt
				return 0, r.err
			}
			wg.Add(1)

			// Queue the block's slot before spawning, preserving order;
			// the goroutine decodes and fills the slot when done.
			decoded := <-writtenBlocks
			entry := <-reUse
			queue <- entry
			go func() {
				defer wg.Done()
				decoded = decoded[:n]
				_, err := Decode(decoded, buf)
				toRead <- orgBuf
				if err != nil {
					writtenBlocks <- decoded
					setErr(err)
					entry <- nil
					return
				}
				if !r.ignoreCRC && crc(decoded) != checksum {
					writtenBlocks <- decoded
					setErr(ErrCRC)
					entry <- nil
					return
				}
				entry <- decoded
			}()
			continue

		case chunkTypeUncompressedData:
			// Section 4.3. Uncompressed data (chunk type 0x01).
			if chunkLen < checksumSize {
				r.err = ErrCorrupt
				return 0, r.err
			}
			if chunkLen > r.maxBufSize {
				r.err = ErrCorrupt
				return 0, r.err
			}
			// Grab write buffer
			orgBuf := <-writtenBlocks
			buf := orgBuf[:checksumSize]
			if !r.readFull(buf, false) {
				return 0, r.err
			}
			checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
			// Read content.
			n := chunkLen - checksumSize

			if r.snappyFrame && n > maxSnappyBlockSize {
				r.err = ErrCorrupt
				return 0, r.err
			}
			if n > r.maxBlock {
				r.err = ErrCorrupt
				return 0, r.err
			}
			// Read uncompressed
			buf = orgBuf[:n]
			if !r.readFull(buf, false) {
				return 0, r.err
			}

			if !r.ignoreCRC && crc(buf) != checksum {
				r.err = ErrCRC
				return 0, r.err
			}
			// No decoding needed; hand the raw block straight to the writer.
			entry := <-reUse
			queue <- entry
			entry <- buf
			continue

		case chunkTypeStreamIdentifier:
			// Section 4.1. Stream identifier (chunk type 0xff).
			if chunkLen != len(magicBody) {
				r.err = ErrCorrupt
				return 0, r.err
			}
			if !r.readFull(r.buf[:len(magicBody)], false) {
				return 0, r.err
			}
			// Accept both S2 and Snappy magic; remember which.
			if string(r.buf[:len(magicBody)]) != magicBody {
				if string(r.buf[:len(magicBody)]) != magicBodySnappy {
					r.err = ErrCorrupt
					return 0, r.err
				} else {
					r.snappyFrame = true
				}
			} else {
				r.snappyFrame = false
			}
			continue
		}

		if chunkType <= 0x7f {
			// Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f).
			// fmt.Printf("ERR chunktype: 0x%x\n", chunkType)
			r.err = ErrUnsupported
			return 0, r.err
		}
		// Section 4.4 Padding (chunk type 0xfe).
		// Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd).
		if chunkLen > maxChunkSize {
			// fmt.Printf("ERR chunkLen: 0x%x\n", chunkLen)
			r.err = ErrUnsupported
			return 0, r.err
		}
		// fmt.Printf("skippable: ID: 0x%x, len: 0x%x\n", chunkType, chunkLen)
		if !r.skippable(r.buf, chunkLen, false, chunkType) {
			return 0, r.err
		}
	}
	return 0, r.err
}
// Skip will skip n bytes forward in the decompressed output.
// For larger skips this consumes less CPU and is faster than reading output and discarding it.
// CRC is not checked on skipped blocks.
// io.ErrUnexpectedEOF is returned if the stream ends before all bytes have been skipped.
// If a decoding error is encountered subsequent calls to Read will also fail.
func (r *Reader) Skip(n int64) error {
	if n < 0 {
		return errors.New("attempted negative skip")
	}
	if r.err != nil {
		return r.err
	}

	for n > 0 {
		if r.i < r.j {
			// Skip in buffer.
			// decoded[i:j] contains decoded bytes that have not yet been passed on.
			left := int64(r.j - r.i)
			if left >= n {
				tmp := int64(r.i) + n
				if tmp > math.MaxInt32 {
					return errors.New("s2: internal overflow in skip")
				}
				r.i = int(tmp)
				return nil
			}
			n -= int64(r.j - r.i)
			r.i = r.j
		}

		// Buffer empty; read blocks until we have content.
		if !r.readFull(r.buf[:4], true) {
			if r.err == io.EOF {
				// Running out of input mid-skip is an error for Skip.
				r.err = io.ErrUnexpectedEOF
			}
			return r.err
		}
		chunkType := r.buf[0]
		if !r.readHeader {
			// The very first chunk must be the stream identifier.
			if chunkType != chunkTypeStreamIdentifier {
				r.err = ErrCorrupt
				return r.err
			}
			r.readHeader = true
		}
		chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16

		// The chunk types are specified at
		// https://github.com/google/snappy/blob/master/framing_format.txt
		switch chunkType {
		case chunkTypeCompressedData:
			r.blockStart += int64(r.j)
			// Section 4.2. Compressed data (chunk type 0x00).
			if chunkLen < checksumSize {
				r.err = ErrCorrupt
				return r.err
			}
			if !r.ensureBufferSize(chunkLen) {
				if r.err == nil {
					r.err = ErrUnsupported
				}
				return r.err
			}
			buf := r.buf[:chunkLen]
			if !r.readFull(buf, false) {
				return r.err
			}
			checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
			buf = buf[checksumSize:]

			dLen, err := DecodedLen(buf)
			if err != nil {
				r.err = err
				return r.err
			}
			if dLen > r.maxBlock {
				r.err = ErrCorrupt
				return r.err
			}
			// Check if destination is within this block
			if int64(dLen) > n {
				// Target lands inside this block: decode and verify it, then
				// let the loop top skip within the decoded buffer.
				if len(r.decoded) < dLen {
					r.decoded = make([]byte, dLen)
				}
				if _, err := Decode(r.decoded, buf); err != nil {
					r.err = err
					return r.err
				}
				if crc(r.decoded[:dLen]) != checksum {
					r.err = ErrCorrupt
					return r.err
				}
			} else {
				// Skip block completely
				n -= int64(dLen)
				r.blockStart += int64(dLen)
				dLen = 0
			}
			r.i, r.j = 0, dLen
			continue
		case chunkTypeUncompressedData:
			r.blockStart += int64(r.j)
			// Section 4.3. Uncompressed data (chunk type 0x01).
			if chunkLen < checksumSize {
				r.err = ErrCorrupt
				return r.err
			}
			if !r.ensureBufferSize(chunkLen) {
				// Fix: previously this read `r.err != nil`, which overwrote a
				// real error with ErrUnsupported AND returned a nil error
				// (silent success) when ensureBufferSize failed without
				// setting r.err. Mirror the compressed-data branch above.
				if r.err == nil {
					r.err = ErrUnsupported
				}
				return r.err
			}
			buf := r.buf[:checksumSize]
			if !r.readFull(buf, false) {
				return r.err
			}
			checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
			// Read directly into r.decoded instead of via r.buf.
			n2 := chunkLen - checksumSize
			if n2 > len(r.decoded) {
				if n2 > r.maxBlock {
					r.err = ErrCorrupt
					return r.err
				}
				r.decoded = make([]byte, n2)
			}
			if !r.readFull(r.decoded[:n2], false) {
				return r.err
			}
			// NOTE(review): the CRC is verified only when the whole block is
			// skipped (n2 < n), which is the opposite of the compressed-data
			// branch — confirm whether data delivered from a partially
			// skipped uncompressed block should be checksummed.
			if int64(n2) < n {
				if crc(r.decoded[:n2]) != checksum {
					r.err = ErrCorrupt
					return r.err
				}
			}
			r.i, r.j = 0, n2
			continue
		case chunkTypeStreamIdentifier:
			// Section 4.1. Stream identifier (chunk type 0xff).
			if chunkLen != len(magicBody) {
				r.err = ErrCorrupt
				return r.err
			}
			if !r.readFull(r.buf[:len(magicBody)], false) {
				return r.err
			}
			// Accept both the S2 and the Snappy stream magic.
			if string(r.buf[:len(magicBody)]) != magicBody {
				if string(r.buf[:len(magicBody)]) != magicBodySnappy {
					r.err = ErrCorrupt
					return r.err
				}
			}
			continue
		}

		if chunkType <= 0x7f {
			// Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f).
			r.err = ErrUnsupported
			return r.err
		}
		if chunkLen > maxChunkSize {
			r.err = ErrUnsupported
			return r.err
		}
		// Section 4.4 Padding (chunk type 0xfe).
		// Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd).
		if !r.skippable(r.buf, chunkLen, false, chunkType) {
			return r.err
		}
	}
	return nil
}
// ReadSeeker provides random or forward seeking in compressed content.
// See Reader.ReadSeeker
type ReadSeeker struct {
	// Shallow reference: changes made through the ReadSeeker are visible
	// on the original Reader and vice versa.
	*Reader

	// readAtMu serializes ReadAt calls, since each ReadAt performs a
	// Seek followed by Reads on the shared embedded Reader state.
	readAtMu sync.Mutex
}
// ReadSeeker will return an io.ReadSeeker and io.ReaderAt
// compatible version of the reader.
// If 'random' is specified the returned io.Seeker can be used for
// random seeking, otherwise only forward seeking is supported.
// Enabling random seeking requires the original input to support
// the io.Seeker interface.
// A custom index can be specified which will be used if supplied.
// When using a custom index, it will not be read from the input stream.
// The ReadAt position will affect regular reads and the current position of Seek.
// So using Read after ReadAt will continue from where the ReadAt stopped.
// No functions should be used concurrently.
// The returned ReadSeeker contains a shallow reference to the existing Reader,
// meaning changes performed to one is reflected in the other.
func (r *Reader) ReadSeeker(random bool, index []byte) (*ReadSeeker, error) {
	// Read index if provided.
	if len(index) != 0 {
		if r.index == nil {
			r.index = &Index{}
		}
		if _, err := r.index.Load(index); err != nil {
			return nil, ErrCantSeek{Reason: "loading index returned: " + err.Error()}
		}
	}

	// Check if input is seekable
	rs, ok := r.r.(io.ReadSeeker)
	if !ok {
		// Non-seekable input is fine for forward-only use.
		if !random {
			return &ReadSeeker{Reader: r}, nil
		}
		return nil, ErrCantSeek{Reason: "input stream isn't seekable"}
	}

	if r.index != nil {
		// Seekable and index, ok...
		return &ReadSeeker{Reader: r}, nil
	}

	// Load from stream.
	r.index = &Index{}

	// Read current position so it can be restored after probing for an index.
	pos, err := rs.Seek(0, io.SeekCurrent)
	if err != nil {
		return nil, ErrCantSeek{Reason: "seeking input returned: " + err.Error()}
	}
	err = r.index.LoadStream(rs)
	if err != nil {
		if err == ErrUnsupported {
			// If we don't require random seeking, reset input and return.
			if !random {
				_, err = rs.Seek(pos, io.SeekStart)
				if err != nil {
					return nil, ErrCantSeek{Reason: "resetting stream returned: " + err.Error()}
				}
				// Drop the empty index so forward seeking uses Skip.
				r.index = nil
				return &ReadSeeker{Reader: r}, nil
			}
			return nil, ErrCantSeek{Reason: "input stream does not contain an index"}
		}
		return nil, ErrCantSeek{Reason: "reading index returned: " + err.Error()}
	}

	// reset position.
	_, err = rs.Seek(pos, io.SeekStart)
	if err != nil {
		return nil, ErrCantSeek{Reason: "seeking input returned: " + err.Error()}
	}
	return &ReadSeeker{Reader: r}, nil
}
// Seek allows seeking in compressed data.
func (r *ReadSeeker) Seek(offset int64, whence int) (int64, error) {
	if r.err != nil {
		if !errors.Is(r.err, io.EOF) {
			return 0, r.err
		}
		// Reset on EOF
		r.err = nil
	}

	// Calculate absolute offset in uncompressed output.
	absOffset := offset
	switch whence {
	case io.SeekStart:
	case io.SeekCurrent:
		// Current uncompressed position is block start plus read offset
		// within the current decoded block.
		absOffset = r.blockStart + int64(r.i) + offset
	case io.SeekEnd:
		// Seeking relative to the end requires the total size, which only
		// the index knows.
		if r.index == nil {
			return 0, ErrUnsupported
		}
		absOffset = r.index.TotalUncompressed + offset
	default:
		r.err = ErrUnsupported
		return 0, r.err
	}

	if absOffset < 0 {
		return 0, errors.New("seek before start of file")
	}

	if !r.readHeader {
		// Make sure we read the header; a zero-byte Read triggers header
		// parsing without consuming output.
		_, r.err = r.Read([]byte{})
		if r.err != nil {
			return 0, r.err
		}
	}

	// If we are inside current block no need to seek.
	// This includes no offset changes.
	if absOffset >= r.blockStart && absOffset < r.blockStart+int64(r.j) {
		r.i = int(absOffset - r.blockStart)
		return r.blockStart + int64(r.i), nil
	}

	rs, ok := r.r.(io.ReadSeeker)
	if r.index == nil || !ok {
		// No index or non-seekable input: only forward movement via Skip
		// is possible.
		currOffset := r.blockStart + int64(r.i)
		if absOffset >= currOffset {
			err := r.Skip(absOffset - currOffset)
			return r.blockStart + int64(r.i), err
		}
		return 0, ErrUnsupported
	}

	// We can seek and we have an index.
	// Find compressed offset c and its uncompressed offset u at/before target.
	c, u, err := r.index.Find(absOffset)
	if err != nil {
		return r.blockStart + int64(r.i), err
	}

	// Seek to next block
	_, err = rs.Seek(c, io.SeekStart)
	if err != nil {
		return 0, err
	}

	r.i = r.j                     // Remove rest of current block.
	r.blockStart = u - int64(r.j) // Adjust current block start for accounting.
	if u < absOffset {
		// Forward inside block
		// NOTE(review): the target offset is returned even if Skip fails;
		// callers must check the error before trusting the position.
		return absOffset, r.Skip(absOffset - u)
	}
	if u > absOffset {
		return 0, fmt.Errorf("s2 seek: (internal error) u (%d) > absOffset (%d)", u, absOffset)
	}
	return absOffset, nil
}
// ReadAt reads len(p) bytes into p starting at offset off in the
// underlying input source. It returns the number of bytes
// read (0 <= n <= len(p)) and any error encountered.
//
// When ReadAt returns n < len(p), it returns a non-nil error
// explaining why more bytes were not returned. In this respect,
// ReadAt is stricter than Read.
//
// Even if ReadAt returns n < len(p), it may use all of p as scratch
// space during the call. If some data is available but not len(p) bytes,
// ReadAt blocks until either all the data is available or an error occurs.
// In this respect ReadAt is different from Read.
//
// If the n = len(p) bytes returned by ReadAt are at the end of the
// input source, ReadAt may return either err == EOF or err == nil.
//
// If ReadAt is reading from an input source with a seek offset,
// ReadAt should not affect nor be affected by the underlying
// seek offset.
//
// Clients of ReadAt can execute parallel ReadAt calls on the
// same input source. This is however not recommended.
func (r *ReadSeeker) ReadAt(p []byte, offset int64) (int, error) {
	// Serialize concurrent ReadAt calls; Seek+Read mutates shared state.
	r.readAtMu.Lock()
	defer r.readAtMu.Unlock()
	_, err := r.Seek(offset, io.SeekStart)
	if err != nil {
		return 0, err
	}
	n := 0
	// Fill p completely; unlike Read, partial progress keeps looping.
	// NOTE(review): assumes Read never repeatedly returns (0, nil),
	// otherwise this would spin — confirm against Reader.Read.
	for n < len(p) {
		n2, err := r.Read(p[n:])
		if err != nil {
			// This will include io.EOF
			return n + n2, err
		}
		n += n2
	}
	return n, nil
}
// ReadByte satisfies the io.ByteReader interface.
// It returns the next decompressed byte, or any pending/encountered error.
func (r *Reader) ReadByte() (byte, error) {
	if r.err != nil {
		return 0, r.err
	}
	// Fast path: serve directly from the decoded buffer.
	if r.i < r.j {
		b := r.decoded[r.i]
		r.i++
		return b, nil
	}
	// Slow path: pull one byte through Read, bailing out after a bounded
	// number of zero-progress attempts.
	var one [1]byte
	for attempt := 0; attempt < 10; attempt++ {
		n, err := r.Read(one[:])
		switch {
		case err != nil:
			return 0, err
		case n == 1:
			return one[0], nil
		}
	}
	return 0, io.ErrNoProgress
}
// SkippableCB will register a callback for chunks with the specified ID.
// ID must be a Reserved skippable chunks ID, 0x80-0xfd (inclusive).
// For each chunk with the ID, the callback is called with the content.
// Any returned non-nil error will abort decompression.
// Only one callback per ID is supported, latest sent will be used.
// Sending a nil function will disable previous callbacks.
// You can peek the stream, triggering the callback, by doing a Read with a 0
// byte buffer.
func (r *Reader) SkippableCB(id uint8, fn func(r io.Reader) error) error {
	// chunkTypePadding (0xfe) is the first id above the skippable range,
	// so valid ids are [0x80, 0xfe). The message previously claimed
	// "0x80-0xfe (inclusive)", contradicting both the check and the doc
	// comment above; errors.New replaces a verb-less fmt.Errorf.
	if id < 0x80 || id >= chunkTypePadding {
		return errors.New("ReaderSkippableCB: Invalid id provided, must be 0x80-0xfd (inclusive)")
	}
	r.skippableCB[id-0x80] = fn
	return nil
}
// Copyright 2011 The Snappy-Go Authors. All rights reserved.
// Copyright (c) 2019 Klaus Post. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package s2 implements the S2 compression format.
//
// S2 is an extension of Snappy. Similar to Snappy S2 is aimed for high throughput,
// which is why it features concurrent compression for bigger payloads.
//
// Decoding is compatible with Snappy compressed content,
// but content compressed with S2 cannot be decompressed by Snappy.
//
// For more information on Snappy/S2 differences see README in: https://github.com/klauspost/compress/tree/master/s2
//
// There are actually two S2 formats: block and stream. They are related,
// but different: trying to decompress block-compressed data as a S2 stream
// will fail, and vice versa. The block format is the Decode and Encode
// functions and the stream format is the Reader and Writer types.
//
// A "better" compression option is available. This will trade some compression
// speed
//
// The block format, the more common case, is used when the complete size (the
// number of bytes) of the original data is known upfront, at the time
// compression starts. The stream format, also known as the framing format, is
// for when that isn't always true.
//
// Blocks do not offer much data protection, so it is up to you to
// add data validation of decompressed blocks.
//
// Streams perform CRC validation of the decompressed data.
// Stream compression will also be performed on multiple CPU cores concurrently
// significantly improving throughput.
package s2
import (
"bytes"
"hash/crc32"
"github.com/klauspost/compress/internal/race"
)
/*
Each encoded block begins with the varint-encoded length of the decoded data,
followed by a sequence of chunks. Chunks begin and end on byte boundaries. The
first byte of each chunk is broken into its 2 least and 6 most significant bits
called l and m: l ranges in [0, 4) and m ranges in [0, 64). l is the chunk tag.
Zero means a literal tag. All other values mean a copy tag.
For literal tags:
- If m < 60, the next 1 + m bytes are literal bytes.
- Otherwise, let n be the little-endian unsigned integer denoted by the next
m - 59 bytes. The next 1 + n bytes after that are literal bytes.
For copy tags, length bytes are copied from offset bytes ago, in the style of
Lempel-Ziv compression algorithms. In particular:
- For l == 1, the offset ranges in [0, 1<<11) and the length in [4, 12).
The length is 4 + the low 3 bits of m. The high 3 bits of m form bits 8-10
of the offset. The next byte is bits 0-7 of the offset.
- For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65).
The length is 1 + m. The offset is the little-endian unsigned integer
denoted by the next 2 bytes.
- For l == 3, the offset ranges in [0, 1<<32) and the length in
[1, 65). The length is 1 + m. The offset is the little-endian unsigned
integer denoted by the next 4 bytes.
*/
// Chunk tags for the block format: the low two bits of the first byte of
// each chunk (see the format description above).
const (
	tagLiteral = 0x00
	tagCopy1   = 0x01
	tagCopy2   = 0x02
	tagCopy4   = 0x03
)
const (
	checksumSize    = 4 // masked CRC-32C stored ahead of each data chunk
	chunkHeaderSize = 4 // 1 byte chunk type + 3 bytes little-endian length

	// Full stream-identifier chunks (header + magic body).
	magicChunk       = "\xff\x06\x00\x00" + magicBody
	magicChunkSnappy = "\xff\x06\x00\x00" + magicBodySnappy
	magicBodySnappy  = "sNaPpY"
	magicBody        = "S2sTwO"

	// maxBlockSize is the maximum size of the input to encodeBlock.
	//
	// For the framing format (Writer type instead of Encode function),
	// this is the maximum uncompressed size of a block.
	maxBlockSize = 4 << 20

	// minBlockSize is the minimum size of block setting when creating a writer.
	minBlockSize = 4 << 10

	skippableFrameHeader = 4
	maxChunkSize         = 1<<24 - 1 // 16777215

	// Default block size
	defaultBlockSize = 1 << 20

	// maxSnappyBlockSize is the maximum snappy block size.
	maxSnappyBlockSize = 1 << 16

	obufHeaderLen = checksumSize + chunkHeaderSize
)
// Chunk types as defined by the snappy framing format.
// ChunkTypeIndex (exported) is an S2-specific chunk id in the reserved
// skippable range, used for seek indexes.
const (
	chunkTypeCompressedData   = 0x00
	chunkTypeUncompressedData = 0x01
	ChunkTypeIndex            = 0x99
	chunkTypePadding          = 0xfe
	chunkTypeStreamIdentifier = 0xff
)
var (
	// crcTable backs the Castagnoli CRC-32C used by the framing format.
	crcTable = crc32.MakeTable(crc32.Castagnoli)

	magicChunkSnappyBytes = []byte(magicChunkSnappy) // Can be passed to functions where it escapes.
	magicChunkBytes       = []byte(magicChunk)       // Can be passed to functions where it escapes.
)
// crc implements the checksum specified in section 3 of
// https://github.com/google/snappy/blob/master/framing_format.txt
//
// The CRC-32C value is "masked": rotated right by 15 bits and offset by a
// fixed constant, as the framing format requires.
func crc(b []byte) uint32 {
	race.ReadSlice(b)
	sum := crc32.Update(0, crcTable, b)
	rotated := sum>>15 | sum<<17
	return rotated + 0xa282ead8
}
// literalExtraSize returns the extra size of encoding n literals:
// one tag byte plus however many little-endian length bytes n requires.
// n should be >= 0 and <= math.MaxUint32.
func literalExtraSize(n int64) int64 {
	switch {
	case n == 0:
		return 0
	case n < 60: // length fits in the tag byte itself
		return 1
	case n < 1<<8: // one extra length byte
		return 2
	case n < 1<<16: // two extra length bytes
		return 3
	case n < 1<<24: // three extra length bytes
		return 4
	}
	return 5 // four extra length bytes
}
// byter is satisfied by buffers that can expose their contents as a
// byte slice (e.g. *bytes.Buffer).
type byter interface {
	Bytes() []byte
}

// Compile-time check that *bytes.Buffer satisfies byter.
var _ byter = &bytes.Buffer{}
// Copyright 2011 The Snappy-Go Authors. All rights reserved.
// Copyright (c) 2019 Klaus Post. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package s2
import (
"bytes"
"encoding/binary"
"flag"
"fmt"
"io"
"math"
"math/rand"
"net/http"
"os"
"path/filepath"
"runtime"
"strings"
"testing"
"github.com/klauspost/compress/internal/snapref"
"github.com/klauspost/compress/zip"
"github.com/klauspost/compress/zstd"
)
// Platform-dependent integer limits; used to adjust expected values on
// 32-bit platforms.
const maxUint = ^uint(0)
const maxInt = int(maxUint >> 1)

var (
	download     = flag.Bool("download", false, "If true, download any missing files before running benchmarks")
	testdataDir  = flag.String("testdataDir", "testdata", "Directory containing the test data")
	benchdataDir = flag.String("benchdataDir", "testdata/bench", "Directory containing the benchmark data")
)
// TestMaxEncodedLen checks MaxEncodedLen against a table of boundary
// inputs (including values that must report "too large" as -1) and then
// against every size up to maxBlockSize.
func TestMaxEncodedLen(t *testing.T) {
	testSet := []struct {
		in, out int64
	}{
		// Edge cases:
		0: {in: 0, out: 1},
		1: {in: 1 << 24, out: 1<<24 + int64(binary.PutVarint([]byte{binary.MaxVarintLen32: 0}, int64(1<<24))) + literalExtraSize(1<<24)},
		2: {in: MaxBlockSize, out: math.MaxUint32},
		3: {in: math.MaxUint32 - binary.MaxVarintLen32 - literalExtraSize(math.MaxUint32), out: math.MaxUint32},

		// Inputs near the uint32 limit must all be rejected (-1):
		4:  {in: math.MaxUint32 - 9, out: -1},
		5:  {in: math.MaxUint32 - 8, out: -1},
		6:  {in: math.MaxUint32 - 7, out: -1},
		7:  {in: math.MaxUint32 - 6, out: -1},
		8:  {in: math.MaxUint32 - 5, out: -1},
		9:  {in: math.MaxUint32 - 4, out: -1},
		10: {in: math.MaxUint32 - 3, out: -1},
		11: {in: math.MaxUint32 - 2, out: -1},
		12: {in: math.MaxUint32 - 1, out: -1},
		13: {in: math.MaxUint32, out: -1},

		// Negative sizes are invalid:
		14: {in: -1, out: -1},
		15: {in: -2, out: -1},
	}
	// 32 bit platforms have a different threshold.
	if maxInt == math.MaxInt32 {
		testSet[2].out = math.MaxInt32
		testSet[3].out = -1
	}
	t.Log("Maxblock:", MaxBlockSize, "reduction:", intReduction)
	// Test all sizes up to maxBlockSize.
	for i := int64(0); i < maxBlockSize; i++ {
		testSet = append(testSet, struct{ in, out int64 }{in: i, out: i + int64(binary.PutVarint([]byte{binary.MaxVarintLen32: 0}, i)) + literalExtraSize(i)})
	}
	for i := range testSet {
		tt := testSet[i]
		want := tt.out
		got := int64(MaxEncodedLen(int(tt.in)))
		if got != want {
			t.Errorf("test %d: input: %d, want: %d, got: %d", i, tt.in, want, got)
		}
	}
}
// cmp reports whether got equals want. On mismatch it returns an error
// describing the first difference: a length mismatch, or the index and
// values of the first differing byte.
func cmp(got, want []byte) error {
	if bytes.Equal(got, want) {
		return nil
	}
	if len(got) != len(want) {
		return fmt.Errorf("got %d bytes, want %d", len(got), len(want))
	}
	for i, g := range got {
		if w := want[i]; g != w {
			return fmt.Errorf("byte #%d: got 0x%02x, want 0x%02x", i, g, w)
		}
	}
	return nil
}
// roundtrip compresses b with every encoder variant (asm, pure Go,
// EncodeBetter) and verifies each decodes back to b, that the source is
// never mutated, and that ConcatBlocks of several encodings decodes to
// the concatenated payload. ebuf/dbuf are optional scratch buffers.
func roundtrip(b, ebuf, dbuf []byte) error {
	// Snapshot the input so encoder mutation can be detected.
	bOrg := make([]byte, len(b))
	copy(bOrg, b)
	asmEnc := Encode(nil, b)
	if err := cmp(bOrg, b); err != nil {
		return fmt.Errorf("src was changed: %v", err)
	}
	goEnc := encodeGo(nil, b)
	if err := cmp(bOrg, b); err != nil {
		return fmt.Errorf("src was changed: %v", err)
	}
	//fmt.Println("asm:", len(asmEnc), "go:", len(goEnc))
	dGo, err := Decode(nil, goEnc)
	if err != nil {
		return fmt.Errorf("decoding error: %v", err)
	}
	if err := cmp(dGo, b); err != nil {
		return fmt.Errorf("roundtrip mismatch: %v", err)
	}

	// fmt.Println("decode asm...")
	d, err := Decode(nil, asmEnc)
	if err != nil {
		return fmt.Errorf("decoding error: %v", err)
	}
	if err := cmp(d, b); err != nil {
		return fmt.Errorf("roundtrip mismatch: %v", err)
	}
	d, err = Decode(dbuf, EncodeBetter(ebuf, b))
	if err != nil {
		return fmt.Errorf("decoding better error: %v", err)
	}
	if err := cmp(d, b); err != nil {
		return fmt.Errorf("roundtrip better mismatch: %v", err)
	}

	// Test concat with some existing data.
	dst := []byte("existing")
	// Add 3 different encodes and a 0 length block.
	concat, err := ConcatBlocks(dst, Encode(nil, b), EncodeBetter(nil, b), []byte{0}, EncodeSnappy(nil, b))
	if err != nil {
		return fmt.Errorf("concat error: %v", err)
	}
	if err := cmp(concat[:len(dst)], dst); err != nil {
		return fmt.Errorf("concat existing mismatch: %v", err)
	}
	concat = concat[len(dst):]

	// Three data blocks plus one empty block decode to b repeated 3 times.
	d, _ = Decode(nil, concat)
	want := append(make([]byte, 0, len(b)*3), b...)
	want = append(want, b...)
	want = append(want, b...)
	if err := cmp(d, want); err != nil {
		return fmt.Errorf("roundtrip concat mismatch: %v", err)
	}

	return nil
}
// TestEmpty verifies that a nil (empty) input roundtrips cleanly.
func TestEmpty(t *testing.T) {
	err := roundtrip(nil, nil, nil)
	if err != nil {
		t.Fatal(err)
	}
}
// TestSmallCopy roundtrips inputs containing a short copy match at
// varying distances, combined with scratch buffers that may be nil or
// too small for the payload.
func TestSmallCopy(t *testing.T) {
	encBufs := [][]byte{nil, make([]byte, 20), make([]byte, 64)}
	decBufs := [][]byte{nil, make([]byte, 20), make([]byte, 64)}
	for _, ebuf := range encBufs {
		for _, dbuf := range decBufs {
			for gap := 0; gap < 32; gap++ {
				input := "aaaa" + strings.Repeat("b", gap) + "aaaabbbb"
				if err := roundtrip([]byte(input), ebuf, dbuf); err != nil {
					t.Errorf("len(ebuf)=%d, len(dbuf)=%d, i=%d: %v", len(ebuf), len(dbuf), gap, err)
				}
			}
		}
	}
}
// TestSmallRand roundtrips incompressible (pseudo-random) payloads over
// a range of sizes, using a fixed seed for reproducibility.
func TestSmallRand(t *testing.T) {
	rng := rand.New(rand.NewSource(1))
	for size := 1; size < 20000; size += 23 {
		data := make([]byte, size)
		for i := range data {
			data[i] = uint8(rng.Intn(256))
		}
		if err := roundtrip(data, nil, nil); err != nil {
			t.Fatal(err)
		}
	}
}
// TestSmallRegular roundtrips highly compressible, cyclic payloads over
// a range of sizes.
func TestSmallRegular(t *testing.T) {
	for size := 1; size < 20000; size += 23 {
		data := make([]byte, size)
		for i := range data {
			data[i] = uint8(i%10 + 'a')
		}
		if err := roundtrip(data, nil, nil); err != nil {
			t.Fatal(err)
		}
	}
}
// TestSmallRepeat roundtrips payloads whose first half is a slow ramp
// and whose second half is a cyclic pattern, over a range of sizes.
func TestSmallRepeat(t *testing.T) {
	for size := 1; size < 20000; size += 23 {
		data := make([]byte, size)
		half := size / 2
		// First half: slowly increasing values.
		for i := 0; i < half; i++ {
			data[i] = uint8(i * 255 / size)
		}
		// Second half: repeating 'a'..'j'.
		for i := half; i < size; i++ {
			data[i] = uint8((i-half)%10 + 'a')
		}
		if err := roundtrip(data, nil, nil); err != nil {
			t.Fatal(err)
		}
	}
}
// TestInvalidVarint checks that malformed length prefixes are rejected
// with ErrCorrupt by both DecodedLen and Decode.
func TestInvalidVarint(t *testing.T) {
	testCases := []struct {
		desc  string
		input string
	}{{
		"invalid varint, final byte has continuation bit set",
		"\xff",
	}, {
		"invalid varint, value overflows uint64",
		"\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\x00",
	}, {
		// https://github.com/google/snappy/blob/master/format_description.txt
		// says that "the stream starts with the uncompressed length [as a
		// varint] (up to a maximum of 2^32 - 1)".
		"valid varint (as uint64), but value overflows uint32",
		"\x80\x80\x80\x80\x10",
	}}

	for _, tc := range testCases {
		input := []byte(tc.input)
		if _, err := DecodedLen(input); err != ErrCorrupt {
			t.Errorf("%s: DecodedLen: got %v, want ErrCorrupt", tc.desc, err)
		}
		if _, err := Decode(nil, input); err != ErrCorrupt {
			t.Errorf("%s: Decode: got %v, want ErrCorrupt", tc.desc, err)
		}
	}
}
// TestDecode runs a table of hand-crafted block-format inputs through
// Decode, checking output and error against expectations, and verifies
// via a canary byte pattern that Decode never writes past dBuf[:dLen].
func TestDecode(t *testing.T) {
	lit40Bytes := make([]byte, 40)
	for i := range lit40Bytes {
		lit40Bytes[i] = byte(i)
	}
	lit40 := string(lit40Bytes)

	testCases := []struct {
		desc    string
		input   string
		want    string
		wantErr error
	}{{
		`decodedLen=0; valid input`,
		"\x00",
		"",
		nil,
	}, {
		`decodedLen=3; tagLiteral, 0-byte length; length=3; valid input`,
		"\x03" + "\x08\xff\xff\xff",
		"\xff\xff\xff",
		nil,
	}, {
		`decodedLen=2; tagLiteral, 0-byte length; length=3; not enough dst bytes`,
		"\x02" + "\x08\xff\xff\xff",
		"",
		ErrCorrupt,
	}, {
		`decodedLen=3; tagLiteral, 0-byte length; length=3; not enough src bytes`,
		"\x03" + "\x08\xff\xff",
		"",
		ErrCorrupt,
	}, {
		`decodedLen=40; tagLiteral, 0-byte length; length=40; valid input`,
		"\x28" + "\x9c" + lit40,
		lit40,
		nil,
	}, {
		`decodedLen=1; tagLiteral, 1-byte length; not enough length bytes`,
		"\x01" + "\xf0",
		"",
		ErrCorrupt,
	}, {
		`decodedLen=3; tagLiteral, 1-byte length; length=3; valid input`,
		"\x03" + "\xf0\x02\xff\xff\xff",
		"\xff\xff\xff",
		nil,
	}, {
		`decodedLen=1; tagLiteral, 2-byte length; not enough length bytes`,
		"\x01" + "\xf4\x00",
		"",
		ErrCorrupt,
	}, {
		`decodedLen=3; tagLiteral, 2-byte length; length=3; valid input`,
		"\x03" + "\xf4\x02\x00\xff\xff\xff",
		"\xff\xff\xff",
		nil,
	}, {
		`decodedLen=1; tagLiteral, 3-byte length; not enough length bytes`,
		"\x01" + "\xf8\x00\x00",
		"",
		ErrCorrupt,
	}, {
		`decodedLen=3; tagLiteral, 3-byte length; length=3; valid input`,
		"\x03" + "\xf8\x02\x00\x00\xff\xff\xff",
		"\xff\xff\xff",
		nil,
	}, {
		`decodedLen=1; tagLiteral, 4-byte length; not enough length bytes`,
		"\x01" + "\xfc\x00\x00\x00",
		"",
		ErrCorrupt,
	}, {
		`decodedLen=1; tagLiteral, 4-byte length; length=3; not enough dst bytes`,
		"\x01" + "\xfc\x02\x00\x00\x00\xff\xff\xff",
		"",
		ErrCorrupt,
	}, {
		`decodedLen=4; tagLiteral, 4-byte length; length=3; not enough src bytes`,
		"\x04" + "\xfc\x02\x00\x00\x00\xff",
		"",
		ErrCorrupt,
	}, {
		`decodedLen=3; tagLiteral, 4-byte length; length=3; valid input`,
		"\x03" + "\xfc\x02\x00\x00\x00\xff\xff\xff",
		"\xff\xff\xff",
		nil,
	}, {
		`decodedLen=4; tagCopy1, 1 extra length|offset byte; not enough extra bytes`,
		"\x04" + "\x01",
		"",
		ErrCorrupt,
	}, {
		`decodedLen=4; tagCopy2, 2 extra length|offset bytes; not enough extra bytes`,
		"\x04" + "\x02\x00",
		"",
		ErrCorrupt,
	}, {
		`decodedLen=4; tagCopy4, 4 extra length|offset bytes; not enough extra bytes`,
		"\x04" + "\x03\x00\x00\x00",
		"",
		ErrCorrupt,
	}, {
		`decodedLen=4; tagLiteral (4 bytes "abcd"); valid input`,
		"\x04" + "\x0cabcd",
		"abcd",
		nil,
	}, {
		`decodedLen=13; tagLiteral (4 bytes "abcd"); tagCopy1; length=9 offset=4; valid input`,
		"\x0d" + "\x0cabcd" + "\x15\x04",
		"abcdabcdabcda",
		nil,
	}, {
		`decodedLen=8; tagLiteral (4 bytes "abcd"); tagCopy1; length=4 offset=4; valid input`,
		"\x08" + "\x0cabcd" + "\x01\x04",
		"abcdabcd",
		nil,
	}, {
		`decodedLen=8; tagLiteral (4 bytes "abcd"); tagCopy1; length=4 offset=2; valid input`,
		"\x08" + "\x0cabcd" + "\x01\x02",
		"abcdcdcd",
		nil,
	}, {
		`decodedLen=8; tagLiteral (4 bytes "abcd"); tagCopy1; length=4 offset=1; valid input`,
		"\x08" + "\x0cabcd" + "\x01\x01",
		"abcddddd",
		nil,
	}, {
		`decodedLen=8; tagLiteral (4 bytes "abcd"); tagCopy1; length=4 offset=0; repeat offset as first match`,
		"\x08" + "\x0cabcd" + "\x01\x00",
		"",
		ErrCorrupt,
	}, {
		`decodedLen=13; tagLiteral (4 bytes "abcd"); tagCopy1; length=4 offset=1; literal: 'z'; tagCopy1; length=4 offset=0; repeat offset as second match`,
		"\x0d" + "\x0cabcd" + "\x01\x01" + "\x00z" + "\x01\x00",
		"abcdddddzzzzz",
		nil,
	}, {
		`decodedLen=9; tagLiteral (4 bytes "abcd"); tagCopy1; length=4 offset=4; inconsistent dLen`,
		"\x09" + "\x0cabcd" + "\x01\x04",
		"",
		ErrCorrupt,
	}, {
		`decodedLen=8; tagLiteral (4 bytes "abcd"); tagCopy1; length=4 offset=5; offset too large`,
		"\x08" + "\x0cabcd" + "\x01\x05",
		"",
		ErrCorrupt,
	}, {
		`decodedLen=7; tagLiteral (4 bytes "abcd"); tagCopy1; length=4 offset=4; length too large`,
		"\x07" + "\x0cabcd" + "\x01\x04",
		"",
		ErrCorrupt,
	}, {
		`decodedLen=6; tagLiteral (4 bytes "abcd"); tagCopy2; length=2 offset=3; valid input`,
		"\x06" + "\x0cabcd" + "\x06\x03\x00",
		"abcdbc",
		nil,
	}, {
		`decodedLen=6; tagLiteral (4 bytes "abcd"); tagCopy4; length=2 offset=3; valid input`,
		"\x06" + "\x0cabcd" + "\x07\x03\x00\x00\x00",
		"abcdbc",
		nil,
	}}

	const (
		// notPresentXxx defines a range of byte values [0xa0, 0xc5) that are
		// not present in either the input or the output. It is written to dBuf
		// to check that Decode does not write bytes past the end of
		// dBuf[:dLen].
		//
		// The magic number 37 was chosen because it is prime. A more 'natural'
		// number like 32 might lead to a false negative if, for example, a
		// byte was incorrectly copied 4*8 bytes later.
		notPresentBase = 0xa0
		notPresentLen  = 37
	)

	var dBuf [100]byte
loop:
	for i, tc := range testCases {
		input := []byte(tc.input)
		// Sanity check: canary range must not occur in the input.
		for _, x := range input {
			if notPresentBase <= x && x < notPresentBase+notPresentLen {
				t.Errorf("#%d (%s): input shouldn't contain %#02x\ninput: % x", i, tc.desc, x, input)
				continue loop
			}
		}

		dLen, n := binary.Uvarint(input)
		if n <= 0 {
			t.Errorf("#%d (%s): invalid varint-encoded dLen", i, tc.desc)
			continue
		}
		if dLen > uint64(len(dBuf)) {
			t.Errorf("#%d (%s): dLen %d is too large", i, tc.desc, dLen)
			continue
		}

		// Pre-fill the destination with the canary pattern.
		for j := range dBuf {
			dBuf[j] = byte(notPresentBase + j%notPresentLen)
		}
		g, gotErr := Decode(dBuf[:], input)
		if got := string(g); got != tc.want || gotErr != tc.wantErr {
			t.Errorf("#%d (%s):\ngot  %q, %v\nwant %q, %v",
				i, tc.desc, got, gotErr, tc.want, tc.wantErr)
			continue
		}
		// Bytes past dLen must still carry the canary pattern.
		for j, x := range dBuf {
			if uint64(j) < dLen {
				continue
			}
			if w := byte(notPresentBase + j%notPresentLen); x != w {
				t.Errorf("#%d (%s): Decode overrun: dBuf[%d] was modified: got %#02x, want %#02x\ndBuf: % x",
					i, tc.desc, j, x, w, dBuf)
				continue loop
			}
		}
	}
}
// TestDecodeCopy4 decodes an input using a tagCopy4 (4-byte offset) copy
// that reaches back past a 64 KiB literal, which tagCopy1/2 cannot address.
func TestDecodeCopy4(t *testing.T) {
	dots := strings.Repeat(".", 65536)

	input := strings.Join([]string{
		"\x89\x80\x04",         // decodedLen = 65545.
		"\x0cpqrs",             // 4-byte literal "pqrs".
		"\xf4\xff\xff" + dots,  // 65536-byte literal dots.
		"\x13\x04\x00\x01\x00", // tagCopy4; length=5 offset=65540.
	}, "")

	gotBytes, err := Decode(nil, []byte(input))
	if err != nil {
		t.Fatal(err)
	}
	got := string(gotBytes)
	want := "pqrs" + dots + "pqrs."
	if len(got) != len(want) {
		t.Fatalf("got %d bytes, want %d", len(got), len(want))
	}
	if got != want {
		// Report the first differing byte for easier debugging.
		for i := 0; i < len(got); i++ {
			if g, w := got[i], want[i]; g != w {
				t.Fatalf("byte #%d: got %#02x, want %#02x", i, g, w)
			}
		}
	}
}
// TestDecodeLengthOffset tests decoding an encoding of the form literal +
// copy-length-offset + literal. For example: "abcdefghijkl" + "efghij" + "AB".
func TestDecodeLengthOffset(t *testing.T) {
	const (
		prefix = "abcdefghijklmnopqr"
		suffix = "ABCDEFGHIJKLMNOPQR"

		// notPresentXxx defines a range of byte values [0xa0, 0xc5) that are
		// not present in either the input or the output. It is written to
		// gotBuf to check that Decode does not write bytes past the end of
		// gotBuf[:totalLen].
		//
		// The magic number 37 was chosen because it is prime. A more 'natural'
		// number like 32 might lead to a false negative if, for example, a
		// byte was incorrectly copied 4*8 bytes later.
		notPresentBase = 0xa0
		notPresentLen  = 37
	)
	var gotBuf, wantBuf, inputBuf [128]byte
	for length := 1; length <= 18; length++ {
		for offset := 1; offset <= 18; offset++ {
		loop:
			for suffixLen := 0; suffixLen <= 18; suffixLen++ {
				// Hand-assemble: varint(totalLen), literal(prefix),
				// tagCopy2(length, offset), optional literal(suffix).
				totalLen := len(prefix) + length + suffixLen

				inputLen := binary.PutUvarint(inputBuf[:], uint64(totalLen))
				inputBuf[inputLen] = tagLiteral + 4*byte(len(prefix)-1)
				inputLen++
				inputLen += copy(inputBuf[inputLen:], prefix)
				inputBuf[inputLen+0] = tagCopy2 + 4*byte(length-1)
				inputBuf[inputLen+1] = byte(offset)
				inputBuf[inputLen+2] = 0x00
				inputLen += 3
				if suffixLen > 0 {
					inputBuf[inputLen] = tagLiteral + 4*byte(suffixLen-1)
					inputLen++
					inputLen += copy(inputBuf[inputLen:], suffix[:suffixLen])
				}
				input := inputBuf[:inputLen]

				// Fill the destination with the overrun-canary pattern.
				for i := range gotBuf {
					gotBuf[i] = byte(notPresentBase + i%notPresentLen)
				}
				got, err := Decode(gotBuf[:], input)
				if err != nil {
					t.Errorf("length=%d, offset=%d; suffixLen=%d: %v", length, offset, suffixLen, err)
					continue
				}

				// Build the expected output, resolving the overlapping copy
				// byte by byte.
				wantLen := 0
				wantLen += copy(wantBuf[wantLen:], prefix)
				for i := 0; i < length; i++ {
					wantBuf[wantLen] = wantBuf[wantLen-offset]
					wantLen++
				}
				wantLen += copy(wantBuf[wantLen:], suffix[:suffixLen])
				want := wantBuf[:wantLen]

				for _, x := range input {
					if notPresentBase <= x && x < notPresentBase+notPresentLen {
						t.Errorf("length=%d, offset=%d; suffixLen=%d: input shouldn't contain %#02x\ninput: % x",
							length, offset, suffixLen, x, input)
						continue loop
					}
				}
				// Canary bytes past totalLen must be untouched.
				for i, x := range gotBuf {
					if i < totalLen {
						continue
					}
					if w := byte(notPresentBase + i%notPresentLen); x != w {
						t.Errorf("length=%d, offset=%d; suffixLen=%d; totalLen=%d: "+
							"Decode overrun: gotBuf[%d] was modified: got %#02x, want %#02x\ngotBuf: % x",
							length, offset, suffixLen, totalLen, i, x, w, gotBuf)
						continue loop
					}
				}
				for _, x := range want {
					if notPresentBase <= x && x < notPresentBase+notPresentLen {
						t.Errorf("length=%d, offset=%d; suffixLen=%d: want shouldn't contain %#02x\nwant: % x",
							length, offset, suffixLen, x, want)
						continue loop
					}
				}

				if !bytes.Equal(got, want) {
					t.Errorf("length=%d, offset=%d; suffixLen=%d:\ninput % x\ngot   % x\nwant  % x",
						length, offset, suffixLen, input, got, want)
					continue
				}
			}
		}
	}
}
// Golden test data: a plaintext file and its pre-compressed (raw snappy
// block format) counterpart in the testdata directory.
const (
	goldenText       = "Mark.Twain-Tom.Sawyer.txt"
	goldenCompressed = goldenText + ".rawsnappy"
)
// TestDecodeGoldenInput decodes the pre-compressed golden file and
// compares the result against the known plaintext.
func TestDecodeGoldenInput(t *testing.T) {
	dir := filepath.FromSlash(*testdataDir)
	compressed, err := os.ReadFile(filepath.Join(dir, goldenCompressed))
	if err != nil {
		t.Fatalf("ReadFile: %v", err)
	}
	got, err := Decode(nil, compressed)
	if err != nil {
		t.Fatalf("Decode: %v", err)
	}
	want, err := os.ReadFile(filepath.Join(dir, goldenText))
	if err != nil {
		t.Fatalf("ReadFile: %v", err)
	}
	if err := cmp(got, want); err != nil {
		t.Fatal(err)
	}
}
// TestSlowForwardCopyOverrun tests the "expand the pattern" algorithm
// described in decode_amd64.s and its claim of a 10 byte overrun worst case.
func TestSlowForwardCopyOverrun(t *testing.T) {
	const base = 100

	for length := 1; length < 18; length++ {
		for offset := 1; offset < 18; offset++ {
			// Simulate the assembly's 8-byte-at-a-time copy, tracking the
			// highest byte position it may touch (highWaterMark).
			highWaterMark := base
			d := base
			l := length
			o := offset

			// makeOffsetAtLeast8
			for o < 8 {
				if end := d + 8; highWaterMark < end {
					highWaterMark = end
				}
				l -= o
				d += o
				o += o
			}

			// fixUpSlowForwardCopy
			a := d
			d += l

			// finishSlowForwardCopy
			for l > 0 {
				if end := a + 8; highWaterMark < end {
					highWaterMark = end
				}
				a += 8
				l -= 8
			}

			// The overrun past the logical end must stay within [0, 10].
			dWant := base + length
			overrun := highWaterMark - dWant
			if d != dWant || overrun < 0 || 10 < overrun {
				t.Errorf("length=%d, offset=%d: d and overrun: got (%d, %d), want (%d, something in [0, 10])",
					length, offset, d, overrun, dWant)
			}
		}
	}
}
// TestEncoderSkip will test skipping various sizes and block types.
func TestEncoderSkip(t *testing.T) {
	for ti, origLen := range []int{10 << 10, 256 << 10, 2 << 20, 8 << 20} {
		if testing.Short() && ti > 1 {
			break
		}
		t.Run(fmt.Sprint(origLen), func(t *testing.T) {
			// Build input with mixed compressibility so the stream contains
			// both compressed and uncompressed block types to skip across.
			src := make([]byte, origLen)
			rng := rand.New(rand.NewSource(1))
			firstHalf, secondHalf := src[:origLen/2], src[origLen/2:]
			// bonus aliases the tail 10% of secondHalf and is overwritten below.
			bonus := secondHalf[len(secondHalf)-origLen/10:]
			for i := range firstHalf {
				// Incompressible.
				firstHalf[i] = uint8(rng.Intn(256))
			}
			for i := range secondHalf {
				// Easy to compress.
				secondHalf[i] = uint8(i & 32)
			}
			for i := range bonus {
				// Incompressible.
				bonus[i] = uint8(rng.Intn(256))
			}
			var dst bytes.Buffer
			enc := NewWriter(&dst, WriterBlockSize(64<<10))
			_, err := io.Copy(enc, bytes.NewBuffer(src))
			if err != nil {
				t.Fatal(err)
			}
			err = enc.Close()
			if err != nil {
				t.Fatal(err)
			}
			compressed := dst.Bytes()
			dec := NewReader(nil)
			// Step is chosen so skip offsets don't align with block boundaries.
			for i := 0; i < len(src); i += len(src)/20 - 17 {
				t.Run(fmt.Sprint("skip-", i), func(t *testing.T) {
					want := src[i:]
					dec.Reset(bytes.NewBuffer(compressed))
					// Read some of it first, so Skip starts mid-stream.
					read, err := io.CopyN(io.Discard, dec, int64(len(want)/10))
					if err != nil {
						t.Fatal(err)
					}
					// skip what we just read.
					want = want[read:]
					err = dec.Skip(int64(i))
					if err != nil {
						t.Fatal(err)
					}
					got, err := io.ReadAll(dec)
					if err != nil {
						t.Errorf("Skipping %d returned error: %v", i, err)
						return
					}
					if !bytes.Equal(want, got) {
						t.Log("got len:", len(got))
						t.Log("want len:", len(want))
						t.Errorf("Skipping %d did not return correct data (content mismatch)", i)
						return
					}
				})
				if testing.Short() && i > 0 {
					return
				}
			}
		})
	}
}
// TestEncodeNoiseThenRepeats encodes input for which the first half is very
// incompressible and the second half is very compressible. The encoded form's
// length should be closer to 50% of the original length than 100%.
func TestEncodeNoiseThenRepeats(t *testing.T) {
	for _, total := range []int{256 * 1024, 2048 * 1024} {
		src := make([]byte, total)
		rng := rand.New(rand.NewSource(1))
		noise, repeats := src[:total/2], src[total/2:]
		for i := range noise {
			noise[i] = uint8(rng.Intn(256))
		}
		for i := range repeats {
			repeats[i] = uint8(i >> 8)
		}
		dst := Encode(nil, src)
		if got, want := len(dst), total*3/4; got >= want {
			t.Fatalf("origLen=%d: got %d encoded bytes, want less than %d", total, got, want)
		}
		t.Log(len(dst))
	}
}
// TestFramingFormat round-trips a mixed-compressibility input through the
// streaming Writer/Reader pair.
func TestFramingFormat(t *testing.T) {
	// src is comprised of alternating 1e5-sized sequences of random
	// (incompressible) bytes and repeated (compressible) bytes. 1e5 was chosen
	// because it is larger than maxBlockSize (64k).
	src := make([]byte, 1e6)
	rng := rand.New(rand.NewSource(1))
	for i := 0; i < 10; i++ {
		run := src[1e5*i : 1e5*(i+1)]
		if i%2 == 0 {
			for j := range run {
				run[j] = uint8(rng.Intn(256))
			}
		} else {
			for j := range run {
				run[j] = uint8(i)
			}
		}
	}
	buf := new(bytes.Buffer)
	bw := NewWriter(buf)
	if _, err := bw.Write(src); err != nil {
		t.Fatalf("Write: encoding: %v", err)
	}
	if err := bw.Close(); err != nil {
		t.Fatal(err)
	}
	dst, err := io.ReadAll(NewReader(buf))
	if err != nil {
		t.Fatalf("ReadAll: decoding: %v", err)
	}
	if err := cmp(dst, src); err != nil {
		t.Fatal(err)
	}
}
// TestFramingFormatBetter is TestFramingFormat using the "better
// compression" writer option.
func TestFramingFormatBetter(t *testing.T) {
	// src is comprised of alternating 1e5-sized sequences of random
	// (incompressible) bytes and repeated (compressible) bytes. 1e5 was chosen
	// because it is larger than maxBlockSize (64k).
	src := make([]byte, 1e6)
	rng := rand.New(rand.NewSource(1))
	for i := 0; i < 10; i++ {
		run := src[1e5*i : 1e5*(i+1)]
		if i%2 == 0 {
			for j := range run {
				run[j] = uint8(rng.Intn(256))
			}
		} else {
			for j := range run {
				run[j] = uint8(i)
			}
		}
	}
	buf := new(bytes.Buffer)
	bw := NewWriter(buf, WriterBetterCompression())
	if _, err := bw.Write(src); err != nil {
		t.Fatalf("Write: encoding: %v", err)
	}
	if err := bw.Close(); err != nil {
		t.Fatal(err)
	}
	dst, err := io.ReadAll(NewReader(buf))
	if err != nil {
		t.Fatalf("ReadAll: decoding: %v", err)
	}
	if err := cmp(dst, src); err != nil {
		t.Fatal(err)
	}
}
// TestEmitLiteral checks the encoded tag bytes emitLiteral produces for
// literal runs of each length class (1-byte tag, then 1- and 2-byte
// length extensions).
func TestEmitLiteral(t *testing.T) {
	testCases := []struct {
		length int
		want   string // expected tag/length prefix preceding the literal bytes
	}{
		{1, "\x00"},
		{2, "\x04"},
		{59, "\xe8"},
		{60, "\xec"},
		{61, "\xf0\x3c"},
		{62, "\xf0\x3d"},
		{254, "\xf0\xfd"},
		{255, "\xf0\xfe"},
		{256, "\xf0\xff"},
		{257, "\xf4\x00\x01"},
		{65534, "\xf4\xfd\xff"},
		{65535, "\xf4\xfe\xff"},
		{65536, "\xf4\xff\xff"},
	}
	dst := make([]byte, 70000)
	nines := bytes.Repeat([]byte{0x99}, 65536)
	for _, tc := range testCases {
		lit := nines[:tc.length]
		n := emitLiteral(dst, lit)
		// The output must be the tag prefix followed by the literal bytes.
		if !bytes.HasSuffix(dst[:n], lit) {
			t.Errorf("length=%d: did not end with that many literal bytes", tc.length)
			continue
		}
		got := string(dst[:n-tc.length])
		if got != tc.want {
			t.Errorf("length=%d:\ngot % x\nwant % x", tc.length, got, tc.want)
			continue
		}
	}
}
// TestEmitCopy checks the exact bytes emitCopy produces for a matrix of
// (offset, length) pairs covering the 1-, 2- and 3-byte copy encodings and
// the repeat-code extensions used for long lengths.
func TestEmitCopy(t *testing.T) {
	testCases := []struct {
		offset int
		length int
		want   string // exact expected encoding
	}{
		{8, 04, "\x01\x08"},
		{8, 11, "\x1d\x08"},
		{8, 12, "\x2e\x08\x00"},
		{8, 13, "\x32\x08\x00"},
		{8, 59, "\xea\x08\x00"},
		{8, 60, "\xee\x08\x00"},
		{8, 61, "\xf2\x08\x00"},
		{8, 62, "\xf6\x08\x00"},
		{8, 63, "\xfa\x08\x00"},
		{8, 64, "\xfe\x08\x00"},
		{8, 65, "\x11\b\x15\x001"},
		{8, 66, "\x11\b\x15\x002"},
		{8, 67, "\x11\b\x15\x003"},
		{8, 68, "\x11\b\x15\x004"},
		{8, 69, "\x11\b\x15\x005"},
		{8, 80, "\x11\b\x15\x00@"},
		{8, 800, "\x11\b\x19\x00\x14\x02"},
		{8, 800000, "\x11\b\x1d\x00\xf44\v"},
		{256, 04, "\x21\x00"},
		{256, 11, "\x3d\x00"},
		{256, 12, "\x2e\x00\x01"},
		{256, 13, "\x32\x00\x01"},
		{256, 59, "\xea\x00\x01"},
		{256, 60, "\xee\x00\x01"},
		{256, 61, "\xf2\x00\x01"},
		{256, 62, "\xf6\x00\x01"},
		{256, 63, "\xfa\x00\x01"},
		{256, 64, "\xfe\x00\x01"},
		{256, 65, "1\x00\x15\x001"},
		{256, 66, "1\x00\x15\x002"},
		{256, 67, "1\x00\x15\x003"},
		{256, 68, "1\x00\x15\x004"},
		{256, 69, "1\x00\x15\x005"},
		{256, 80, "1\x00\x15\x00@"},
		{256, 800, "1\x00\x19\x00\x14\x02"},
		{256, 80000, "1\x00\x1d\x00t8\x00"},
		{2048, 04, "\x0e\x00\x08"},
		{2048, 11, "\x2a\x00\x08"},
		{2048, 12, "\x2e\x00\x08"},
		{2048, 13, "\x32\x00\x08"},
		{2048, 59, "\xea\x00\x08"},
		{2048, 60, "\xee\x00\x08"},
		{2048, 61, "\xf2\x00\x08"},
		{2048, 62, "\xf6\x00\x08"},
		{2048, 63, "\xfa\x00\x08"},
		{2048, 64, "\xfe\x00\x08"},
		{2048, 65, "\xee\x00\x08\x05\x00"},
		{2048, 66, "\xee\x00\x08\x09\x00"},
		{2048, 67, "\xee\x00\x08\x0d\x00"},
		{2048, 68, "\xee\x00\x08\x11\x00"},
		{2048, 69, "\xee\x00\x08\x15\x00\x01"},
		{2048, 80, "\xee\x00\x08\x15\x00\x0c"},
		{2048, 800, "\xee\x00\x08\x19\x00\xe0\x01"},
		{2048, 80000, "\xee\x00\x08\x1d\x00\x40\x38\x00"},
		{204800, 04, "\x0f\x00\x20\x03\x00"},
		{204800, 65, "\xff\x00\x20\x03\x00\x03\x00\x20\x03\x00"},
		{204800, 69, "\xff\x00\x20\x03\x00\x05\x00"},
		{204800, 800, "\xff\x00\x20\x03\x00\x19\x00\xdc\x01"},
		{204800, 80000, "\xff\x00\x20\x03\x00\x1d\x00\x3c\x38\x00"},
	}
	dst := make([]byte, 1024)
	for _, tc := range testCases {
		n := emitCopy(dst, tc.offset, tc.length)
		got := string(dst[:n])
		if got != tc.want {
			t.Errorf("offset=%d, length=%d:\ngot %q\nwant %q", tc.offset, tc.length, got, tc.want)
		}
	}
}
// TestNewWriter round-trips every subset of five input slices through a
// fresh Writer, exercising internal buffering across many total sizes.
func TestNewWriter(t *testing.T) {
	// Test all 32 possible sub-sequences of these 5 input slices.
	//
	// Their lengths sum to 400,000, which is over 6 times the Writer ibuf
	// capacity: 6 * maxBlockSize is 393,216.
	inputs := [][]byte{
		bytes.Repeat([]byte{'a'}, 40000),
		bytes.Repeat([]byte{'b'}, 150000),
		bytes.Repeat([]byte{'c'}, 60000),
		bytes.Repeat([]byte{'d'}, 120000),
		bytes.Repeat([]byte{'e'}, 30000),
	}
loop:
	// Each bit of i selects whether inputs[j] is included in this run.
	for i := 0; i < 1<<uint(len(inputs)); i++ {
		var want []byte
		buf := new(bytes.Buffer)
		w := NewWriter(buf)
		for j, input := range inputs {
			if i&(1<<uint(j)) == 0 {
				continue
			}
			if _, err := w.Write(input); err != nil {
				t.Errorf("i=%#02x: j=%d: Write: %v", i, j, err)
				continue loop
			}
			want = append(want, input...)
		}
		if err := w.Close(); err != nil {
			t.Errorf("i=%#02x: Close: %v", i, err)
			continue
		}
		got, err := io.ReadAll(NewReader(buf))
		if err != nil {
			t.Errorf("i=%#02x: ReadAll: %v", i, err)
			continue
		}
		if err := cmp(got, want); err != nil {
			t.Errorf("i=%#02x: %v", i, err)
			continue
		}
	}
}
// TestFlush verifies the Writer buffers internally: nothing reaches the
// underlying writer before Flush, and something does afterwards.
func TestFlush(t *testing.T) {
	sink := new(bytes.Buffer)
	enc := NewWriter(sink)
	defer enc.Close()
	if _, err := enc.Write(bytes.Repeat([]byte{'x'}, 20)); err != nil {
		t.Fatalf("Write: %v", err)
	}
	if n := sink.Len(); n != 0 {
		t.Fatalf("before Flush: %d bytes were written to the underlying io.Writer, want 0", n)
	}
	if err := enc.Flush(); err != nil {
		t.Fatalf("Flush: %v", err)
	}
	if n := sink.Len(); n == 0 {
		t.Fatalf("after Flush: %d bytes were written to the underlying io.Writer, want non-0", n)
	}
}
// TestReaderUncompressedDataOK decodes a hand-built stream containing one
// valid uncompressed chunk and checks the payload comes through intact.
func TestReaderUncompressedDataOK(t *testing.T) {
	r := NewReader(strings.NewReader(magicChunk +
		"\x01\x08\x00\x00" + // Uncompressed chunk, 8 bytes long (including 4 byte checksum).
		"\x68\x10\xe6\xb6" + // Checksum.
		"\x61\x62\x63\x64", // Uncompressed payload: "abcd".
	))
	g, err := io.ReadAll(r)
	if err != nil {
		t.Fatal(err)
	}
	if got, want := string(g), "abcd"; got != want {
		t.Fatalf("got %q, want %q", got, want)
	}
}
// TestReaderUncompressedDataNoPayload checks that a chunk header promising
// 4 bytes with no payload following is reported as ErrCorrupt.
func TestReaderUncompressedDataNoPayload(t *testing.T) {
	r := NewReader(strings.NewReader(magicChunk +
		"\x01\x04\x00\x00" + // Uncompressed chunk, 4 bytes long.
		"", // No payload; corrupt input.
	))
	if _, err := io.ReadAll(r); err != ErrCorrupt {
		t.Fatalf("got %v, want %v", err, ErrCorrupt)
	}
}
// TestReaderUncompressedDataTooLong checks the boundary of the maximum legal
// uncompressed chunk length: exactly at the limit the (zeroed) CRC fails
// with ErrCRC; one byte over fails earlier with ErrCorrupt.
func TestReaderUncompressedDataTooLong(t *testing.T) {
	// The maximum legal chunk length... is 4MB + 4 bytes checksum.
	n := maxBlockSize + checksumSize
	n32 := uint32(n)
	r := NewReader(strings.NewReader(magicChunk +
		// Uncompressed chunk, n bytes long.
		string([]byte{chunkTypeUncompressedData, uint8(n32), uint8(n32 >> 8), uint8(n32 >> 16)}) +
		strings.Repeat("\x00", n),
	))
	// CRC is not set, so we should expect that error.
	if _, err := io.ReadAll(r); err != ErrCRC {
		t.Fatalf("got %v, want %v", err, ErrCRC)
	}
	// test first invalid.
	n++
	n32 = uint32(n)
	r = NewReader(strings.NewReader(magicChunk +
		// Uncompressed chunk, n bytes long.
		string([]byte{chunkTypeUncompressedData, uint8(n32), uint8(n32 >> 8), uint8(n32 >> 16)}) +
		strings.Repeat("\x00", n),
	))
	if _, err := io.ReadAll(r); err != ErrCorrupt {
		t.Fatalf("got %v, want %v", err, ErrCorrupt)
	}
}
// TestReaderReset checks that a single Reader can be Reset across a mix of
// valid, invalid, and partially-read streams without state leaking between
// them.
func TestReaderReset(t *testing.T) {
	gold := bytes.Repeat([]byte("All that is gold does not glitter,\n"), 10000)
	buf := new(bytes.Buffer)
	w := NewWriter(buf)
	_, err := w.Write(gold)
	if err != nil {
		t.Fatalf("Write: %v", err)
	}
	err = w.Close()
	if err != nil {
		t.Fatalf("Close: %v", err)
	}
	encoded, invalid, partial := buf.String(), "invalid", "partial"
	r := NewReader(nil)
	for i, s := range []string{encoded, invalid, partial, encoded, partial, invalid, encoded, encoded} {
		if s == partial {
			// Read only 101 bytes of a valid stream, then abandon it; the
			// next Reset must discard the leftover state.
			r.Reset(strings.NewReader(encoded))
			if _, err := r.Read(make([]byte, 101)); err != nil {
				t.Errorf("#%d: %v", i, err)
				continue
			}
			continue
		}
		r.Reset(strings.NewReader(s))
		got, err := io.ReadAll(r)
		switch s {
		case encoded:
			if err != nil {
				t.Errorf("#%d: %v", i, err)
				continue
			}
			if err := cmp(got, gold); err != nil {
				t.Errorf("#%d: %v", i, err)
				continue
			}
		case invalid:
			if err == nil {
				t.Errorf("#%d: got nil error, want non-nil", i)
				continue
			}
		}
	}
}
// TestWriterReset verifies that one Writer can be Reset to fresh
// destinations repeatedly and still round-trip inputs of increasing size
// (0%..100% of gold in n steps).
//
// Fix: the Flush error branch was missing its "continue" and closing brace,
// which nested the ReadAll/append logic inside the error path and left a
// "continue" outside any loop; restored the intended flat control flow.
func TestWriterReset(t *testing.T) {
	gold := bytes.Repeat([]byte("Not all those who wander are lost;\n"), 10000)
	const n = 20
	w := NewWriter(nil)
	defer w.Close()
	var gots, wants [][]byte
	failed := false
	for i := 0; i <= n; i++ {
		buf := new(bytes.Buffer)
		w.Reset(buf)
		want := gold[:len(gold)*i/n]
		if _, err := w.Write(want); err != nil {
			t.Errorf("#%d: Write: %v", i, err)
			failed = true
			continue
		}
		if err := w.Flush(); err != nil {
			t.Errorf("#%d: Flush: %v", i, err)
			failed = true
			continue
		}
		got, err := io.ReadAll(NewReader(buf))
		if err != nil {
			t.Errorf("#%d: ReadAll: %v", i, err)
			failed = true
			continue
		}
		gots = append(gots, got)
		wants = append(wants, want)
	}
	if failed {
		// Errors were already reported and the slices may be ragged; skip
		// the element-wise comparison.
		return
	}
	for i := range gots {
		if err := cmp(gots[i], wants[i]); err != nil {
			t.Errorf("#%d: %v", i, err)
		}
	}
}
// TestWriterResetWithoutFlush checks that Reset discards any unflushed
// buffered data: bytes written before Reset must not appear in the new
// destination's stream.
func TestWriterResetWithoutFlush(t *testing.T) {
	dst0 := new(bytes.Buffer)
	dst1 := new(bytes.Buffer)
	w := NewWriter(dst0)
	if _, err := w.Write([]byte("xxx")); err != nil {
		t.Fatalf("Write #0: %v", err)
	}
	// Note that we don't Flush the Writer before calling Reset.
	w.Reset(dst1)
	if _, err := w.Write([]byte("yyy")); err != nil {
		t.Fatalf("Write #1: %v", err)
	}
	if err := w.Flush(); err != nil {
		t.Fatalf("Flush: %v", err)
	}
	decoded, err := io.ReadAll(NewReader(dst1))
	if err != nil {
		t.Fatalf("ReadAll: %v", err)
	}
	if err := cmp(decoded, []byte("yyy")); err != nil {
		t.Fatal(err)
	}
}
// writeCounter counts how many Write calls it receives, discarding the data.
type writeCounter int

// Write implements io.Writer: it bumps the call counter by one and reports
// the entire input as written without error.
func (c *writeCounter) Write(p []byte) (int, error) {
	*c = *c + 1
	return len(p), nil
}
// TestNumUnderlyingWrites tests that each Writer flush only makes one or two
// Write calls on its underlying io.Writer, depending on whether or not the
// flushed buffer was compressible.
func TestNumUnderlyingWrites(t *testing.T) {
	testCases := []struct {
		input []byte
		want  int // expected number of Write calls to the underlying writer
	}{
		// Magic header + block
		{bytes.Repeat([]byte{'x'}, 100), 2},
		// One block each:
		{bytes.Repeat([]byte{'y'}, 100), 1},
		{[]byte("ABCDEFGHIJKLMNOPQRST"), 1},
	}
	// If we are doing sync writes, we write uncompressed as two writes.
	if runtime.GOMAXPROCS(0) == 1 {
		testCases[2].want++
	}
	var c writeCounter
	w := NewWriter(&c)
	defer w.Close()
	for i, tc := range testCases {
		// Reset the counter so each case measures only its own flush.
		c = 0
		if _, err := w.Write(tc.input); err != nil {
			t.Errorf("#%d: Write: %v", i, err)
			continue
		}
		if err := w.Flush(); err != nil {
			t.Errorf("#%d: Flush: %v", i, err)
			continue
		}
		if int(c) != tc.want {
			t.Errorf("#%d: got %d underlying writes, want %d", i, c, tc.want)
			continue
		}
	}
}
// testWriterRoundtrip compresses src with a Writer configured by opts,
// decompresses the result, and verifies it matches. It also checks that an
// extra Flush and the final Close are harmless no-ops.
func testWriterRoundtrip(t *testing.T, src []byte, opts ...WriterOption) {
	var compressed bytes.Buffer
	w := NewWriter(&compressed, opts...)
	if n, err := w.Write(src); err != nil {
		t.Error(err)
		return
	} else if n != len(src) {
		t.Error(io.ErrShortWrite)
		return
	}
	if err := w.Flush(); err != nil {
		t.Error(err)
		return
	}
	// Extra flush and close should be noops.
	if err := w.Flush(); err != nil {
		t.Error(err)
		return
	}
	if err := w.Close(); err != nil {
		t.Error(err)
		return
	}
	t.Logf("encoded to %d -> %d bytes", len(src), compressed.Len())
	decoded, err := io.ReadAll(NewReader(&compressed))
	if err != nil {
		t.Error(err)
		return
	}
	if len(decoded) != len(src) {
		t.Error("decoded len:", len(decoded), "!=", len(src))
		return
	}
	if err := cmp(src, decoded); err != nil {
		t.Error(err)
	}
}
// testBlockRoundtrip round-trips src through the block-format Encode and
// Decode pair and verifies the result.
func testBlockRoundtrip(t *testing.T, src []byte) {
	encoded := Encode(nil, src)
	t.Logf("encoded to %d -> %d bytes", len(src), len(encoded))
	decoded, err := Decode(nil, encoded)
	if err != nil {
		t.Error(err)
		return
	}
	if len(decoded) != len(src) {
		t.Error("decoded len:", len(decoded), "!=", len(src))
		return
	}
	if err := cmp(decoded, src); err != nil {
		t.Error(err)
	}
}
// testBetterBlockRoundtrip round-trips src through EncodeBetter and Decode
// and verifies the result.
func testBetterBlockRoundtrip(t *testing.T, src []byte) {
	encoded := EncodeBetter(nil, src)
	t.Logf("encoded to %d -> %d bytes", len(src), len(encoded))
	decoded, err := Decode(nil, encoded)
	if err != nil {
		t.Error(err)
		return
	}
	if len(decoded) != len(src) {
		t.Error("decoded len:", len(decoded), "!=", len(src))
		return
	}
	if err := cmp(src, decoded); err != nil {
		t.Error(err)
	}
}
// testBestBlockRoundtrip round-trips src through EncodeBest and Decode and
// verifies the result.
func testBestBlockRoundtrip(t *testing.T, src []byte) {
	encoded := EncodeBest(nil, src)
	t.Logf("encoded to %d -> %d bytes", len(src), len(encoded))
	decoded, err := Decode(nil, encoded)
	if err != nil {
		t.Error(err)
		return
	}
	if len(decoded) != len(src) {
		t.Error("decoded len:", len(decoded), "!=", len(src))
		return
	}
	if err := cmp(src, decoded); err != nil {
		t.Error(err)
	}
}
// testSnappyBlockRoundtrip encodes src with each snappy-compatible s2
// encoder and verifies the reference snappy decoder (snapref) can decode
// the output, proving wire-format compatibility.
func testSnappyBlockRoundtrip(t *testing.T, src []byte) {
	// Write with s2, decode with snapref.
	t.Run("regular", func(t *testing.T) {
		dst := EncodeSnappy(nil, src)
		t.Logf("encoded to %d -> %d bytes", len(src), len(dst))
		decoded, err := snapref.Decode(nil, dst)
		if err != nil {
			t.Error(err)
			return
		}
		if len(decoded) != len(src) {
			t.Error("decoded len:", len(decoded), "!=", len(src))
			return
		}
		err = cmp(src, decoded)
		if err != nil {
			t.Error(err)
		}
	})
	t.Run("better", func(t *testing.T) {
		dst := EncodeSnappyBetter(nil, src)
		t.Logf("encoded to %d -> %d bytes", len(src), len(dst))
		decoded, err := snapref.Decode(nil, dst)
		if err != nil {
			t.Error(err)
			return
		}
		if len(decoded) != len(src) {
			t.Error("decoded len:", len(decoded), "!=", len(src))
			return
		}
		err = cmp(src, decoded)
		if err != nil {
			t.Error(err)
		}
	})
	t.Run("best", func(t *testing.T) {
		dst := EncodeSnappyBest(nil, src)
		t.Logf("encoded to %d -> %d bytes", len(src), len(dst))
		decoded, err := snapref.Decode(nil, dst)
		if err != nil {
			t.Error(err)
			return
		}
		if len(decoded) != len(src) {
			t.Error("decoded len:", len(decoded), "!=", len(src))
			return
		}
		err = cmp(src, decoded)
		if err != nil {
			t.Error(err)
		}
	})
}
// testSnappyDecode compresses src with the reference snappy stream writer
// and verifies our Reader decodes it correctly.
//
// Fix: the enc.Close() error was previously discarded; Close flushes the
// buffered writer, so a failure there would silently truncate the stream
// being tested.
func testSnappyDecode(t *testing.T, src []byte) {
	var buf bytes.Buffer
	enc := snapref.NewBufferedWriter(&buf)
	n, err := enc.Write(src)
	if err != nil {
		t.Error(err)
		return
	}
	if n != len(src) {
		t.Error(io.ErrShortWrite)
		return
	}
	if err := enc.Close(); err != nil {
		t.Error(err)
		return
	}
	t.Logf("encoded to %d -> %d bytes", len(src), buf.Len())
	dec := NewReader(&buf)
	decoded, err := io.ReadAll(dec)
	if err != nil {
		t.Error(err)
		return
	}
	if len(decoded) != len(src) {
		t.Error("decoded len:", len(decoded), "!=", len(src))
		return
	}
	err = cmp(src, decoded)
	if err != nil {
		t.Error(err)
	}
}
// benchDecode benchmarks Decode on src after encoding it with each encoder
// variant. Throughput is reported in uncompressed bytes; "pct" is the
// compressed size as a percentage of the input.
func benchDecode(b *testing.B, src []byte) {
	b.Run("default", func(b *testing.B) {
		encoded := Encode(nil, src)
		b.SetBytes(int64(len(src)))
		b.ReportAllocs()
		b.ResetTimer()
		for i := 0; i < b.N; i++ {
			// Decode into src's storage to avoid per-iteration allocation.
			_, err := Decode(src[:0], encoded)
			if err != nil {
				b.Fatal(err)
			}
		}
		b.ReportMetric(100*float64(len(encoded))/float64(len(src)), "pct")
	})
	b.Run("better", func(b *testing.B) {
		encoded := EncodeBetter(nil, src)
		b.SetBytes(int64(len(src)))
		b.ReportAllocs()
		b.ResetTimer()
		for i := 0; i < b.N; i++ {
			_, err := Decode(src[:0], encoded)
			if err != nil {
				b.Fatal(err)
			}
		}
		b.ReportMetric(100*float64(len(encoded))/float64(len(src)), "pct")
	})
	b.Run("best", func(b *testing.B) {
		encoded := EncodeBest(nil, src)
		b.SetBytes(int64(len(src)))
		b.ReportAllocs()
		b.ResetTimer()
		for i := 0; i < b.N; i++ {
			_, err := Decode(src[:0], encoded)
			if err != nil {
				b.Fatal(err)
			}
		}
		b.ReportMetric(100*float64(len(encoded))/float64(len(src)), "pct")
	})
	// Decoding input produced by the reference snappy encoder.
	b.Run("snappy-input", func(b *testing.B) {
		encoded := snapref.Encode(nil, src)
		b.SetBytes(int64(len(src)))
		b.ReportAllocs()
		b.ResetTimer()
		for i := 0; i < b.N; i++ {
			_, err := Decode(src[:0], encoded)
			if err != nil {
				b.Fatal(err)
			}
		}
		b.ReportMetric(100*float64(len(encoded))/float64(len(src)), "pct")
	})
}
// benchEncode benchmarks every encoder variant on src, reusing one
// destination buffer. "pct" is the compressed size as a percentage of the
// input.
func benchEncode(b *testing.B, src []byte) {
	// Bandwidth is in amount of uncompressed data.
	dst := make([]byte, snapref.MaxEncodedLen(len(src)))
	b.ResetTimer()
	b.Run("default", func(b *testing.B) {
		b.SetBytes(int64(len(src)))
		b.ReportAllocs()
		for i := 0; i < b.N; i++ {
			Encode(dst, src)
		}
		b.ReportMetric(100*float64(len(Encode(dst, src)))/float64(len(src)), "pct")
	})
	b.Run("better", func(b *testing.B) {
		b.SetBytes(int64(len(src)))
		b.ReportAllocs()
		for i := 0; i < b.N; i++ {
			EncodeBetter(dst, src)
		}
		b.ReportMetric(100*float64(len(EncodeBetter(dst, src)))/float64(len(src)), "pct")
	})
	b.Run("best", func(b *testing.B) {
		b.SetBytes(int64(len(src)))
		b.ReportAllocs()
		for i := 0; i < b.N; i++ {
			EncodeBest(dst, src)
		}
		b.ReportMetric(100*float64(len(EncodeBest(dst, src)))/float64(len(src)), "pct")
	})
	b.Run("snappy-default", func(b *testing.B) {
		b.SetBytes(int64(len(src)))
		b.ReportAllocs()
		for i := 0; i < b.N; i++ {
			EncodeSnappy(dst, src)
		}
		b.ReportMetric(100*float64(len(EncodeSnappy(dst, src)))/float64(len(src)), "pct")
	})
	b.Run("snappy-better", func(b *testing.B) {
		b.SetBytes(int64(len(src)))
		b.ReportAllocs()
		for i := 0; i < b.N; i++ {
			EncodeSnappyBetter(dst, src)
		}
		b.ReportMetric(100*float64(len(EncodeSnappyBetter(dst, src)))/float64(len(src)), "pct")
	})
	b.Run("snappy-best", func(b *testing.B) {
		b.SetBytes(int64(len(src)))
		b.ReportAllocs()
		for i := 0; i < b.N; i++ {
			EncodeSnappyBest(dst, src)
		}
		b.ReportMetric(100*float64(len(EncodeSnappyBest(dst, src)))/float64(len(src)), "pct")
	})
	// Reference (pure Go) snappy encoder for comparison.
	b.Run("snappy-ref-noasm", func(b *testing.B) {
		b.SetBytes(int64(len(src)))
		b.ReportAllocs()
		for i := 0; i < b.N; i++ {
			snapref.Encode(dst, src)
		}
		b.ReportMetric(100*float64(len(snapref.Encode(dst, src)))/float64(len(src)), "pct")
	})
}
// testOrBenchmark reports whether b is driving a benchmark or a regular
// test, for use in skip/log messages.
func testOrBenchmark(b testing.TB) string {
	switch b.(type) {
	case *testing.B:
		return "benchmark"
	default:
		return "test"
	}
}
// readFile returns the contents of filename. The test or benchmark is
// skipped when the file cannot be read and failed when it is empty.
func readFile(b testing.TB, filename string) []byte {
	data, err := os.ReadFile(filename)
	if err != nil {
		b.Skipf("skipping %s: %v", testOrBenchmark(b), err)
	}
	if len(data) == 0 {
		b.Fatalf("%s has zero length", filename)
	}
	return data
}
// expand returns a slice of length n containing mutated copies of src.
func expand(src []byte, n int) []byte {
dst := make([]byte, n)
cnt := uint8(0)
for x := dst; len(x) > 0; cnt++ {
idx := copy(x, src)
for i := range x {
if i >= len(src) {
break
}
x[i] = src[i] ^ cnt
}
x = x[idx:]
}
return dst
}
// benchTwain runs an encode or decode benchmark over n bytes of (mutated,
// repeated) Twain text.
func benchTwain(b *testing.B, n int, decode bool) {
	data := expand(readFile(b, "../testdata/Mark.Twain-Tom.Sawyer.txt"), n)
	if !decode {
		benchEncode(b, data)
		return
	}
	benchDecode(b, data)
}
// Twain decode benchmarks at input sizes from 10 B to 10 MB.
func BenchmarkTwainDecode1e1(b *testing.B) { benchTwain(b, 1e1, true) }
func BenchmarkTwainDecode1e2(b *testing.B) { benchTwain(b, 1e2, true) }
func BenchmarkTwainDecode1e3(b *testing.B) { benchTwain(b, 1e3, true) }
func BenchmarkTwainDecode1e4(b *testing.B) { benchTwain(b, 1e4, true) }
func BenchmarkTwainDecode1e5(b *testing.B) { benchTwain(b, 1e5, true) }
func BenchmarkTwainDecode1e6(b *testing.B) { benchTwain(b, 1e6, true) }
func BenchmarkTwainDecode1e7(b *testing.B) { benchTwain(b, 1e7, true) }

// Twain encode benchmarks at input sizes from 10 B to 10 MB.
func BenchmarkTwainEncode1e1(b *testing.B) { benchTwain(b, 1e1, false) }
func BenchmarkTwainEncode1e2(b *testing.B) { benchTwain(b, 1e2, false) }
func BenchmarkTwainEncode1e3(b *testing.B) { benchTwain(b, 1e3, false) }
func BenchmarkTwainEncode1e4(b *testing.B) { benchTwain(b, 1e4, false) }
func BenchmarkTwainEncode1e5(b *testing.B) { benchTwain(b, 1e5, false) }
func BenchmarkTwainEncode1e6(b *testing.B) { benchTwain(b, 1e6, false) }
func BenchmarkTwainEncode1e7(b *testing.B) { benchTwain(b, 1e7, false) }
// BenchmarkRandomEncodeBlock1MB benchmarks the encoders on 1 MiB of
// pseudo-random (incompressible) input.
func BenchmarkRandomEncodeBlock1MB(b *testing.B) {
	rng := rand.New(rand.NewSource(1))
	data := make([]byte, 1<<20)
	for i := range data {
		data[i] = uint8(rng.Intn(256))
	}
	benchEncode(b, data)
}
// BenchmarkRandomEncodeBetterBlock16MB benchmarks 16 MiB of pseudo-random
// (incompressible) input. Note: despite the "Better" in the name, it calls
// benchEncode, which runs all encoder variants on this data.
func BenchmarkRandomEncodeBetterBlock16MB(b *testing.B) {
	rng := rand.New(rand.NewSource(1))
	data := make([]byte, 16<<20)
	for i := range data {
		data[i] = uint8(rng.Intn(256))
	}
	benchEncode(b, data)
}
// testFiles' values are copied directly from
// https://raw.githubusercontent.com/google/snappy/master/snappy_unittest.cc
// The label field is unused in snappy-go.
var testFiles = []struct {
	label     string
	filename  string
	sizeLimit int // if > 0, truncate the file to this many bytes before use
}{
	{"html", "html", 0},
	{"urls", "urls.10K", 0},
	{"jpg", "fireworks.jpeg", 0},
	{"jpg_200b", "fireworks.jpeg", 200},
	{"pdf", "paper-100k.pdf", 0},
	{"html4", "html_x_4", 0},
	{"txt1", "alice29.txt", 0},
	{"txt2", "asyoulik.txt", 0},
	{"txt3", "lcet10.txt", 0},
	{"txt4", "plrabn12.txt", 0},
	{"pb", "geo.protodata", 0},
	{"gaviota", "kppkn.gtb", 0},
	{"txt1_128b", "alice29.txt", 128},
	{"txt1_1000b", "alice29.txt", 1000},
	{"txt1_10000b", "alice29.txt", 10000},
	{"txt1_20000b", "alice29.txt", 20000},
}
const (
	// The benchmark data files are at this canonical URL; they are fetched
	// on demand when the -download flag is set.
	benchURL = "https://raw.githubusercontent.com/google/snappy/master/testdata/"
)
// downloadBenchmarkFiles ensures the named snappy reference test data file
// exists locally, downloading it from benchURL when the -download flag is
// set. A partial download is removed on failure.
//
// Fixes: the file's Close error is now checked on the success path (a
// deferred Close silently discarded write/flush errors), and the cleanup
// defer is registered before the Close defer so that — with LIFO defer
// order — the file is closed before os.Remove runs, which also works on
// platforms that cannot remove an open file.
func downloadBenchmarkFiles(b testing.TB, basename string) (errRet error) {
	bDir := filepath.FromSlash(*benchdataDir)
	filename := filepath.Join(bDir, basename)
	if stat, err := os.Stat(filename); err == nil && stat.Size() != 0 {
		return nil
	}
	if !*download {
		b.Skipf("test data not found; skipping %s without the -download flag", testOrBenchmark(b))
	}
	// Download the official snappy C++ implementation reference test data
	// files for benchmarking.
	if err := os.MkdirAll(bDir, 0777); err != nil && !os.IsExist(err) {
		return fmt.Errorf("failed to create %s: %s", bDir, err)
	}
	f, err := os.Create(filename)
	if err != nil {
		return fmt.Errorf("failed to create %s: %s", filename, err)
	}
	defer func() {
		if errRet != nil {
			os.Remove(filename)
		}
	}()
	// Safety net for early error returns; the success path closes
	// explicitly below and the duplicate Close error is intentionally
	// ignored.
	defer f.Close()
	url := benchURL + basename
	resp, err := http.Get(url)
	if err != nil {
		return fmt.Errorf("failed to download %s: %s", url, err)
	}
	defer resp.Body.Close()
	if s := resp.StatusCode; s != http.StatusOK {
		return fmt.Errorf("downloading %s: HTTP status code %d (%s)", url, s, http.StatusText(s))
	}
	if _, err := io.Copy(f, resp.Body); err != nil {
		return fmt.Errorf("failed to download %s to %s: %s", url, filename, err)
	}
	if err := f.Close(); err != nil {
		return fmt.Errorf("failed to write %s: %s", filename, err)
	}
	return nil
}
// TestEstimateBlockSize is a smoke test: EstimateBlockSize must accept
// inputs of length 0 through 99 (including nil) without panicking.
func TestEstimateBlockSize(t *testing.T) {
	var input []byte
	for len(input) < 100 {
		EstimateBlockSize(input)
		input = append(input, 0)
	}
}
// benchFile benchmarks the block encoders/decoders (default, better, best)
// on testFiles[i], downloading the data if needed. "pct" is compressed size
// relative to the input; "B" is the absolute compressed size.
func benchFile(b *testing.B, i int, decode bool) {
	if err := downloadBenchmarkFiles(b, testFiles[i].filename); err != nil {
		b.Fatalf("failed to download testdata: %s", err)
	}
	bDir := filepath.FromSlash(*benchdataDir)
	data := readFile(b, filepath.Join(bDir, testFiles[i].filename))
	if !decode {
		b.Run("est-size", func(b *testing.B) {
			// Truncation mutates the outer data slice; later sub-benchmarks
			// see the truncated data via this side effect.
			if n := testFiles[i].sizeLimit; 0 < n && n < len(data) {
				data = data[:n]
			}
			b.SetBytes(int64(len(data)))
			b.ReportAllocs()
			b.ResetTimer()
			b.RunParallel(func(pb *testing.PB) {
				for pb.Next() {
					_ = EstimateBlockSize(data)
				}
			})
			sz := float64(EstimateBlockSize(data))
			if sz > 0 {
				b.ReportMetric(100*sz/float64(len(data)), "pct")
				b.ReportMetric(sz, "B")
			}
		})
	}
	b.Run("block", func(b *testing.B) {
		// Same side-effecting truncation as above (applies when decode is
		// true and est-size did not run).
		if n := testFiles[i].sizeLimit; 0 < n && n < len(data) {
			data = data[:n]
		}
		if decode {
			b.SetBytes(int64(len(data)))
			b.ReportAllocs()
			b.ResetTimer()
			b.RunParallel(func(pb *testing.PB) {
				// Per-goroutine encode and scratch buffer.
				encoded := Encode(nil, data)
				tmp := make([]byte, len(data))
				for pb.Next() {
					var err error
					tmp, err = Decode(tmp, encoded)
					if err != nil {
						b.Fatal(err)
					}
				}
			})
		} else {
			b.SetBytes(int64(len(data)))
			b.ReportAllocs()
			b.ResetTimer()
			b.RunParallel(func(pb *testing.PB) {
				dst := make([]byte, MaxEncodedLen(len(data)))
				tmp := make([]byte, len(data))
				for pb.Next() {
					res := Encode(dst, data)
					if len(res) == 0 {
						panic(0)
					}
					// Disabled verification path, kept for debugging.
					if false {
						tmp, _ = Decode(tmp, res)
						if !bytes.Equal(tmp, data) {
							panic("wrong")
						}
					}
				}
			})
		}
		b.ReportMetric(100*float64(len(Encode(nil, data)))/float64(len(data)), "pct")
		b.ReportMetric(float64(len(Encode(nil, data))), "B")
	})
	b.Run("block-better", func(b *testing.B) {
		if decode {
			b.SetBytes(int64(len(data)))
			b.ReportAllocs()
			b.ResetTimer()
			b.RunParallel(func(pb *testing.PB) {
				encoded := EncodeBetter(nil, data)
				tmp := make([]byte, len(data))
				for pb.Next() {
					var err error
					tmp, err = Decode(tmp, encoded)
					if err != nil {
						b.Fatal(err)
					}
				}
			})
		} else {
			b.SetBytes(int64(len(data)))
			b.ReportAllocs()
			b.ResetTimer()
			b.RunParallel(func(pb *testing.PB) {
				dst := make([]byte, MaxEncodedLen(len(data)))
				tmp := make([]byte, len(data))
				for pb.Next() {
					res := EncodeBetter(dst, data)
					if len(res) == 0 {
						panic(0)
					}
					if false {
						tmp, _ = Decode(tmp, res)
						if !bytes.Equal(tmp, data) {
							panic("wrong")
						}
					}
				}
			})
		}
		b.ReportMetric(100*float64(len(EncodeBetter(nil, data)))/float64(len(data)), "pct")
		b.ReportMetric(float64(len(EncodeBetter(nil, data))), "B")
	})
	b.Run("block-best", func(b *testing.B) {
		if decode {
			b.SetBytes(int64(len(data)))
			b.ReportAllocs()
			b.ResetTimer()
			b.RunParallel(func(pb *testing.PB) {
				encoded := EncodeBest(nil, data)
				tmp := make([]byte, len(data))
				for pb.Next() {
					var err error
					tmp, err = Decode(tmp, encoded)
					if err != nil {
						b.Fatal(err)
					}
				}
			})
			b.ReportMetric(100*float64(len(EncodeBest(nil, data)))/float64(len(data)), "pct")
		} else {
			b.SetBytes(int64(len(data)))
			b.ReportAllocs()
			b.ResetTimer()
			b.RunParallel(func(pb *testing.PB) {
				dst := make([]byte, MaxEncodedLen(len(data)))
				tmp := make([]byte, len(data))
				for pb.Next() {
					res := EncodeBest(dst, data)
					if len(res) == 0 {
						panic(0)
					}
					if false {
						tmp, _ = Decode(tmp, res)
						if !bytes.Equal(tmp, data) {
							panic("wrong")
						}
					}
				}
			})
			b.ReportMetric(100*float64(len(EncodeBest(nil, data)))/float64(len(data)), "pct")
		}
		b.ReportMetric(float64(len(EncodeBest(nil, data))), "B")
	})
}
// benchFileSnappy benchmarks the snappy-compatible s2 encoders and the
// reference snappy implementation on testFiles[i], downloading the data if
// needed. "pct" is compressed size relative to the input; "B" is the
// absolute compressed size.
//
// Fix: in the "s2-snappy-better" decode branch, b.ReportAllocs and
// b.ResetTimer were called inside the RunParallel body — i.e. from every
// worker goroutine, after per-goroutine setup — which is racy and resets
// the timer repeatedly mid-run. They are now set up before RunParallel,
// matching every sibling branch.
func benchFileSnappy(b *testing.B, i int, decode bool) {
	if err := downloadBenchmarkFiles(b, testFiles[i].filename); err != nil {
		b.Fatalf("failed to download testdata: %s", err)
	}
	bDir := filepath.FromSlash(*benchdataDir)
	data := readFile(b, filepath.Join(bDir, testFiles[i].filename))
	if n := testFiles[i].sizeLimit; 0 < n && n < len(data) {
		data = data[:n]
	}
	b.Run("s2-snappy", func(b *testing.B) {
		if decode {
			b.SetBytes(int64(len(data)))
			b.ReportAllocs()
			b.ResetTimer()
			b.RunParallel(func(pb *testing.PB) {
				encoded := EncodeSnappy(nil, data)
				tmp := make([]byte, len(data))
				for pb.Next() {
					var err error
					tmp, err = Decode(tmp, encoded)
					if err != nil {
						b.Fatal(err)
					}
				}
			})
			b.ReportMetric(100*float64(len(EncodeSnappy(nil, data)))/float64(len(data)), "pct")
		} else {
			b.SetBytes(int64(len(data)))
			b.ReportAllocs()
			b.ResetTimer()
			b.RunParallel(func(pb *testing.PB) {
				dst := make([]byte, MaxEncodedLen(len(data)))
				for pb.Next() {
					res := EncodeSnappy(dst, data)
					if len(res) == 0 {
						panic(0)
					}
				}
			})
			b.ReportMetric(100*float64(len(EncodeSnappy(nil, data)))/float64(len(data)), "pct")
		}
		b.ReportMetric(float64(len(EncodeSnappy(nil, data))), "B")
	})
	b.Run("s2-snappy-better", func(b *testing.B) {
		if decode {
			b.SetBytes(int64(len(data)))
			b.ReportAllocs()
			b.ResetTimer()
			b.RunParallel(func(pb *testing.PB) {
				encoded := EncodeSnappyBetter(nil, data)
				tmp := make([]byte, len(data))
				for pb.Next() {
					var err error
					tmp, err = Decode(tmp, encoded)
					if err != nil {
						b.Fatal(err)
					}
				}
			})
			b.ReportMetric(100*float64(len(EncodeSnappyBetter(nil, data)))/float64(len(data)), "pct")
		} else {
			b.SetBytes(int64(len(data)))
			b.ReportAllocs()
			b.ResetTimer()
			b.RunParallel(func(pb *testing.PB) {
				dst := make([]byte, MaxEncodedLen(len(data)))
				tmp := make([]byte, len(data))
				for pb.Next() {
					res := EncodeSnappyBetter(dst, data)
					if len(res) == 0 {
						panic(0)
					}
					// Disabled verification path, kept for debugging.
					if false {
						tmp, _ = Decode(tmp, res)
						if !bytes.Equal(tmp, data) {
							panic("wrong")
						}
					}
				}
			})
			b.ReportMetric(100*float64(len(EncodeSnappyBetter(nil, data)))/float64(len(data)), "pct")
		}
		b.ReportMetric(float64(len(EncodeSnappyBetter(nil, data))), "B")
	})
	b.Run("s2-snappy-best", func(b *testing.B) {
		if decode {
			b.SetBytes(int64(len(data)))
			b.ReportAllocs()
			b.ResetTimer()
			b.RunParallel(func(pb *testing.PB) {
				encoded := EncodeSnappyBest(nil, data)
				tmp := make([]byte, len(data))
				for pb.Next() {
					var err error
					tmp, err = Decode(tmp, encoded)
					if err != nil {
						b.Fatal(err)
					}
				}
			})
			b.ReportMetric(100*float64(len(EncodeSnappyBest(nil, data)))/float64(len(data)), "pct")
		} else {
			b.SetBytes(int64(len(data)))
			b.ReportAllocs()
			b.ResetTimer()
			b.RunParallel(func(pb *testing.PB) {
				dst := make([]byte, MaxEncodedLen(len(data)))
				tmp := make([]byte, len(data))
				for pb.Next() {
					res := EncodeSnappyBest(dst, data)
					if len(res) == 0 {
						panic(0)
					}
					if false {
						tmp, _ = snapref.Decode(tmp, res)
						if !bytes.Equal(tmp, data) {
							panic("wrong")
						}
					}
				}
			})
			b.ReportMetric(100*float64(len(EncodeSnappyBest(nil, data)))/float64(len(data)), "pct")
		}
		b.ReportMetric(float64(len(EncodeSnappyBest(nil, data))), "B")
	})
	b.Run("snappy-noasm", func(b *testing.B) {
		if decode {
			b.SetBytes(int64(len(data)))
			b.ReportAllocs()
			b.ResetTimer()
			b.RunParallel(func(pb *testing.PB) {
				encoded := snapref.Encode(nil, data)
				tmp := make([]byte, len(data))
				for pb.Next() {
					var err error
					tmp, err = snapref.Decode(tmp, encoded)
					if err != nil {
						b.Fatal(err)
					}
				}
			})
			b.ReportMetric(100*float64(len(snapref.Encode(nil, data)))/float64(len(data)), "pct")
		} else {
			b.SetBytes(int64(len(data)))
			b.ReportAllocs()
			b.ResetTimer()
			b.RunParallel(func(pb *testing.PB) {
				dst := make([]byte, snapref.MaxEncodedLen(len(data)))
				tmp := make([]byte, len(data))
				for pb.Next() {
					res := snapref.Encode(dst, data)
					if len(res) == 0 {
						panic(0)
					}
					if false {
						tmp, _ = snapref.Decode(tmp, res)
						if !bytes.Equal(tmp, data) {
							panic("wrong")
						}
					}
				}
			})
			b.ReportMetric(100*float64(len(snapref.Encode(nil, data)))/float64(len(data)), "pct")
		}
	})
}
// TestRoundtrips runs the full round-trip suite over every entry in
// testFiles. The first twelve (full-size) files are repeated 10x to
// exercise larger inputs; the size-limited entries are tested as-is.
func TestRoundtrips(t *testing.T) {
	for i := 0; i <= 11; i++ {
		testFile(t, i, 10)
	}
	for i := 12; i <= 15; i++ {
		testFile(t, i, 0)
	}
}
// testFile downloads test file i (skipping the test on failure), optionally
// repeats its contents, and runs the full set of roundtrip subtests on it.
func testFile(t *testing.T, i, repeat int) {
	if err := downloadBenchmarkFiles(t, testFiles[i].filename); err != nil {
		t.Skipf("failed to download testdata: %s", err)
	}
	if testing.Short() {
		// Keep -short runs fast: no repetition.
		repeat = 0
	}
	t.Run(fmt.Sprint(i, "-", testFiles[i].label), func(t *testing.T) {
		bDir := filepath.FromSlash(*benchdataDir)
		data := readFile(t, filepath.Join(bDir, testFiles[i].filename))
		if testing.Short() && len(data) > 10000 {
			t.SkipNow()
		}
		oSize := len(data)
		for n := 0; n < repeat; n++ {
			data = append(data, data[:oSize]...)
		}
		// Each roundtrip variant runs as its own subtest, in fixed order.
		subtests := []struct {
			name string
			run  func(t *testing.T)
		}{
			{"s2", func(t *testing.T) { testWriterRoundtrip(t, data) }},
			{"s2-better", func(t *testing.T) { testWriterRoundtrip(t, data, WriterBetterCompression()) }},
			{"s2-best", func(t *testing.T) { testWriterRoundtrip(t, data, WriterBestCompression()) }},
			{"s2-uncompressed", func(t *testing.T) { testWriterRoundtrip(t, data, WriterUncompressed()) }},
			{"block", func(t *testing.T) { testBlockRoundtrip(t, data) }},
			{"block-better", func(t *testing.T) { testBetterBlockRoundtrip(t, data) }},
			{"block-best", func(t *testing.T) { testBestBlockRoundtrip(t, data) }},
			{"s2-snappy", func(t *testing.T) { testSnappyBlockRoundtrip(t, data) }},
			{"snappy", func(t *testing.T) { testSnappyDecode(t, data) }},
		}
		for _, st := range subtests {
			t.Run(st.name, st.run)
		}
	})
}
// TestDataRoundtrips runs the full set of roundtrip subtests against
// synthetic and regression inputs: a long zero-filled block, a known
// problematic zstd-compressed payload, and every file in the encoder
// regression archive.
//
// Fix: removed the dead `if !strings.HasSuffix(t.Name(), "") { continue }`
// filter — strings.HasSuffix with an empty suffix is always true, so the
// continue could never execute.
func TestDataRoundtrips(t *testing.T) {
	// test runs every roundtrip variant on data.
	test := func(t *testing.T, data []byte) {
		t.Run("s2", func(t *testing.T) {
			testWriterRoundtrip(t, data)
		})
		t.Run("s2-better", func(t *testing.T) {
			testWriterRoundtrip(t, data, WriterBetterCompression())
		})
		t.Run("s2-best", func(t *testing.T) {
			testWriterRoundtrip(t, data, WriterBestCompression())
		})
		t.Run("block", func(t *testing.T) {
			d := data
			testBlockRoundtrip(t, d)
		})
		t.Run("block-better", func(t *testing.T) {
			d := data
			testBetterBlockRoundtrip(t, d)
		})
		t.Run("block-best", func(t *testing.T) {
			d := data
			testBestBlockRoundtrip(t, d)
		})
		t.Run("s2-snappy", func(t *testing.T) {
			d := data
			testSnappyBlockRoundtrip(t, d)
		})
		t.Run("snappy", func(t *testing.T) {
			testSnappyDecode(t, data)
		})
	}
	t.Run("longblock", func(t *testing.T) {
		data := make([]byte, 1<<25)
		if testing.Short() {
			data = data[:1<<20]
		}
		test(t, data)
	})
	t.Run("4f9e1a0", func(t *testing.T) {
		comp, _ := os.ReadFile("testdata/4f9e1a0da7915a3d69632f5613ed78bc998a8a23.zst")
		dec, _ := zstd.NewReader(bytes.NewBuffer(comp))
		data, _ := io.ReadAll(dec)
		test(t, data)
	})
	data, err := os.ReadFile("testdata/enc_regressions.zip")
	if err != nil {
		t.Fatal(err)
	}
	zr, err := zip.NewReader(bytes.NewReader(data), int64(len(data)))
	if err != nil {
		t.Fatal(err)
	}
	for _, tt := range zr.File {
		t.Run(tt.Name, func(t *testing.T) {
			r, err := tt.Open()
			if err != nil {
				t.Error(err)
				return
			}
			b, err := io.ReadAll(r)
			if err != nil {
				t.Error(err)
				return
			}
			// Cap capacity so encoders cannot write past the input.
			test(t, b[:len(b):len(b)])
		})
	}
}
// BenchmarkDecodeS2BlockParallel measures parallel S2 block decoding across
// all benchmark corpus files.
func BenchmarkDecodeS2BlockParallel(b *testing.B) {
	for i, tf := range testFiles {
		b.Run(fmt.Sprint(i, "-", tf.label), func(b *testing.B) {
			benchFile(b, i, true)
		})
	}
}
// BenchmarkEncodeS2BlockParallel measures parallel S2 block encoding across
// all benchmark corpus files.
func BenchmarkEncodeS2BlockParallel(b *testing.B) {
	for i, tf := range testFiles {
		b.Run(fmt.Sprint(i, "-", tf.label), func(b *testing.B) {
			benchFile(b, i, false)
		})
	}
}
// BenchmarkDecodeSnappyBlockParallel measures parallel decoding of
// snappy-compatible blocks across all benchmark corpus files.
func BenchmarkDecodeSnappyBlockParallel(b *testing.B) {
	for i, tf := range testFiles {
		b.Run(fmt.Sprint(i, "-", tf.label), func(b *testing.B) {
			benchFileSnappy(b, i, true)
		})
	}
}
// BenchmarkEncodeSnappyBlockParallel measures parallel encoding of
// snappy-compatible blocks across all benchmark corpus files.
func BenchmarkEncodeSnappyBlockParallel(b *testing.B) {
	for i, tf := range testFiles {
		b.Run(fmt.Sprint(i, "-", tf.label), func(b *testing.B) {
			benchFileSnappy(b, i, false)
		})
	}
}
// TestMatchLen verifies matchLen against a straightforward reference
// implementation over a grid of offsets into a buffer with a single
// mismatching byte placed near the end.
func TestMatchLen(t *testing.T) {
	// ref counts the leading bytes shared by a and b (b at least as long as a).
	ref := func(a, b []byte) int {
		for k, c := range a {
			if c != b[k] {
				return k
			}
		}
		return len(a)
	}
	// We allow slightly shorter matches at the end of slices
	const maxBelow = 0
	nums := []int{0, 1, 2, 7, 8, 9, 16, 20, 29, 30, 31, 32, 33, 34, 38, 39, 40}
	for yIndex := 40; yIndex > 30; yIndex-- {
		xxx := bytes.Repeat([]byte("x"), 40)
		if yIndex < len(xxx) {
			xxx[yIndex] = 'y'
		}
		for _, i := range nums {
			for _, j := range nums {
				if i >= j {
					continue
				}
				got := matchLen(xxx[j:], xxx[i:])
				want := ref(xxx[j:], xxx[i:])
				switch {
				case got > want:
					t.Errorf("yIndex=%d, i=%d, j=%d: got %d, want %d", yIndex, i, j, got, want)
				case got < want-maxBelow:
					t.Errorf("yIndex=%d, i=%d, j=%d: got %d, want %d", yIndex, i, j, got, want)
				}
			}
		}
	}
}
// BenchmarkDecodeBlockSingle measures single-goroutine block decoding of
// every benchmark corpus file, truncated to its configured size limit.
func BenchmarkDecodeBlockSingle(b *testing.B) {
	for i, tf := range testFiles {
		b.Run(fmt.Sprint(i, "-", tf.label), func(b *testing.B) {
			if err := downloadBenchmarkFiles(b, tf.filename); err != nil {
				b.Fatalf("failed to download testdata: %s", err)
			}
			data := readFile(b, filepath.Join(filepath.FromSlash(*benchdataDir), tf.filename))
			if tf.sizeLimit > 0 && len(data) > tf.sizeLimit {
				data = data[:tf.sizeLimit]
			}
			benchDecode(b, data)
		})
	}
}
// BenchmarkDecodeBlockParallel measures parallel block decoding across all
// benchmark corpus files.
func BenchmarkDecodeBlockParallel(b *testing.B) {
	for i, tf := range testFiles {
		b.Run(fmt.Sprint(i, "-", tf.label), func(b *testing.B) {
			benchFile(b, i, true)
		})
	}
}
// BenchmarkEncodeBlockSingle measures single-goroutine block encoding of
// every benchmark corpus file, truncated to its configured size limit.
func BenchmarkEncodeBlockSingle(b *testing.B) {
	for i, tf := range testFiles {
		b.Run(fmt.Sprint(i, "-", tf.label), func(b *testing.B) {
			if err := downloadBenchmarkFiles(b, tf.filename); err != nil {
				b.Fatalf("failed to download testdata: %s", err)
			}
			data := readFile(b, filepath.Join(filepath.FromSlash(*benchdataDir), tf.filename))
			if tf.sizeLimit > 0 && len(data) > tf.sizeLimit {
				data = data[:tf.sizeLimit]
			}
			benchEncode(b, data)
		})
	}
}
// BenchmarkEncodeBlockParallel measures parallel block encoding across all
// benchmark corpus files.
func BenchmarkEncodeBlockParallel(b *testing.B) {
	for i, tf := range testFiles {
		b.Run(fmt.Sprint(i, "-", tf.label), func(b *testing.B) {
			benchFile(b, i, false)
		})
	}
}
// Copyright 2011 The Snappy-Go Authors. All rights reserved.
// Copyright (c) 2019+ Klaus Post. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package s2
import (
"crypto/rand"
"encoding/binary"
"errors"
"fmt"
"io"
"runtime"
"sync"
"github.com/klauspost/compress/internal/race"
)
// Internal compression levels selected by the Writer options.
const (
	levelUncompressed = iota + 1 // store blocks without compression (WriterUncompressed)
	levelFast                    // default level
	levelBetter                  // WriterBetterCompression
	levelBest                    // WriterBestCompression
)
// NewWriter returns a new Writer that compresses to w, using the
// framing format described at
// https://github.com/google/snappy/blob/master/framing_format.txt
//
// Users must call Close to guarantee all data has been forwarded to
// the underlying io.Writer and that resources are released.
// They may also call Flush zero or more times before calling Close.
func NewWriter(w io.Writer, opts ...WriterOption) *Writer {
	ret := Writer{
		blockSize:   defaultBlockSize,
		concurrency: runtime.GOMAXPROCS(0),
		randSrc:     rand.Reader,
		level:       levelFast,
	}
	// Apply user options; the first failure is latched into the error state
	// and the (unusable) Writer is returned immediately.
	for _, o := range opts {
		if err := o(&ret); err != nil {
			ret.errState = err
			return &ret
		}
	}
	ret.obufLen = obufHeaderLen + MaxEncodedLen(ret.blockSize)
	ret.paramsOK = true
	ret.ibuf = make([]byte, 0, ret.blockSize)
	ret.buffers.New = func() interface{} {
		return make([]byte, ret.obufLen)
	}
	ret.Reset(w)
	return &ret
}
// Writer is an io.Writer that can write Snappy-compressed bytes.
type Writer struct {
	errMu    sync.Mutex // guards errState
	errState error      // first error encountered; sticky until Reset

	// ibuf is a buffer for the incoming (uncompressed) bytes.
	ibuf []byte

	blockSize     int   // maximum uncompressed bytes per block
	obufLen       int   // size of buffers produced by the buffers pool
	concurrency   int   // number of blocks compressed concurrently (1 = synchronous)
	written       int64 // compressed bytes written to the underlying writer
	uncompWritten int64 // Bytes sent to compression
	// output queues per-block result channels so compressed blocks are
	// written in submission order by the goroutine started in Reset.
	output  chan chan result
	buffers sync.Pool // recycled obufLen-sized output buffers
	pad     int       // pad output to a multiple of this on Close (<=1 disables)

	writer   io.Writer      // destination stream
	randSrc  io.Reader      // source of bytes for padding frames
	writerWg sync.WaitGroup // waits for the output goroutine on Reset/close
	index    Index          // seek index built as blocks are written

	// customEnc optionally replaces the built-in block encoders.
	customEnc func(dst, src []byte) int

	// wroteStreamHeader is whether we have written the stream header.
	wroteStreamHeader bool
	paramsOK          bool // set by NewWriter; a zero-value Writer is rejected
	snappy            bool // emit Snappy-compatible output
	flushOnWrite      bool // compress each Write call immediately
	appendIndex       bool // append the index when closing
	// bufferCB, if set, is called when an EncodeBuffer input is done.
	bufferCB func([]byte)
	level    uint8 // levelUncompressed..levelBest
}
// result is the message passed through the output queue: one finished
// block plus bookkeeping for the index and buffer-done callbacks.
type result struct {
	b []byte // compressed block, ready to be written to the stream
	// return when writing; handed back via bufferCB (EncodeBuffer path)
	ret []byte
	// Uncompressed start offset
	startOffset int64
}
// err returns the previously set error.
// If no error has been set it is set to err if not nil.
func (w *Writer) err(err error) error {
	w.errMu.Lock()
	defer w.errMu.Unlock()
	// Only the first error is kept; later errors are ignored.
	if w.errState == nil && err != nil {
		w.errState = err
	}
	return w.errState
}
// Reset discards the writer's state and switches the Snappy writer to write to w.
// This permits reusing a Writer rather than allocating a new one.
func (w *Writer) Reset(writer io.Writer) {
	// A Writer not created by NewWriter is unusable; do nothing.
	if !w.paramsOK {
		return
	}
	// Close previous writer, if any.
	if w.output != nil {
		close(w.output)
		w.writerWg.Wait()
		w.output = nil
	}
	w.errState = nil
	w.ibuf = w.ibuf[:0]
	w.wroteStreamHeader = false
	w.written = 0
	w.writer = writer
	w.uncompWritten = 0
	w.index.reset(w.blockSize)

	// If we didn't get a writer, stop here.
	if writer == nil {
		return
	}
	// If no concurrency requested, don't spin up writer goroutine.
	if w.concurrency == 1 {
		return
	}
	toWrite := make(chan chan result, w.concurrency)
	w.output = toWrite
	w.writerWg.Add(1)

	// Start a writer goroutine that will write all output in order.
	go func() {
		defer w.writerWg.Done()

		// Get a queued write.
		for write := range toWrite {
			// Wait for the data to be available.
			input := <-write
			// Hand the caller's buffer back once compression is done
			// (set by EncodeBuffer for its last block).
			if input.ret != nil && w.bufferCB != nil {
				w.bufferCB(input.ret)
				input.ret = nil
			}
			in := input.b
			if len(in) > 0 {
				if w.err(nil) == nil {
					// Don't expose data from previous buffers.
					toWrite := in[:len(in):len(in)]
					// Write to output.
					n, err := writer.Write(toWrite)
					if err == nil && n != len(toWrite) {
						err = io.ErrShortBuffer
					}
					_ = w.err(err)
					// Record the block in the index before advancing written.
					w.err(w.index.add(w.written, input.startOffset))
					w.written += int64(n)
				}
			}
			// Recycle full-size buffers; smaller ones are dropped.
			if cap(in) >= w.obufLen {
				w.buffers.Put(in)
			}
			// close the incoming write request.
			// This can be used for synchronizing flushes.
			close(write)
		}
	}()
}
// Write satisfies the io.Writer interface.
// Input is accumulated in ibuf; compression starts once a full block of
// blockSize bytes is available (or immediately with WriterFlushOnWrite).
func (w *Writer) Write(p []byte) (nRet int, errRet error) {
	if err := w.err(nil); err != nil {
		return 0, err
	}
	if w.flushOnWrite {
		// Compress the write immediately, without buffering.
		return w.write(p)
	}
	// If we exceed the input buffer size, start writing
	for len(p) > (cap(w.ibuf)-len(w.ibuf)) && w.err(nil) == nil {
		var n int
		if len(w.ibuf) == 0 {
			// Large write, empty buffer.
			// Write directly from p to avoid copy.
			n, _ = w.write(p)
		} else {
			// Top up the buffer to a full block and compress it.
			n = copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p)
			w.ibuf = w.ibuf[:len(w.ibuf)+n]
			// Errors are latched in w.err and checked after the loop.
			w.write(w.ibuf)
			w.ibuf = w.ibuf[:0]
		}
		nRet += n
		p = p[n:]
	}
	if err := w.err(nil); err != nil {
		return nRet, err
	}
	// p should always be able to fit into w.ibuf now.
	n := copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p)
	w.ibuf = w.ibuf[:len(w.ibuf)+n]
	nRet += n
	return nRet, nil
}
// ReadFrom implements the io.ReaderFrom interface.
// Using this is typically more efficient since it avoids a memory copy.
// ReadFrom reads data from r until EOF or error.
// The return value n is the number of bytes read.
// Any error except io.EOF encountered during the read is also returned.
func (w *Writer) ReadFrom(r io.Reader) (n int64, err error) {
	if err := w.err(nil); err != nil {
		return 0, err
	}
	// Flush previously buffered writes so blocks stay aligned.
	if len(w.ibuf) > 0 {
		err := w.AsyncFlush()
		if err != nil {
			return 0, err
		}
	}
	// If the source exposes its bytes directly, encode without copying.
	if br, ok := r.(byter); ok {
		buf := br.Bytes()
		if err := w.EncodeBuffer(buf); err != nil {
			return 0, err
		}
		return int64(len(buf)), w.AsyncFlush()
	}
	for {
		// Read one block into a pooled buffer, leaving room for the
		// chunk header at the front.
		inbuf := w.buffers.Get().([]byte)[:w.blockSize+obufHeaderLen]
		n2, err := io.ReadFull(r, inbuf[obufHeaderLen:])
		if err != nil {
			// A short final read is expected; treat it as EOF.
			if err == io.ErrUnexpectedEOF {
				err = io.EOF
			}
			if err != io.EOF {
				return n, w.err(err)
			}
		}
		if n2 == 0 {
			// Nothing read; recycle the buffer and stop.
			if cap(inbuf) >= w.obufLen {
				w.buffers.Put(inbuf)
			}
			break
		}
		n += int64(n2)
		err2 := w.writeFull(inbuf[:n2+obufHeaderLen])
		if w.err(err2) != nil {
			break
		}
		if err != nil {
			// We got EOF and wrote everything
			break
		}
	}
	return n, w.err(nil)
}
// AddSkippableBlock will add a skippable block to the stream.
// The ID must be 0x80-0xfe (inclusive).
// Length of the skippable block must be <= 16777215 bytes.
// Empty blocks are silently dropped.
//
// Fix: the over-size error message read "excessed maximum size";
// corrected to "exceeds maximum size" (and uses errors.New, since the
// message has no format verbs).
func (w *Writer) AddSkippableBlock(id uint8, data []byte) (err error) {
	if err := w.err(nil); err != nil {
		return err
	}
	if len(data) == 0 {
		return nil
	}
	if id < 0x80 || id > chunkTypePadding {
		return fmt.Errorf("invalid skippable block id %x", id)
	}
	if len(data) > maxChunkSize {
		return errors.New("skippable block exceeds maximum size")
	}
	// 4-byte chunk header: type byte + 24-bit little-endian length.
	var header [4]byte
	chunkLen := len(data)
	header[0] = id
	header[1] = uint8(chunkLen >> 0)
	header[2] = uint8(chunkLen >> 8)
	header[3] = uint8(chunkLen >> 16)
	// Synchronous path: write the stream header (once), then the chunk.
	if w.concurrency == 1 {
		write := func(b []byte) error {
			n, err := w.writer.Write(b)
			if err = w.err(err); err != nil {
				return err
			}
			if n != len(b) {
				return w.err(io.ErrShortWrite)
			}
			w.written += int64(n)
			return w.err(nil)
		}
		if !w.wroteStreamHeader {
			w.wroteStreamHeader = true
			if w.snappy {
				if err := write([]byte(magicChunkSnappy)); err != nil {
					return err
				}
			} else {
				if err := write([]byte(magicChunk)); err != nil {
					return err
				}
			}
		}
		if err := write(header[:]); err != nil {
			return err
		}
		return write(data)
	}

	// Create output...
	if !w.wroteStreamHeader {
		w.wroteStreamHeader = true
		hWriter := make(chan result)
		w.output <- hWriter
		if w.snappy {
			hWriter <- result{startOffset: w.uncompWritten, b: magicChunkSnappyBytes}
		} else {
			hWriter <- result{startOffset: w.uncompWritten, b: magicChunkBytes}
		}
	}

	// Copy input so the caller may reuse data immediately.
	inbuf := w.buffers.Get().([]byte)[:4]
	copy(inbuf, header[:])
	inbuf = append(inbuf, data...)

	output := make(chan result, 1)
	// Queue output.
	w.output <- output
	output <- result{startOffset: w.uncompWritten, b: inbuf}

	return nil
}
// EncodeBuffer will add a buffer to the stream.
// This is the fastest way to encode a stream,
// but the input buffer cannot be written to by the caller
// until Flush or Close has been called when concurrency != 1.
//
// Use the WriterBufferDone to receive a callback when the buffer is done
// Processing.
//
// Note that input is not buffered.
// This means that each write will result in discrete blocks being created.
// For buffered writes, use the regular Write function.
func (w *Writer) EncodeBuffer(buf []byte) (err error) {
	if err := w.err(nil); err != nil {
		return err
	}

	if w.flushOnWrite {
		_, err := w.write(buf)
		return err
	}
	// Flush queued data first.
	if len(w.ibuf) > 0 {
		err := w.AsyncFlush()
		if err != nil {
			return err
		}
	}
	if w.concurrency == 1 {
		// Synchronous path is done with buf when writeSync returns.
		_, err := w.writeSync(buf)
		if w.bufferCB != nil {
			w.bufferCB(buf)
		}
		return err
	}

	// Spawn goroutine and write block to output channel.
	if !w.wroteStreamHeader {
		w.wroteStreamHeader = true
		hWriter := make(chan result)
		w.output <- hWriter
		if w.snappy {
			hWriter <- result{startOffset: w.uncompWritten, b: magicChunkSnappyBytes}
		} else {
			hWriter <- result{startOffset: w.uncompWritten, b: magicChunkBytes}
		}
	}

	orgBuf := buf
	for len(buf) > 0 {
		// Cut input.
		uncompressed := buf
		if len(uncompressed) > w.blockSize {
			uncompressed = uncompressed[:w.blockSize]
		}
		buf = buf[len(uncompressed):]
		// Get an output buffer.
		obuf := w.buffers.Get().([]byte)[:len(uncompressed)+obufHeaderLen]
		race.WriteSlice(obuf)

		output := make(chan result)
		// Queue output now, so we keep order.
		w.output <- output
		res := result{
			startOffset: w.uncompWritten,
		}
		w.uncompWritten += int64(len(uncompressed))
		// On the last block, hand the caller's buffer back through the
		// WriterBufferDone callback once it has been written out.
		if len(buf) == 0 && w.bufferCB != nil {
			res.ret = orgBuf
		}
		go func() {
			race.ReadSlice(uncompressed)

			checksum := crc(uncompressed)

			// Set to uncompressed.
			chunkType := uint8(chunkTypeUncompressedData)
			chunkLen := 4 + len(uncompressed)

			// Attempt compressing.
			n := binary.PutUvarint(obuf[obufHeaderLen:], uint64(len(uncompressed)))
			n2 := w.encodeBlock(obuf[obufHeaderLen+n:], uncompressed)

			// Check if we should use this, or store as uncompressed instead.
			if n2 > 0 {
				chunkType = uint8(chunkTypeCompressedData)
				chunkLen = 4 + n + n2
				obuf = obuf[:obufHeaderLen+n+n2]
			} else {
				// copy uncompressed
				copy(obuf[obufHeaderLen:], uncompressed)
			}

			// Fill in the per-chunk header that comes before the body.
			obuf[0] = chunkType
			obuf[1] = uint8(chunkLen >> 0)
			obuf[2] = uint8(chunkLen >> 8)
			obuf[3] = uint8(chunkLen >> 16)
			obuf[4] = uint8(checksum >> 0)
			obuf[5] = uint8(checksum >> 8)
			obuf[6] = uint8(checksum >> 16)
			obuf[7] = uint8(checksum >> 24)

			// Queue final output.
			res.b = obuf
			output <- res
		}()
	}
	return nil
}
// encodeBlock compresses uncompressed into obuf and returns the number of
// bytes written, or 0 when the block should be stored uncompressed.
// A custom encoder takes precedence; its negative return falls through to
// the built-in encoders.
func (w *Writer) encodeBlock(obuf, uncompressed []byte) int {
	if w.customEnc != nil {
		if n := w.customEnc(obuf, uncompressed); n >= 0 {
			return n
		}
	}
	switch {
	case w.snappy && w.level == levelFast:
		return encodeBlockSnappy(obuf, uncompressed)
	case w.snappy && w.level == levelBetter:
		return encodeBlockBetterSnappy(obuf, uncompressed)
	case w.snappy && w.level == levelBest:
		return encodeBlockBestSnappy(obuf, uncompressed)
	case w.snappy:
		// levelUncompressed (or unknown): store.
		return 0
	case w.level == levelFast:
		return encodeBlock(obuf, uncompressed)
	case w.level == levelBetter:
		return encodeBlockBetter(obuf, uncompressed)
	case w.level == levelBest:
		return encodeBlockBest(obuf, uncompressed, nil)
	}
	return 0
}
// write splits p into blocks and compresses them, copying the input so the
// caller may reuse p immediately. With concurrency == 1 it compresses
// synchronously; otherwise each block is compressed in its own goroutine
// and queued on the output channel to preserve order.
func (w *Writer) write(p []byte) (nRet int, errRet error) {
	if err := w.err(nil); err != nil {
		return 0, err
	}
	if w.concurrency == 1 {
		return w.writeSync(p)
	}

	// Spawn goroutine and write block to output channel.
	for len(p) > 0 {
		// Emit the stream magic exactly once, as the first queued write.
		if !w.wroteStreamHeader {
			w.wroteStreamHeader = true
			hWriter := make(chan result)
			w.output <- hWriter
			if w.snappy {
				hWriter <- result{startOffset: w.uncompWritten, b: magicChunkSnappyBytes}
			} else {
				hWriter <- result{startOffset: w.uncompWritten, b: magicChunkBytes}
			}
		}

		var uncompressed []byte
		if len(p) > w.blockSize {
			uncompressed, p = p[:w.blockSize], p[w.blockSize:]
		} else {
			uncompressed, p = p, nil
		}

		// Copy input.
		// If the block is incompressible, this is used for the result.
		inbuf := w.buffers.Get().([]byte)[:len(uncompressed)+obufHeaderLen]
		obuf := w.buffers.Get().([]byte)[:w.obufLen]
		copy(inbuf[obufHeaderLen:], uncompressed)
		uncompressed = inbuf[obufHeaderLen:]

		output := make(chan result)
		// Queue output now, so we keep order.
		w.output <- output
		res := result{
			startOffset: w.uncompWritten,
		}
		w.uncompWritten += int64(len(uncompressed))
		go func() {
			checksum := crc(uncompressed)

			// Set to uncompressed.
			chunkType := uint8(chunkTypeUncompressedData)
			chunkLen := 4 + len(uncompressed)

			// Attempt compressing.
			n := binary.PutUvarint(obuf[obufHeaderLen:], uint64(len(uncompressed)))
			n2 := w.encodeBlock(obuf[obufHeaderLen+n:], uncompressed)

			// Check if we should use this, or store as uncompressed instead.
			if n2 > 0 {
				chunkType = uint8(chunkTypeCompressedData)
				chunkLen = 4 + n + n2
				obuf = obuf[:obufHeaderLen+n+n2]
			} else {
				// Use input as output.
				obuf, inbuf = inbuf, obuf
			}

			// Fill in the per-chunk header that comes before the body.
			obuf[0] = chunkType
			obuf[1] = uint8(chunkLen >> 0)
			obuf[2] = uint8(chunkLen >> 8)
			obuf[3] = uint8(chunkLen >> 16)
			obuf[4] = uint8(checksum >> 0)
			obuf[5] = uint8(checksum >> 8)
			obuf[6] = uint8(checksum >> 16)
			obuf[7] = uint8(checksum >> 24)

			// Queue final output.
			res.b = obuf
			output <- res

			// Put unused buffer back in pool.
			w.buffers.Put(inbuf)
		}()
		nRet += len(uncompressed)
	}
	return nRet, nil
}
// writeFull is a special version of write that will always write the full buffer.
// Data to be compressed should start at offset obufHeaderLen and fill the remainder of the buffer.
// The data will be written as a single block.
// The caller is not allowed to use inbuf after this function has been called.
func (w *Writer) writeFull(inbuf []byte) (errRet error) {
	if err := w.err(nil); err != nil {
		return err
	}

	if w.concurrency == 1 {
		_, err := w.writeSync(inbuf[obufHeaderLen:])
		// Recycle full-size buffers; smaller ones are dropped.
		if cap(inbuf) >= w.obufLen {
			w.buffers.Put(inbuf)
		}
		return err
	}

	// Spawn goroutine and write block to output channel.
	if !w.wroteStreamHeader {
		w.wroteStreamHeader = true
		hWriter := make(chan result)
		w.output <- hWriter
		if w.snappy {
			hWriter <- result{startOffset: w.uncompWritten, b: magicChunkSnappyBytes}
		} else {
			hWriter <- result{startOffset: w.uncompWritten, b: magicChunkBytes}
		}
	}

	// Get an output buffer.
	obuf := w.buffers.Get().([]byte)[:w.obufLen]
	uncompressed := inbuf[obufHeaderLen:]

	output := make(chan result)
	// Queue output now, so we keep order.
	w.output <- output
	res := result{
		startOffset: w.uncompWritten,
	}
	w.uncompWritten += int64(len(uncompressed))
	go func() {
		checksum := crc(uncompressed)

		// Set to uncompressed.
		chunkType := uint8(chunkTypeUncompressedData)
		chunkLen := 4 + len(uncompressed)

		// Attempt compressing.
		n := binary.PutUvarint(obuf[obufHeaderLen:], uint64(len(uncompressed)))
		n2 := w.encodeBlock(obuf[obufHeaderLen+n:], uncompressed)

		// Check if we should use this, or store as uncompressed instead.
		if n2 > 0 {
			chunkType = uint8(chunkTypeCompressedData)
			chunkLen = 4 + n + n2
			obuf = obuf[:obufHeaderLen+n+n2]
		} else {
			// Use input as output.
			obuf, inbuf = inbuf, obuf
		}

		// Fill in the per-chunk header that comes before the body.
		obuf[0] = chunkType
		obuf[1] = uint8(chunkLen >> 0)
		obuf[2] = uint8(chunkLen >> 8)
		obuf[3] = uint8(chunkLen >> 16)
		obuf[4] = uint8(checksum >> 0)
		obuf[5] = uint8(checksum >> 8)
		obuf[6] = uint8(checksum >> 16)
		obuf[7] = uint8(checksum >> 24)

		// Queue final output.
		res.b = obuf
		output <- res

		// Put unused buffer back in pool.
		w.buffers.Put(inbuf)
	}()
	return nil
}
// writeSync compresses and writes p as one or more blocks directly to the
// underlying writer, without the output goroutine (concurrency == 1 path).
func (w *Writer) writeSync(p []byte) (nRet int, errRet error) {
	if err := w.err(nil); err != nil {
		return 0, err
	}
	// Emit the stream magic on the first write.
	if !w.wroteStreamHeader {
		w.wroteStreamHeader = true
		var n int
		var err error
		if w.snappy {
			n, err = w.writer.Write(magicChunkSnappyBytes)
		} else {
			n, err = w.writer.Write(magicChunkBytes)
		}
		if err != nil {
			return 0, w.err(err)
		}
		if n != len(magicChunk) {
			return 0, w.err(io.ErrShortWrite)
		}
		w.written += int64(n)
	}

	for len(p) > 0 {
		// Cut one block off the front of p.
		var uncompressed []byte
		if len(p) > w.blockSize {
			uncompressed, p = p[:w.blockSize], p[w.blockSize:]
		} else {
			uncompressed, p = p, nil
		}

		obuf := w.buffers.Get().([]byte)[:w.obufLen]
		checksum := crc(uncompressed)

		// Set to uncompressed.
		chunkType := uint8(chunkTypeUncompressedData)
		chunkLen := 4 + len(uncompressed)

		// Attempt compressing.
		n := binary.PutUvarint(obuf[obufHeaderLen:], uint64(len(uncompressed)))
		n2 := w.encodeBlock(obuf[obufHeaderLen+n:], uncompressed)

		if n2 > 0 {
			chunkType = uint8(chunkTypeCompressedData)
			chunkLen = 4 + n + n2
			obuf = obuf[:obufHeaderLen+n+n2]
		} else {
			// Incompressible: emit only the 8-byte chunk header here; the
			// raw bytes are written separately below.
			obuf = obuf[:8]
		}
		// Fill in the per-chunk header that comes before the body.
		obuf[0] = chunkType
		obuf[1] = uint8(chunkLen >> 0)
		obuf[2] = uint8(chunkLen >> 8)
		obuf[3] = uint8(chunkLen >> 16)
		obuf[4] = uint8(checksum >> 0)
		obuf[5] = uint8(checksum >> 8)
		obuf[6] = uint8(checksum >> 16)
		obuf[7] = uint8(checksum >> 24)

		n, err := w.writer.Write(obuf)
		if err != nil {
			return 0, w.err(err)
		}
		if n != len(obuf) {
			return 0, w.err(io.ErrShortWrite)
		}
		// Record the block in the index before advancing the offsets.
		w.err(w.index.add(w.written, w.uncompWritten))
		w.written += int64(n)
		w.uncompWritten += int64(len(uncompressed))

		if chunkType == chunkTypeUncompressedData {
			// Write uncompressed data.
			n, err := w.writer.Write(uncompressed)
			if err != nil {
				return 0, w.err(err)
			}
			if n != len(uncompressed) {
				return 0, w.err(io.ErrShortWrite)
			}
			w.written += int64(n)
		}
		w.buffers.Put(obuf)
		// Queue final output.
		nRet += len(uncompressed)
	}
	return nRet, nil
}
// AsyncFlush writes any buffered bytes to a block and starts compressing it.
// It does not wait for the output has been written as Flush() does.
func (w *Writer) AsyncFlush() error {
	if err := w.err(nil); err != nil {
		return err
	}
	// Nothing buffered; just report the current error state.
	if len(w.ibuf) == 0 {
		return w.err(nil)
	}
	// Before the stream header exists, write synchronously so the header
	// comes out first.
	if !w.wroteStreamHeader {
		_, err := w.writeSync(w.ibuf)
		w.ibuf = w.ibuf[:0]
		return w.err(err)
	}
	_, err := w.write(w.ibuf)
	w.ibuf = w.ibuf[:0]
	if err := w.err(err); err != nil {
		return err
	}
	return w.err(nil)
}
// Flush flushes the Writer to its underlying io.Writer.
// This does not apply padding.
func (w *Writer) Flush() error {
	if err := w.AsyncFlush(); err != nil {
		return err
	}
	// Synchronous mode: AsyncFlush already wrote everything.
	if w.output == nil {
		return w.err(nil)
	}

	// Send empty buffer
	res := make(chan result)
	w.output <- res
	// Block until this has been picked up.
	res <- result{b: nil, startOffset: w.uncompWritten}
	// When it is closed, we have flushed.
	<-res
	return w.err(nil)
}
// Close calls Flush and then closes the Writer.
// Calling Close multiple times is ok,
// but calling CloseIndex after this will make it not return the index.
func (w *Writer) Close() error {
	// The index is appended only when WriterAddIndex was set.
	_, err := w.closeIndex(w.appendIndex)
	return err
}
// CloseIndex calls Close and returns an index on first call.
// This is not required if you are only adding index to a stream.
func (w *Writer) CloseIndex() ([]byte, error) {
	return w.closeIndex(true)
}
// closeIndex flushes and closes the stream, optionally building the seek
// index, applying padding, and appending the index if requested.
// Returns the index (when idx is true) on the first close; later calls
// return nil with no error.
func (w *Writer) closeIndex(idx bool) ([]byte, error) {
	err := w.Flush()
	if w.output != nil {
		close(w.output)
		w.writerWg.Wait()
		w.output = nil
	}

	var index []byte
	if w.err(err) == nil && w.writer != nil {
		// Create index.
		if idx {
			compSize := int64(-1)
			// The final compressed size is only known when no padding
			// frame will be added afterwards.
			if w.pad <= 1 {
				compSize = w.written
			}
			index = w.index.appendTo(w.ibuf[:0], w.uncompWritten, compSize)
			// Count as written for padding.
			if w.appendIndex {
				w.written += int64(len(index))
			}
		}

		if w.pad > 1 {
			tmp := w.ibuf[:0]
			if len(index) > 0 {
				// Allocate another buffer.
				// (ibuf is already holding the index bytes.)
				tmp = w.buffers.Get().([]byte)[:0]
				defer w.buffers.Put(tmp)
			}
			add := calcSkippableFrame(w.written, int64(w.pad))
			frame, err := skippableFrame(tmp, add, w.randSrc)
			if err = w.err(err); err != nil {
				return nil, err
			}
			n, err2 := w.writer.Write(frame)
			if err2 == nil && n != len(frame) {
				err2 = io.ErrShortWrite
			}
			_ = w.err(err2)
		}
		if len(index) > 0 && w.appendIndex {
			n, err2 := w.writer.Write(index)
			if err2 == nil && n != len(index) {
				err2 = io.ErrShortWrite
			}
			_ = w.err(err2)
		}
	}
	// Latch errClosed; seeing it already set means we were closed before.
	err = w.err(errClosed)
	if err == errClosed {
		return index, nil
	}
	return nil, err
}
// calcSkippableFrame returns the number of padding bytes to add so that
// written becomes divisible by wantMultiple. The result is either 0 or
// at least skippableFrameHeader bytes.
// It panics if written < 0 or wantMultiple <= 0.
func calcSkippableFrame(written, wantMultiple int64) int {
	switch {
	case wantMultiple <= 0:
		panic("wantMultiple <= 0")
	case written < 0:
		panic("written < 0")
	}
	rem := written % wantMultiple
	if rem == 0 {
		return 0
	}
	// Grow the pad until a complete skippable-frame header fits.
	pad := wantMultiple - rem
	for pad < skippableFrameHeader {
		pad += wantMultiple
	}
	return int(pad)
}
// skippableFrame appends a skippable (padding) frame with a total size of
// total bytes to dst, filling the frame body with bytes read from r.
// total should be >= skippableFrameHeader and < maxBlockSize + skippableFrameHeader
func skippableFrame(dst []byte, total int, r io.Reader) ([]byte, error) {
	switch {
	case total == 0:
		return dst, nil
	case total < skippableFrameHeader:
		return dst, fmt.Errorf("s2: requested skippable frame (%d) < 4", total)
	case int64(total) >= maxBlockSize+skippableFrameHeader:
		return dst, fmt.Errorf("s2: requested skippable frame (%d) >= max 1<<24", total)
	}
	// Chunk type 0xfe "Section 4.4 Padding (chunk type 0xfe)",
	// followed by the 24-bit little-endian body length.
	body := uint32(total - skippableFrameHeader)
	dst = append(dst, chunkTypePadding, uint8(body), uint8(body>>8), uint8(body>>16))
	// Fill the body from r.
	start := len(dst)
	dst = append(dst, make([]byte, body)...)
	_, err := io.ReadFull(r, dst[start:])
	return dst, err
}
// errClosed is latched into the Writer's error state by closeIndex and
// marks the Writer as closed.
var errClosed = errors.New("s2: Writer is closed")
// WriterOption is an option for creating an encoder.
// Options are applied by NewWriter; an error aborts construction.
type WriterOption func(*Writer) error
// WriterConcurrency will set the concurrency,
// meaning the maximum number of blocks being compressed concurrently.
// The value supplied must be at least 1.
// By default this will be set to GOMAXPROCS.
// A value of 1 makes the Writer fully synchronous.
func WriterConcurrency(n int) WriterOption {
	return func(w *Writer) error {
		if n <= 0 {
			return errors.New("concurrency must be at least 1")
		}
		w.concurrency = n
		return nil
	}
}
// WriterAddIndex will append an index to the end of a stream
// when it is closed.
func WriterAddIndex() WriterOption {
	return func(w *Writer) error {
		w.appendIndex = true
		return nil
	}
}
// WriterBetterCompression will enable better compression.
// EncodeBetter compresses better than Encode but typically with a
// 10-40% speed decrease on both compression and decompression.
func WriterBetterCompression() WriterOption {
	return func(w *Writer) error {
		w.level = levelBetter
		return nil
	}
}
// WriterBestCompression will enable best compression.
// EncodeBest compresses better than Encode but typically with a
// big speed decrease on compression.
func WriterBestCompression() WriterOption {
	return func(w *Writer) error {
		w.level = levelBest
		return nil
	}
}
// WriterUncompressed will bypass compression.
// The stream will be written as uncompressed blocks only.
// If concurrency is > 1 CRC and output will still be done async.
func WriterUncompressed() WriterOption {
	return func(w *Writer) error {
		w.level = levelUncompressed
		return nil
	}
}
// WriterBufferDone will perform a callback when EncodeBuffer has finished
// writing a buffer to the output and the buffer can safely be reused.
// If the buffer was split into several blocks, it will be sent after the last block.
// Callbacks will not be done concurrently.
func WriterBufferDone(fn func(b []byte)) WriterOption {
	return func(w *Writer) error {
		w.bufferCB = fn
		return nil
	}
}
// WriterBlockSize allows to override the default block size.
// Blocks will be this size or smaller.
// Minimum size is 4KB and maximum size is 4MB.
//
// Bigger blocks may give bigger throughput on systems with many cores,
// and will increase compression slightly, but it will limit the possible
// concurrency for smaller payloads for both encoding and decoding.
// Default block size is 1MB.
//
// When writing Snappy compatible output using WriterSnappyCompat,
// the maximum block size is 64KB.
//
// Fix: the original condition `w.snappy && n > maxSnappyBlockSize || n < minBlockSize`
// binds as `(w.snappy && n > maxSnappyBlockSize) || (n < minBlockSize)`, so a
// too-small block size on a non-snappy writer returned the snappy-specific
// error message. The checks are now separate; the "on for" grammar in the
// message is also fixed.
func WriterBlockSize(n int) WriterOption {
	return func(w *Writer) error {
		if w.snappy && n > maxSnappyBlockSize {
			return errors.New("s2: block size too large. Must be <= 64K and >=4KB for snappy compatible output")
		}
		if n > maxBlockSize || n < minBlockSize {
			return errors.New("s2: block size too large. Must be <= 4MB and >=4KB")
		}
		w.blockSize = n
		return nil
	}
}
// WriterPadding will add padding to all output so the size will be a multiple of n.
// This can be used to obfuscate the exact output size or make blocks of a certain size.
// The contents will be a skippable frame, so it will be invisible by the decoder.
// n must be > 0 and <= 4MB.
// The padded area will be filled with data from crypto/rand.Reader.
// The padding will be applied whenever Close is called on the writer.
//
// Fix: the n == 1 case set w.pad = 0 but then fell through to `w.pad = n`,
// re-enabling padding with multiple 1. It now returns early. The over-size
// message grammar ("must less than") is also fixed.
func WriterPadding(n int) WriterOption {
	return func(w *Writer) error {
		if n <= 0 {
			return fmt.Errorf("s2: padding must be at least 1")
		}
		// No need to waste our time: padding to a multiple of 1 is a no-op.
		if n == 1 {
			w.pad = 0
			return nil
		}
		if n > maxBlockSize {
			return fmt.Errorf("s2: padding must be less than 4MB")
		}
		w.pad = n
		return nil
	}
}
// WriterPaddingSrc will get random data for padding from the supplied source.
// By default crypto/rand is used.
func WriterPaddingSrc(reader io.Reader) WriterOption {
	return func(w *Writer) error {
		w.randSrc = reader
		return nil
	}
}
// WriterSnappyCompat will write snappy compatible output.
// The output can be decompressed using either snappy or s2.
// If the configured block size exceeds 64KB it is reduced to fit within
// the snappy block size limit.
func WriterSnappyCompat() WriterOption {
	return func(w *Writer) error {
		w.snappy = true
		if w.blockSize > 64<<10 {
			// We choose 8 bytes less than 64K, since that will make literal emits slightly more effective.
			// And allows us to skip some size checks.
			w.blockSize = (64 << 10) - 8
		}
		return nil
	}
}
// WriterFlushOnWrite will compress blocks on each call to the Write function.
//
// This is quite inefficient as blocks size will depend on the write size.
//
// Use WriterConcurrency(1) to also make sure that output is flushed
// when Write calls return; otherwise output is written when compression is done.
func WriterFlushOnWrite() WriterOption {
	return func(w *Writer) error {
		w.flushOnWrite = true
		return nil
	}
}
// WriterCustomEncoder allows to override the encoder for blocks on the stream.
// The function must compress 'src' into 'dst' and return the bytes used in dst as an integer.
// Block size (initial varint) should not be added by the encoder.
// Returning value 0 indicates the block could not be compressed.
// Returning a negative value indicates that compression should be attempted
// with the built-in encoders instead.
// The function should expect to be called concurrently.
func WriterCustomEncoder(fn func(dst, src []byte) int) WriterOption {
	return func(w *Writer) error {
		w.customEnc = fn
		return nil
	}
}
package xerial
import (
"bytes"
"encoding/binary"
"errors"
"github.com/klauspost/compress/s2"
)
var (
	// xerialHeader is the magic sequence that opens every xerial-framed stream.
	xerialHeader = []byte{130, 83, 78, 65, 80, 80, 89, 0}

	// This is xerial version 1 and minimally compatible with version 1
	xerialVersionInfo = []byte{0, 0, 0, 1, 0, 0, 0, 1}

	// ErrMalformed is returned by the decoder when the xerial framing
	// is malformed
	ErrMalformed = errors.New("malformed xerial framing")
)
// Encode *appends* to the specified 'dst' the compressed
// 'src' in xerial framing format. If 'dst' does not have enough
// capacity, then a new slice will be allocated. If 'dst' has
// non-zero length, then it *must* have been built using this function.
func Encode(dst, src []byte) []byte {
	if len(dst) == 0 {
		dst = append(dst, xerialHeader...)
		dst = append(dst, xerialVersionInfo...)
	}
	// Compress the input as independent chunks of at most 32KB, each
	// preceded by its big-endian compressed length.
	const blockSize = 32 * 1024
	var scratch []byte // reused across iterations when dst lacks room
	for pos := 0; pos < len(src); {
		end := min(pos+blockSize, len(src))
		// Worst-case output for this chunk plus the 4-byte length prefix.
		needLen := s2.MaxEncodedLen(end-pos) + 4
		if cap(dst)-len(dst) >= needLen {
			// Enough spare capacity: compress straight into dst.
			sizePos := len(dst)
			dataStart := sizePos + 4 // leave room for the length prefix
			encoded := s2.EncodeSnappy(dst[dataStart:sizePos+needLen], src[pos:end])
			dst = dst[:dataStart+len(encoded)]
			binary.BigEndian.PutUint32(dst[sizePos:dataStart], uint32(len(encoded)))
		} else {
			// Not enough room: compress into scratch, then append.
			scratch = s2.EncodeSnappy(scratch[:cap(scratch)], src[pos:end])
			lenPos := len(dst)
			// Reserve 4 bytes for the length, then fill them in.
			dst = append(dst, 0, 0, 0, 0)
			binary.BigEndian.PutUint32(dst[lenPos:], uint32(len(scratch)))
			dst = append(dst, scratch...)
		}
		pos = end
	}
	return dst
}
// EncodeBetter *appends* to the specified 'dst' the compressed
// 'src' in xerial framing format. If 'dst' does not have enough
// capacity, then a new slice will be allocated. If 'dst' has
// non-zero length, then it *must* have been built using this function.
func EncodeBetter(dst, src []byte) []byte {
	if len(dst) == 0 {
		dst = append(dst, xerialHeader...)
		dst = append(dst, xerialVersionInfo...)
	}
	// Compress the input as independent chunks of at most 32KB, each
	// preceded by its big-endian compressed length.
	const blockSize = 32 * 1024
	var scratch []byte // reused across iterations when dst lacks room
	for pos := 0; pos < len(src); {
		end := min(pos+blockSize, len(src))
		// Worst-case output for this chunk plus the 4-byte length prefix.
		needLen := s2.MaxEncodedLen(end-pos) + 4
		if cap(dst)-len(dst) >= needLen {
			// Enough spare capacity: compress straight into dst.
			sizePos := len(dst)
			dataStart := sizePos + 4 // leave room for the length prefix
			encoded := s2.EncodeSnappyBetter(dst[dataStart:sizePos+needLen], src[pos:end])
			dst = dst[:dataStart+len(encoded)]
			binary.BigEndian.PutUint32(dst[sizePos:dataStart], uint32(len(encoded)))
		} else {
			// Not enough room: compress into scratch, then append.
			scratch = s2.EncodeSnappyBetter(scratch[:cap(scratch)], src[pos:end])
			lenPos := len(dst)
			// Reserve 4 bytes for the length, then fill them in.
			dst = append(dst, 0, 0, 0, 0)
			binary.BigEndian.PutUint32(dst[lenPos:], uint32(len(scratch)))
			dst = append(dst, scratch...)
		}
		pos = end
	}
	return dst
}
const (
	// sizeOffset is where the first block-length prefix starts:
	// 8 bytes of magic header plus 8 bytes of version info.
	sizeOffset = 16
	// sizeBytes is the width of each big-endian block-length prefix.
	sizeBytes = 4
)
// Decode decodes snappy data whether it is traditional unframed
// or includes the xerial framing format.
func Decode(src []byte) ([]byte, error) {
	// Delegate to DecodeInto with no preallocated destination.
	var dst []byte
	return DecodeInto(dst, src)
}
// DecodeInto decodes snappy data whether it is traditional unframed
// or includes the xerial framing format into the specified `dst`.
// It is assumed that the entirety of `dst` including all capacity is available
// for use by this function. If `dst` is nil *or* insufficiently large to hold
// the decoded `src`, new space will be allocated.
// To never allocate bigger destination, use DecodeCapped.
func DecodeInto(dst, src []byte) ([]byte, error) {
	var max = len(src)
	// No xerial header: treat the input as a single unframed snappy/s2 block.
	// (The length check short-circuits before src[:8] can go out of range.)
	if max < len(xerialHeader) || !bytes.Equal(src[:8], xerialHeader) {
		dst, err := s2.Decode(dst[:cap(dst)], src)
		if err != nil {
			return dst, ErrMalformed
		}
		return dst, nil
	}
	// Header and version only: an empty framed stream.
	if max == sizeOffset {
		return []byte{}, nil
	}
	// A non-empty framed stream must contain at least one length prefix.
	if max < sizeOffset+sizeBytes {
		return nil, ErrMalformed
	}
	// Keep dst's capacity but discard any previous content.
	if len(dst) > 0 {
		dst = dst[:0]
	}
	var (
		pos   = sizeOffset
		chunk []byte // scratch buffer, reused when dst lacks spare capacity
	)
	for pos+sizeBytes <= max {
		size := int(binary.BigEndian.Uint32(src[pos : pos+sizeBytes]))
		pos += sizeBytes
		nextPos := pos + size
		// On architectures where int is 32 bits wide size + pos could
		// overflow so we need to check the low bound as well as the
		// high
		if nextPos < pos || nextPos > max {
			return nil, ErrMalformed
		}
		nextLen, err := s2.DecodedLen(src[pos:nextPos])
		if err != nil {
			return nil, err
		}
		if cap(dst)-len(dst) >= nextLen {
			// Decode directly into dst
			dstStart := len(dst)
			dstEnd := dstStart + nextLen
			_, err = s2.Decode(dst[dstStart:dstEnd], src[pos:nextPos])
			if err != nil {
				return nil, err
			}
			dst = dst[:dstEnd]
		} else {
			// Not enough spare capacity: decode via scratch and append.
			chunk, err = s2.Decode(chunk[:cap(chunk)], src[pos:nextPos])
			if err != nil {
				return nil, err
			}
			dst = append(dst, chunk...)
		}
		pos = nextPos
	}
	return dst, nil
}
// ErrDstTooSmall is returned by DecodeCapped when the destination buffer
// cannot hold the decompressed output.
var ErrDstTooSmall = errors.New("destination buffer too small")
// DecodeCapped decodes snappy data whether it is traditional unframed
// or includes the xerial framing format into the specified `dst`.
// It is assumed that the entirety of `dst` including all capacity is available
// for use by this function. If `dst` is nil *or* insufficiently large to hold
// the decoded `src`, ErrDstTooSmall is returned.
func DecodeCapped(dst, src []byte) ([]byte, error) {
	var max = len(src)
	if dst == nil {
		return nil, ErrDstTooSmall
	}
	// No xerial header: treat the input as a single unframed snappy/s2 block.
	if max < len(xerialHeader) || !bytes.Equal(src[:8], xerialHeader) {
		l, err := s2.DecodedLen(src)
		if err != nil {
			return nil, ErrMalformed
		}
		if l > cap(dst) {
			return nil, ErrDstTooSmall
		}
		return s2.Decode(dst[:cap(dst)], src)
	}
	// Keep dst's capacity but discard any previous content.
	dst = dst[:0]
	// Header and version only: an empty framed stream.
	if max == sizeOffset {
		return dst, nil
	}
	if max < sizeOffset+sizeBytes {
		return nil, ErrMalformed
	}
	pos := sizeOffset
	for pos+sizeBytes <= max {
		size := int(binary.BigEndian.Uint32(src[pos : pos+sizeBytes]))
		pos += sizeBytes
		nextPos := pos + size
		// On architectures where int is 32 bits wide size + pos could
		// overflow so we need to check the low bound as well as the
		// high
		if nextPos < pos || nextPos > max {
			return nil, ErrMalformed
		}
		nextLen, err := s2.DecodedLen(src[pos:nextPos])
		if err != nil {
			return nil, err
		}
		if cap(dst)-len(dst) < nextLen {
			return nil, ErrDstTooSmall
		}
		// Decode directly into dst
		dstStart := len(dst)
		dstEnd := dstStart + nextLen
		_, err = s2.Decode(dst[dstStart:dstEnd], src[pos:nextPos])
		if err != nil {
			return nil, err
		}
		dst = dst[:dstEnd]
		pos = nextPos
	}
	return dst, nil
}
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package zip
import (
"bufio"
"encoding/binary"
"errors"
"hash"
"hash/crc32"
"io"
"io/fs"
"os"
"path"
"path/filepath"
"sort"
"strings"
"sync"
"time"
"github.com/klauspost/compress/internal/godebug"
)
var (
	// ErrFormat is returned when the data is not a valid ZIP archive.
	ErrFormat = errors.New("zip: not a valid zip file")
	// ErrAlgorithm is returned when an entry uses a compression method
	// with no registered decompressor.
	ErrAlgorithm = errors.New("zip: unsupported compression algorithm")
	// ErrChecksum is returned when a file's CRC-32 does not match.
	ErrChecksum = errors.New("zip: checksum error")
	// ErrInsecurePath is returned (alongside a usable reader) when an
	// entry name is non-local or contains backslashes and the GODEBUG
	// variable contains zipinsecurepath=0.
	ErrInsecurePath = errors.New("zip: insecure file path")
)
// A Reader serves content from a ZIP archive.
type Reader struct {
	r io.ReaderAt
	// File lists the archive's files, in central-directory order.
	File []*File
	// Comment is the end-of-central-directory comment.
	Comment string
	// decompressors maps method IDs to per-Reader overrides; nil until
	// RegisterDecompressor is first called.
	decompressors map[uint16]Decompressor

	// Some JAR files are zip files with a prefix that is a bash script.
	// The baseOffset field is the start of the zip file proper.
	baseOffset int64

	// fileList is a list of files sorted by ename,
	// for use by the Open method.
	fileListOnce sync.Once
	fileList     []fileListEntry
}
// A ReadCloser is a [Reader] that must be closed when no longer needed.
type ReadCloser struct {
	f *os.File // file opened by OpenReader; released by Close
	Reader
}
// A File is a single file in a ZIP archive.
// The file information is in the embedded [FileHeader].
// The file content can be accessed by calling [File.Open].
type File struct {
	FileHeader
	zip          *Reader     // owning archive (for decompressor lookup)
	zipr         io.ReaderAt // raw archive data
	headerOffset int64       // includes overall ZIP archive baseOffset
	zip64        bool        // zip64 extended information extra field presence
}
// OpenReader will open the Zip file specified by name and return a ReadCloser.
//
// If any file inside the archive uses a non-local name
// (as defined by [filepath.IsLocal]) or a name containing backslashes
// and the GODEBUG environment variable contains `zipinsecurepath=0`,
// OpenReader returns the reader with an ErrInsecurePath error.
// A future version of Go may introduce this behavior by default.
// Programs that want to accept non-local names can ignore
// the ErrInsecurePath error and use the returned reader.
func OpenReader(name string) (*ReadCloser, error) {
	f, err := os.Open(name)
	if err != nil {
		return nil, err
	}
	fi, err := f.Stat()
	if err != nil {
		f.Close()
		return nil, err
	}
	rc := new(ReadCloser)
	err = rc.init(f, fi.Size())
	if err != nil && err != ErrInsecurePath {
		f.Close()
		return nil, err
	}
	rc.f = f
	// err here is either nil or ErrInsecurePath; the reader is usable
	// in both cases.
	return rc, err
}
// NewReader returns a new [Reader] reading from r, which is assumed to
// have the given size in bytes.
//
// If any file inside the archive uses a non-local name
// (as defined by [filepath.IsLocal]) or a name containing backslashes
// and the GODEBUG environment variable contains `zipinsecurepath=0`,
// NewReader returns the reader with an [ErrInsecurePath] error.
// A future version of Go may introduce this behavior by default.
// Programs that want to accept non-local names can ignore
// the [ErrInsecurePath] error and use the returned reader.
func NewReader(r io.ReaderAt, size int64) (*Reader, error) {
	if size < 0 {
		return nil, errors.New("zip: size cannot be negative")
	}
	zr := new(Reader)
	err := zr.init(r, size)
	if err != nil && err != ErrInsecurePath {
		return nil, err
	}
	// err here is either nil or ErrInsecurePath; the reader is usable
	// in both cases.
	return zr, err
}
// init populates r from the archive in rdr of the given size: it finds
// the end-of-central-directory record, then reads every central
// directory header. It returns ErrInsecurePath (with r still usable)
// when GODEBUG zipinsecurepath=0 is set and an entry name is non-local
// or contains backslashes.
func (r *Reader) init(rdr io.ReaderAt, size int64) error {
	end, baseOffset, err := readDirectoryEnd(rdr, size)
	if err != nil {
		return err
	}
	r.r = rdr
	r.baseOffset = baseOffset
	// Since the number of directory records is not validated, it is not
	// safe to preallocate r.File without first checking that the specified
	// number of files is reasonable, since a malformed archive may
	// indicate it contains up to 1 << 128 - 1 files. Since each file has a
	// header which will be _at least_ 30 bytes we can safely preallocate
	// if (data size / 30) >= end.directoryRecords.
	if end.directorySize < uint64(size) && (uint64(size)-end.directorySize)/30 >= end.directoryRecords {
		r.File = make([]*File, 0, end.directoryRecords)
	}
	r.Comment = end.comment
	rs := io.NewSectionReader(rdr, 0, size)
	if _, err = rs.Seek(r.baseOffset+int64(end.directoryOffset), io.SeekStart); err != nil {
		return err
	}
	buf := bufio.NewReader(rs)
	// Read the GODEBUG setting once, not per entry.
	zipinsecurepath := godebug.Get("zipinsecurepath") == "0"
	// The count of files inside a zip is truncated to fit in a uint16.
	// Gloss over this by reading headers until we encounter
	// a bad one, and then only report an ErrFormat or UnexpectedEOF if
	// the file count modulo 65536 is incorrect.
	for {
		f := &File{zip: r, zipr: rdr}
		err = readDirectoryHeader(f, buf)
		if err == ErrFormat || err == io.ErrUnexpectedEOF {
			break
		}
		if err != nil {
			return err
		}
		f.headerOffset += r.baseOffset
		r.File = append(r.File, f)
	}
	if uint16(len(r.File)) != uint16(end.directoryRecords) { // only compare 16 bits here
		// Return the readDirectoryHeader error if we read
		// the wrong number of directory entries.
		return err
	}
	if zipinsecurepath {
		for _, f := range r.File {
			if f.Name == "" {
				// Zip permits an empty file name field.
				continue
			}
			// The zip specification states that names must use forward slashes,
			// so consider any backslashes in the name insecure.
			if !filepath.IsLocal(f.Name) || strings.Contains(f.Name, `\`) {
				return ErrInsecurePath
			}
		}
	}
	return nil
}
// RegisterDecompressor registers or overrides a custom decompressor for a
// specific method ID. If a decompressor for a given method is not found,
// [Reader] will default to looking up the decompressor at the package level.
func (r *Reader) RegisterDecompressor(method uint16, dcomp Decompressor) {
	if r.decompressors == nil {
		// Lazily create the override table on first registration.
		r.decompressors = map[uint16]Decompressor{}
	}
	r.decompressors[method] = dcomp
}
// decompressor returns the reader-local decompressor for method, falling
// back to the package-level registry when none was registered.
func (r *Reader) decompressor(method uint16) Decompressor {
	if d := r.decompressors[method]; d != nil {
		return d
	}
	return decompressor(method)
}
// Close closes the Zip file, rendering it unusable for I/O.
// It closes the *os.File that OpenReader opened.
func (rc *ReadCloser) Close() error {
	return rc.f.Close()
}
// DataOffset returns the offset of the file's possibly-compressed
// data, relative to the beginning of the zip file.
//
// Most callers should instead use [File.Open], which transparently
// decompresses data and verifies checksums.
func (f *File) DataOffset() (offset int64, err error) {
	body, err := f.findBodyOffset()
	if err != nil {
		return 0, err
	}
	return f.headerOffset + body, nil
}
// Open returns a [ReadCloser] that provides access to the [File]'s contents.
// Multiple files may be read concurrently.
func (f *File) Open() (io.ReadCloser, error) {
	bodyOffset, err := f.findBodyOffset()
	if err != nil {
		return nil, err
	}
	if strings.HasSuffix(f.Name, "/") {
		// The ZIP specification (APPNOTE.TXT) specifies that directories, which
		// are technically zero-byte files, must not have any associated file
		// data. We previously tried failing here if f.CompressedSize64 != 0,
		// but it turns out that a number of implementations (namely, the Java
		// jar tool) don't properly set the storage method on directories
		// resulting in a file with compressed size > 0 but uncompressed size ==
		// 0. We still want to fail when a directory has associated uncompressed
		// data, but we are tolerant of cases where the uncompressed size is
		// zero but compressed size is not.
		if f.UncompressedSize64 != 0 {
			return &dirReader{ErrFormat}, nil
		}
		return &dirReader{io.EOF}, nil
	}
	size := int64(f.CompressedSize64)
	sect := io.NewSectionReader(f.zipr, f.headerOffset+bodyOffset, size)
	dcomp := f.zip.decompressor(f.Method)
	if dcomp == nil {
		return nil, ErrAlgorithm
	}
	// If the entry carries a data descriptor, hand the checksum reader a
	// view of it so EOF can verify the CRC-32.
	var desr io.Reader
	if f.hasDataDescriptor() {
		desr = io.NewSectionReader(f.zipr, f.headerOffset+bodyOffset+size, dataDescriptorLen)
	}
	return &checksumReader{
		rc:   dcomp(sect),
		hash: crc32.NewIEEE(),
		f:    f,
		desr: desr,
	}, nil
}
// OpenRaw returns a [Reader] that provides access to the [File]'s contents without
// decompression.
func (f *File) OpenRaw() (io.Reader, error) {
	body, err := f.findBodyOffset()
	if err != nil {
		return nil, err
	}
	sz := int64(f.CompressedSize64)
	return io.NewSectionReader(f.zipr, f.headerOffset+body, sz), nil
}
// dirReader is the ReadCloser served for directory entries: every Read
// reports the fixed error it was constructed with (io.EOF for an empty
// directory, ErrFormat for a malformed one).
type dirReader struct {
	err error
}

func (r *dirReader) Read([]byte) (int, error) { return 0, r.err }

func (r *dirReader) Close() error { return nil }
// checksumReader wraps the decompressor for one archive entry and
// verifies the CRC-32 of the data it yields — against the trailing data
// descriptor when present, otherwise against the central-directory value.
type checksumReader struct {
	rc    io.ReadCloser
	hash  hash.Hash32
	nread uint64 // number of bytes read so far
	f     *File
	desr  io.Reader // if non-nil, where to read the data descriptor
	err   error     // sticky error
}

// Stat returns the FileInfo of the underlying archive entry.
func (r *checksumReader) Stat() (fs.FileInfo, error) {
	return headerFileInfo{&r.f.FileHeader}, nil
}
// Read decompresses into b, folding every byte into the running CRC-32
// and byte count. At EOF it checks the total size and checksum. Any
// error becomes sticky: later calls return it unchanged.
func (r *checksumReader) Read(b []byte) (n int, err error) {
	if r.err != nil {
		return 0, r.err
	}
	n, err = r.rc.Read(b)
	r.hash.Write(b[:n])
	r.nread += uint64(n)
	// More data than the directory header promised means corruption.
	if r.nread > r.f.UncompressedSize64 {
		return 0, ErrFormat
	}
	if err == nil {
		return
	}
	if err == io.EOF {
		// Short data is also corruption.
		if r.nread != r.f.UncompressedSize64 {
			return 0, io.ErrUnexpectedEOF
		}
		if r.desr != nil {
			if err1 := readDataDescriptor(r.desr, r.f); err1 != nil {
				if err1 == io.EOF {
					err = io.ErrUnexpectedEOF
				} else {
					err = err1
				}
			} else if r.hash.Sum32() != r.f.CRC32 {
				err = ErrChecksum
			}
		} else {
			// If there's not a data descriptor, we still compare
			// the CRC32 of what we've read against the file header
			// or TOC's CRC32, if it seems like it was set.
			if r.f.CRC32 != 0 && r.hash.Sum32() != r.f.CRC32 {
				err = ErrChecksum
			}
		}
	}
	r.err = err
	return
}
// Close closes the wrapped decompressor.
func (r *checksumReader) Close() error { return r.rc.Close() }
// findBodyOffset does the minimum work to verify the file has a header
// and returns the file body offset.
func (f *File) findBodyOffset() (int64, error) {
	var hdr [fileHeaderLen]byte
	if _, err := f.zipr.ReadAt(hdr[:], f.headerOffset); err != nil {
		return 0, err
	}
	b := readBuf(hdr[:])
	if b.uint32() != fileHeaderSignature {
		return 0, ErrFormat
	}
	// Skip to the name/extra length fields at the end of the fixed header.
	b = b[22:]
	nameLen := int64(b.uint16())
	extraLen := int64(b.uint16())
	return fileHeaderLen + nameLen + extraLen, nil
}
// readDirectoryHeader attempts to read a directory header from r.
// It returns io.ErrUnexpectedEOF if it cannot read a complete header,
// and ErrFormat if it doesn't find a valid header signature.
func readDirectoryHeader(f *File, r io.Reader) error {
	var buf [directoryHeaderLen]byte
	if _, err := io.ReadFull(r, buf[:]); err != nil {
		return err
	}
	b := readBuf(buf[:])
	if sig := b.uint32(); sig != directoryHeaderSignature {
		return ErrFormat
	}
	// Fixed-size portion of the central-directory record.
	f.CreatorVersion = b.uint16()
	f.ReaderVersion = b.uint16()
	f.Flags = b.uint16()
	f.Method = b.uint16()
	f.ModifiedTime = b.uint16()
	f.ModifiedDate = b.uint16()
	f.CRC32 = b.uint32()
	f.CompressedSize = b.uint32()
	f.UncompressedSize = b.uint32()
	f.CompressedSize64 = uint64(f.CompressedSize)
	f.UncompressedSize64 = uint64(f.UncompressedSize)
	filenameLen := int(b.uint16())
	extraLen := int(b.uint16())
	commentLen := int(b.uint16())
	b = b[4:] // skipped start disk number and internal attributes (2x uint16)
	f.ExternalAttrs = b.uint32()
	f.headerOffset = int64(b.uint32())
	// Variable-size portion: name, extra fields, comment.
	d := make([]byte, filenameLen+extraLen+commentLen)
	if _, err := io.ReadFull(r, d); err != nil {
		return err
	}
	f.Name = string(d[:filenameLen])
	f.Extra = d[filenameLen : filenameLen+extraLen]
	f.Comment = string(d[filenameLen+extraLen:])

	// Determine the character encoding.
	utf8Valid1, utf8Require1 := detectUTF8(f.Name)
	utf8Valid2, utf8Require2 := detectUTF8(f.Comment)
	switch {
	case !utf8Valid1 || !utf8Valid2:
		// Name and Comment definitely not UTF-8.
		f.NonUTF8 = true
	case !utf8Require1 && !utf8Require2:
		// Name and Comment use only single-byte runes that overlap with UTF-8.
		f.NonUTF8 = false
	default:
		// Might be UTF-8, might be some other encoding; preserve existing flag.
		// Some ZIP writers use UTF-8 encoding without setting the UTF-8 flag.
		// Since it is impossible to always distinguish valid UTF-8 from some
		// other encoding (e.g., GBK or Shift-JIS), we trust the flag.
		f.NonUTF8 = f.Flags&0x800 == 0
	}

	// 0xffffffff sentinels mean the true values live in the zip64 extra field.
	needUSize := f.UncompressedSize == ^uint32(0)
	needCSize := f.CompressedSize == ^uint32(0)
	needHeaderOffset := f.headerOffset == int64(^uint32(0))

	// Best effort to find what we need.
	// Other zip authors might not even follow the basic format,
	// and we'll just ignore the Extra content in that case.
	var modified time.Time
parseExtras:
	for extra := readBuf(f.Extra); len(extra) >= 4; { // need at least tag and size
		fieldTag := extra.uint16()
		fieldSize := int(extra.uint16())
		if len(extra) < fieldSize {
			break
		}
		fieldBuf := extra.sub(fieldSize)

		switch fieldTag {
		case zip64ExtraID:
			f.zip64 = true

			// update directory values from the zip64 extra block.
			// They should only be consulted if the sizes read earlier
			// are maxed out.
			// See golang.org/issue/13367.
			if needUSize {
				needUSize = false
				if len(fieldBuf) < 8 {
					return ErrFormat
				}
				f.UncompressedSize64 = fieldBuf.uint64()
			}
			if needCSize {
				needCSize = false
				if len(fieldBuf) < 8 {
					return ErrFormat
				}
				f.CompressedSize64 = fieldBuf.uint64()
			}
			if needHeaderOffset {
				needHeaderOffset = false
				if len(fieldBuf) < 8 {
					return ErrFormat
				}
				f.headerOffset = int64(fieldBuf.uint64())
			}
		case ntfsExtraID:
			if len(fieldBuf) < 4 {
				continue parseExtras
			}
			fieldBuf.uint32() // reserved (ignored)
			for len(fieldBuf) >= 4 { // need at least tag and size
				attrTag := fieldBuf.uint16()
				attrSize := int(fieldBuf.uint16())
				if len(fieldBuf) < attrSize {
					continue parseExtras
				}
				attrBuf := fieldBuf.sub(attrSize)
				if attrTag != 1 || attrSize != 24 {
					continue // Ignore irrelevant attributes
				}

				const ticksPerSecond = 1e7    // Windows timestamp resolution
				ts := int64(attrBuf.uint64()) // ModTime since Windows epoch
				secs := ts / ticksPerSecond
				nsecs := (1e9 / ticksPerSecond) * (ts % ticksPerSecond)
				epoch := time.Date(1601, time.January, 1, 0, 0, 0, 0, time.UTC)
				modified = time.Unix(epoch.Unix()+secs, nsecs)
			}
		case unixExtraID, infoZipUnixExtraID:
			if len(fieldBuf) < 8 {
				continue parseExtras
			}
			fieldBuf.uint32()              // AcTime (ignored)
			ts := int64(fieldBuf.uint32()) // ModTime since Unix epoch
			modified = time.Unix(ts, 0)
		case extTimeExtraID:
			if len(fieldBuf) < 5 || fieldBuf.uint8()&1 == 0 {
				continue parseExtras
			}
			ts := int64(fieldBuf.uint32()) // ModTime since Unix epoch
			modified = time.Unix(ts, 0)
		}
	}

	msdosModified := msDosTimeToTime(f.ModifiedDate, f.ModifiedTime)
	f.Modified = msdosModified
	if !modified.IsZero() {
		f.Modified = modified.UTC()

		// If legacy MS-DOS timestamps are set, we can use the delta between
		// the legacy and extended versions to estimate timezone offset.
		//
		// A non-UTC timezone is always used (even if offset is zero).
		// Thus, FileHeader.Modified.Location() == time.UTC is useful for
		// determining whether extended timestamps are present.
		// This is necessary for users that need to do additional time
		// calculations when dealing with legacy ZIP formats.
		if f.ModifiedTime != 0 || f.ModifiedDate != 0 {
			f.Modified = modified.In(timeZone(msdosModified.Sub(modified)))
		}
	}

	// Assume that uncompressed size 2³²-1 could plausibly happen in
	// an old zip32 file that was sharding inputs into the largest chunks
	// possible (or is just malicious; search the web for 42.zip).
	// If needUSize is true still, it means we didn't see a zip64 extension.
	// As long as the compressed size is not also 2³²-1 (implausible)
	// and the header is not also 2³²-1 (equally implausible),
	// accept the uncompressed size 2³²-1 as valid.
	// If nothing else, this keeps archive/zip working with 42.zip.
	_ = needUSize

	if needCSize || needHeaderOffset {
		return ErrFormat
	}

	return nil
}
// readDataDescriptor reads the data descriptor that trails f's body and
// compares its CRC-32 against the central-directory value, returning
// ErrChecksum on mismatch. The sizes in the descriptor are ignored; the
// central directory is authoritative.
func readDataDescriptor(r io.Reader, f *File) error {
	var buf [dataDescriptorLen]byte
	// The spec says: "Although not originally assigned a
	// signature, the value 0x08074b50 has commonly been adopted
	// as a signature value for the data descriptor record.
	// Implementers should be aware that ZIP files may be
	// encountered with or without this signature marking data
	// descriptors and should account for either case when reading
	// ZIP files to ensure compatibility."
	//
	// dataDescriptorLen includes the size of the signature but
	// first read just those 4 bytes to see if it exists.
	if _, err := io.ReadFull(r, buf[:4]); err != nil {
		return err
	}
	off := 0
	maybeSig := readBuf(buf[:4])
	if maybeSig.uint32() != dataDescriptorSignature {
		// No data descriptor signature. Keep these four
		// bytes.
		off += 4
	}
	if _, err := io.ReadFull(r, buf[off:12]); err != nil {
		return err
	}
	b := readBuf(buf[:12])
	if b.uint32() != f.CRC32 {
		return ErrChecksum
	}

	// The two sizes that follow here can be either 32 bits or 64 bits
	// but the spec is not very clear on this and different
	// interpretations has been made causing incompatibilities. We
	// already have the sizes from the central directory so we can
	// just ignore these.

	return nil
}
// readDirectoryEnd locates and parses the end-of-central-directory
// record (searching first the last 1 KB, then the last 64 KB of the
// file), follows the zip64 locator when the 32-bit fields are maxed
// out, and computes baseOffset — how far any prefixed data (e.g. a JAR
// shell-script stub) shifts the archive proper.
func readDirectoryEnd(r io.ReaderAt, size int64) (dir *directoryEnd, baseOffset int64, err error) {
	// look for directoryEndSignature in the last 1k, then in the last 65k
	var buf []byte
	var directoryEndOffset int64
	for i, bLen := range []int64{1024, 65 * 1024} {
		if bLen > size {
			bLen = size
		}
		buf = make([]byte, int(bLen))
		if _, err := r.ReadAt(buf, size-bLen); err != nil && err != io.EOF {
			return nil, 0, err
		}
		if p := findSignatureInBlock(buf); p >= 0 {
			buf = buf[p:]
			directoryEndOffset = size - bLen + int64(p)
			break
		}
		if i == 1 || bLen == size {
			return nil, 0, ErrFormat
		}
	}

	// read header into struct
	b := readBuf(buf[4:]) // skip signature
	d := &directoryEnd{
		diskNbr:            uint32(b.uint16()),
		dirDiskNbr:         uint32(b.uint16()),
		dirRecordsThisDisk: uint64(b.uint16()),
		directoryRecords:   uint64(b.uint16()),
		directorySize:      uint64(b.uint32()),
		directoryOffset:    uint64(b.uint32()),
		commentLen:         b.uint16(),
	}
	l := int(d.commentLen)
	if l > len(b) {
		return nil, 0, errors.New("zip: invalid comment length")
	}
	d.comment = string(b[:l])

	// These values mean that the file can be a zip64 file
	if d.directoryRecords == 0xffff || d.directorySize == 0xffff || d.directoryOffset == 0xffffffff {
		p, err := findDirectory64End(r, directoryEndOffset)
		if err == nil && p >= 0 {
			directoryEndOffset = p
			err = readDirectory64End(r, p, d)
		}
		if err != nil {
			return nil, 0, err
		}
	}

	// Reject sizes/offsets that cannot be represented as int64.
	maxInt64 := uint64(1<<63 - 1)
	if d.directorySize > maxInt64 || d.directoryOffset > maxInt64 {
		return nil, 0, ErrFormat
	}

	baseOffset = directoryEndOffset - int64(d.directorySize) - int64(d.directoryOffset)

	// Make sure directoryOffset points to somewhere in our file.
	if o := baseOffset + int64(d.directoryOffset); o < 0 || o >= size {
		return nil, 0, ErrFormat
	}

	// If the directory end data tells us to use a non-zero baseOffset,
	// but we would find a valid directory entry if we assume that the
	// baseOffset is 0, then just use a baseOffset of 0.
	// We've seen files in which the directory end data gives us
	// an incorrect baseOffset.
	if baseOffset > 0 {
		off := int64(d.directoryOffset)
		rs := io.NewSectionReader(r, off, size-off)
		if readDirectoryHeader(&File{}, rs) == nil {
			baseOffset = 0
		}
	}

	return d, baseOffset, nil
}
// findDirectory64End tries to read the zip64 locator just before the
// directory end and returns the offset of the zip64 directory end if
// found.
func findDirectory64End(r io.ReaderAt, directoryEndOffset int64) (int64, error) {
	locOffset := directoryEndOffset - directory64LocLen
	if locOffset < 0 {
		// The locator would start before the file begins; nothing to find.
		return -1, nil
	}
	buf := make([]byte, directory64LocLen)
	if _, err := r.ReadAt(buf, locOffset); err != nil {
		return -1, err
	}
	b := readBuf(buf)
	if b.uint32() != directory64LocSignature {
		return -1, nil
	}
	// The zip64 directory end must live on disk 0 of a single-disk archive.
	if b.uint32() != 0 {
		return -1, nil // the file is not a valid zip64-file
	}
	p := b.uint64() // relative offset of the zip64 end of central directory record
	if b.uint32() != 1 { // total number of disks
		return -1, nil // the file is not a valid zip64-file
	}
	return int64(p), nil
}
// readDirectory64End reads the zip64 directory end and updates the
// directory end with the zip64 directory end values.
func readDirectory64End(r io.ReaderAt, offset int64, d *directoryEnd) (err error) {
	buf := make([]byte, directory64EndLen)
	if _, err := r.ReadAt(buf, offset); err != nil {
		return err
	}

	b := readBuf(buf)
	if sig := b.uint32(); sig != directory64EndSignature {
		return ErrFormat
	}

	b = b[12:]                        // skip dir size, version and version needed (uint64 + 2x uint16)
	d.diskNbr = b.uint32()            // number of this disk
	d.dirDiskNbr = b.uint32()         // number of the disk with the start of the central directory
	d.dirRecordsThisDisk = b.uint64() // total number of entries in the central directory on this disk
	d.directoryRecords = b.uint64()   // total number of entries in the central directory
	d.directorySize = b.uint64()      // size of the central directory
	d.directoryOffset = b.uint64()    // offset of start of central directory with respect to the starting disk number

	return nil
}
// findSignatureInBlock scans b backwards for the end-of-central-directory
// signature ("PK\x05\x06") whose trailing comment fits inside b, returning
// its index or -1.
func findSignatureInBlock(b []byte) int {
	for i := len(b) - directoryEndLen; i >= 0; i-- {
		// defined from directoryEndSignature in struct.go
		if b[i] != 'P' || b[i+1] != 'K' || b[i+2] != 0x05 || b[i+3] != 0x06 {
			continue
		}
		// The comment length lives in the record's final two bytes.
		n := int(b[i+directoryEndLen-2]) | int(b[i+directoryEndLen-1])<<8
		if i+directoryEndLen+n > len(b) {
			// Truncated comment.
			// Some parsers (such as Info-ZIP) ignore the truncated comment
			// rather than treating it as a hard error.
			return -1
		}
		return i
	}
	return -1
}
// readBuf is a little-endian cursor over a byte slice; every read
// consumes bytes from the front.
type readBuf []byte

// uint8 consumes and returns the next byte.
func (b *readBuf) uint8() uint8 {
	v := (*b)[0]
	*b = (*b)[1:]
	return v
}

// uint16 consumes and returns the next two bytes, little-endian.
func (b *readBuf) uint16() uint16 {
	v := binary.LittleEndian.Uint16(*b)
	*b = (*b)[2:]
	return v
}

// uint32 consumes and returns the next four bytes, little-endian.
func (b *readBuf) uint32() uint32 {
	lo := b.uint16()
	return uint32(lo) | uint32(b.uint16())<<16
}

// uint64 consumes and returns the next eight bytes, little-endian.
func (b *readBuf) uint64() uint64 {
	lo := b.uint32()
	return uint64(lo) | uint64(b.uint32())<<32
}

// sub consumes the next n bytes and returns them as their own readBuf.
func (b *readBuf) sub(n int) readBuf {
	s := (*b)[:n]
	*b = (*b)[n:]
	return s
}
// A fileListEntry is a File and its ename.
// If file == nil, the fileListEntry describes a directory without metadata.
type fileListEntry struct {
	name  string // cleaned, slash-separated path (see toValidName)
	file  *File  // nil for directories synthesized from path prefixes
	isDir bool
	isDup bool // another entry in the archive claims the same name
}

// fileInfoDirEntry is satisfied by values usable as both fs.FileInfo
// and fs.DirEntry.
type fileInfoDirEntry interface {
	fs.FileInfo
	fs.DirEntry
}

// stat returns the entry's FileInfo/DirEntry view, failing for names
// that appear more than once in the archive.
func (f *fileListEntry) stat() (fileInfoDirEntry, error) {
	if f.isDup {
		return nil, errors.New(f.name + ": duplicate entries in zip file")
	}
	if !f.isDir {
		return headerFileInfo{&f.file.FileHeader}, nil
	}
	return f, nil
}
// Only used for directories.

// Name returns the final path element of the directory name.
func (f *fileListEntry) Name() string { _, elem, _ := split(f.name); return elem }

// Size reports 0: these entries carry no file data.
func (f *fileListEntry) Size() int64 { return 0 }

// Mode reports a read-only, executable (traversable) directory.
func (f *fileListEntry) Mode() fs.FileMode { return fs.ModeDir | 0555 }

// Type reports only the directory type bit, never permission bits.
func (f *fileListEntry) Type() fs.FileMode { return fs.ModeDir }

// IsDir always reports true; these methods serve directories only.
func (f *fileListEntry) IsDir() bool { return true }

// Sys returns nil; there is no underlying data source.
func (f *fileListEntry) Sys() any { return nil }

// ModTime returns the archived entry's modification time in UTC, or the
// zero time for directories synthesized from path prefixes (file == nil).
func (f *fileListEntry) ModTime() time.Time {
	if f.file == nil {
		return time.Time{}
	}
	return f.file.FileHeader.Modified.UTC()
}

// Info implements fs.DirEntry.
func (f *fileListEntry) Info() (fs.FileInfo, error) { return f, nil }

// String renders the entry in fs.FormatDirEntry style.
func (f *fileListEntry) String() string {
	return formatDirEntry(f)
}
// formatDirEntry returns a formatted version of dir for human readability.
// Implementations of [DirEntry] can call this from a String method.
// The outputs for a directory named subdir and a file named hello.go are:
//
// d subdir/
// - hello.go
//
// TODO: Use fs.FormatDirEntry when Go 1.20 is no longer supported
func formatDirEntry(dir fs.DirEntry) string {
name := dir.Name()
b := make([]byte, 0, 5+len(name))
// The Type method does not return any permission bits,
// so strip them from the string.
mode := dir.Type().String()
mode = mode[:len(mode)-9]
b = append(b, mode...)
b = append(b, ' ')
b = append(b, name...)
if dir.IsDir() {
b = append(b, '/')
}
return string(b)
}
// formatFileInfo returns a formatted version of info for human readability.
// Implementations of [FileInfo] can call this from a String method.
// The output for a file named "hello.go", 100 bytes, mode 0o644, created
// January 1, 1970 at noon is
//
// -rw-r--r-- 100 1970-01-01 12:00:00 hello.go
//
// TODO: Use fs.FormatFileInfo when Go 1.20 is no longer supported
func formatFileInfo(info fs.FileInfo) string {
name := info.Name()
b := make([]byte, 0, 40+len(name))
b = append(b, info.Mode().String()...)
b = append(b, ' ')
size := info.Size()
var usize uint64
if size >= 0 {
usize = uint64(size)
} else {
b = append(b, '-')
usize = uint64(-size)
}
var buf [20]byte
i := len(buf) - 1
for usize >= 10 {
q := usize / 10
buf[i] = byte('0' + usize - q*10)
i--
usize = q
}
buf[i] = byte('0' + usize)
b = append(b, buf[i:]...)
b = append(b, ' ')
b = append(b, info.ModTime().Format(time.DateTime)...)
b = append(b, ' ')
b = append(b, name...)
if info.IsDir() {
b = append(b, '/')
}
return string(b)
}
// toValidName coerces name to be a valid name for fs.FS.Open: slashes
// only, cleaned, with any leading "/" and "../" escapes removed.
func toValidName(name string) string {
	p := path.Clean(strings.ReplaceAll(name, `\`, `/`))
	p = strings.TrimPrefix(p, "/")
	for strings.HasPrefix(p, "../") {
		p = strings.TrimPrefix(p, "../")
	}
	return p
}
// initFileList builds, exactly once, the sorted fileList used by Open
// and ReadDir: one entry per archive file plus synthesized entries for
// directories that only appear as path prefixes. Duplicate names are
// flagged (isDup) rather than dropped so lookups can report them.
func (r *Reader) initFileList() {
	r.fileListOnce.Do(func() {
		// files and knownDirs map from a file/directory name
		// to an index into the r.fileList entry that we are
		// building. They are used to mark duplicate entries.
		files := make(map[string]int)
		knownDirs := make(map[string]int)

		// dirs[name] is true if name is known to be a directory,
		// because it appears as a prefix in a path.
		dirs := make(map[string]bool)

		for _, file := range r.File {
			isDir := len(file.Name) > 0 && file.Name[len(file.Name)-1] == '/'
			name := toValidName(file.Name)
			if name == "" {
				continue
			}

			// A name seen before (as file or directory) marks the
			// earlier entry as a duplicate.
			if idx, ok := files[name]; ok {
				r.fileList[idx].isDup = true
				continue
			}
			if idx, ok := knownDirs[name]; ok {
				r.fileList[idx].isDup = true
				continue
			}

			// Record every ancestor directory implied by this path.
			for dir := path.Dir(name); dir != "."; dir = path.Dir(dir) {
				dirs[dir] = true
			}

			idx := len(r.fileList)
			entry := fileListEntry{
				name:  name,
				file:  file,
				isDir: isDir,
			}
			r.fileList = append(r.fileList, entry)
			if isDir {
				knownDirs[name] = idx
			} else {
				files[name] = idx
			}
		}
		// Synthesize entries for directories that never appeared
		// explicitly in the archive.
		for dir := range dirs {
			if _, ok := knownDirs[dir]; !ok {
				if idx, ok := files[dir]; ok {
					r.fileList[idx].isDup = true
				} else {
					entry := fileListEntry{
						name:  dir,
						file:  nil,
						isDir: true,
					}
					r.fileList = append(r.fileList, entry)
				}
			}
		}

		sort.Slice(r.fileList, func(i, j int) bool { return fileEntryLess(r.fileList[i].name, r.fileList[j].name) })
	})
}
// fileEntryLess orders names by (directory, element), matching the
// sort order that initFileList produces and openLookup relies on.
func fileEntryLess(x, y string) bool {
	xdir, xelem, _ := split(x)
	ydir, yelem, _ := split(y)
	if xdir != ydir {
		return xdir < ydir
	}
	return xelem < yelem
}
// Open opens the named file in the ZIP archive,
// using the semantics of fs.FS.Open:
// paths are always slash separated, with no
// leading / or ../ elements.
func (r *Reader) Open(name string) (fs.File, error) {
	r.initFileList()

	if !fs.ValidPath(name) {
		return nil, &fs.PathError{Op: "open", Path: name, Err: fs.ErrInvalid}
	}
	e := r.openLookup(name)
	if e == nil {
		return nil, &fs.PathError{Op: "open", Path: name, Err: fs.ErrNotExist}
	}
	if e.isDir {
		// Directories get a stateful handle that supports ReadDir.
		return &openDir{e, r.openReadDir(name), 0}, nil
	}
	rc, err := e.file.Open()
	if err != nil {
		return nil, err
	}
	// The ReadCloser returned by File.Open is asserted to satisfy fs.File.
	return rc.(fs.File), nil
}
// split separates name into its parent directory and final element,
// reporting whether a trailing slash marked it as a directory.
// A name with no slash has parent ".".
func split(name string) (dir, elem string, isDir bool) {
	if strings.HasSuffix(name, "/") {
		isDir = true
		name = name[:len(name)-1]
	}
	if i := strings.LastIndexByte(name, '/'); i >= 0 {
		return name[:i], name[i+1:], isDir
	}
	return ".", name, isDir
}
// dotFile is the synthetic root-directory entry returned by openLookup(".").
var dotFile = &fileListEntry{name: "./", isDir: true}
// openLookup returns the fileListEntry for name, or nil if no such
// entry exists. "." resolves to the synthetic root directory.
// Requires initFileList to have run (fileList sorted by (dir, elem)).
func (r *Reader) openLookup(name string) *fileListEntry {
	if name == "." {
		return dotFile
	}

	dir, elem, _ := split(name)
	files := r.fileList
	// Binary-search for the first candidate at or after (dir, elem).
	i := sort.Search(len(files), func(i int) bool {
		idir, ielem, _ := split(files[i].name)
		return idir > dir || idir == dir && ielem >= elem
	})
	if i < len(files) {
		fname := files[i].name
		// Accept either an exact match, or the same name stored with a
		// trailing slash (a directory entry).
		if fname == name || len(fname) == len(name)+1 && fname[len(name)] == '/' && fname[:len(name)] == name {
			return &files[i]
		}
	}
	return nil
}
// openReadDir returns the contiguous run of fileList entries whose
// parent directory is exactly dir, found with two binary searches over
// the (dir, elem)-sorted list.
func (r *Reader) openReadDir(dir string) []fileListEntry {
	files := r.fileList
	// First entry whose parent sorts at or after dir ...
	i := sort.Search(len(files), func(i int) bool {
		idir, _, _ := split(files[i].name)
		return idir >= dir
	})
	// ... and first entry whose parent sorts after dir.
	j := sort.Search(len(files), func(j int) bool {
		jdir, _, _ := split(files[j].name)
		return jdir > dir
	})
	return files[i:j]
}
// openDir is the fs.ReadDirFile handle returned by Reader.Open for
// directories. offset tracks how far successive ReadDir calls have advanced.
type openDir struct {
	e      *fileListEntry
	files  []fileListEntry
	offset int
}

// Close is a no-op; directory handles hold no resources.
func (d *openDir) Close() error { return nil }

// Stat reports the directory's own metadata.
func (d *openDir) Stat() (fs.FileInfo, error) { return d.e.stat() }

// Read always fails: a directory has no byte content.
func (d *openDir) Read([]byte) (int, error) {
	return 0, &fs.PathError{Op: "read", Path: d.e.name, Err: errors.New("is a directory")}
}
// ReadDir implements fs.ReadDirFile: it returns up to count entries
// starting at the current offset. count <= 0 returns all remaining
// entries; per the fs contract, an exhausted listing yields io.EOF
// only when count > 0.
func (d *openDir) ReadDir(count int) ([]fs.DirEntry, error) {
	n := len(d.files) - d.offset
	if count > 0 && n > count {
		n = count
	}
	if n == 0 {
		if count <= 0 {
			return nil, nil
		}
		return nil, io.EOF
	}
	list := make([]fs.DirEntry, n)
	for i := range list {
		s, err := d.files[d.offset+i].stat()
		if err != nil {
			return nil, err
		}
		list[i] = s
	}
	// Only advance past entries actually returned.
	d.offset += n
	return list, nil
}
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package zip
import (
"errors"
"io"
"sync"
"github.com/klauspost/compress/flate"
)
// A Compressor returns a new compressing writer, writing to w.
// The WriteCloser's Close method must be used to flush pending data to w.
// The Compressor itself must be safe to invoke from multiple goroutines
// simultaneously, but each returned writer will be used only by
// one goroutine at a time.
// Compressors are registered per method ID via RegisterCompressor.
type Compressor func(w io.Writer) (io.WriteCloser, error)

// A Decompressor returns a new decompressing reader, reading from r.
// The [io.ReadCloser]'s Close method must be used to release associated resources.
// The Decompressor itself must be safe to invoke from multiple goroutines
// simultaneously, but each returned reader will be used only by
// one goroutine at a time.
// Decompressors are registered per method ID via RegisterDecompressor.
type Decompressor func(r io.Reader) io.ReadCloser
// flateWriterPool recycles *flate.Writer values across archive members
// to avoid re-allocating encoder state for every file.
var flateWriterPool sync.Pool

// newFlateWriter returns a DEFLATE writer for w, reusing a pooled
// *flate.Writer when one is available.
func newFlateWriter(w io.Writer) io.WriteCloser {
	fw, ok := flateWriterPool.Get().(*flate.Writer)
	if ok {
		fw.Reset(w)
	} else {
		// Level 5 balances speed and ratio. NewWriter only fails for an
		// invalid level, so the error is deliberately ignored here.
		fw, _ = flate.NewWriter(w, 5)
	}
	return &pooledFlateWriter{fw: fw}
}
// pooledFlateWriter wraps a pooled *flate.Writer. Close returns the
// writer to flateWriterPool and nils fw, after which Write fails.
type pooledFlateWriter struct {
	mu sync.Mutex // guards Close and Write
	fw *flate.Writer
}
// Write compresses p into the underlying writer; it fails with an
// error once Close has run.
func (w *pooledFlateWriter) Write(p []byte) (n int, err error) {
	w.mu.Lock()
	defer w.mu.Unlock()
	if fw := w.fw; fw != nil {
		return fw.Write(p)
	}
	return 0, errors.New("Write after Close")
}
// Close flushes the compressor and returns it to the pool. It is
// idempotent: calls after the first report nil.
func (w *pooledFlateWriter) Close() error {
	w.mu.Lock()
	defer w.mu.Unlock()
	if w.fw == nil {
		return nil
	}
	err := w.fw.Close()
	flateWriterPool.Put(w.fw)
	w.fw = nil
	return err
}
// flateReaderPool recycles inflate state across archive members.
var flateReaderPool sync.Pool

// newFlateReader returns a DEFLATE reader for r, reusing a pooled
// reader (rewound via flate.Resetter) when one is available.
func newFlateReader(r io.Reader) io.ReadCloser {
	fr, ok := flateReaderPool.Get().(io.ReadCloser)
	if ok {
		fr.(flate.Resetter).Reset(r, nil)
	} else {
		fr = flate.NewReader(r)
	}
	return &pooledFlateReader{fr: fr}
}
// pooledFlateReader wraps a pooled DEFLATE reader. Close returns it to
// flateReaderPool and nils fr, after which Read fails.
type pooledFlateReader struct {
	mu sync.Mutex // guards Close and Read
	fr io.ReadCloser
}
// Read decompresses into p; it fails with an error once Close has run.
func (r *pooledFlateReader) Read(p []byte) (n int, err error) {
	r.mu.Lock()
	defer r.mu.Unlock()
	if fr := r.fr; fr != nil {
		return fr.Read(p)
	}
	return 0, errors.New("Read after Close")
}
// Close closes the underlying reader and returns it to the pool;
// repeated calls report nil.
func (r *pooledFlateReader) Close() error {
	r.mu.Lock()
	defer r.mu.Unlock()
	if r.fr == nil {
		return nil
	}
	err := r.fr.Close()
	flateReaderPool.Put(r.fr)
	r.fr = nil
	return err
}
// Package-level registries keyed by compression method ID.
var (
	compressors   sync.Map // map[uint16]Compressor
	decompressors sync.Map // map[uint16]Decompressor
)

// init registers the two methods every ZIP implementation must
// support: Store (no compression) and Deflate.
func init() {
	compressors.Store(Store, Compressor(func(w io.Writer) (io.WriteCloser, error) { return &nopCloser{w}, nil }))
	compressors.Store(Deflate, Compressor(func(w io.Writer) (io.WriteCloser, error) { return newFlateWriter(w), nil }))

	decompressors.Store(Store, Decompressor(io.NopCloser))
	decompressors.Store(Deflate, Decompressor(newFlateReader))
}
// RegisterDecompressor allows custom decompressors for a specified method ID.
// The common methods [Store] and [Deflate] are built in.
// Registering the same method twice panics.
func RegisterDecompressor(method uint16, dcomp Decompressor) {
	_, dup := decompressors.LoadOrStore(method, dcomp)
	if dup {
		panic("decompressor already registered")
	}
}
// RegisterCompressor registers custom compressors for a specified method ID.
// The common methods [Store] and [Deflate] are built in.
// Registering the same method twice panics.
func RegisterCompressor(method uint16, comp Compressor) {
	_, dup := compressors.LoadOrStore(method, comp)
	if dup {
		panic("compressor already registered")
	}
}
// compressor returns the package-level Compressor registered for
// method, or nil when none is registered.
func compressor(method uint16) Compressor {
	if ci, ok := compressors.Load(method); ok {
		return ci.(Compressor)
	}
	return nil
}
// decompressor returns the package-level Decompressor registered for
// method, or nil when none is registered.
func decompressor(method uint16) Decompressor {
	if di, ok := decompressors.Load(method); ok {
		return di.(Decompressor)
	}
	return nil
}
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
/*
Package zip provides support for reading and writing ZIP archives.
See the [ZIP specification] for details.
This package does not support disk spanning.
A note about ZIP64:
To be backwards compatible the FileHeader has both 32 and 64 bit Size
fields. The 64 bit fields will always contain the correct value and
for normal archives both fields will be the same. For files requiring
the ZIP64 format the 32 bit fields will be 0xffffffff and the 64 bit
fields must be used instead.
[ZIP specification]: https://support.pkware.com/pkzip/appnote
*/
package zip
import (
"io/fs"
"path"
"time"
)
// Compression methods.
// Method IDs are assigned by the ZIP specification (APPNOTE 4.4.5).
const (
	Store   uint16 = 0 // no compression
	Deflate uint16 = 8 // DEFLATE compressed
)
// Record signatures, fixed record lengths, and field values defined by
// the ZIP specification.
const (
	fileHeaderSignature      = 0x04034b50
	directoryHeaderSignature = 0x02014b50
	directoryEndSignature    = 0x06054b50
	directory64LocSignature  = 0x07064b50
	directory64EndSignature  = 0x06064b50
	dataDescriptorSignature  = 0x08074b50 // de-facto standard; required by OS X Finder
	fileHeaderLen            = 30         // + filename + extra
	directoryHeaderLen       = 46         // + filename + extra + comment
	directoryEndLen          = 22         // + comment
	dataDescriptorLen        = 16         // four uint32: descriptor signature, crc32, compressed size, size
	dataDescriptor64Len      = 24         // two uint32: signature, crc32 | two uint64: compressed size, size
	directory64LocLen        = 20         //
	directory64EndLen        = 56         // + extra

	// Constants for the first byte in CreatorVersion.
	creatorFAT    = 0
	creatorUnix   = 3
	creatorNTFS   = 11
	creatorVFAT   = 14
	creatorMacOSX = 19

	// Version numbers.
	zipVersion20 = 20 // 2.0
	zipVersion45 = 45 // 4.5 (reads and writes zip64 archives)

	// Limits for non zip64 files.
	uint16max = (1 << 16) - 1
	uint32max = (1 << 32) - 1

	// Extra header IDs.
	//
	// IDs 0..31 are reserved for official use by PKWARE.
	// IDs above that range are defined by third-party vendors.
	// Since ZIP lacked high precision timestamps (nor an official specification
	// of the timezone used for the date fields), many competing extra fields
	// have been invented. Pervasive use effectively makes them "official".
	//
	// See http://mdfs.net/Docs/Comp/Archiving/Zip/ExtraField
	zip64ExtraID       = 0x0001 // Zip64 extended information
	ntfsExtraID        = 0x000a // NTFS
	unixExtraID        = 0x000d // UNIX
	extTimeExtraID     = 0x5455 // Extended timestamp
	infoZipUnixExtraID = 0x5855 // Info-ZIP Unix extension
)
// FileHeader describes a file within a ZIP file.
// See the [ZIP specification] for details.
//
// [ZIP specification]: https://support.pkware.com/pkzip/appnote
type FileHeader struct {
	// Name is the name of the file.
	//
	// It must be a relative path, not start with a drive letter (such as "C:"),
	// and must use forward slashes instead of back slashes. A trailing slash
	// indicates that this file is a directory and should have no data.
	Name string

	// Comment is any arbitrary user-defined string shorter than 64KiB.
	Comment string

	// NonUTF8 indicates that Name and Comment are not encoded in UTF-8.
	//
	// By specification, the only other encoding permitted should be CP-437,
	// but historically many ZIP readers interpret Name and Comment as whatever
	// the system's local character encoding happens to be.
	//
	// This flag should only be set if the user intends to encode a non-portable
	// ZIP file for a specific localized region. Otherwise, the Writer
	// automatically sets the ZIP format's UTF-8 flag for valid UTF-8 strings.
	NonUTF8 bool

	CreatorVersion uint16
	ReaderVersion  uint16
	Flags          uint16

	// Method is the compression method. If zero, Store is used.
	Method uint16

	// Modified is the modified time of the file.
	//
	// When reading, an extended timestamp is preferred over the legacy MS-DOS
	// date field, and the offset between the times is used as the timezone.
	// If only the MS-DOS date is present, the timezone is assumed to be UTC.
	//
	// When writing, an extended timestamp (which is timezone-agnostic) is
	// always emitted. The legacy MS-DOS date field is encoded according to the
	// location of the Modified time.
	Modified time.Time

	// ModifiedTime is an MS-DOS-encoded time.
	//
	// Deprecated: Use Modified instead.
	ModifiedTime uint16

	// ModifiedDate is an MS-DOS-encoded date.
	//
	// Deprecated: Use Modified instead.
	ModifiedDate uint16

	// CRC32 is the CRC32 checksum of the file content.
	CRC32 uint32

	// CompressedSize is the compressed size of the file in bytes.
	// If either the uncompressed or compressed size of the file
	// does not fit in 32 bits, CompressedSize is set to ^uint32(0).
	//
	// Deprecated: Use CompressedSize64 instead.
	CompressedSize uint32

	// UncompressedSize is the uncompressed size of the file in bytes.
	// If either the uncompressed or compressed size of the file
	// does not fit in 32 bits, UncompressedSize is set to ^uint32(0).
	//
	// Deprecated: Use UncompressedSize64 instead.
	UncompressedSize uint32

	// CompressedSize64 is the compressed size of the file in bytes.
	CompressedSize64 uint64

	// UncompressedSize64 is the uncompressed size of the file in bytes.
	UncompressedSize64 uint64

	Extra         []byte
	ExternalAttrs uint32 // Meaning depends on CreatorVersion
}
// FileInfo returns an fs.FileInfo for the [FileHeader].
func (h *FileHeader) FileInfo() fs.FileInfo {
	return headerFileInfo{h}
}
// headerFileInfo implements [fs.FileInfo].
type headerFileInfo struct {
	fh *FileHeader
}

// Name returns the base name of the file within the archive.
func (fi headerFileInfo) Name() string { return path.Base(fi.fh.Name) }

// Size prefers the 64-bit size field, falling back to the legacy
// 32-bit field when the 64-bit one is zero.
func (fi headerFileInfo) Size() int64 {
	if fi.fh.UncompressedSize64 > 0 {
		return int64(fi.fh.UncompressedSize64)
	}
	return int64(fi.fh.UncompressedSize)
}

func (fi headerFileInfo) IsDir() bool { return fi.Mode().IsDir() }

// ModTime prefers the high-resolution Modified field, falling back to
// the legacy MS-DOS timestamp when Modified is unset.
func (fi headerFileInfo) ModTime() time.Time {
	if fi.fh.Modified.IsZero() {
		return fi.fh.ModTime()
	}
	return fi.fh.Modified.UTC()
}

func (fi headerFileInfo) Mode() fs.FileMode { return fi.fh.Mode() }
func (fi headerFileInfo) Type() fs.FileMode { return fi.fh.Mode().Type() }
func (fi headerFileInfo) Sys() any          { return fi.fh }

// Info lets headerFileInfo double as an fs.DirEntry.
func (fi headerFileInfo) Info() (fs.FileInfo, error) { return fi, nil }

func (fi headerFileInfo) String() string {
	return formatFileInfo(fi)
}
// FileInfoHeader creates a partially-populated [FileHeader] from an
// fs.FileInfo.
// Because fs.FileInfo's Name method returns only the base name of
// the file it describes, it may be necessary to modify the Name field
// of the returned header to provide the full path name of the file.
// If compression is desired, callers should set the FileHeader.Method
// field; it is unset by default.
func FileInfoHeader(fi fs.FileInfo) (*FileHeader, error) {
	size := fi.Size()
	fh := &FileHeader{
		Name:               fi.Name(),
		UncompressedSize64: uint64(size),
	}
	fh.SetModTime(fi.ModTime())
	fh.SetMode(fi.Mode())
	// Clamp the legacy 32-bit size field to the zip64 sentinel.
	if fh.UncompressedSize64 > uint32max {
		fh.UncompressedSize = uint32max
	} else {
		fh.UncompressedSize = uint32(fh.UncompressedSize64)
	}
	return fh, nil
}
// directoryEnd holds a parsed end-of-central-directory record,
// using 64-bit fields wide enough for zip64 values.
type directoryEnd struct {
	diskNbr            uint32 // unused
	dirDiskNbr         uint32 // unused
	dirRecordsThisDisk uint64 // unused
	directoryRecords   uint64
	directorySize      uint64
	directoryOffset    uint64 // relative to file
	commentLen         uint16
	comment            string
}
// timeZone returns a *time.Location based on the provided offset.
// If the offset is non-sensible, then this uses an offset of zero.
func timeZone(offset time.Duration) *time.Location {
const (
minOffset = -12 * time.Hour // E.g., Baker island at -12:00
maxOffset = +14 * time.Hour // E.g., Line island at +14:00
offsetAlias = 15 * time.Minute // E.g., Nepal at +5:45
)
offset = offset.Round(offsetAlias)
if offset < minOffset || maxOffset < offset {
offset = 0
}
return time.FixedZone("", int(offset/time.Second))
}
// msDosTimeToTime converts an MS-DOS date and time into a time.Time.
// The resolution is 2s.
// See: https://learn.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-dosdatetimetofiletime
func msDosTimeToTime(dosDate, dosTime uint16) time.Time {
return time.Date(
// date bits 0-4: day of month; 5-8: month; 9-15: years since 1980
int(dosDate>>9+1980),
time.Month(dosDate>>5&0xf),
int(dosDate&0x1f),
// time bits 0-4: second/2; 5-10: minute; 11-15: hour
int(dosTime>>11),
int(dosTime>>5&0x3f),
int(dosTime&0x1f*2),
0, // nanoseconds
time.UTC,
)
}
// timeToMsDosTime converts a time.Time to an MS-DOS date and time.
// The resolution is 2s.
// See: https://learn.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-filetimetodosdatetime
func timeToMsDosTime(t time.Time) (fDate uint16, fTime uint16) {
fDate = uint16(t.Day() + int(t.Month())<<5 + (t.Year()-1980)<<9)
fTime = uint16(t.Second()/2 + t.Minute()<<5 + t.Hour()<<11)
return
}
// ModTime returns the modification time in UTC using the legacy
// [ModifiedDate] and [ModifiedTime] fields.
//
// Deprecated: Use [Modified] instead.
func (h *FileHeader) ModTime() time.Time {
	return msDosTimeToTime(h.ModifiedDate, h.ModifiedTime)
}
// SetModTime sets the [Modified], [ModifiedTime], and [ModifiedDate] fields
// to the given time in UTC.
//
// Deprecated: Use [Modified] instead.
func (h *FileHeader) SetModTime(t time.Time) {
	t = t.UTC() // Convert to UTC for compatibility
	h.Modified = t
	h.ModifiedDate, h.ModifiedTime = timeToMsDosTime(t)
}
const (
	// Unix constants. The specification doesn't mention them,
	// but these seem to be the values agreed on by tools.
	s_IFMT   = 0xf000
	s_IFSOCK = 0xc000
	s_IFLNK  = 0xa000
	s_IFREG  = 0x8000
	s_IFBLK  = 0x6000
	s_IFDIR  = 0x4000
	s_IFCHR  = 0x2000
	s_IFIFO  = 0x1000
	s_ISUID  = 0x800
	s_ISGID  = 0x400
	s_ISVTX  = 0x200

	// MS-DOS attribute bits stored in the low byte of ExternalAttrs.
	msdosDir      = 0x10
	msdosReadOnly = 0x01
)
// Mode returns the permission and mode bits for the [FileHeader].
func (h *FileHeader) Mode() (mode fs.FileMode) {
	// The high byte of CreatorVersion identifies the system that wrote
	// the archive and thus how ExternalAttrs must be decoded.
	switch h.CreatorVersion >> 8 {
	case creatorUnix, creatorMacOSX:
		mode = unixModeToFileMode(h.ExternalAttrs >> 16)
	case creatorNTFS, creatorVFAT, creatorFAT:
		mode = msdosModeToFileMode(h.ExternalAttrs)
	}
	// A trailing slash in the name always marks a directory.
	if len(h.Name) > 0 && h.Name[len(h.Name)-1] == '/' {
		mode |= fs.ModeDir
	}
	return mode
}
// SetMode changes the permission and mode bits for the [FileHeader].
func (h *FileHeader) SetMode(mode fs.FileMode) {
	// Mark the entry as Unix-created so readers decode the Unix mode
	// bits stored in the upper half of ExternalAttrs.
	h.CreatorVersion = h.CreatorVersion&0xff | creatorUnix<<8
	h.ExternalAttrs = fileModeToUnixMode(mode) << 16

	// set MSDOS attributes too, as the original zip does.
	if mode&fs.ModeDir != 0 {
		h.ExternalAttrs |= msdosDir
	}
	if mode&0200 == 0 {
		h.ExternalAttrs |= msdosReadOnly
	}
}
// isZip64 reports whether the file size exceeds the 32 bit limit.
// uint32max itself is included because it is the sentinel value that
// signals "see the zip64 extra field".
func (h *FileHeader) isZip64() bool {
	return h.CompressedSize64 >= uint32max || h.UncompressedSize64 >= uint32max
}
// hasDataDescriptor reports whether general-purpose flag bit 3 is set,
// meaning sizes and CRC follow the data in a trailing descriptor.
func (h *FileHeader) hasDataDescriptor() bool {
	return h.Flags&0x8 != 0
}
// msdosModeToFileMode maps MS-DOS attribute bits to an fs.FileMode:
// directories get 0777, files 0666, and the read-only attribute
// clears all write bits.
func msdosModeToFileMode(m uint32) (mode fs.FileMode) {
	switch {
	case m&msdosDir != 0:
		mode = fs.ModeDir | 0777
	default:
		mode = 0666
	}
	if m&msdosReadOnly != 0 {
		mode &^= 0222
	}
	return mode
}
func fileModeToUnixMode(mode fs.FileMode) uint32 {
var m uint32
switch mode & fs.ModeType {
default:
m = s_IFREG
case fs.ModeDir:
m = s_IFDIR
case fs.ModeSymlink:
m = s_IFLNK
case fs.ModeNamedPipe:
m = s_IFIFO
case fs.ModeSocket:
m = s_IFSOCK
case fs.ModeDevice:
m = s_IFBLK
case fs.ModeDevice | fs.ModeCharDevice:
m = s_IFCHR
}
if mode&fs.ModeSetuid != 0 {
m |= s_ISUID
}
if mode&fs.ModeSetgid != 0 {
m |= s_ISGID
}
if mode&fs.ModeSticky != 0 {
m |= s_ISVTX
}
return m | uint32(mode&0777)
}
// unixModeToFileMode decodes Unix st_mode bits into an fs.FileMode,
// translating the file-type nibble and the setuid/setgid/sticky bits.
func unixModeToFileMode(m uint32) fs.FileMode {
	out := fs.FileMode(m & 0777)
	switch m & s_IFMT {
	case s_IFBLK:
		out |= fs.ModeDevice
	case s_IFCHR:
		out |= fs.ModeDevice | fs.ModeCharDevice
	case s_IFDIR:
		out |= fs.ModeDir
	case s_IFIFO:
		out |= fs.ModeNamedPipe
	case s_IFLNK:
		out |= fs.ModeSymlink
	case s_IFREG:
		// nothing to do
	case s_IFSOCK:
		out |= fs.ModeSocket
	}
	if m&s_ISGID != 0 {
		out |= fs.ModeSetgid
	}
	if m&s_ISUID != 0 {
		out |= fs.ModeSetuid
	}
	if m&s_ISVTX != 0 {
		out |= fs.ModeSticky
	}
	return out
}
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package zip
import (
"bufio"
"encoding/binary"
"errors"
"hash"
"hash/crc32"
"io"
"io/fs"
"strings"
"unicode/utf8"
)
// Errors returned by writeHeader when a fixed-width length field in
// the local header would overflow.
var (
	errLongName  = errors.New("zip: FileHeader.Name too long")
	errLongExtra = errors.New("zip: FileHeader.Extra too long")
)
// Writer implements a zip file writer.
type Writer struct {
	cw          *countWriter          // counts bytes written so headers can record offsets
	dir         []*header             // central-directory entries, emitted at Close
	last        *fileWriter           // file currently being written, if any
	closed      bool
	compressors map[uint16]Compressor // per-writer overrides; falls back to the package registry
	comment     string

	// testHookCloseSizeOffset if non-nil is called with the size
	// and offset of the central directory at Close.
	testHookCloseSizeOffset func(size, offset uint64)
}
// header pairs a FileHeader with the offset of its local header within
// the archive; raw marks entries created via CreateRaw.
type header struct {
	*FileHeader
	offset uint64
	raw    bool
}
// NewWriter returns a new [Writer] writing a zip file to w.
// Output is buffered; Close (or Flush) pushes it to w.
func NewWriter(w io.Writer) *Writer {
	return &Writer{cw: &countWriter{w: bufio.NewWriter(w)}}
}
// SetOffset sets the offset of the beginning of the zip data within the
// underlying writer. It should be used when the zip data is appended to an
// existing file, such as a binary executable.
// It must be called before any data is written.
func (w *Writer) SetOffset(n int64) {
	if w.cw.count != 0 {
		panic("zip: SetOffset called after data was written")
	}
	// Seeding the byte counter shifts every recorded header offset.
	w.cw.count = n
}
// Flush flushes any buffered data to the underlying writer.
// Calling Flush is not normally necessary; calling Close is sufficient.
func (w *Writer) Flush() error {
	return w.cw.w.(*bufio.Writer).Flush()
}
// SetComment sets the end-of-central-directory comment field.
// It can only be called before [Writer.Close].
// The comment must fit in the record's 16-bit length field.
func (w *Writer) SetComment(comment string) error {
	if len(comment) > uint16max {
		return errors.New("zip: Writer.Comment too long")
	}
	w.comment = comment
	return nil
}
// Close finishes writing the zip file by writing the central directory.
// It does not close the underlying writer.
func (w *Writer) Close() error {
	// Finish the file currently being written, if any.
	if w.last != nil && !w.last.closed {
		if err := w.last.close(); err != nil {
			return err
		}
		w.last = nil
	}
	if w.closed {
		return errors.New("zip: writer closed twice")
	}
	w.closed = true

	// write central directory
	start := w.cw.count
	for _, h := range w.dir {
		var buf [directoryHeaderLen]byte
		b := writeBuf(buf[:])
		b.uint32(uint32(directoryHeaderSignature))
		b.uint16(h.CreatorVersion)
		b.uint16(h.ReaderVersion)
		b.uint16(h.Flags)
		b.uint16(h.Method)
		b.uint16(h.ModifiedTime)
		b.uint16(h.ModifiedDate)
		b.uint32(h.CRC32)
		if h.isZip64() || h.offset >= uint32max {
			// the file needs a zip64 header. store maxint in both
			// 32 bit size fields (and offset later) to signal that the
			// zip64 extra header should be used.
			b.uint32(uint32max) // compressed size
			b.uint32(uint32max) // uncompressed size

			// append a zip64 extra block to Extra
			var buf [28]byte // 2x uint16 + 3x uint64
			eb := writeBuf(buf[:])
			eb.uint16(zip64ExtraID)
			eb.uint16(24) // size = 3x uint64
			eb.uint64(h.UncompressedSize64)
			eb.uint64(h.CompressedSize64)
			eb.uint64(h.offset)
			h.Extra = append(h.Extra, buf[:]...)
		} else {
			b.uint32(h.CompressedSize)
			b.uint32(h.UncompressedSize)
		}

		b.uint16(uint16(len(h.Name)))
		b.uint16(uint16(len(h.Extra)))
		b.uint16(uint16(len(h.Comment)))
		b = b[4:] // skip disk number start and internal file attr (2x uint16)
		b.uint32(h.ExternalAttrs)
		if h.offset > uint32max {
			b.uint32(uint32max)
		} else {
			b.uint32(uint32(h.offset))
		}
		if _, err := w.cw.Write(buf[:]); err != nil {
			return err
		}
		if _, err := io.WriteString(w.cw, h.Name); err != nil {
			return err
		}
		if _, err := w.cw.Write(h.Extra); err != nil {
			return err
		}
		if _, err := io.WriteString(w.cw, h.Comment); err != nil {
			return err
		}
	}
	end := w.cw.count

	records := uint64(len(w.dir))
	size := uint64(end - start)
	offset := uint64(start)

	if f := w.testHookCloseSizeOffset; f != nil {
		f(size, offset)
	}

	// Overflowing any classic end-record field forces the zip64
	// end-of-central-directory record and locator.
	if records >= uint16max || size >= uint32max || offset >= uint32max {
		var buf [directory64EndLen + directory64LocLen]byte
		b := writeBuf(buf[:])

		// zip64 end of central directory record
		b.uint32(directory64EndSignature)
		b.uint64(directory64EndLen - 12) // length minus signature (uint32) and length fields (uint64)
		b.uint16(zipVersion45)           // version made by
		b.uint16(zipVersion45)           // version needed to extract
		b.uint32(0)                      // number of this disk
		b.uint32(0)                      // number of the disk with the start of the central directory
		b.uint64(records)                // total number of entries in the central directory on this disk
		b.uint64(records)                // total number of entries in the central directory
		b.uint64(size)                   // size of the central directory
		b.uint64(offset)                 // offset of start of central directory with respect to the starting disk number

		// zip64 end of central directory locator
		b.uint32(directory64LocSignature)
		b.uint32(0)           // number of the disk with the start of the zip64 end of central directory
		b.uint64(uint64(end)) // relative offset of the zip64 end of central directory record
		b.uint32(1)           // total number of disks

		if _, err := w.cw.Write(buf[:]); err != nil {
			return err
		}

		// store max values in the regular end record to signal
		// that the zip64 values should be used instead
		records = uint16max
		size = uint32max
		offset = uint32max
	}

	// write end record
	var buf [directoryEndLen]byte
	b := writeBuf(buf[:])
	b.uint32(uint32(directoryEndSignature))
	b = b[4:]                        // skip over disk number and first disk number (2x uint16)
	b.uint16(uint16(records))        // number of entries this disk
	b.uint16(uint16(records))        // number of entries total
	b.uint32(uint32(size))           // size of directory
	b.uint32(uint32(offset))         // start of directory
	b.uint16(uint16(len(w.comment))) // byte size of EOCD comment
	if _, err := w.cw.Write(buf[:]); err != nil {
		return err
	}
	if _, err := io.WriteString(w.cw, w.comment); err != nil {
		return err
	}

	return w.cw.w.(*bufio.Writer).Flush()
}
// Create adds a file to the zip file using the provided name.
// It returns a [Writer] to which the file contents should be written.
// The file contents will be compressed using the [Deflate] method.
// The name must be a relative path: it must not start with a drive
// letter (e.g. C:) or leading slash, and only forward slashes are
// allowed. To create a directory instead of a file, add a trailing
// slash to the name.
// The file's contents must be written to the [io.Writer] before the next
// call to [Writer.Create], [Writer.CreateHeader], or [Writer.Close].
func (w *Writer) Create(name string) (io.Writer, error) {
	header := &FileHeader{
		Name:   name,
		Method: Deflate,
	}
	return w.CreateHeader(header)
}
// detectUTF8 reports whether s is a valid UTF-8 string, and whether the string
// must be considered UTF-8 encoding (i.e., not compatible with CP-437, ASCII,
// or any other common encoding).
func detectUTF8(s string) (valid, require bool) {
	for i := 0; i < len(s); {
		r, size := utf8.DecodeRuneInString(s[i:])
		i += size
		// Officially, ZIP uses CP-437, but many readers use the system's
		// local character encoding. Most encoding are compatible with a large
		// subset of CP-437, which itself is ASCII-like.
		//
		// Forbid 0x7e and 0x5c since EUC-KR and Shift-JIS replace those
		// characters with localized currency and overline characters.
		if 0x20 <= r && r <= 0x7d && r != 0x5c {
			continue // plain CP-437/ASCII-safe character
		}
		if !utf8.ValidRune(r) || (r == utf8.RuneError && size == 1) {
			return false, false
		}
		require = true
	}
	return true, require
}
// prepare performs the bookkeeping operations required at the start of
// CreateHeader and CreateRaw: it finishes the previous file, if any,
// and rejects back-to-back reuse of the same *FileHeader.
func (w *Writer) prepare(fh *FileHeader) error {
	if w.last != nil && !w.last.closed {
		if err := w.last.close(); err != nil {
			return err
		}
	}
	if len(w.dir) > 0 && w.dir[len(w.dir)-1].FileHeader == fh {
		// See https://golang.org/issue/11144 confusion.
		return errors.New("archive/zip: invalid duplicate FileHeader")
	}
	return nil
}
// CreateHeader adds a file to the zip archive using the provided [FileHeader]
// for the file metadata. [Writer] takes ownership of fh and may mutate
// its fields. The caller must not modify fh after calling [Writer.CreateHeader].
//
// This returns a [Writer] to which the file contents should be written.
// The file's contents must be written to the io.Writer before the next
// call to [Writer.Create], [Writer.CreateHeader], [Writer.CreateRaw], or [Writer.Close].
func (w *Writer) CreateHeader(fh *FileHeader) (io.Writer, error) {
	if err := w.prepare(fh); err != nil {
		return nil, err
	}

	// The ZIP format has a sad state of affairs regarding character encoding.
	// Officially, the name and comment fields are supposed to be encoded
	// in CP-437 (which is mostly compatible with ASCII), unless the UTF-8
	// flag bit is set. However, there are several problems:
	//
	//	* Many ZIP readers still do not support UTF-8.
	//	* If the UTF-8 flag is cleared, several readers simply interpret the
	//	name and comment fields as whatever the local system encoding is.
	//
	// In order to avoid breaking readers without UTF-8 support,
	// we avoid setting the UTF-8 flag if the strings are CP-437 compatible.
	// However, if the strings require multibyte UTF-8 encoding and is a
	// valid UTF-8 string, then we set the UTF-8 bit.
	//
	// For the case, where the user explicitly wants to specify the encoding
	// as UTF-8, they will need to set the flag bit themselves.
	utf8Valid1, utf8Require1 := detectUTF8(fh.Name)
	utf8Valid2, utf8Require2 := detectUTF8(fh.Comment)
	switch {
	case fh.NonUTF8:
		fh.Flags &^= 0x800
	case (utf8Require1 || utf8Require2) && (utf8Valid1 && utf8Valid2):
		fh.Flags |= 0x800
	}

	fh.CreatorVersion = fh.CreatorVersion&0xff00 | zipVersion20 // preserve compatibility byte
	fh.ReaderVersion = zipVersion20

	// If Modified is set, this takes precedence over MS-DOS timestamp fields.
	if !fh.Modified.IsZero() {
		// Contrary to the FileHeader.SetModTime method, we intentionally
		// do not convert to UTC, because we assume the user intends to encode
		// the date using the specified timezone. A user may want this control
		// because many legacy ZIP readers interpret the timestamp according
		// to the local timezone.
		//
		// The timezone is only non-UTC if a user directly sets the Modified
		// field directly themselves. All other approaches sets UTC.
		fh.ModifiedDate, fh.ModifiedTime = timeToMsDosTime(fh.Modified)

		// Use "extended timestamp" format since this is what Info-ZIP uses.
		// Nearly every major ZIP implementation uses a different format,
		// but at least most seem to be able to understand the other formats.
		//
		// This format happens to be identical for both local and central header
		// if modification time is the only timestamp being encoded.
		var mbuf [9]byte // 2*SizeOf(uint16) + SizeOf(uint8) + SizeOf(uint32)
		mt := uint32(fh.Modified.Unix())
		eb := writeBuf(mbuf[:])
		eb.uint16(extTimeExtraID)
		eb.uint16(5)  // Size: SizeOf(uint8) + SizeOf(uint32)
		eb.uint8(1)   // Flags: ModTime
		eb.uint32(mt) // ModTime
		fh.Extra = append(fh.Extra, mbuf[:]...)
	}

	var (
		ow io.Writer
		fw *fileWriter
	)
	h := &header{
		FileHeader: fh,
		offset:     uint64(w.cw.count),
	}

	if strings.HasSuffix(fh.Name, "/") {
		// Set the compression method to Store to ensure data length is truly zero,
		// which the writeHeader method always encodes for the size fields.
		// This is necessary as most compression formats have non-zero lengths
		// even when compressing an empty string.
		fh.Method = Store
		fh.Flags &^= 0x8 // we will not write a data descriptor

		// Explicitly clear sizes as they have no meaning for directories.
		fh.CompressedSize = 0
		fh.CompressedSize64 = 0
		fh.UncompressedSize = 0
		fh.UncompressedSize64 = 0

		ow = dirWriter{}
	} else {
		fh.Flags |= 0x8 // we will write a data descriptor

		// Chain: caller -> rawCount -> compressor -> compCount -> archive,
		// so both raw and compressed byte counts are tracked.
		fw = &fileWriter{
			zipw:      w.cw,
			compCount: &countWriter{w: w.cw},
			crc32:     crc32.NewIEEE(),
		}
		comp := w.compressor(fh.Method)
		if comp == nil {
			return nil, ErrAlgorithm
		}
		var err error
		fw.comp, err = comp(fw.compCount)
		if err != nil {
			return nil, err
		}
		fw.rawCount = &countWriter{w: fw.comp}
		fw.header = h
		ow = fw
	}
	w.dir = append(w.dir, h)
	if err := writeHeader(w.cw, h); err != nil {
		return nil, err
	}
	// If we're creating a directory, fw is nil.
	w.last = fw
	return ow, nil
}
// writeHeader encodes h's local file header and writes it to w,
// followed by the name and extra fields.
func writeHeader(w io.Writer, h *header) error {
	const maxUint16 = 1<<16 - 1
	if len(h.Name) > maxUint16 {
		return errLongName
	}
	if len(h.Extra) > maxUint16 {
		return errLongExtra
	}

	var buf [fileHeaderLen]byte
	b := writeBuf(buf[:])
	b.uint32(uint32(fileHeaderSignature))
	b.uint16(h.ReaderVersion)
	b.uint16(h.Flags)
	b.uint16(h.Method)
	b.uint16(h.ModifiedTime)
	b.uint16(h.ModifiedDate)
	// In raw mode (caller does the compression), the values are either
	// written here or in the trailing data descriptor based on the header
	// flags.
	if h.raw && !h.hasDataDescriptor() {
		b.uint32(h.CRC32)
		b.uint32(uint32(min(h.CompressedSize64, uint32max)))
		b.uint32(uint32(min(h.UncompressedSize64, uint32max)))
	} else {
		// When this package handles the compression, these values are
		// always written to the trailing data descriptor.
		b.uint32(0) // crc32
		b.uint32(0) // compressed size
		b.uint32(0) // uncompressed size
	}
	b.uint16(uint16(len(h.Name)))
	b.uint16(uint16(len(h.Extra)))
	if _, err := w.Write(buf[:]); err != nil {
		return err
	}
	if _, err := io.WriteString(w, h.Name); err != nil {
		return err
	}
	_, err := w.Write(h.Extra)
	return err
}
// CreateRaw adds a file to the zip archive using the provided [FileHeader] and
// returns a [Writer] to which the file contents should be written. The file's
// contents must be written to the io.Writer before the next call to [Writer.Create],
// [Writer.CreateHeader], [Writer.CreateRaw], or [Writer.Close].
//
// In contrast to [Writer.CreateHeader], the bytes passed to Writer are not compressed.
func (w *Writer) CreateRaw(fh *FileHeader) (io.Writer, error) {
	if err := w.prepare(fh); err != nil {
		return nil, err
	}

	// Mirror the 64-bit sizes into the legacy 32-bit fields, clamping
	// to the zip64 sentinel when they do not fit.
	fh.CompressedSize = uint32(min(fh.CompressedSize64, uint32max))
	fh.UncompressedSize = uint32(min(fh.UncompressedSize64, uint32max))

	h := &header{
		FileHeader: fh,
		offset:     uint64(w.cw.count),
		raw:        true,
	}
	w.dir = append(w.dir, h)
	if err := writeHeader(w.cw, h); err != nil {
		return nil, err
	}

	if strings.HasSuffix(fh.Name, "/") {
		// Directories carry no data; nothing further to write.
		w.last = nil
		return dirWriter{}, nil
	}

	fw := &fileWriter{
		header: h,
		zipw:   w.cw,
	}
	w.last = fw
	return fw, nil
}
// Copy copies the file f (obtained from a [Reader]) into w. It copies the raw
// form directly bypassing decompression, compression, and validation.
func (w *Writer) Copy(f *File) error {
	src, err := f.OpenRaw()
	if err != nil {
		return err
	}
	dst, err := w.CreateRaw(&f.FileHeader)
	if err != nil {
		return err
	}
	// Stream the already-compressed bytes straight across.
	_, err = io.Copy(dst, src)
	return err
}
// RegisterCompressor registers or overrides a custom compressor for a specific
// method ID. If a compressor for a given method is not found, [Writer] will
// default to looking up the compressor at the package level.
func (w *Writer) RegisterCompressor(method uint16, comp Compressor) {
	if w.compressors == nil {
		// Lazily create the per-writer registry on first use.
		w.compressors = map[uint16]Compressor{method: comp}
		return
	}
	w.compressors[method] = comp
}
// AddFS adds the files from fs.FS to the archive.
// It walks the directory tree starting at the root of the filesystem
// adding each file to the zip using deflate while maintaining the directory structure.
// Directory entries are stored as well (with a trailing slash), so empty
// directories survive a round trip through the archive.
func (w *Writer) AddFS(fsys fs.FS) error {
	return fs.WalkDir(fsys, ".", func(name string, d fs.DirEntry, err error) error {
		if err != nil {
			return err
		}
		// The root directory itself is not stored in the archive.
		if name == "." {
			return nil
		}
		info, err := d.Info()
		if err != nil {
			return err
		}
		// Only regular files and directories can be represented in a zip.
		if !info.Mode().IsRegular() && !info.IsDir() {
			return errors.New("zip: cannot add non-regular file")
		}
		h, err := FileInfoHeader(info)
		if err != nil {
			return err
		}
		h.Name = name
		if d.IsDir() {
			// Directory entries are identified by a trailing slash.
			h.Name += "/"
		}
		h.Method = Deflate
		fw, err := w.CreateHeader(h)
		if err != nil {
			return err
		}
		if d.IsDir() {
			// Directories carry no content to copy.
			return nil
		}
		f, err := fsys.Open(name)
		if err != nil {
			return err
		}
		defer f.Close()
		_, err = io.Copy(fw, f)
		return err
	})
}
// compressor returns the compressor for the given method, preferring a
// writer-local registration and falling back to the package-level registry.
func (w *Writer) compressor(method uint16) Compressor {
	if c, ok := w.compressors[method]; ok && c != nil {
		return c
	}
	return compressor(method)
}
// dirWriter is the io.Writer handed out for directory entries.
// Directories have no content, so any non-empty write is an error.
type dirWriter struct{}

// Write rejects any attempt to store bytes in a directory entry.
func (dirWriter) Write(b []byte) (int, error) {
	if len(b) > 0 {
		return 0, errors.New("zip: write to directory")
	}
	return 0, nil
}
// fileWriter implements the io.Writer returned for regular file entries.
// It feeds written bytes through the configured compressor while tracking
// the CRC-32 and both the compressed and uncompressed byte counts.
type fileWriter struct {
	*header
	zipw      io.Writer      // the underlying archive stream
	rawCount  *countWriter   // counts uncompressed bytes written into comp
	comp      io.WriteCloser // the compressor
	compCount *countWriter   // counts compressed bytes reaching zipw
	crc32     hash.Hash32    // running CRC-32 of the uncompressed data
	closed    bool           // set once close is called; further writes fail
}
// Write appends p to the entry's content. In raw mode the bytes bypass
// compression and CRC tracking entirely.
func (w *fileWriter) Write(p []byte) (int, error) {
	switch {
	case w.closed:
		return 0, errors.New("zip: write to closed file")
	case w.raw:
		// Caller already compressed the data; pass it straight through.
		return w.zipw.Write(p)
	}
	// Fold p into the CRC, then count and compress it.
	w.crc32.Write(p)
	return w.rawCount.Write(p)
}
// close finishes the entry: it flushes the compressor, fills in the final
// CRC-32 and sizes on the FileHeader, and writes the trailing data
// descriptor if one is required. Closing twice is an error.
func (w *fileWriter) close() error {
	if w.closed {
		return errors.New("zip: file closed twice")
	}
	w.closed = true
	if w.raw {
		// Raw mode: the caller supplied CRC and sizes up front.
		return w.writeDataDescriptor()
	}
	if err := w.comp.Close(); err != nil {
		return err
	}
	// update FileHeader
	fh := w.header.FileHeader
	fh.CRC32 = w.crc32.Sum32()
	fh.CompressedSize64 = uint64(w.compCount.count)
	fh.UncompressedSize64 = uint64(w.rawCount.count)
	if fh.isZip64() {
		// Sizes overflow 32 bits: store sentinel values and require zip64.
		fh.CompressedSize = uint32max
		fh.UncompressedSize = uint32max
		fh.ReaderVersion = zipVersion45 // requires 4.5 - File uses ZIP64 format extensions
	} else {
		fh.CompressedSize = uint32(fh.CompressedSize64)
		fh.UncompressedSize = uint32(fh.UncompressedSize64)
	}
	return w.writeDataDescriptor()
}
// writeDataDescriptor emits the trailing data descriptor (CRC plus sizes)
// for the entry, using 64-bit size fields when the entry needs zip64.
// It is a no-op when the header flags do not request a descriptor.
func (w *fileWriter) writeDataDescriptor() error {
	if !w.hasDataDescriptor() {
		return nil
	}
	// Write data descriptor. This is more complicated than one would
	// think, see e.g. comments in zipfile.c:putextended() and
	// http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=7073588.
	// The approach here is to write 8 byte sizes if needed without
	// adding a zip64 extra in the local header (too late anyway).
	var buf []byte
	if w.isZip64() {
		buf = make([]byte, dataDescriptor64Len)
	} else {
		buf = make([]byte, dataDescriptorLen)
	}
	b := writeBuf(buf)
	b.uint32(dataDescriptorSignature) // de-facto standard, required by OS X
	b.uint32(w.CRC32)
	if w.isZip64() {
		b.uint64(w.CompressedSize64)
		b.uint64(w.UncompressedSize64)
	} else {
		b.uint32(w.CompressedSize)
		b.uint32(w.UncompressedSize)
	}
	_, err := w.zipw.Write(buf)
	return err
}
type countWriter struct {
w io.Writer
count int64
}
func (w *countWriter) Write(p []byte) (int, error) {
n, err := w.w.Write(p)
w.count += int64(n)
return n, err
}
// nopCloser promotes an io.Writer to an io.WriteCloser whose Close
// does nothing.
type nopCloser struct {
	io.Writer
}

// Close implements io.Closer; it never fails and releases nothing.
func (n nopCloser) Close() error {
	return nil
}
// writeBuf is a byte slice that supports writing little-endian integers,
// advancing itself past each value as it is written.
type writeBuf []byte

// uint8 stores a single byte and advances by one.
func (b *writeBuf) uint8(v uint8) {
	s := *b
	s[0] = v
	*b = s[1:]
}

// uint16 stores v little-endian and advances by two bytes.
func (b *writeBuf) uint16(v uint16) {
	s := *b
	binary.LittleEndian.PutUint16(s, v)
	*b = s[2:]
}

// uint32 stores v little-endian and advances by four bytes.
func (b *writeBuf) uint32(v uint32) {
	s := *b
	binary.LittleEndian.PutUint32(s, v)
	*b = s[4:]
}

// uint64 stores v little-endian and advances by eight bytes.
func (b *writeBuf) uint64(v uint64) {
	s := *b
	binary.LittleEndian.PutUint64(s, v)
	*b = s[8:]
}
// Copyright 2019+ Klaus Post. All rights reserved.
// License information can be found in the LICENSE file.
// Based on work by Yann Collet, released under BSD License.
package zstd
import (
"errors"
"fmt"
"io"
"math/bits"
"github.com/klauspost/compress/internal/le"
)
// bitReader reads a bitstream in reverse.
// The last set bit indicates the start of the stream and is used
// for aligning the input.
type bitReader struct {
	in       []byte
	value    uint64 // Maybe use [16]byte, but shifting is awkward.
	cursor   int    // offset where next read should end
	bitsRead uint8  // bits already consumed from value; >64 means overread
}
// init initializes and resets the bit reader.
// It returns an error if the input is empty or lacks the end-of-stream
// sentinel bit in its final byte.
func (b *bitReader) init(in []byte) error {
	if len(in) < 1 {
		return errors.New("corrupt stream: too short")
	}
	b.in = in
	// The highest bit of the last byte indicates where to start
	v := in[len(in)-1]
	if v == 0 {
		return errors.New("corrupt stream, did not find end of stream")
	}
	b.cursor = len(in)
	b.bitsRead = 64
	b.value = 0
	if len(in) >= 8 {
		b.fillFastStart()
	} else {
		// Short input: fill byte-by-byte via the slow path.
		b.fill()
		b.fill()
	}
	// Skip the zero padding up to and including the sentinel bit.
	b.bitsRead += 8 - uint8(highBits(uint32(v)))
	return nil
}
// getBits returns the next n bits from the stream; n may be 0,
// in which case no state changes and 0 is returned.
func (b *bitReader) getBits(n uint8) int {
	if n != 0 {
		return int(b.get32BitsFast(n))
	}
	return 0
}
// get32BitsFast requires that at least one bit is requested every time.
// There are no checks if the buffer is filled.
func (b *bitReader) get32BitsFast(n uint8) uint32 {
	const regMask = 64 - 1
	// Shift out the bits already consumed, then shift down so the
	// requested n bits land in the low bits of the result.
	v := uint32((b.value << (b.bitsRead & regMask)) >> ((regMask + 1 - n) & regMask))
	b.bitsRead += n
	return v
}
// fillFast() will make sure at least 32 bits are available.
// There must be at least 4 bytes available.
func (b *bitReader) fillFast() {
	if b.bitsRead < 32 {
		return
	}
	// Pull the next 4 bytes (reading backwards) into the low half.
	b.cursor -= 4
	b.value = (b.value << 32) | uint64(le.Load32(b.in, b.cursor))
	b.bitsRead -= 32
}
// fillFastStart() assumes the bitreader is empty and there is at least 8 bytes to read.
func (b *bitReader) fillFastStart() {
	// Load a full 64-bit register from the end of the input.
	b.cursor -= 8
	b.value = le.Load64(b.in, b.cursor)
	b.bitsRead = 0
}
// fill() will make sure at least 32 bits are available.
// Unlike fillFast it tolerates fewer than 4 remaining input bytes.
func (b *bitReader) fill() {
	if b.bitsRead < 32 {
		return
	}
	if b.cursor >= 4 {
		// Fast path: a whole 32-bit chunk is available.
		b.cursor -= 4
		b.value = (b.value << 32) | uint64(le.Load32(b.in, b.cursor))
		b.bitsRead -= 32
		return
	}
	// Tail: consume the remaining bytes one at a time.
	b.bitsRead -= uint8(8 * b.cursor)
	for b.cursor > 0 {
		b.cursor -= 1
		b.value = (b.value << 8) | uint64(b.in[b.cursor])
	}
}
// finished reports whether every bit of the stream has been consumed:
// the value register is exhausted and no input bytes remain.
func (b *bitReader) finished() bool {
	return b.bitsRead >= 64 && b.cursor == 0
}
// overread returns true if more bits have been requested than is on the stream.
// bitsRead beyond 64 can only happen by reading past the register's contents.
func (b *bitReader) overread() bool {
	return b.bitsRead > 64
}
// remain returns the number of bits remaining.
func (b *bitReader) remain() uint {
	// Unread bytes in the buffer plus unread bits in the value register.
	return 8*uint(b.cursor) + 64 - uint(b.bitsRead)
}
// close the bitstream and returns an error if out-of-buffer reads occurred.
func (b *bitReader) close() error {
	// Release reference.
	b.in = nil
	// cursor is zeroed before the checks below, so finished() here
	// reduces to bitsRead >= 64 and remain() to 64 - bitsRead.
	b.cursor = 0
	if !b.finished() {
		return fmt.Errorf("%d extra bits on block, should be 0", b.remain())
	}
	// An overread (bitsRead > 64) passes finished() above, so report it here.
	if b.bitsRead > 64 {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// highBits returns the index of the highest set bit of val,
// i.e. floor(log2(val)) for val > 0.
func highBits(val uint32) (n uint32) {
	return uint32(31 - bits.LeadingZeros32(val))
}
// Copyright 2018 Klaus Post. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Based on work Copyright (c) 2013, Yann Collet, released under BSD License.
package zstd
// bitWriter writes a bitstream least-significant bit first: the first
// bit written becomes the LSB of the first output byte.
type bitWriter struct {
	bitContainer uint64 // pending bits, LSB-first
	nBits        uint8  // number of valid bits in bitContainer
	out          []byte // flushed output bytes
}

// bitMask16 holds width masks for up to 16 bits, with extra entries so
// indexing with a masked count avoids bounds checks.
var bitMask16 = [32]uint16{
	0, 1, 3, 7, 0xF, 0x1F,
	0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF,
	0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, 0xFFFF,
	0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF,
	0xFFFF, 0xFFFF} /* up to 16 bits */

// bitMask32 holds width masks for up to 32 bits.
var bitMask32 = [32]uint32{
	0, 1, 3, 7, 0xF, 0x1F, 0x3F, 0x7F, 0xFF,
	0x1FF, 0x3FF, 0x7FF, 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF,
	0x1ffff, 0x3ffff, 0x7FFFF, 0xfFFFF, 0x1fFFFF, 0x3fFFFF, 0x7fFFFF, 0xffFFFF,
	0x1ffFFFF, 0x3ffFFFF, 0x7ffFFFF, 0xfffFFFF, 0x1fffFFFF, 0x3fffFFFF, 0x7fffFFFF,
} // up to 32 bits

// addBits16NC adds up to 16 bits, masking value down to the requested
// width. No overflow check; the caller must have flushed recently.
func (w *bitWriter) addBits16NC(value uint16, bits uint8) {
	w.bitContainer |= uint64(value&bitMask16[bits&31]) << (w.nBits & 63)
	w.nBits += bits
}

// addBits32NC adds up to 31 bits, masking value down to the requested
// width. No overflow check; the caller must have flushed recently.
func (w *bitWriter) addBits32NC(value uint32, bits uint8) {
	w.bitContainer |= uint64(value&bitMask32[bits&31]) << (w.nBits & 63)
	w.nBits += bits
}

// addBits64NC adds up to 64 bits, flushing mid-way when more than 31
// bits are requested. There must be space for 32 bits before the call.
func (w *bitWriter) addBits64NC(value uint64, bits uint8) {
	if bits <= 31 {
		w.addBits32Clean(uint32(value), bits)
		return
	}
	// Split: low 32 bits first, flush, then the remaining high bits.
	w.addBits32Clean(uint32(value), 32)
	w.flush32()
	w.addBits32Clean(uint32(value>>32), bits-32)
}

// addBits32Clean adds up to 32 bits without masking. The input must not
// contain more set bits than the stated width. No overflow check.
func (w *bitWriter) addBits32Clean(value uint32, bits uint8) {
	w.bitContainer |= uint64(value) << (w.nBits & 63)
	w.nBits += bits
}

// addBits16Clean adds up to 16 bits without masking. The input must not
// contain more set bits than the stated width. No overflow check.
func (w *bitWriter) addBits16Clean(value uint16, bits uint8) {
	w.bitContainer |= uint64(value) << (w.nBits & 63)
	w.nBits += bits
}

// flush32 moves a 32-bit chunk to the output when the container is at
// least half full, guaranteeing room for 32 more bits afterwards.
func (w *bitWriter) flush32() {
	if w.nBits < 32 {
		return
	}
	w.out = append(w.out,
		byte(w.bitContainer),
		byte(w.bitContainer>>8),
		byte(w.bitContainer>>16),
		byte(w.bitContainer>>24))
	w.nBits -= 32
	w.bitContainer >>= 32
}

// flushAlign writes out every remaining bit, zero-padding the final
// partial byte, and clears the container.
func (w *bitWriter) flushAlign() {
	pending := (w.nBits + 7) >> 3
	for i := uint8(0); i < pending; i++ {
		w.out = append(w.out, byte(w.bitContainer>>(i*8)))
	}
	w.nBits = 0
	w.bitContainer = 0
}

// close terminates the stream: it writes the end-of-stream marker bit
// and flushes the remainder, byte aligned, to the output.
func (w *bitWriter) close() {
	// End mark
	w.addBits16Clean(1, 1)
	// flush until next byte.
	w.flushAlign()
}

// reset clears all pending bits and continues writing by appending to out.
func (w *bitWriter) reset(out []byte) {
	w.bitContainer = 0
	w.nBits = 0
	w.out = out
}
// Copyright 2019+ Klaus Post. All rights reserved.
// License information can be found in the LICENSE file.
// Based on work by Yann Collet, released under BSD License.
package zstd
import (
"errors"
"fmt"
"hash/crc32"
"io"
"sync"
"github.com/klauspost/compress/huff0"
"github.com/klauspost/compress/zstd/internal/xxhash"
)
// blockType is the frame block type from the zstd block header.
type blockType uint8

//go:generate stringer -type=blockType,literalsBlockType,seqCompMode,tableIndex

const (
	blockTypeRaw blockType = iota
	blockTypeRLE
	blockTypeCompressed
	blockTypeReserved // invalid on the wire; also used internally to carry errors
)
// literalsBlockType is the encoding of a block's literals section.
type literalsBlockType uint8

const (
	literalsBlockRaw literalsBlockType = iota
	literalsBlockRLE
	literalsBlockCompressed
	literalsBlockTreeless // compressed, reusing the previous Huffman table
)
const (
	// maxCompressedBlockSize is the biggest allowed compressed block size (128KB)
	maxCompressedBlockSize = 128 << 10
	// compressedBlockOverAlloc is extra capacity reserved beyond the block size.
	compressedBlockOverAlloc    = 16
	maxCompressedBlockSizeAlloc = 128<<10 + compressedBlockOverAlloc
	// Maximum possible block size (all Raw+Uncompressed).
	maxBlockSize = (1 << 21) - 1
	// maxMatchLen limits match length; 131074 = 128KiB + 2.
	maxMatchLen = 131074
	// maxSequences is the largest sequence count encodable in the
	// sequences section header (0x7f00 + 0xffff).
	maxSequences = 0x7f00 + 0xffff
	// We support slightly less than the reference decoder to be able to
	// use ints on 32 bit archs.
	maxOffsetBits = 30
)
var (
	// huffDecoderPool recycles Huffman scratch buffers between blocks.
	// Uses `any` rather than the pre-1.18 `interface{}` spelling, matching
	// the modern Go the rest of the package already requires.
	huffDecoderPool = sync.Pool{New: func() any {
		return &huff0.Scratch{}
	}}
	// fseDecoderPool recycles FSE decoders for the sequence tables.
	fseDecoderPool = sync.Pool{New: func() any {
		return &fseDecoder{}
	}}
)
// blockDec holds all state needed to decode a single block.
type blockDec struct {
	// Raw source data of the block.
	data        []byte
	dataStorage []byte // backing storage reused for data across blocks
	// Destination of the decoded data.
	dst []byte
	// Buffer for literals data.
	literalBuf []byte
	// Window size of the block.
	WindowSize uint64
	// Pending error, surfaced via the blockTypeReserved path.
	err error
	// Check against this crc, if hasCRC is true.
	checkCRC uint32
	hasCRC   bool
	// Frame to use for singlethreaded decoding.
	// Should not be used by the decoder itself since parent may be another frame.
	localFrame *frameDec
	// Decoded sequence values for this block.
	sequence []seqVals
	// State handed between stages when decoding asynchronously.
	async struct {
		newHist  *history
		literals []byte
		seqData  []byte
		seqSize  int // Size of uncompressed sequences
		fcs      uint64
	}
	// Block is RLE, this is the size.
	RLESize uint32
	Type    blockType
	// Is this the last block of a frame?
	Last bool
	// Use less memory
	lowMem bool
}
// String returns a human-readable description of the block for debugging.
// Fixes the "Steam Size" typo in the debug output ("Stream Size").
func (b *blockDec) String() string {
	if b == nil {
		return "<nil>"
	}
	return fmt.Sprintf("Stream Size: %d, Type: %v, Last: %t, Window: %d", len(b.data), b.Type, b.Last, b.WindowSize)
}
// newBlockDec creates a block decoder, optionally tuned for low memory use.
func newBlockDec(lowMem bool) *blockDec {
	return &blockDec{lowMem: lowMem}
}
// reset will reset the block.
// Input must be a start of a block and will be at the end of the block when returned.
// It reads and validates the 3-byte block header, sizes the destination
// buffers, and reads the block payload into b.data.
func (b *blockDec) reset(br byteBuffer, windowSize uint64) error {
	b.WindowSize = windowSize
	tmp, err := br.readSmall(3)
	if err != nil {
		println("Reading block header:", err)
		return err
	}
	// Header layout: bit 0 last-block, bits 1-2 type, bits 3-23 size.
	bh := uint32(tmp[0]) | (uint32(tmp[1]) << 8) | (uint32(tmp[2]) << 16)
	b.Last = bh&1 != 0
	b.Type = blockType((bh >> 1) & 3)
	// find size.
	cSize := int(bh >> 3)
	maxSize := maxCompressedBlockSizeAlloc
	switch b.Type {
	case blockTypeReserved:
		return ErrReservedBlockType
	case blockTypeRLE:
		if cSize > maxCompressedBlockSize || cSize > int(b.WindowSize) {
			if debugDecoder {
				printf("rle block too big: csize:%d block: %+v\n", uint64(cSize), b)
			}
			return ErrWindowSizeExceeded
		}
		b.RLESize = uint32(cSize)
		if b.lowMem {
			maxSize = cSize
		}
		// An RLE block stores a single byte on the wire.
		cSize = 1
	case blockTypeCompressed:
		if debugDecoder {
			println("Data size on stream:", cSize)
		}
		b.RLESize = 0
		maxSize = maxCompressedBlockSizeAlloc
		if windowSize < maxCompressedBlockSize && b.lowMem {
			maxSize = int(windowSize) + compressedBlockOverAlloc
		}
		if cSize > maxCompressedBlockSize || uint64(cSize) > b.WindowSize {
			if debugDecoder {
				printf("compressed block too big: csize:%d block: %+v\n", uint64(cSize), b)
			}
			return ErrCompressedSizeTooBig
		}
		// Empty compressed blocks must at least be 2 bytes
		// for Literals_Block_Type and one for Sequences_Section_Header.
		if cSize < 2 {
			return ErrBlockTooSmall
		}
	case blockTypeRaw:
		if cSize > maxCompressedBlockSize || cSize > int(b.WindowSize) {
			if debugDecoder {
				// Fixed copy-pasted debug message: this is the raw branch.
				printf("raw block too big: csize:%d block: %+v\n", uint64(cSize), b)
			}
			return ErrWindowSizeExceeded
		}
		b.RLESize = 0
		// We do not need a destination for raw blocks.
		maxSize = -1
	default:
		panic("Invalid block type")
	}
	// Read block data.
	if _, ok := br.(*byteBuf); !ok && cap(b.dataStorage) < cSize {
		// byteBuf doesn't need a destination buffer.
		if b.lowMem || cSize > maxCompressedBlockSize {
			b.dataStorage = make([]byte, 0, cSize+compressedBlockOverAlloc)
		} else {
			b.dataStorage = make([]byte, 0, maxCompressedBlockSizeAlloc)
		}
	}
	b.data, err = br.readBig(cSize, b.dataStorage)
	if err != nil {
		if debugDecoder {
			println("Reading block:", err, "(", cSize, ")", len(b.data))
			printf("%T", br)
		}
		return err
	}
	if cap(b.dst) <= maxSize {
		b.dst = make([]byte, 0, maxSize+1)
	}
	return nil
}
// sendErr makes the decoder return err for this frame: the block is
// marked last and typed reserved, the type used to carry errors.
// (The previous comment referred to a "sendEOF" name that no longer exists.)
func (b *blockDec) sendErr(err error) {
	b.Last = true
	b.Type = blockTypeReserved
	b.err = err
}
// Close will release resources.
// Closed blockDec cannot be reset.
func (b *blockDec) Close() {
	// No resources are currently held.
}
// decodeBuf decodes the block content into b.dst — or, for compressed
// blocks when possible, directly into the history buffer — and appends
// the result to hist.
func (b *blockDec) decodeBuf(hist *history) error {
	switch b.Type {
	case blockTypeRLE:
		if cap(b.dst) < int(b.RLESize) {
			if b.lowMem {
				b.dst = make([]byte, b.RLESize)
			} else {
				b.dst = make([]byte, maxCompressedBlockSize)
			}
		}
		b.dst = b.dst[:b.RLESize]
		// Repeat the single payload byte RLESize times.
		v := b.data[0]
		for i := range b.dst {
			b.dst[i] = v
		}
		hist.appendKeep(b.dst)
		return nil
	case blockTypeRaw:
		hist.appendKeep(b.data)
		return nil
	case blockTypeCompressed:
		saved := b.dst
		// Append directly to history
		if hist.ignoreBuffer == 0 {
			b.dst = hist.b
			hist.b = nil
		} else {
			b.dst = b.dst[:0]
		}
		err := b.decodeCompressed(hist)
		if debugDecoder {
			println("Decompressed to total", len(b.dst), "bytes, hash:", xxhash.Sum64(b.dst), "error:", err)
		}
		// Restore the buffer swap done above.
		if hist.ignoreBuffer == 0 {
			hist.b = b.dst
			b.dst = saved
		} else {
			hist.appendKeep(b.dst)
		}
		return err
	case blockTypeReserved:
		// Used for returning errors.
		return b.err
	default:
		panic("Invalid block type")
	}
}
// decodeLiterals parses and decodes the literals section of a block from in,
// storing the result in hist.decoders.literals, and returns the remaining
// input (the sequences section).
func (b *blockDec) decodeLiterals(in []byte, hist *history) (remain []byte, err error) {
	// There must be at least one byte for Literals_Block_Type and one for Sequences_Section_Header
	if len(in) < 2 {
		return in, ErrBlockTooSmall
	}
	litType := literalsBlockType(in[0] & 3)
	var litRegenSize int
	var litCompSize int
	sizeFormat := (in[0] >> 2) & 3
	var fourStreams bool
	var literals []byte
	// First decode the section header; its layout depends on litType
	// and sizeFormat.
	switch litType {
	case literalsBlockRaw, literalsBlockRLE:
		switch sizeFormat {
		case 0, 2:
			// Regenerated_Size uses 5 bits (0-31). Literals_Section_Header uses 1 byte.
			litRegenSize = int(in[0] >> 3)
			in = in[1:]
		case 1:
			// Regenerated_Size uses 12 bits (0-4095). Literals_Section_Header uses 2 bytes.
			litRegenSize = int(in[0]>>4) + (int(in[1]) << 4)
			in = in[2:]
		case 3:
			// Regenerated_Size uses 20 bits (0-1048575). Literals_Section_Header uses 3 bytes.
			if len(in) < 3 {
				println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in))
				return in, ErrBlockTooSmall
			}
			litRegenSize = int(in[0]>>4) + (int(in[1]) << 4) + (int(in[2]) << 12)
			in = in[3:]
		}
	case literalsBlockCompressed, literalsBlockTreeless:
		switch sizeFormat {
		case 0, 1:
			// Both Regenerated_Size and Compressed_Size use 10 bits (0-1023).
			if len(in) < 3 {
				println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in))
				return in, ErrBlockTooSmall
			}
			n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12)
			litRegenSize = int(n & 1023)
			litCompSize = int(n >> 10)
			fourStreams = sizeFormat == 1
			in = in[3:]
		case 2:
			fourStreams = true
			if len(in) < 4 {
				println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in))
				return in, ErrBlockTooSmall
			}
			n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) + (uint64(in[3]) << 20)
			litRegenSize = int(n & 16383)
			litCompSize = int(n >> 14)
			in = in[4:]
		case 3:
			fourStreams = true
			if len(in) < 5 {
				println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in))
				return in, ErrBlockTooSmall
			}
			n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) + (uint64(in[3]) << 20) + (uint64(in[4]) << 28)
			litRegenSize = int(n & 262143)
			litCompSize = int(n >> 18)
			in = in[5:]
		}
	}
	if debugDecoder {
		println("literals type:", litType, "litRegenSize:", litRegenSize, "litCompSize:", litCompSize, "sizeFormat:", sizeFormat, "4X:", fourStreams)
	}
	if litRegenSize > int(b.WindowSize) || litRegenSize > maxCompressedBlockSize {
		return in, ErrWindowSizeExceeded
	}
	// Now materialize the literals according to the type.
	switch litType {
	case literalsBlockRaw:
		if len(in) < litRegenSize {
			println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litRegenSize)
			return in, ErrBlockTooSmall
		}
		literals = in[:litRegenSize]
		in = in[litRegenSize:]
		//printf("Found %d uncompressed literals\n", litRegenSize)
	case literalsBlockRLE:
		if len(in) < 1 {
			println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", 1)
			return in, ErrBlockTooSmall
		}
		if cap(b.literalBuf) < litRegenSize {
			if b.lowMem {
				b.literalBuf = make([]byte, litRegenSize, litRegenSize+compressedBlockOverAlloc)
			} else {
				b.literalBuf = make([]byte, litRegenSize, maxCompressedBlockSize+compressedBlockOverAlloc)
			}
		}
		literals = b.literalBuf[:litRegenSize]
		// Expand the single byte to the regenerated size.
		v := in[0]
		for i := range literals {
			literals[i] = v
		}
		in = in[1:]
		if debugDecoder {
			printf("Found %d RLE compressed literals\n", litRegenSize)
		}
	case literalsBlockTreeless:
		if len(in) < litCompSize {
			println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litCompSize)
			return in, ErrBlockTooSmall
		}
		// Store compressed literals, so we defer decoding until we get history.
		literals = in[:litCompSize]
		in = in[litCompSize:]
		if debugDecoder {
			printf("Found %d compressed literals\n", litCompSize)
		}
		// Treeless blocks reuse the Huffman table from history.
		huff := hist.huffTree
		if huff == nil {
			return in, errors.New("literal block was treeless, but no history was defined")
		}
		// Ensure we have space to store it.
		if cap(b.literalBuf) < litRegenSize {
			if b.lowMem {
				b.literalBuf = make([]byte, 0, litRegenSize+compressedBlockOverAlloc)
			} else {
				b.literalBuf = make([]byte, 0, maxCompressedBlockSize+compressedBlockOverAlloc)
			}
		}
		var err error
		// Use our out buffer.
		huff.MaxDecodedSize = litRegenSize
		if fourStreams {
			literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals)
		} else {
			literals, err = huff.Decoder().Decompress1X(b.literalBuf[:0:litRegenSize], literals)
		}
		// Make sure we don't leak our literals buffer
		if err != nil {
			println("decompressing literals:", err)
			return in, err
		}
		if len(literals) != litRegenSize {
			return in, fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals))
		}
	case literalsBlockCompressed:
		if len(in) < litCompSize {
			println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litCompSize)
			return in, ErrBlockTooSmall
		}
		literals = in[:litCompSize]
		in = in[litCompSize:]
		// Ensure we have space to store it.
		if cap(b.literalBuf) < litRegenSize {
			if b.lowMem {
				b.literalBuf = make([]byte, 0, litRegenSize+compressedBlockOverAlloc)
			} else {
				b.literalBuf = make([]byte, 0, maxCompressedBlockSize+compressedBlockOverAlloc)
			}
		}
		// Get a fresh scratch unless history holds a usable (non-dict) table.
		huff := hist.huffTree
		if huff == nil || (hist.dict != nil && huff == hist.dict.litEnc) {
			huff = huffDecoderPool.Get().(*huff0.Scratch)
			if huff == nil {
				huff = &huff0.Scratch{}
			}
		}
		var err error
		if debugDecoder {
			println("huff table input:", len(literals), "CRC:", crc32.ChecksumIEEE(literals))
		}
		huff, literals, err = huff0.ReadTable(literals, huff)
		if err != nil {
			println("reading huffman table:", err)
			return in, err
		}
		hist.huffTree = huff
		huff.MaxDecodedSize = litRegenSize
		// Use our out buffer.
		if fourStreams {
			literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals)
		} else {
			literals, err = huff.Decoder().Decompress1X(b.literalBuf[:0:litRegenSize], literals)
		}
		if err != nil {
			println("decoding compressed literals:", err)
			return in, err
		}
		// Make sure we don't leak our literals buffer
		if len(literals) != litRegenSize {
			return in, fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals))
		}
		// Re-cap to get extra size.
		literals = b.literalBuf[:len(literals)]
		if debugDecoder {
			printf("Decompressed %d literals into %d bytes\n", litCompSize, litRegenSize)
		}
	}
	hist.decoders.literals = literals
	return in, nil
}
// decodeCompressed will start decompressing a block.
// It decodes the literals, prepares the sequence decoders, and then
// synchronously executes the sequences into b.dst.
func (b *blockDec) decodeCompressed(hist *history) error {
	in := b.data
	in, err := b.decodeLiterals(in, hist)
	if err != nil {
		return err
	}
	err = b.prepareSequences(in, hist)
	if err != nil {
		return err
	}
	if hist.decoders.nSeqs == 0 {
		// No sequences: the block output is the literals verbatim.
		b.dst = append(b.dst, hist.decoders.literals...)
		return nil
	}
	before := len(hist.decoders.out)
	err = hist.decoders.decodeSync(hist.b[hist.ignoreBuffer:])
	if err != nil {
		return err
	}
	if hist.decoders.maxSyncLen > 0 {
		// Reduce the remaining budget by the bytes produced here.
		hist.decoders.maxSyncLen += uint64(before)
		hist.decoders.maxSyncLen -= uint64(len(hist.decoders.out))
	}
	b.dst = hist.decoders.out
	hist.recentOffsets = hist.decoders.prevOffset
	return nil
}
// prepareSequences parses the sequences section header and the three FSE
// table definitions (literal lengths, offsets, match lengths), leaving
// hist.decoders initialized and positioned on the sequence bitstream.
func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) {
	if debugDecoder {
		printf("prepareSequences: %d byte(s) input\n", len(in))
	}
	// Decode Sequences
	// https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#sequences-section
	if len(in) < 1 {
		return ErrBlockTooSmall
	}
	// The sequence count occupies 1-3 bytes depending on the first byte.
	var nSeqs int
	seqHeader := in[0]
	switch {
	case seqHeader < 128:
		nSeqs = int(seqHeader)
		in = in[1:]
	case seqHeader < 255:
		if len(in) < 2 {
			return ErrBlockTooSmall
		}
		nSeqs = int(seqHeader-128)<<8 | int(in[1])
		in = in[2:]
	case seqHeader == 255:
		if len(in) < 3 {
			return ErrBlockTooSmall
		}
		nSeqs = 0x7f00 + int(in[1]) + (int(in[2]) << 8)
		in = in[3:]
	}
	if nSeqs == 0 && len(in) != 0 {
		// When no sequences, there should not be any more data...
		if debugDecoder {
			printf("prepareSequences: 0 sequences, but %d byte(s) left on stream\n", len(in))
		}
		return ErrUnexpectedBlockSize
	}
	var seqs = &hist.decoders
	seqs.nSeqs = nSeqs
	if nSeqs > 0 {
		if len(in) < 1 {
			return ErrBlockTooSmall
		}
		br := byteReader{b: in, off: 0}
		// One byte holds the compression mode (2 bits) for each table.
		compMode := br.Uint8()
		br.advance(1)
		if debugDecoder {
			printf("Compression modes: 0b%b", compMode)
		}
		if compMode&3 != 0 {
			return errors.New("corrupt block: reserved bits not zero")
		}
		for i := uint(0); i < 3; i++ {
			mode := seqCompMode((compMode >> (6 - i*2)) & 3)
			if debugDecoder {
				println("Table", tableIndex(i), "is", mode)
			}
			var seq *sequenceDec
			switch tableIndex(i) {
			case tableLiteralLengths:
				seq = &seqs.litLengths
			case tableOffsets:
				seq = &seqs.offsets
			case tableMatchLengths:
				seq = &seqs.matchLengths
			default:
				panic("unknown table")
			}
			switch mode {
			case compModePredefined:
				// Return any previously pooled decoder before switching
				// to the shared predefined table.
				if seq.fse != nil && !seq.fse.preDefined {
					fseDecoderPool.Put(seq.fse)
				}
				seq.fse = &fsePredef[i]
			case compModeRLE:
				if br.remain() < 1 {
					return ErrBlockTooSmall
				}
				v := br.Uint8()
				br.advance(1)
				if seq.fse == nil || seq.fse.preDefined {
					seq.fse = fseDecoderPool.Get().(*fseDecoder)
				}
				symb, err := decSymbolValue(v, symbolTableX[i])
				if err != nil {
					printf("RLE Transform table (%v) error: %v", tableIndex(i), err)
					return err
				}
				seq.fse.setRLE(symb)
				if debugDecoder {
					printf("RLE set to 0x%x, code: %v", symb, v)
				}
			case compModeFSE:
				if debugDecoder {
					println("Reading table for", tableIndex(i))
				}
				if seq.fse == nil || seq.fse.preDefined {
					seq.fse = fseDecoderPool.Get().(*fseDecoder)
				}
				err := seq.fse.readNCount(&br, uint16(maxTableSymbol[i]))
				if err != nil {
					println("Read table error:", err)
					return err
				}
				err = seq.fse.transform(symbolTableX[i])
				if err != nil {
					println("Transform table error:", err)
					return err
				}
				if debugDecoder {
					println("Read table ok", "symbolLen:", seq.fse.symbolLen)
				}
			case compModeRepeat:
				seq.repeat = true
			}
			if br.overread() {
				return io.ErrUnexpectedEOF
			}
		}
		in = br.unread()
	}
	if debugDecoder {
		println("Literals:", len(seqs.literals), "hash:", xxhash.Sum64(seqs.literals), "and", seqs.nSeqs, "sequences.")
	}
	if nSeqs == 0 {
		if len(b.sequence) > 0 {
			b.sequence = b.sequence[:0]
		}
		return nil
	}
	// Position the bit reader on the remaining sequence bitstream.
	br := seqs.br
	if br == nil {
		br = &bitReader{}
	}
	if err := br.init(in); err != nil {
		return err
	}
	if err := seqs.initialize(br, hist, b.dst); err != nil {
		println("initializing sequences:", err)
		return err
	}
	return nil
}
// decodeSequences decodes all sequence values of the block into b.sequence.
func (b *blockDec) decodeSequences(hist *history) error {
	if cap(b.sequence) < hist.decoders.nSeqs {
		if b.lowMem {
			b.sequence = make([]seqVals, 0, hist.decoders.nSeqs)
		} else {
			// Allocate room for the maximum possible sequence count up front.
			b.sequence = make([]seqVals, 0, 0x7F00+0xffff)
		}
	}
	b.sequence = b.sequence[:hist.decoders.nSeqs]
	if hist.decoders.nSeqs == 0 {
		// No sequences: output size is just the literals length.
		hist.decoders.seqSize = len(hist.decoders.literals)
		return nil
	}
	hist.decoders.windowSize = hist.windowSize
	hist.decoders.prevOffset = hist.recentOffsets
	err := hist.decoders.decode(b.sequence)
	hist.recentOffsets = hist.decoders.prevOffset
	return err
}
// executeSequences applies the decoded sequences against the literals and
// the history window, producing the block output, then updates the history.
func (b *blockDec) executeSequences(hist *history) error {
	hbytes := hist.b
	if len(hbytes) > hist.windowSize {
		// Only the last windowSize bytes of history can be referenced.
		hbytes = hbytes[len(hbytes)-hist.windowSize:]
		// We do not need history anymore.
		if hist.dict != nil {
			hist.dict.content = nil
		}
	}
	hist.decoders.windowSize = hist.windowSize
	hist.decoders.out = b.dst[:0]
	err := hist.decoders.execute(b.sequence, hbytes)
	if err != nil {
		return err
	}
	return b.updateHistory(hist)
}
// updateHistory publishes the decoded output to b.dst and folds it into
// the history window, unless this was the frame's final block.
func (b *blockDec) updateHistory(hist *history) error {
	if len(b.data) > maxCompressedBlockSize {
		return fmt.Errorf("compressed block size too large (%d)", len(b.data))
	}
	// Set output and release references.
	b.dst = hist.decoders.out
	hist.recentOffsets = hist.decoders.prevOffset
	if b.Last {
		// Last block of the frame: history is no longer needed.
		// (println here is presumably the package's debug-gated helper.)
		println("Last block, no history returned")
		hist.b = hist.b[:0]
		return nil
	}
	hist.append(b.dst)
	if debugDecoder {
		println("Finished block with ", len(b.sequence), "sequences. Added", len(b.dst), "to history, now length", len(hist.b))
	}
	hist.decoders.out, hist.decoders.literals = nil, nil
	return nil
}
// Copyright 2019+ Klaus Post. All rights reserved.
// License information can be found in the LICENSE file.
// Based on work by Yann Collet, released under BSD License.
package zstd
import (
"errors"
"fmt"
"math"
"math/bits"
"slices"
"github.com/klauspost/compress/huff0"
)
// blockEnc holds the state for encoding a single block.
type blockEnc struct {
	size       int            // input size represented by this block
	literals   []byte         // literal bytes collected for the block
	sequences  []seq          // match sequences collected for the block
	coders     seqCoders      // FSE coders for the three sequence streams
	litEnc     *huff0.Scratch // Huffman encoder for literals
	dictLitEnc *huff0.Scratch // literal encoder carried over from a dictionary
	wr         bitWriter
	extraLits  int
	output     []byte // encoded block output
	// Recent offsets for repeat-offset coding; prev* carries them to the
	// next block via reset.
	recentOffsets     [3]uint32
	prevRecentOffsets [3]uint32
	last              bool // this is the final block
	lowMem            bool // prefer smaller allocations
}
// init should be used once the block has been created.
// If called more than once, the effect is the same as calling reset.
func (b *blockEnc) init() {
	// Pick buffer sizes up front: small (1K / 20 seqs) in low-memory
	// mode, full block size otherwise.
	litCap, seqCap, outCap := maxCompressedBlockSize, 2000, maxCompressedBlockSize
	if b.lowMem {
		litCap, seqCap, outCap = 1<<10, 20, 1<<10
	}
	if cap(b.literals) < litCap {
		b.literals = make([]byte, 0, litCap)
	}
	if cap(b.sequences) < seqCap {
		b.sequences = make([]seq, 0, seqCap)
	}
	if cap(b.output) < outCap {
		b.output = make([]byte, 0, outCap)
	}
	// Allocate all six FSE coders together on first use.
	if b.coders.mlEnc == nil {
		b.coders.mlEnc = &fseEncoder{}
		b.coders.mlPrev = &fseEncoder{}
		b.coders.ofEnc = &fseEncoder{}
		b.coders.ofPrev = &fseEncoder{}
		b.coders.llEnc = &fseEncoder{}
		b.coders.llPrev = &fseEncoder{}
	}
	b.litEnc = &huff0.Scratch{WantLogLess: 4}
	b.reset(nil)
}
// initNewEncode resets offsets and encoders to the state expected at the
// start of a fresh stream.
func (b *blockEnc) initNewEncode() {
	b.litEnc.Reuse = huff0.ReusePolicyNone
	b.coders.setPrev(nil, nil, nil)
	// Standard zstd initial repeat offsets.
	b.recentOffsets = [3]uint32{1, 4, 8}
}
// reset prepares the block for a new encode within the same stream:
// content is cleared while encoder state carries over. If prev is
// non-nil its recent offsets are inherited.
func (b *blockEnc) reset(prev *blockEnc) {
	b.literals = b.literals[:0]
	b.sequences = b.sequences[:0]
	b.output = b.output[:0]
	b.extraLits = 0
	b.size = 0
	b.last = false
	b.dictLitEnc = nil
	if prev != nil {
		b.recentOffsets = prev.prevRecentOffsets
	}
}
// swapEncoders exchanges the sequence coders and the literal Huffman
// encoder with those of prev, so encoder state can alternate between
// two blocks. (The previous comment was a copy-paste of reset's.)
func (b *blockEnc) swapEncoders(prev *blockEnc) {
	b.coders.swap(&prev.coders)
	b.litEnc, prev.litEnc = prev.litEnc, b.litEnc
}
// blockHeader contains the information for a block header.
// Layout (24 bits on the wire): bit 0 = last, bits 1-2 = type,
// bits 3 and up = size.
type blockHeader uint32

// setLast sets or clears the 'last block' indicator in bit 0.
func (h *blockHeader) setLast(b bool) {
	if b {
		*h |= 1
	} else {
		// Clear bit 0 while keeping the rest of the 24-bit header.
		*h &= (1 << 24) - 2
	}
}

// setSize stores the compressed size in bits 3 and up, preserving the
// last/type bits.
func (h *blockHeader) setSize(v uint32) {
	*h = (*h & 7) | blockHeader(v<<3)
}
// setType sets the block type.
func (h *blockHeader) setType(t blockType) {
const mask = 1 | (((1 << 24) - 1) ^ 7)
*h = (*h & mask) | blockHeader(t<<1)
}
// appendTo will append the block header to a slice.
func (h blockHeader) appendTo(b []byte) []byte {
return append(b, uint8(h), uint8(h>>8), uint8(h>>16))
}
// String returns a string representation of the block.
func (h blockHeader) String() string {
return fmt.Sprintf("Type: %d, Size: %d, Last:%t", (h>>1)&3, h>>3, h&1 == 1)
}
// literalsHeader contains literals header information.
// Layout: bits 0-1 = literals block type, bits 2-3 = size format,
// size fields start at bit 3 or 4 depending on format, and bits 60-63
// hold the number of header bytes to emit (consumed by appendTo/size).
type literalsHeader uint64

// setType can be used to set the type of literal block.
func (h *literalsHeader) setType(t literalsBlockType) {
	// Replace only the low 2 type bits.
	const mask = math.MaxUint64 - 3
	*h = (*h & mask) | literalsHeader(t)
}

// setSize can be used to set a single size, for uncompressed and RLE content.
func (h *literalsHeader) setSize(regenLen int) {
	inBits := bits.Len32(uint32(regenLen))
	// Only retain 2 bits
	const mask = 3
	lh := uint64(*h & mask)
	switch {
	case inBits < 5:
		// Size_Format 00/10: 5-bit size, 1 header byte.
		lh |= (uint64(regenLen) << 3) | (1 << 60)
		if debugEncoder {
			got := int(lh>>3) & 0xff
			if got != regenLen {
				panic(fmt.Sprint("litRegenSize = ", regenLen, "(want) != ", got, "(got)"))
			}
		}
	case inBits < 12:
		// Size_Format 01: 12-bit size, 2 header bytes.
		lh |= (1 << 2) | (uint64(regenLen) << 4) | (2 << 60)
	case inBits < 20:
		// Size_Format 11: 20-bit size, 3 header bytes.
		lh |= (3 << 2) | (uint64(regenLen) << 4) | (3 << 60)
	default:
		panic(fmt.Errorf("internal error: block too big (%d)", regenLen))
	}
	*h = literalsHeader(lh)
}

// setSizes will set the size of a compressed literals section and the input length.
func (h *literalsHeader) setSizes(compLen, inLen int, single bool) {
	compBits, inBits := bits.Len32(uint32(compLen)), bits.Len32(uint32(inLen))
	// Only retain 2 bits
	const mask = 3
	lh := uint64(*h & mask)
	switch {
	case compBits <= 10 && inBits <= 10:
		// Size_Format 01 (or 00 for single stream): two 10-bit sizes, 3 bytes.
		if !single {
			lh |= 1 << 2
		}
		lh |= (uint64(inLen) << 4) | (uint64(compLen) << (10 + 4)) | (3 << 60)
		if debugEncoder {
			const mmask = (1 << 24) - 1
			n := (lh >> 4) & mmask
			if int(n&1023) != inLen {
				panic(fmt.Sprint("regensize:", int(n&1023), "!=", inLen, inBits))
			}
			if int(n>>10) != compLen {
				panic(fmt.Sprint("compsize:", int(n>>10), "!=", compLen, compBits))
			}
		}
	case compBits <= 14 && inBits <= 14:
		// Size_Format 10: two 14-bit sizes, 4 bytes. 4-stream only.
		lh |= (2 << 2) | (uint64(inLen) << 4) | (uint64(compLen) << (14 + 4)) | (4 << 60)
		if single {
			panic("single stream used with more than 10 bits length.")
		}
	case compBits <= 18 && inBits <= 18:
		// Size_Format 11: two 18-bit sizes, 5 bytes. 4-stream only.
		lh |= (3 << 2) | (uint64(inLen) << 4) | (uint64(compLen) << (18 + 4)) | (5 << 60)
		if single {
			panic("single stream used with more than 10 bits length.")
		}
	default:
		panic("internal error: block too big")
	}
	*h = literalsHeader(lh)
}

// appendTo will append the literals header to a byte slice.
func (h literalsHeader) appendTo(b []byte) []byte {
	// Bits 60-63 carry the number of bytes to emit (1-5), little endian.
	size := uint8(h >> 60)
	switch size {
	case 1:
		b = append(b, uint8(h))
	case 2:
		b = append(b, uint8(h), uint8(h>>8))
	case 3:
		b = append(b, uint8(h), uint8(h>>8), uint8(h>>16))
	case 4:
		b = append(b, uint8(h), uint8(h>>8), uint8(h>>16), uint8(h>>24))
	case 5:
		b = append(b, uint8(h), uint8(h>>8), uint8(h>>16), uint8(h>>24), uint8(h>>32))
	default:
		panic(fmt.Errorf("internal error: literalsHeader has invalid size (%d)", size))
	}
	return b
}

// size returns the output size with currently set values.
func (h literalsHeader) size() int {
	return int(h >> 60)
}

// String returns a readable representation of the header fields.
func (h literalsHeader) String() string {
	return fmt.Sprintf("Type: %d, SizeFormat: %d, Size: 0x%d, Bytes:%d", literalsBlockType(h&3), (h>>2)&3, h&((1<<60)-1)>>4, h>>60)
}
// pushOffsets will push the recent offsets to the backup store.
// Call before encoding a block so popOffsets can undo its offset updates.
func (b *blockEnc) pushOffsets() {
	b.prevRecentOffsets = b.recentOffsets
}
// popOffsets will restore the recent offsets from the backup store,
// discarding any updates made since the matching pushOffsets.
func (b *blockEnc) popOffsets() {
	b.recentOffsets = b.prevRecentOffsets
}
// matchOffset will adjust recent offsets and return the adjusted one,
// if it matches a previous offset.
//
// The returned value is the offset code to emit: 1-3 select a repeat
// offset, values > 3 encode offset+3 literally. The recent-offset
// history is updated in place (most recent first).
func (b *blockEnc) matchOffset(offset, lits uint32) uint32 {
	// Check if offset is one of the recent offsets.
	// Adjusts the output offset accordingly.
	// Gives a tiny bit of compression, typically around 1%.
	if lits > 0 {
		switch offset {
		case b.recentOffsets[0]:
			offset = 1
		case b.recentOffsets[1]:
			b.recentOffsets[1] = b.recentOffsets[0]
			b.recentOffsets[0] = offset
			offset = 2
		case b.recentOffsets[2]:
			b.recentOffsets[2] = b.recentOffsets[1]
			b.recentOffsets[1] = b.recentOffsets[0]
			b.recentOffsets[0] = offset
			offset = 3
		default:
			b.recentOffsets[2] = b.recentOffsets[1]
			b.recentOffsets[1] = b.recentOffsets[0]
			b.recentOffsets[0] = offset
			offset += 3
		}
	} else {
		// With litLen == 0 the repeat codes are shifted by one:
		// code 1 means rep[1], code 2 means rep[2] and code 3 means
		// rep[0]-1 (RFC 8878, repeat offsets).
		switch offset {
		case b.recentOffsets[1]:
			b.recentOffsets[1] = b.recentOffsets[0]
			b.recentOffsets[0] = offset
			offset = 1
		case b.recentOffsets[2]:
			b.recentOffsets[2] = b.recentOffsets[1]
			b.recentOffsets[1] = b.recentOffsets[0]
			b.recentOffsets[0] = offset
			offset = 2
		case b.recentOffsets[0] - 1:
			b.recentOffsets[2] = b.recentOffsets[1]
			b.recentOffsets[1] = b.recentOffsets[0]
			b.recentOffsets[0] = offset
			offset = 3
		default:
			b.recentOffsets[2] = b.recentOffsets[1]
			b.recentOffsets[1] = b.recentOffsets[0]
			b.recentOffsets[0] = offset
			offset += 3
		}
	}
	return offset
}
// encodeRaw can be used to set the output to a raw representation of supplied bytes.
func (b *blockEnc) encodeRaw(a []byte) {
	var bh blockHeader
	bh.setLast(b.last)
	bh.setSize(uint32(len(a)))
	bh.setType(blockTypeRaw)
	// Replaces any previously accumulated output.
	b.output = bh.appendTo(b.output[:0])
	b.output = append(b.output, a...)
	if debugEncoder {
		println("Adding RAW block, length", len(a), "last:", b.last)
	}
}
// encodeRawTo will append a raw block header followed by src to dst
// and return the updated slice. Unlike encodeRaw it does not touch b.output.
func (b *blockEnc) encodeRawTo(dst, src []byte) []byte {
	var bh blockHeader
	bh.setLast(b.last)
	bh.setSize(uint32(len(src)))
	bh.setType(blockTypeRaw)
	dst = bh.appendTo(dst)
	dst = append(dst, src...)
	if debugEncoder {
		println("Adding RAW block, length", len(src), "last:", b.last)
	}
	return dst
}
// encodeLits can be used if the block is only litLen.
// It appends a complete block (raw, RLE or Huffman-compressed literals,
// never any sequences) to b.output. If raw is true compression is skipped.
func (b *blockEnc) encodeLits(lits []byte, raw bool) error {
	var bh blockHeader
	bh.setLast(b.last)
	bh.setSize(uint32(len(lits)))

	// Don't compress extremely small blocks
	if len(lits) < 8 || (len(lits) < 32 && b.dictLitEnc == nil) || raw {
		if debugEncoder {
			println("Adding RAW block, length", len(lits), "last:", b.last)
		}
		bh.setType(blockTypeRaw)
		b.output = bh.appendTo(b.output)
		b.output = append(b.output, lits...)
		return nil
	}

	var (
		out            []byte
		reUsed, single bool
		err            error
	)
	if b.dictLitEnc != nil {
		// A dictionary Huffman table is pending: seed the literal encoder with it.
		b.litEnc.TransferCTable(b.dictLitEnc)
		b.litEnc.Reuse = huff0.ReusePolicyAllow
		b.dictLitEnc = nil
	}
	if len(lits) >= 1024 {
		// Use 4 Streams.
		out, reUsed, err = huff0.Compress4X(lits, b.litEnc)
	} else if len(lits) > 16 {
		// Use 1 stream
		single = true
		out, reUsed, err = huff0.Compress1X(lits, b.litEnc)
	} else {
		err = huff0.ErrIncompressible
	}
	if err == nil && len(out)+5 > len(lits) {
		// If we are close, we may still be worse or equal to raw.
		// NOTE(review): unlike encode(), this comparison does not add the raw
		// literals-header size (szRaw) on the right-hand side — verify whether
		// the two call sites should be aligned.
		var lh literalsHeader
		lh.setSizes(len(out), len(lits), single)
		if len(out)+lh.size() >= len(lits) {
			err = huff0.ErrIncompressible
		}
	}
	switch err {
	case huff0.ErrIncompressible:
		if debugEncoder {
			println("Adding RAW block, length", len(lits), "last:", b.last)
		}
		bh.setType(blockTypeRaw)
		b.output = bh.appendTo(b.output)
		b.output = append(b.output, lits...)
		return nil
	case huff0.ErrUseRLE:
		if debugEncoder {
			println("Adding RLE block, length", len(lits))
		}
		bh.setType(blockTypeRLE)
		b.output = bh.appendTo(b.output)
		b.output = append(b.output, lits[0])
		return nil
	case nil:
	default:
		return err
	}
	// Compressed...
	// Now, allow reuse
	b.litEnc.Reuse = huff0.ReusePolicyAllow
	bh.setType(blockTypeCompressed)
	var lh literalsHeader
	if reUsed {
		if debugEncoder {
			println("Reused tree, compressed to", len(out))
		}
		lh.setType(literalsBlockTreeless)
	} else {
		if debugEncoder {
			println("New tree, compressed to", len(out), "tree size:", len(b.litEnc.OutTable))
		}
		lh.setType(literalsBlockCompressed)
	}
	// Set sizes
	lh.setSizes(len(out), len(lits), single)
	// +1 for the trailing zero sequence count byte.
	bh.setSize(uint32(len(out) + lh.size() + 1))

	// Write block headers.
	b.output = bh.appendTo(b.output)
	b.output = lh.appendTo(b.output)
	// Add compressed data.
	b.output = append(b.output, out...)
	// No sequences.
	b.output = append(b.output, 0)
	return nil
}
// encodeRLE will encode an RLE block: a single byte repeated 'length' times.
// For RLE blocks the header size field carries the regenerated length.
func (b *blockEnc) encodeRLE(val byte, length uint32) {
	var bh blockHeader
	bh.setLast(b.last)
	bh.setSize(length)
	bh.setType(blockTypeRLE)
	b.output = bh.appendTo(b.output)
	b.output = append(b.output, val)
}
// fuzzFseEncoder can be used to fuzz the FSE encoder.
// Returns 1 when the input exercised a full HistogramFinished ->
// normalizeCount -> writeCount cycle, 0 when it was rejected early.
func fuzzFseEncoder(data []byte) int {
	if len(data) > maxSequences || len(data) < 2 {
		return 0
	}
	enc := fseEncoder{}
	hist := enc.Histogram()
	maxSym := uint8(0)
	for i, v := range data {
		// Restrict symbols to the 0-63 range and write them back.
		v = v & 63
		data[i] = v
		hist[v]++
		if v > maxSym {
			maxSym = v
		}
	}
	if maxSym == 0 {
		// All 0
		return 0
	}
	// NOTE(review): hist[:maxSym] excludes the count of maxSym itself;
	// verify whether hist[:maxSym+1] was intended (cf. genCodes).
	cnt := int(slices.Max(hist[:maxSym]))
	if cnt == len(data) {
		// RLE
		return 0
	}
	enc.HistogramFinished(maxSym, cnt)
	err := enc.normalizeCount(len(data))
	if err != nil {
		return 0
	}
	// writeCount failing after a successful normalize is an internal bug.
	_, err = enc.writeCount(nil)
	if err != nil {
		panic(err)
	}
	return 1
}
// encode will encode the block and append the output in b.output.
// Previous offset codes must be pushed if more blocks are expected.
//
// org is the original (uncompressed) input, used as a raw fallback when
// compression does not pay off; raw disables literal compression and
// rawAllLits is forwarded to encodeLits for sequence-free blocks.
func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error {
	if len(b.sequences) == 0 {
		return b.encodeLits(b.literals, rawAllLits)
	}
	if len(b.sequences) == 1 && len(org) > 0 && len(b.literals) <= 1 {
		// Check common RLE cases.
		seq := b.sequences[0]
		if seq.litLen == uint32(len(b.literals)) && seq.offset-3 == 1 {
			// Offset == 1 and 0 or 1 literals.
			b.encodeRLE(org[0], b.sequences[0].matchLen+zstdMinMatch+seq.litLen)
			return nil
		}
	}

	// We want some difference to at least account for the headers.
	saved := b.size - len(b.literals) - (b.size >> 6)
	if saved < 16 {
		if org == nil {
			return errIncompressible
		}
		b.popOffsets()
		return b.encodeLits(org, rawAllLits)
	}

	var bh blockHeader
	var lh literalsHeader
	bh.setLast(b.last)
	bh.setType(blockTypeCompressed)
	// Store offset of the block header. Needed when we know the size.
	bhOffset := len(b.output)
	b.output = bh.appendTo(b.output)

	var (
		out            []byte
		reUsed, single bool
		err            error
	)
	if b.dictLitEnc != nil {
		// Seed the literal encoder with the pending dictionary table.
		b.litEnc.TransferCTable(b.dictLitEnc)
		b.litEnc.Reuse = huff0.ReusePolicyAllow
		b.dictLitEnc = nil
	}
	if len(b.literals) >= 1024 && !raw {
		// Use 4 Streams.
		out, reUsed, err = huff0.Compress4X(b.literals, b.litEnc)
	} else if len(b.literals) > 16 && !raw {
		// Use 1 stream
		single = true
		out, reUsed, err = huff0.Compress1X(b.literals, b.litEnc)
	} else {
		err = huff0.ErrIncompressible
	}
	if err == nil && len(out)+5 > len(b.literals) {
		// If we are close, we may still be worse or equal to raw.
		var lh literalsHeader
		lh.setSize(len(b.literals))
		szRaw := lh.size()
		lh.setSizes(len(out), len(b.literals), single)
		szComp := lh.size()
		if len(out)+szComp >= len(b.literals)+szRaw {
			err = huff0.ErrIncompressible
		}
	}
	switch err {
	case huff0.ErrIncompressible:
		lh.setType(literalsBlockRaw)
		lh.setSize(len(b.literals))
		b.output = lh.appendTo(b.output)
		b.output = append(b.output, b.literals...)
		if debugEncoder {
			println("Adding literals RAW, length", len(b.literals))
		}
	case huff0.ErrUseRLE:
		lh.setType(literalsBlockRLE)
		lh.setSize(len(b.literals))
		b.output = lh.appendTo(b.output)
		b.output = append(b.output, b.literals[0])
		if debugEncoder {
			println("Adding literals RLE")
		}
	case nil:
		// Compressed litLen...
		if reUsed {
			if debugEncoder {
				println("reused tree")
			}
			lh.setType(literalsBlockTreeless)
		} else {
			if debugEncoder {
				println("new tree, size:", len(b.litEnc.OutTable))
			}
			lh.setType(literalsBlockCompressed)
			if debugEncoder {
				// Sanity-check that the emitted table can be read back.
				_, _, err := huff0.ReadTable(out, nil)
				if err != nil {
					panic(err)
				}
			}
		}
		lh.setSizes(len(out), len(b.literals), single)
		if debugEncoder {
			printf("Compressed %d literals to %d bytes", len(b.literals), len(out))
			println("Adding literal header:", lh)
		}
		b.output = lh.appendTo(b.output)
		b.output = append(b.output, out...)
		b.litEnc.Reuse = huff0.ReusePolicyAllow
		if debugEncoder {
			println("Adding literals compressed")
		}
	default:
		if debugEncoder {
			println("Adding literals ERROR:", err)
		}
		return err
	}
	// Sequence compression

	// Write the number of sequences
	switch {
	case len(b.sequences) < 128:
		b.output = append(b.output, uint8(len(b.sequences)))
	case len(b.sequences) < 0x7f00: // TODO: this could be wrong
		n := len(b.sequences)
		b.output = append(b.output, 128+uint8(n>>8), uint8(n))
	default:
		n := len(b.sequences) - 0x7f00
		b.output = append(b.output, 255, uint8(n), uint8(n>>8))
	}
	if debugEncoder {
		println("Encoding", len(b.sequences), "sequences")
	}
	b.genCodes()
	llEnc := b.coders.llEnc
	ofEnc := b.coders.ofEnc
	mlEnc := b.coders.mlEnc
	err = llEnc.normalizeCount(len(b.sequences))
	if err != nil {
		return err
	}
	err = ofEnc.normalizeCount(len(b.sequences))
	if err != nil {
		return err
	}
	err = mlEnc.normalizeCount(len(b.sequences))
	if err != nil {
		return err
	}

	// Choose the best compression mode for each type.
	// Will evaluate the new vs predefined and previous.
	chooseComp := func(cur, prev, preDef *fseEncoder) (*fseEncoder, seqCompMode) {
		// See if predefined/previous is better
		hist := cur.count[:cur.symbolLen]
		nSize := cur.approxSize(hist) + cur.maxHeaderSize()
		predefSize := preDef.approxSize(hist)
		prevSize := prev.approxSize(hist)

		// Add a small penalty for new encoders.
		// Don't bother with extremely small (<2 byte gains).
		nSize = nSize + (nSize+2*8*16)>>4
		switch {
		case predefSize <= prevSize && predefSize <= nSize || forcePreDef:
			if debugEncoder {
				println("Using predefined", predefSize>>3, "<=", nSize>>3)
			}
			return preDef, compModePredefined
		case prevSize <= nSize:
			if debugEncoder {
				println("Using previous", prevSize>>3, "<=", nSize>>3)
			}
			return prev, compModeRepeat
		default:
			if debugEncoder {
				println("Using new, predef", predefSize>>3, ". previous:", prevSize>>3, ">", nSize>>3, "header max:", cur.maxHeaderSize()>>3, "bytes")
				println("tl:", cur.actualTableLog, "symbolLen:", cur.symbolLen, "norm:", cur.norm[:cur.symbolLen], "hist", cur.count[:cur.symbolLen])
			}
			return cur, compModeFSE
		}
	}

	// Write compression mode
	var mode uint8
	if llEnc.useRLE {
		mode |= uint8(compModeRLE) << 6
		llEnc.setRLE(b.sequences[0].llCode)
		if debugEncoder {
			println("llEnc.useRLE")
		}
	} else {
		var m seqCompMode
		llEnc, m = chooseComp(llEnc, b.coders.llPrev, &fsePredefEnc[tableLiteralLengths])
		mode |= uint8(m) << 6
	}
	if ofEnc.useRLE {
		mode |= uint8(compModeRLE) << 4
		ofEnc.setRLE(b.sequences[0].ofCode)
		if debugEncoder {
			println("ofEnc.useRLE")
		}
	} else {
		var m seqCompMode
		ofEnc, m = chooseComp(ofEnc, b.coders.ofPrev, &fsePredefEnc[tableOffsets])
		mode |= uint8(m) << 4
	}
	if mlEnc.useRLE {
		mode |= uint8(compModeRLE) << 2
		mlEnc.setRLE(b.sequences[0].mlCode)
		if debugEncoder {
			println("mlEnc.useRLE, code: ", b.sequences[0].mlCode, "value", b.sequences[0].matchLen)
		}
	} else {
		var m seqCompMode
		mlEnc, m = chooseComp(mlEnc, b.coders.mlPrev, &fsePredefEnc[tableMatchLengths])
		mode |= uint8(m) << 2
	}
	b.output = append(b.output, mode)
	if debugEncoder {
		printf("Compression modes: 0b%b", mode)
	}
	b.output, err = llEnc.writeCount(b.output)
	if err != nil {
		return err
	}
	start := len(b.output)
	b.output, err = ofEnc.writeCount(b.output)
	if err != nil {
		return err
	}
	// Debug-only dump, permanently disabled.
	if false {
		println("block:", b.output[start:], "tablelog", ofEnc.actualTableLog, "maxcount:", ofEnc.maxCount)
		fmt.Printf("selected TableLog: %d, Symbol length: %d\n", ofEnc.actualTableLog, ofEnc.symbolLen)
		for i, v := range ofEnc.norm[:ofEnc.symbolLen] {
			fmt.Printf("%3d: %5d -> %4d \n", i, ofEnc.count[i], v)
		}
	}
	b.output, err = mlEnc.writeCount(b.output)
	if err != nil {
		return err
	}

	// Maybe in block?
	wr := &b.wr
	wr.reset(b.output)

	var ll, of, ml cState

	// Current sequence
	seq := len(b.sequences) - 1
	s := b.sequences[seq]
	llEnc.setBits(llBitsTable[:])
	mlEnc.setBits(mlBitsTable[:])
	ofEnc.setBits(nil)

	llTT, ofTT, mlTT := llEnc.ct.symbolTT[:256], ofEnc.ct.symbolTT[:256], mlEnc.ct.symbolTT[:256]

	// We have 3 bounds checks here (and in the loop).
	// Since we are iterating backwards it is kinda hard to avoid.
	llB, ofB, mlB := llTT[s.llCode], ofTT[s.ofCode], mlTT[s.mlCode]
	ll.init(wr, &llEnc.ct, llB)
	of.init(wr, &ofEnc.ct, ofB)
	wr.flush32()
	ml.init(wr, &mlEnc.ct, mlB)

	// Each of these lookups also generates a bounds check.
	wr.addBits32NC(s.litLen, llB.outBits)
	wr.addBits32NC(s.matchLen, mlB.outBits)
	wr.flush32()
	wr.addBits32NC(s.offset, ofB.outBits)
	if debugSequences {
		println("Encoded seq", seq, s, "codes:", s.llCode, s.mlCode, s.ofCode, "states:", ll.state, ml.state, of.state, "bits:", llB, mlB, ofB)
	}
	seq--
	// Store sequences in reverse...
	for seq >= 0 {
		s = b.sequences[seq]

		ofB := ofTT[s.ofCode]
		wr.flush32() // tablelog max is below 8 for each, so it will fill max 24 bits.
		//of.encode(ofB)
		nbBitsOut := (uint32(of.state) + ofB.deltaNbBits) >> 16
		dstState := int32(of.state>>(nbBitsOut&15)) + int32(ofB.deltaFindState)
		wr.addBits16NC(of.state, uint8(nbBitsOut))
		of.state = of.stateTable[dstState]

		// Accumulate extra bits.
		outBits := ofB.outBits & 31
		extraBits := uint64(s.offset & bitMask32[outBits])
		extraBitsN := outBits

		mlB := mlTT[s.mlCode]
		//ml.encode(mlB)
		nbBitsOut = (uint32(ml.state) + mlB.deltaNbBits) >> 16
		dstState = int32(ml.state>>(nbBitsOut&15)) + int32(mlB.deltaFindState)
		wr.addBits16NC(ml.state, uint8(nbBitsOut))
		ml.state = ml.stateTable[dstState]

		outBits = mlB.outBits & 31
		extraBits = extraBits<<outBits | uint64(s.matchLen&bitMask32[outBits])
		extraBitsN += outBits

		llB := llTT[s.llCode]
		//ll.encode(llB)
		nbBitsOut = (uint32(ll.state) + llB.deltaNbBits) >> 16
		dstState = int32(ll.state>>(nbBitsOut&15)) + int32(llB.deltaFindState)
		wr.addBits16NC(ll.state, uint8(nbBitsOut))
		ll.state = ll.stateTable[dstState]

		outBits = llB.outBits & 31
		extraBits = extraBits<<outBits | uint64(s.litLen&bitMask32[outBits])
		extraBitsN += outBits

		wr.flush32()
		wr.addBits64NC(extraBits, extraBitsN)

		if debugSequences {
			println("Encoded seq", seq, s)
		}

		seq--
	}
	ml.flush(mlEnc.actualTableLog)
	of.flush(ofEnc.actualTableLog)
	ll.flush(llEnc.actualTableLog)
	wr.close()
	b.output = wr.out

	// Maybe even add a bigger margin.
	if len(b.output)-3-bhOffset >= b.size {
		// Discard and encode as raw block.
		b.output = b.encodeRawTo(b.output[:bhOffset], org)
		b.popOffsets()
		b.litEnc.Reuse = huff0.ReusePolicyNone
		return nil
	}

	// Size is output minus block header.
	bh.setSize(uint32(len(b.output)-bhOffset) - 3)
	if debugEncoder {
		println("Rewriting block header", bh)
	}
	// Overwrite the 3 reserved header bytes in place now that the size is known.
	_ = bh.appendTo(b.output[bhOffset:bhOffset])
	b.coders.setPrev(llEnc, mlEnc, ofEnc)
	return nil
}
// errIncompressible is returned by encode when compression does not pay off
// and no original input was supplied to fall back to a raw block.
var errIncompressible = errors.New("incompressible")
// genCodes computes the literal-length, offset and match-length codes for
// every sequence and fills the histograms of the three FSE encoders.
func (b *blockEnc) genCodes() {
	if len(b.sequences) == 0 {
		// nothing to do
		return
	}
	if len(b.sequences) > math.MaxUint16 {
		panic("can only encode up to 64K sequences")
	}
	// No bounds checks after here:
	llH := b.coders.llEnc.Histogram()
	ofH := b.coders.ofEnc.Histogram()
	mlH := b.coders.mlEnc.Histogram()
	for i := range llH {
		llH[i] = 0
	}
	for i := range ofH {
		ofH[i] = 0
	}
	for i := range mlH {
		mlH[i] = 0
	}

	var llMax, ofMax, mlMax uint8
	for i := range b.sequences {
		seq := &b.sequences[i]
		v := llCode(seq.litLen)
		seq.llCode = v
		llH[v]++
		if v > llMax {
			llMax = v
		}
		v = ofCode(seq.offset)
		seq.ofCode = v
		ofH[v]++
		if v > ofMax {
			ofMax = v
		}
		v = mlCode(seq.matchLen)
		seq.mlCode = v
		mlH[v]++
		if v > mlMax {
			mlMax = v
			if debugAsserts && mlMax > maxMatchLengthSymbol {
				panic(fmt.Errorf("mlMax > maxMatchLengthSymbol (%d), matchlen: %d", mlMax, seq.matchLen))
			}
		}
	}
	if debugAsserts && mlMax > maxMatchLengthSymbol {
		panic(fmt.Errorf("mlMax > maxMatchLengthSymbol (%d)", mlMax))
	}
	if debugAsserts && ofMax > maxOffsetBits {
		panic(fmt.Errorf("ofMax > maxOffsetBits (%d)", ofMax))
	}
	if debugAsserts && llMax > maxLiteralLengthSymbol {
		panic(fmt.Errorf("llMax > maxLiteralLengthSymbol (%d)", llMax))
	}
	// Hand the highest symbol and the largest single count to each encoder.
	b.coders.mlEnc.HistogramFinished(mlMax, int(slices.Max(mlH[:mlMax+1])))
	b.coders.ofEnc.HistogramFinished(ofMax, int(slices.Max(ofH[:ofMax+1])))
	b.coders.llEnc.HistogramFinished(llMax, int(slices.Max(llH[:llMax+1])))
}
// Code generated by "stringer -type=blockType,literalsBlockType,seqCompMode,tableIndex"; DO NOT EDIT.
package zstd
import "strconv"
func _() {
	// An "invalid array index" compiler error signifies that the constant values have changed.
	// Re-run the stringer command to generate them again.
	var x [1]struct{}
	_ = x[blockTypeRaw-0]
	_ = x[blockTypeRLE-1]
	_ = x[blockTypeCompressed-2]
	_ = x[blockTypeReserved-3]
}

const _blockType_name = "blockTypeRawblockTypeRLEblockTypeCompressedblockTypeReserved"

var _blockType_index = [...]uint8{0, 12, 24, 43, 60}

// String returns the name of the blockType constant (stringer-generated).
func (i blockType) String() string {
	if i >= blockType(len(_blockType_index)-1) {
		return "blockType(" + strconv.FormatInt(int64(i), 10) + ")"
	}
	return _blockType_name[_blockType_index[i]:_blockType_index[i+1]]
}
func _() {
	// An "invalid array index" compiler error signifies that the constant values have changed.
	// Re-run the stringer command to generate them again.
	var x [1]struct{}
	_ = x[literalsBlockRaw-0]
	_ = x[literalsBlockRLE-1]
	_ = x[literalsBlockCompressed-2]
	_ = x[literalsBlockTreeless-3]
}

const _literalsBlockType_name = "literalsBlockRawliteralsBlockRLEliteralsBlockCompressedliteralsBlockTreeless"

var _literalsBlockType_index = [...]uint8{0, 16, 32, 55, 76}

// String returns the name of the literalsBlockType constant (stringer-generated).
func (i literalsBlockType) String() string {
	if i >= literalsBlockType(len(_literalsBlockType_index)-1) {
		return "literalsBlockType(" + strconv.FormatInt(int64(i), 10) + ")"
	}
	return _literalsBlockType_name[_literalsBlockType_index[i]:_literalsBlockType_index[i+1]]
}
func _() {
	// An "invalid array index" compiler error signifies that the constant values have changed.
	// Re-run the stringer command to generate them again.
	var x [1]struct{}
	_ = x[compModePredefined-0]
	_ = x[compModeRLE-1]
	_ = x[compModeFSE-2]
	_ = x[compModeRepeat-3]
}

const _seqCompMode_name = "compModePredefinedcompModeRLEcompModeFSEcompModeRepeat"

var _seqCompMode_index = [...]uint8{0, 18, 29, 40, 54}

// String returns the name of the seqCompMode constant (stringer-generated).
func (i seqCompMode) String() string {
	if i >= seqCompMode(len(_seqCompMode_index)-1) {
		return "seqCompMode(" + strconv.FormatInt(int64(i), 10) + ")"
	}
	return _seqCompMode_name[_seqCompMode_index[i]:_seqCompMode_index[i+1]]
}
func _() {
	// An "invalid array index" compiler error signifies that the constant values have changed.
	// Re-run the stringer command to generate them again.
	var x [1]struct{}
	_ = x[tableLiteralLengths-0]
	_ = x[tableOffsets-1]
	_ = x[tableMatchLengths-2]
}

const _tableIndex_name = "tableLiteralLengthstableOffsetstableMatchLengths"

var _tableIndex_index = [...]uint8{0, 19, 31, 48}

// String returns the name of the tableIndex constant (stringer-generated).
func (i tableIndex) String() string {
	if i >= tableIndex(len(_tableIndex_index)-1) {
		return "tableIndex(" + strconv.FormatInt(int64(i), 10) + ")"
	}
	return _tableIndex_name[_tableIndex_index[i]:_tableIndex_index[i+1]]
}
// Copyright 2019+ Klaus Post. All rights reserved.
// License information can be found in the LICENSE file.
// Based on work by Yann Collet, released under BSD License.
package zstd
import (
"fmt"
"io"
)
// byteBuffer abstracts a source of bytes; implemented below by byteBuf
// (in-memory slice) and readerWrapper (io.Reader).
type byteBuffer interface {
	// Read up to 8 bytes.
	// Returns io.ErrUnexpectedEOF if this cannot be satisfied.
	readSmall(n int) ([]byte, error)

	// Read >8 bytes.
	// MAY use the destination slice.
	readBig(n int, dst []byte) ([]byte, error)

	// Read a single byte.
	readByte() (byte, error)

	// Skip n bytes.
	skipN(n int64) error
}
// byteBuf implements byteBuffer on top of an in-memory byte slice.
// Reads consume from the front of the slice.
type byteBuf []byte

// readSmall returns the next n (<=8) bytes, advancing the buffer.
func (b *byteBuf) readSmall(n int) ([]byte, error) {
	if debugAsserts && n > 8 {
		panic(fmt.Errorf("small read > 8 (%d). use readBig", n))
	}
	buf := *b
	if n > len(buf) {
		return nil, io.ErrUnexpectedEOF
	}
	out := buf[:n]
	*b = buf[n:]
	return out, nil
}

// readBig returns the next n bytes; dst is ignored since no copy is needed.
func (b *byteBuf) readBig(n int, dst []byte) ([]byte, error) {
	buf := *b
	if n > len(buf) {
		return nil, io.ErrUnexpectedEOF
	}
	out := buf[:n]
	*b = buf[n:]
	return out, nil
}

// readByte returns the next single byte, advancing the buffer.
func (b *byteBuf) readByte() (byte, error) {
	buf := *b
	if len(buf) == 0 {
		return 0, io.ErrUnexpectedEOF
	}
	v := buf[0]
	*b = buf[1:]
	return v, nil
}

// skipN discards the next n bytes. Negative n is rejected.
func (b *byteBuf) skipN(n int64) error {
	if n < 0 {
		return fmt.Errorf("negative skip (%d) requested", n)
	}
	buf := *b
	if n > int64(len(buf)) {
		return io.ErrUnexpectedEOF
	}
	*b = buf[n:]
	return nil
}
// readerWrapper implements byteBuffer on top of an io.Reader,
// using a small fixed scratch buffer for short reads.
type readerWrapper struct {
	r   io.Reader
	tmp [8]byte
}

// readSmall reads exactly n (<=8) bytes into the scratch buffer.
func (r *readerWrapper) readSmall(n int) ([]byte, error) {
	if debugAsserts && n > 8 {
		panic(fmt.Errorf("small read > 8 (%d). use readBig", n))
	}
	got, err := io.ReadFull(r.r, r.tmp[:n])
	if err == nil {
		return r.tmp[:n], nil
	}
	// We only really care about the actual bytes read.
	if err == io.EOF {
		return nil, io.ErrUnexpectedEOF
	}
	if debugDecoder {
		println("readSmall: got", got, "want", n, "err", err)
	}
	return nil, err
}

// readBig reads exactly n bytes, reusing dst when it is large enough.
func (r *readerWrapper) readBig(n int, dst []byte) ([]byte, error) {
	if cap(dst) < n {
		dst = make([]byte, n)
	}
	got, err := io.ReadFull(r.r, dst[:n])
	if err == io.EOF && n > 0 {
		err = io.ErrUnexpectedEOF
	}
	return dst[:got], err
}

// readByte reads a single byte from the underlying reader.
func (r *readerWrapper) readByte() (byte, error) {
	got, err := io.ReadFull(r.r, r.tmp[:1])
	switch {
	case err == io.EOF:
		return 0, io.ErrUnexpectedEOF
	case err != nil:
		return 0, err
	case got != 1:
		return 0, io.ErrUnexpectedEOF
	}
	return r.tmp[0], nil
}

// skipN discards n bytes from the underlying reader.
func (r *readerWrapper) skipN(n int64) error {
	copied, err := io.CopyN(io.Discard, r.r, n)
	if copied != n {
		return io.ErrUnexpectedEOF
	}
	return err
}
// Copyright 2019+ Klaus Post. All rights reserved.
// License information can be found in the LICENSE file.
// Based on work by Yann Collet, released under BSD License.
package zstd
// byteReader provides a byte reader that reads
// little endian values from a byte stream.
// The input stream is manually advanced.
// The reader performs no bounds checks.
type byteReader struct {
	b   []byte
	off int
}

// advance the stream b n bytes.
func (b *byteReader) advance(n uint) {
	b.off += int(n)
}

// overread returns whether we have advanced too far.
func (b *byteReader) overread() bool {
	return b.off > len(b.b)
}

// Int32 returns a little endian int32 starting at current offset.
func (b byteReader) Int32() int32 {
	v := b.b[b.off : b.off+4]
	return int32(v[0]) | int32(v[1])<<8 | int32(v[2])<<16 | int32(v[3])<<24
}

// Uint8 returns the next byte.
func (b *byteReader) Uint8() uint8 {
	return b.b[b.off]
}

// Uint32 returns a little endian uint32 starting at current offset.
func (b byteReader) Uint32() uint32 {
	if left := b.remain(); left < 4 {
		// Very rare corner case: assemble the value from the buffer tail.
		var v uint32
		for i := 1; i <= left; i++ {
			v = v<<8 | uint32(b.b[len(b.b)-i])
		}
		return v
	}
	v := b.b[b.off : b.off+4]
	return uint32(v[0]) | uint32(v[1])<<8 | uint32(v[2])<<16 | uint32(v[3])<<24
}

// Uint32NC returns a little endian uint32 starting at current offset.
// The caller must be sure if there are at least 4 bytes left.
func (b byteReader) Uint32NC() uint32 {
	v := b.b[b.off : b.off+4]
	return uint32(v[0]) | uint32(v[1])<<8 | uint32(v[2])<<16 | uint32(v[3])<<24
}

// unread returns the unread portion of the input.
func (b byteReader) unread() []byte {
	return b.b[b.off:]
}

// remain will return the number of bytes remaining.
func (b byteReader) remain() int {
	return len(b.b) - b.off
}
// Copyright 2020+ Klaus Post. All rights reserved.
// License information can be found in the LICENSE file.
package zstd
import (
"encoding/binary"
"errors"
"io"
)
// HeaderMaxSize is the maximum size of a Frame and Block Header.
// If less is sent to Header.Decode it *may* still contain enough information.
// The value is the frame-header budget (14) plus one 3-byte block header.
const HeaderMaxSize = 14 + 3
// Header contains information about the first frame and block within that.
type Header struct {
	// SingleSegment specifies whether the data is to be decompressed into a
	// single contiguous memory segment.
	// It implies that WindowSize is invalid and that FrameContentSize is valid.
	SingleSegment bool

	// WindowSize is the window of data to keep while decoding.
	// Will only be set if SingleSegment is false.
	WindowSize uint64

	// DictionaryID is the ID of the dictionary the frame requires.
	// If 0, no dictionary.
	DictionaryID uint32

	// HasFCS specifies whether FrameContentSize has a valid value.
	HasFCS bool

	// FrameContentSize is the expected uncompressed size of the entire frame.
	FrameContentSize uint64

	// Skippable will be true if the frame is meant to be skipped.
	// This implies that FirstBlock.OK is false.
	Skippable bool

	// SkippableID is the user-specific ID for the skippable frame.
	// Valid values are between 0 to 15, inclusive.
	SkippableID int

	// SkippableSize is the length of the user data to skip following
	// the header.
	SkippableSize uint32

	// HeaderSize is the raw size of the frame header.
	//
	// For normal frames, it includes the size of the magic number and
	// the size of the header (per section 3.1.1.1).
	// It does not include the size for any data blocks (section 3.1.1.2) nor
	// the size for the trailing content checksum.
	//
	// For skippable frames, this counts the size of the magic number
	// along with the size of the size field of the payload.
	// It does not include the size of the skippable payload itself.
	// The total frame size is the HeaderSize plus the SkippableSize.
	HeaderSize int

	// FirstBlock holds information about the first block of the frame.
	FirstBlock struct {
		// OK will be set if first block could be decoded.
		OK bool

		// Last reports whether this is the last block of the frame.
		Last bool

		// Compressed reports whether the data is compressed.
		// If true CompressedSize will be populated.
		// Unfortunately DecompressedSize cannot be determined
		// without decoding the blocks.
		Compressed bool

		// DecompressedSize is the expected decompressed size of the block.
		// Will be 0 if it cannot be determined.
		DecompressedSize int

		// CompressedSize of the data in the block.
		// Does not include the block header.
		// Will be equal to DecompressedSize if not Compressed.
		CompressedSize int
	}

	// HasCheckSum is set when a checksum is present for the frame content.
	// The checksum field at the end is always 4 bytes long.
	HasCheckSum bool
}
// Decode the header from the beginning of the stream.
// This will decode the frame header and the first block header if enough bytes are provided.
// It is recommended to provide at least HeaderMaxSize bytes.
// If the frame header cannot be read an error will be returned.
// If there isn't enough input, io.ErrUnexpectedEOF is returned.
// The FirstBlock.OK will indicate if enough information was available to decode the first block header.
func (h *Header) Decode(in []byte) error {
	// Delegate to DecodeAndStrip; the remaining bytes are not needed here.
	if _, err := h.DecodeAndStrip(in); err != nil {
		return err
	}
	return nil
}
// DecodeAndStrip will decode the header from the beginning of the stream
// and on success return the remaining bytes.
// This will decode the frame header and the first block header if enough bytes are provided.
// It is recommended to provide at least HeaderMaxSize bytes.
// If the frame header cannot be read an error will be returned.
// If there isn't enough input, io.ErrUnexpectedEOF is returned.
// The FirstBlock.OK will indicate if enough information was available to decode the first block header.
func (h *Header) DecodeAndStrip(in []byte) (remain []byte, err error) {
	// Start from a clean slate so fields from a previous call cannot leak.
	*h = Header{}
	if len(in) < 4 {
		return nil, io.ErrUnexpectedEOF
	}
	h.HeaderSize += 4
	b, in := in[:4], in[4:]
	if string(b) != frameMagic {
		// Not a regular frame. It may be a skippable frame, whose magic
		// has the frame's ID in the low nibble of the first byte.
		if string(b[1:4]) != skippableFrameMagic || b[0]&0xf0 != 0x50 {
			return nil, ErrMagicMismatch
		}
		if len(in) < 4 {
			return nil, io.ErrUnexpectedEOF
		}
		h.HeaderSize += 4
		h.Skippable = true
		h.SkippableID = int(b[0] & 0xf)
		h.SkippableSize = binary.LittleEndian.Uint32(in)
		return in[4:], nil
	}

	// Read Window_Descriptor
	// https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#window_descriptor
	if len(in) < 1 {
		return nil, io.ErrUnexpectedEOF
	}
	fhd, in := in[0], in[1:]
	h.HeaderSize++
	h.SingleSegment = fhd&(1<<5) != 0
	h.HasCheckSum = fhd&(1<<2) != 0
	if fhd&(1<<3) != 0 {
		return nil, errors.New("reserved bit set on frame header")
	}

	if !h.SingleSegment {
		if len(in) < 1 {
			return nil, io.ErrUnexpectedEOF
		}
		var wd byte
		wd, in = in[0], in[1:]
		h.HeaderSize++
		// Window size is exponent/mantissa encoded: a base of 2^(10+exp)
		// plus eighths of the base selected by the low 3 bits.
		windowLog := 10 + (wd >> 3)
		windowBase := uint64(1) << windowLog
		windowAdd := (windowBase / 8) * uint64(wd&0x7)
		h.WindowSize = windowBase + windowAdd
	}

	// Read Dictionary_ID
	// https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary_id
	if size := fhd & 3; size != 0 {
		// Field size 3 means a 4-byte dictionary id.
		if size == 3 {
			size = 4
		}
		if len(in) < int(size) {
			return nil, io.ErrUnexpectedEOF
		}
		b, in = in[:size], in[size:]
		h.HeaderSize += int(size)
		// Assemble the little-endian dictionary id from 1, 2 or 4 bytes.
		switch len(b) {
		case 1:
			h.DictionaryID = uint32(b[0])
		case 2:
			h.DictionaryID = uint32(b[0]) | (uint32(b[1]) << 8)
		case 4:
			h.DictionaryID = uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24)
		}
	}

	// Read Frame_Content_Size
	// https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#frame_content_size
	var fcsSize int
	v := fhd >> 6
	switch v {
	case 0:
		// With SingleSegment set, a 0 field still carries a 1-byte FCS.
		if h.SingleSegment {
			fcsSize = 1
		}
	default:
		fcsSize = 1 << v
	}

	if fcsSize > 0 {
		h.HasFCS = true
		if len(in) < fcsSize {
			return nil, io.ErrUnexpectedEOF
		}
		b, in = in[:fcsSize], in[fcsSize:]
		h.HeaderSize += int(fcsSize)
		switch len(b) {
		case 1:
			h.FrameContentSize = uint64(b[0])
		case 2:
			// When FCS_Field_Size is 2, the offset of 256 is added.
			h.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) + 256
		case 4:
			h.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) | (uint64(b[2]) << 16) | (uint64(b[3]) << 24)
		case 8:
			d1 := uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24)
			d2 := uint32(b[4]) | (uint32(b[5]) << 8) | (uint32(b[6]) << 16) | (uint32(b[7]) << 24)
			h.FrameContentSize = uint64(d1) | (uint64(d2) << 32)
		}
	}

	// Frame Header done, we will not fail from now on.
	// Anything below is best-effort decoding of the first block header.
	if len(in) < 3 {
		return in, nil
	}
	tmp := in[:3]
	bh := uint32(tmp[0]) | (uint32(tmp[1]) << 8) | (uint32(tmp[2]) << 16)
	h.FirstBlock.Last = bh&1 != 0
	blockType := blockType((bh >> 1) & 3)
	// find size.
	cSize := int(bh >> 3)
	switch blockType {
	case blockTypeReserved:
		// Reserved type: leave FirstBlock.OK false.
		return in, nil
	case blockTypeRLE:
		h.FirstBlock.Compressed = true
		h.FirstBlock.DecompressedSize = cSize
		h.FirstBlock.CompressedSize = 1
	case blockTypeCompressed:
		h.FirstBlock.Compressed = true
		h.FirstBlock.CompressedSize = cSize
	case blockTypeRaw:
		h.FirstBlock.DecompressedSize = cSize
		h.FirstBlock.CompressedSize = cSize
	default:
		panic("Invalid block type")
	}

	h.FirstBlock.OK = true
	return in, nil
}
// AppendTo will append the encoded header to the dst slice.
// There is no error checking performed on the header values.
// AppendTo will append the encoded header to the dst slice.
// There is no error checking performed on the header values.
func (h *Header) AppendTo(dst []byte) ([]byte, error) {
	if h.Skippable {
		// Skippable frame: magic with the ID folded into the low nibble
		// of the first byte, followed by the little-endian 32-bit size.
		magic := [4]byte{0x50 | byte(h.SkippableID&0xf), 0x2a, 0x4d, 0x18}
		dst = append(dst, magic[:]...)
		sz := h.SkippableSize
		dst = append(dst, uint8(sz), uint8(sz>>8), uint8(sz>>16), uint8(sz>>24))
		return dst, nil
	}
	// Regular frame: build a frameHeader and let it serialize itself.
	fh := frameHeader{
		ContentSize:   h.FrameContentSize,
		WindowSize:    uint32(h.WindowSize),
		SingleSegment: h.SingleSegment,
		Checksum:      h.HasCheckSum,
		DictID:        h.DictionaryID,
	}
	return fh.appendTo(dst), nil
}
// Copyright 2019+ Klaus Post. All rights reserved.
// License information can be found in the LICENSE file.
// Based on work by Yann Collet, released under BSD License.
package zstd
import (
"context"
"encoding/binary"
"io"
"sync"
"github.com/klauspost/compress/zstd/internal/xxhash"
)
// Decoder provides decoding of zstandard streams.
// The decoder has been designed to operate without allocations after a warmup.
// This means that you should store the decoder for best performance.
// To re-use a stream decoder, use the Reset(r io.Reader) error to switch to another stream.
// A decoder can safely be re-used even if the previous stream failed.
// To release the resources, you must call the Close() function on a decoder.
type Decoder struct {
	// o holds the resolved options; immutable after NewReader.
	o decoderOptions

	// Unreferenced decoders, ready for use.
	decoders chan *blockDec

	// Current read position used for Reader functionality.
	current decoderState

	// sync stream decoding
	syncStream struct {
		decodedFrame uint64        // bytes decoded of the current frame
		br           readerWrapper // wrapped input reader
		enabled      bool          // true when sync (non-goroutine) decoding is active
		inFrame      bool          // true while inside a frame body
		dstBuf       []byte        // reusable output buffer for sync decodes
	}

	// frame is the streaming frame decoder; lazily created on Reset.
	frame *frameDec

	// Custom dictionaries.
	dicts map[uint32]*dict

	// streamWg is the waitgroup for all streams
	streamWg sync.WaitGroup
}
// decoderState is used for maintaining state when the decoder
// is used for streaming.
type decoderState struct {
	// current block being written to stream.
	decodeOutput

	// output in order to be written to stream.
	output chan decodeOutput

	// cancel remaining output.
	cancel context.CancelFunc

	// crc of current frame
	crc *xxhash.Digest

	// flushed is true when all pending output has been consumed.
	flushed bool
}
var (
	// Check the interfaces we want to support.
	// These are compile-time assertions that *Decoder implements
	// io.WriterTo and io.Reader.
	_ = io.WriterTo(&Decoder{})
	_ = io.Reader(&Decoder{})
)
// NewReader creates a new decoder.
// A nil Reader can be provided in which case Reset can be used to start a decode.
//
// A Decoder can be used in two modes:
//
// 1) As a stream, or
// 2) For stateless decoding using DecodeAll.
//
// Only a single stream can be decoded concurrently, but the same decoder
// can run multiple concurrent stateless decodes. It is even possible to
// use stateless decodes while a stream is being decoded.
//
// The Reset function can be used to initiate a new stream, which will considerably
// reduce the allocations normally caused by NewReader.
// NewReader creates a new decoder.
// A nil Reader can be provided in which case Reset can be used to start a decode.
//
// A Decoder can be used in two modes:
//
//  1. As a stream, or
//  2. For stateless decoding using DecodeAll.
//
// Only a single stream can be decoded concurrently, but the same decoder
// can run multiple concurrent stateless decodes. It is even possible to
// use stateless decodes while a stream is being decoded.
//
// The Reset function can be used to initiate a new stream, which will considerably
// reduce the allocations normally caused by NewReader.
func NewReader(r io.Reader, opts ...DOption) (*Decoder, error) {
	initPredefined()
	d := &Decoder{}
	d.o.setDefault()
	for _, opt := range opts {
		if err := opt(&d.o); err != nil {
			return nil, err
		}
	}
	d.current.crc = xxhash.New()
	d.current.flushed = true

	if r == nil {
		d.current.err = ErrDecoderNilInput
	}

	// Move option-supplied dictionaries into the lookup map.
	d.dicts = make(map[uint32]*dict, len(d.o.dicts))
	for _, dc := range d.o.dicts {
		d.dicts[dc.id] = dc
	}
	d.o.dicts = nil

	// Pre-create the pool of block decoders.
	d.decoders = make(chan *blockDec, d.o.concurrent)
	for i := 0; i < d.o.concurrent; i++ {
		dec := newBlockDec(d.o.lowMem)
		dec.localFrame = newFrameDec(d.o)
		d.decoders <- dec
	}

	if r == nil {
		return d, nil
	}
	return d, d.Reset(r)
}
// Read bytes from the decompressed stream into p.
// Returns the number of bytes read and any error that occurred.
// When the stream is done, io.EOF will be returned.
func (d *Decoder) Read(p []byte) (int, error) {
	var n int
	for {
		// Copy as much buffered output as fits into p.
		if len(d.current.b) > 0 {
			filled := copy(p, d.current.b)
			p = p[filled:]
			d.current.b = d.current.b[filled:]
			n += filled
		}
		if len(p) == 0 {
			break
		}
		if len(d.current.b) == 0 {
			// We have an error and no more data
			if d.current.err != nil {
				break
			}
			// Block for the next output only if nothing has been copied
			// yet; otherwise return the partial read immediately.
			if !d.nextBlock(n == 0) {
				return n, d.current.err
			}
		}
	}
	if len(d.current.b) > 0 {
		if debugDecoder {
			println("returning", n, "still bytes left:", len(d.current.b))
		}
		// Only return error at end of block
		return n, nil
	}
	if d.current.err != nil {
		// Terminal error: release decoders back to the pool.
		d.drainOutput()
	}
	if debugDecoder {
		println("returning", n, d.current.err, len(d.decoders))
	}
	return n, d.current.err
}
// Reset will reset the decoder the supplied stream after the current has finished processing.
// Note that this functionality cannot be used after Close has been called.
// Reset can be called with a nil reader to release references to the previous reader.
// After being called with a nil reader, no other operations than Reset or DecodeAll or Close
// should be used.
func (d *Decoder) Reset(r io.Reader) error {
	if d.current.err == ErrDecoderClosed {
		return d.current.err
	}

	d.drainOutput()

	d.syncStream.br.r = nil
	if r == nil {
		// Release the reader reference but remember the decoder
		// cannot produce output until Reset with a real reader.
		d.current.err = ErrDecoderNilInput
		if len(d.current.b) > 0 {
			d.current.b = d.current.b[:0]
		}
		d.current.flushed = true
		return nil
	}

	// If bytes buffer and < 5MB, do sync decoding anyway.
	if bb, ok := r.(byter); ok && bb.Len() < d.o.decodeBufsBelow && !d.o.limitToCap {
		bb2 := bb
		if debugDecoder {
			println("*bytes.Buffer detected, doing sync decode, len:", bb.Len())
		}
		b := bb2.Bytes()
		var dst []byte
		// Reuse the previous sync output buffer if we have one.
		if cap(d.syncStream.dstBuf) > 0 {
			dst = d.syncStream.dstBuf[:0]
		}

		dst, err := d.DecodeAll(b, dst)
		if err == nil {
			// Whole input decoded; subsequent reads drain dst then EOF.
			err = io.EOF
		}
		// Save output buffer
		d.syncStream.dstBuf = dst
		d.current.b = dst
		d.current.err = err
		d.current.flushed = true
		if debugDecoder {
			println("sync decode to", len(dst), "bytes, err:", err)
		}
		return nil
	}
	// Remove current block.
	d.stashDecoder()
	d.current.decodeOutput = decodeOutput{}
	d.current.err = nil
	d.current.flushed = false
	d.current.d = nil
	d.syncStream.dstBuf = nil

	// Ensure no-one else is still running...
	d.streamWg.Wait()
	if d.frame == nil {
		d.frame = newFrameDec(d.o)
	}

	if d.o.concurrent == 1 {
		// Single decoder: decode on demand, no goroutines.
		return d.startSyncDecoder(r)
	}

	// Async: spin up the stream-decoding pipeline.
	d.current.output = make(chan decodeOutput, d.o.concurrent)
	ctx, cancel := context.WithCancel(context.Background())
	d.current.cancel = cancel
	d.streamWg.Add(1)
	go d.startStreamDecoder(ctx, r, d.current.output)

	return nil
}
// drainOutput will drain the output until errEndOfStream is sent.
// drainOutput will drain the output until errEndOfStream is sent.
// It cancels any in-flight decode, returns the current block decoder to the
// pool, and consumes the output channel so queued decoders are recycled too.
func (d *Decoder) drainOutput() {
	if d.current.cancel != nil {
		if debugDecoder {
			println("cancelling current")
		}
		d.current.cancel()
		d.current.cancel = nil
	}
	if d.current.d != nil {
		if debugDecoder {
			printf("re-adding current decoder %p, decoders: %d", d.current.d, len(d.decoders))
		}
		d.decoders <- d.current.d
		d.current.d = nil
		d.current.b = nil
	}
	if d.current.output == nil || d.current.flushed {
		// Nothing queued; only emit the note when debugging is enabled
		// so the library never writes to stderr unconditionally.
		if debugDecoder {
			println("current already flushed")
		}
		return
	}
	// Drain the channel, returning every queued decoder to the pool.
	for v := range d.current.output {
		if v.d != nil {
			if debugDecoder {
				printf("re-adding decoder %p", v.d)
			}
			d.decoders <- v.d
		}
	}
	d.current.output = nil
	d.current.flushed = true
}
// WriteTo writes data to w until there's no more data to write or when an error occurs.
// The return value n is the number of bytes written.
// Any error encountered during the write is also returned.
func (d *Decoder) WriteTo(w io.Writer) (int64, error) {
	var written int64
	for {
		if len(d.current.b) > 0 {
			wn, werr := w.Write(d.current.b)
			written += int64(wn)
			switch {
			case werr != nil && (d.current.err == nil || d.current.err == io.EOF):
				// Preserve a pre-existing decode error over the write error.
				d.current.err = werr
			case wn != len(d.current.b):
				d.current.err = io.ErrShortWrite
			}
		}
		if d.current.err != nil {
			break
		}
		d.nextBlock(true)
	}
	err := d.current.err
	if err != nil {
		d.drainOutput()
	}
	// io.EOF marks normal end of stream and is not reported to the caller.
	if err == io.EOF {
		err = nil
	}
	return written, err
}
// DecodeAll allows stateless decoding of a blob of bytes.
// Output will be appended to dst, so if the destination size is known
// you can pre-allocate the destination slice to avoid allocations.
// DecodeAll can be used concurrently.
// The Decoder concurrency limits will be respected.
func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) {
	if d.decoders == nil {
		return dst, ErrDecoderClosed
	}

	// Grab a block decoder and frame decoder.
	block := <-d.decoders
	frame := block.localFrame
	initialSize := len(dst)
	defer func() {
		if debugDecoder {
			printf("re-adding decoder: %p", block)
		}
		// Clear references before returning the decoder to the pool so
		// the caller-supplied input is not retained.
		frame.rawInput = nil
		frame.bBuf = nil
		if frame.history.decoders.br != nil {
			frame.history.decoders.br.in = nil
			frame.history.decoders.br.cursor = 0
		}
		d.decoders <- block
	}()
	frame.bBuf = input

	// input may contain several concatenated frames; decode them all.
	for {
		frame.history.reset()
		err := frame.reset(&frame.bBuf)
		if err != nil {
			if err == io.EOF {
				// Clean end of input.
				if debugDecoder {
					println("frame reset return EOF")
				}
				return dst, nil
			}
			return dst, err
		}
		if err = d.setDict(frame); err != nil {
			return nil, err
		}
		if frame.WindowSize > d.o.maxWindowSize {
			if debugDecoder {
				println("window size exceeded:", frame.WindowSize, ">", d.o.maxWindowSize)
			}
			return dst, ErrWindowSizeExceeded
		}
		if frame.FrameContentSize != fcsUnknown {
			// Enforce the total-output limit across all frames decoded so far.
			if frame.FrameContentSize > d.o.maxDecodedSize-uint64(len(dst)-initialSize) {
				if debugDecoder {
					println("decoder size exceeded; fcs:", frame.FrameContentSize, "> mcs:", d.o.maxDecodedSize-uint64(len(dst)-initialSize), "len:", len(dst))
				}
				return dst, ErrDecoderSizeExceeded
			}
			if d.o.limitToCap && frame.FrameContentSize > uint64(cap(dst)-len(dst)) {
				if debugDecoder {
					println("decoder size exceeded; fcs:", frame.FrameContentSize, "> (cap-len)", cap(dst)-len(dst))
				}
				return dst, ErrDecoderSizeExceeded
			}
			// Grow dst up front so the frame decodes without reallocation.
			if cap(dst)-len(dst) < int(frame.FrameContentSize) {
				dst2 := make([]byte, len(dst), len(dst)+int(frame.FrameContentSize)+compressedBlockOverAlloc)
				copy(dst2, dst)
				dst = dst2
			}
		}

		if cap(dst) == 0 && !d.o.limitToCap {
			// Allocate len(input) * 2 by default if nothing is provided
			// and we didn't get frame content size.
			size := len(input) * 2
			// Cap to 1 MB.
			if size > 1<<20 {
				size = 1 << 20
			}
			if uint64(size) > d.o.maxDecodedSize {
				size = int(d.o.maxDecodedSize)
			}
			dst = make([]byte, 0, size)
		}

		dst, err = frame.runDecoder(dst, block)
		if err != nil {
			return dst, err
		}
		if uint64(len(dst)-initialSize) > d.o.maxDecodedSize {
			return dst, ErrDecoderSizeExceeded
		}
		if len(frame.bBuf) == 0 {
			if debugDecoder {
				println("frame dbuf empty")
			}
			break
		}
	}
	return dst, nil
}
// nextBlock returns the next block.
// If an error occurs d.err will be set.
// Optionally the function can block for new output.
// If non-blocking mode is used the returned boolean will be false
// if no data was available without blocking.
func (d *Decoder) nextBlock(blocking bool) (ok bool) {
	if d.current.err != nil {
		// Keep error state.
		return false
	}
	d.current.b = d.current.b[:0]

	// SYNC:
	if d.syncStream.enabled {
		// Sync mode never delivers output without blocking.
		if !blocking {
			return false
		}
		ok = d.nextBlockSync()
		if !ok {
			d.stashDecoder()
		}
		return ok
	}

	//ASYNC:
	d.stashDecoder()
	if blocking {
		d.current.decodeOutput, ok = <-d.current.output
	} else {
		// Non-blocking receive; report false if nothing is ready.
		select {
		case d.current.decodeOutput, ok = <-d.current.output:
		default:
			return false
		}
	}
	if !ok {
		// This should not happen, so signal error state...
		d.current.err = io.ErrUnexpectedEOF
		return false
	}
	next := d.current.decodeOutput
	if next.d != nil && next.d.async.newHist != nil {
		// First block of a new frame: restart the running CRC.
		d.current.crc.Reset()
	}
	if debugDecoder {
		var tmp [4]byte
		binary.LittleEndian.PutUint32(tmp[:], uint32(xxhash.Sum64(next.b)))
		println("got", len(d.current.b), "bytes, error:", d.current.err, "data crc:", tmp)
	}

	if d.o.ignoreChecksum {
		return true
	}

	if len(next.b) > 0 {
		d.current.crc.Write(next.b)
	}
	if next.err == nil && next.d != nil && next.d.hasCRC {
		// Last block of the frame carried a checksum; verify it.
		got := uint32(d.current.crc.Sum64())
		if got != next.d.checkCRC {
			if debugDecoder {
				printf("CRC Check Failed: %08x (got) != %08x (on stream)\n", got, next.d.checkCRC)
			}
			d.current.err = ErrCRCMismatch
		} else {
			if debugDecoder {
				printf("CRC ok %08x\n", got)
			}
		}
	}

	return true
}
// nextBlockSync decodes the next block in synchronous (single-goroutine)
// mode, filling d.current.b with decoded output.
// On failure d.current.err is set and false is returned.
func (d *Decoder) nextBlockSync() (ok bool) {
	if d.current.d == nil {
		d.current.d = <-d.decoders
	}
	for len(d.current.b) == 0 {
		if !d.syncStream.inFrame {
			// Start a new frame: read and validate the frame header.
			d.frame.history.reset()
			d.current.err = d.frame.reset(&d.syncStream.br)
			if d.current.err == nil {
				d.current.err = d.setDict(d.frame)
			}
			if d.current.err != nil {
				return false
			}
			if d.frame.WindowSize > d.o.maxDecodedSize || d.frame.WindowSize > d.o.maxWindowSize {
				d.current.err = ErrDecoderSizeExceeded
				return false
			}

			d.syncStream.decodedFrame = 0
			d.syncStream.inFrame = true
		}
		d.current.err = d.frame.next(d.current.d)
		if d.current.err != nil {
			return false
		}
		d.frame.history.ensureBlock()
		if debugDecoder {
			println("History trimmed:", len(d.frame.history.b), "decoded already:", d.syncStream.decodedFrame)
		}
		histBefore := len(d.frame.history.b)
		d.current.err = d.current.d.decodeBuf(&d.frame.history)

		if d.current.err != nil {
			// Guard debug output so the library does not write to
			// stderr unconditionally (consistent with other prints).
			if debugDecoder {
				println("error after:", d.current.err)
			}
			return false
		}
		// The new output is whatever decodeBuf appended to history.
		d.current.b = d.frame.history.b[histBefore:]
		if debugDecoder {
			println("history after:", len(d.frame.history.b))
		}

		// Check frame size (before CRC)
		d.syncStream.decodedFrame += uint64(len(d.current.b))
		if d.syncStream.decodedFrame > d.frame.FrameContentSize {
			if debugDecoder {
				printf("DecodedFrame (%d) > FrameContentSize (%d)\n", d.syncStream.decodedFrame, d.frame.FrameContentSize)
			}
			d.current.err = ErrFrameSizeExceeded
			return false
		}

		// Check FCS
		if d.current.d.Last && d.frame.FrameContentSize != fcsUnknown && d.syncStream.decodedFrame != d.frame.FrameContentSize {
			if debugDecoder {
				printf("DecodedFrame (%d) != FrameContentSize (%d)\n", d.syncStream.decodedFrame, d.frame.FrameContentSize)
			}
			d.current.err = ErrFrameSizeMismatch
			return false
		}

		// Update/Check CRC
		if d.frame.HasCheckSum {
			if !d.o.ignoreChecksum {
				d.frame.crc.Write(d.current.b)
			}
			if d.current.d.Last {
				// Last block: verify (or skip over) the stored checksum.
				if !d.o.ignoreChecksum {
					d.current.err = d.frame.checkCRC()
				} else {
					d.current.err = d.frame.consumeCRC()
				}
				if d.current.err != nil {
					if debugDecoder {
						println("CRC error:", d.current.err)
					}
					return false
				}
			}
		}
		d.syncStream.inFrame = !d.current.d.Last
	}
	return true
}
// stashDecoder returns the currently held block decoder, if any,
// to the shared pool.
func (d *Decoder) stashDecoder() {
	cur := d.current.d
	if cur == nil {
		return
	}
	if debugDecoder {
		printf("re-adding current decoder %p", cur)
	}
	d.decoders <- cur
	d.current.d = nil
}
// Close will release all resources.
// It is NOT possible to reuse the decoder after this.
// Close will release all resources.
// It is NOT possible to reuse the decoder after this.
func (d *Decoder) Close() {
	if d.current.err == ErrDecoderClosed {
		// Already closed; nothing to do.
		return
	}
	d.drainOutput()
	if cancel := d.current.cancel; cancel != nil {
		cancel()
		d.streamWg.Wait()
		d.current.cancel = nil
	}
	if d.decoders != nil {
		// Close the pool channel and release every pooled decoder.
		close(d.decoders)
		for dec := range d.decoders {
			dec.Close()
		}
		d.decoders = nil
	}
	if cur := d.current.d; cur != nil {
		cur.Close()
		d.current.d = nil
	}
	d.current.err = ErrDecoderClosed
}
// IOReadCloser returns the decoder as an io.ReadCloser for convenience.
// Any changes to the decoder will be reflected, so the returned ReadCloser
// can be reused along with the decoder.
// io.WriterTo is also supported by the returned ReadCloser.
func (d *Decoder) IOReadCloser() io.ReadCloser {
	// The wrapper forwards Read/WriteTo and maps Close to d.Close.
	return closeWrapper{d: d}
}
// closeWrapper wraps a function call as a closer.
type closeWrapper struct {
	d *Decoder // the wrapped decoder; all calls forward to it
}
// WriteTo forwards WriteTo calls to the decoder.
// It reports the number of bytes written and any write/decode error.
func (c closeWrapper) WriteTo(w io.Writer) (n int64, err error) {
	return c.d.WriteTo(w)
}
// Read forwards read calls to the decoder.
// Semantics are those of Decoder.Read (io.Reader contract).
func (c closeWrapper) Read(p []byte) (n int, err error) {
	return c.d.Read(p)
}
// Close closes the decoder.
// Decoder.Close never fails, so nil is always returned.
func (c closeWrapper) Close() error {
	c.d.Close()
	return nil
}
// decodeOutput is one unit of decoded output passed from the decode
// goroutines to the reader side.
type decodeOutput struct {
	d   *blockDec // decoder that produced b; returned to the pool after use
	b   []byte    // decoded bytes for this block
	err error     // error encountered while producing this output, if any
}
// startSyncDecoder switches the decoder into synchronous mode:
// no goroutines are spawned and blocks are decoded on demand
// by nextBlockSync.
func (d *Decoder) startSyncDecoder(r io.Reader) error {
	d.frame.history.reset()
	ss := &d.syncStream
	ss.br = readerWrapper{r: r}
	ss.inFrame = false
	ss.enabled = true
	ss.decodedFrame = 0
	return nil
}
// Create Decoder:
// ASYNC:
// Spawn 3 go routines.
// 0: Read frames and decode block literals.
// 1: Decode sequences.
// 2: Execute sequences, send to output.
func (d *Decoder) startStreamDecoder(ctx context.Context, r io.Reader, output chan decodeOutput) {
	defer d.streamWg.Done()
	br := readerWrapper{r: r}

	// Channels connecting the pipeline stages; buffered so stages
	// can run ahead by up to d.o.concurrent blocks.
	var seqDecode = make(chan *blockDec, d.o.concurrent)
	var seqExecute = make(chan *blockDec, d.o.concurrent)

	// Async 1: Decode sequences...
	go func() {
		var hist history
		var hasErr bool

		for block := range seqDecode {
			if hasErr {
				// After an error, just pass blocks through so they
				// are eventually returned to the pool downstream.
				if block != nil {
					seqExecute <- block
				}
				continue
			}
			if block.async.newHist != nil {
				// First block of a frame: adopt the fresh history state.
				if debugDecoder {
					println("Async 1: new history, recent:", block.async.newHist.recentOffsets)
				}
				hist.reset()
				hist.decoders = block.async.newHist.decoders
				hist.recentOffsets = block.async.newHist.recentOffsets
				hist.windowSize = block.async.newHist.windowSize
				if block.async.newHist.dict != nil {
					hist.setDict(block.async.newHist.dict)
				}
			}
			if block.err != nil || block.Type != blockTypeCompressed {
				hasErr = block.err != nil
				seqExecute <- block
				continue
			}

			hist.decoders.literals = block.async.literals
			block.err = block.prepareSequences(block.async.seqData, &hist)
			if debugDecoder && block.err != nil {
				println("prepareSequences returned:", block.err)
			}
			hasErr = block.err != nil
			if block.err == nil {
				block.err = block.decodeSequences(&hist)
				if debugDecoder && block.err != nil {
					println("decodeSequences returned:", block.err)
				}
				hasErr = block.err != nil
				// block.async.sequence = hist.decoders.seq[:hist.decoders.nSeqs]
				block.async.seqSize = hist.decoders.seqSize
			}
			seqExecute <- block
		}
		close(seqExecute)
		hist.reset()
	}()

	var wg sync.WaitGroup
	wg.Add(1)

	// Async 3: Execute sequences...
	frameHistCache := d.frame.history.b
	go func() {
		var hist history
		var decodedFrame uint64
		var fcs uint64
		var hasErr bool
		for block := range seqExecute {
			out := decodeOutput{err: block.err, d: block}
			if block.err != nil || hasErr {
				hasErr = true
				output <- out
				continue
			}
			if block.async.newHist != nil {
				// New frame: reset history and (re)size the frame buffer,
				// reusing the cached buffer when large enough.
				if debugDecoder {
					println("Async 2: new history")
				}
				hist.reset()
				hist.windowSize = block.async.newHist.windowSize
				hist.allocFrameBuffer = block.async.newHist.allocFrameBuffer
				if block.async.newHist.dict != nil {
					hist.setDict(block.async.newHist.dict)
				}

				if cap(hist.b) < hist.allocFrameBuffer {
					if cap(frameHistCache) >= hist.allocFrameBuffer {
						hist.b = frameHistCache
					} else {
						hist.b = make([]byte, 0, hist.allocFrameBuffer)
						println("Alloc history sized", hist.allocFrameBuffer)
					}
				}
				hist.b = hist.b[:0]
				fcs = block.async.fcs
				decodedFrame = 0
			}
			do := decodeOutput{err: block.err, d: block}
			switch block.Type {
			case blockTypeRLE:
				if debugDecoder {
					println("add rle block length:", block.RLESize)
				}

				if cap(block.dst) < int(block.RLESize) {
					if block.lowMem {
						block.dst = make([]byte, block.RLESize)
					} else {
						block.dst = make([]byte, maxCompressedBlockSize)
					}
				}
				// Expand the single RLE byte to the full block length.
				block.dst = block.dst[:block.RLESize]
				v := block.data[0]
				for i := range block.dst {
					block.dst[i] = v
				}
				hist.append(block.dst)
				do.b = block.dst
			case blockTypeRaw:
				if debugDecoder {
					println("add raw block length:", len(block.data))
				}
				hist.append(block.data)
				do.b = block.data
			case blockTypeCompressed:
				if debugDecoder {
					println("execute with history length:", len(hist.b), "window:", hist.windowSize)
				}
				hist.decoders.seqSize = block.async.seqSize
				hist.decoders.literals = block.async.literals
				do.err = block.executeSequences(&hist)
				hasErr = do.err != nil
				if debugDecoder && hasErr {
					println("executeSequences returned:", do.err)
				}
				do.b = block.dst
			}
			if !hasErr {
				// Validate decoded size against the frame content size.
				decodedFrame += uint64(len(do.b))
				if decodedFrame > fcs {
					println("fcs exceeded", block.Last, fcs, decodedFrame)
					do.err = ErrFrameSizeExceeded
					hasErr = true
				} else if block.Last && fcs != fcsUnknown && decodedFrame != fcs {
					do.err = ErrFrameSizeMismatch
					hasErr = true
				} else {
					if debugDecoder {
						println("fcs ok", block.Last, fcs, decodedFrame)
					}
				}
			}
			output <- do
		}
		close(output)
		frameHistCache = hist.b
		wg.Done()
		if debugDecoder {
			println("decoder goroutines finished")
		}
		hist.reset()
	}()

	var hist history
decodeStream:
	for {
		var hasErr bool
		hist.reset()
		// decodeBlock decodes literals in this goroutine and forwards
		// the block to the sequence-decoding stage.
		decodeBlock := func(block *blockDec) {
			if hasErr {
				if block != nil {
					seqDecode <- block
				}
				return
			}
			if block.err != nil || block.Type != blockTypeCompressed {
				hasErr = block.err != nil
				seqDecode <- block
				return
			}

			remain, err := block.decodeLiterals(block.data, &hist)
			block.err = err
			hasErr = block.err != nil
			if err == nil {
				block.async.literals = hist.decoders.literals
				block.async.seqData = remain
			} else if debugDecoder {
				println("decodeLiterals error:", err)
			}
			seqDecode <- block
		}
		frame := d.frame
		if debugDecoder {
			println("New frame...")
		}
		var historySent bool
		frame.history.reset()
		err := frame.reset(&br)
		if debugDecoder && err != nil {
			println("Frame decoder returned", err)
		}
		if err == nil {
			err = d.setDict(frame)
		}
		if err == nil && d.frame.WindowSize > d.o.maxWindowSize {
			if debugDecoder {
				println("decoder size exceeded, fws:", d.frame.WindowSize, "> mws:", d.o.maxWindowSize)
			}

			err = ErrDecoderSizeExceeded
		}
		if err != nil {
			// Deliver the error through the pipeline via a decoder,
			// unless we are being cancelled.
			select {
			case <-ctx.Done():
			case dec := <-d.decoders:
				dec.sendErr(err)
				decodeBlock(dec)
			}
			break decodeStream
		}

		// Go through all blocks of the frame.
		for {
			var dec *blockDec
			select {
			case <-ctx.Done():
				break decodeStream
			case dec = <-d.decoders:
				// Once we have a decoder, we MUST return it.
			}
			err := frame.next(dec)
			if !historySent {
				// Attach a snapshot of the frame history to the first
				// block so downstream stages can initialize themselves.
				h := frame.history
				if debugDecoder {
					println("Alloc History:", h.allocFrameBuffer)
				}
				hist.reset()
				if h.dict != nil {
					hist.setDict(h.dict)
				}
				dec.async.newHist = &h
				dec.async.fcs = frame.FrameContentSize
				historySent = true
			} else {
				dec.async.newHist = nil
			}
			if debugDecoder && err != nil {
				println("next block returned error:", err)
			}
			dec.err = err
			dec.hasCRC = false
			if dec.Last && frame.HasCheckSum && err == nil {
				// Read the 4-byte frame checksum that follows the last block.
				crc, err := frame.rawInput.readSmall(4)
				if len(crc) < 4 {
					if err == nil {
						err = io.ErrUnexpectedEOF
					}
					println("CRC missing?", err)
					dec.err = err
				} else {
					dec.checkCRC = binary.LittleEndian.Uint32(crc)
					dec.hasCRC = true
					if debugDecoder {
						printf("found crc to check: %08x\n", dec.checkCRC)
					}
				}
			}
			err = dec.err
			last := dec.Last
			decodeBlock(dec)
			if err != nil {
				break decodeStream
			}
			if last {
				break
			}
		}
	}
	close(seqDecode)
	wg.Wait()
	hist.reset()
	d.frame.history.b = frameHistCache
}
// setDict installs the dictionary requested by the frame, if registered.
// An unknown, nonzero dictionary ID is an error.
func (d *Decoder) setDict(frame *frameDec) (err error) {
	if dict, ok := d.dicts[frame.DictionaryID]; ok {
		if debugDecoder {
			println("setting dict", frame.DictionaryID)
		}
		frame.history.setDict(dict)
		return nil
	}
	// A zero or missing dictionary id is ambiguous:
	// either dictionary zero, or no dictionary. In particular,
	// zstd --patch-from uses this id for the source file,
	// so only return an error if the dictionary id is not zero.
	if frame.DictionaryID == 0 {
		return nil
	}
	return ErrUnknownDictionary
}
// Copyright 2019+ Klaus Post. All rights reserved.
// License information can be found in the LICENSE file.
// Based on work by Yann Collet, released under BSD License.
package zstd
import (
"errors"
"fmt"
"math/bits"
"runtime"
)
// DOption is an option for creating a decoder.
// Options are applied by NewReader in the order given and may return
// an error to abort decoder construction.
type DOption func(*decoderOptions) error
// options retains accumulated state of multiple options.
type decoderOptions struct {
	lowMem          bool    // prefer smaller allocations over speed
	concurrent      int     // number of block decoders / inflight blocks
	maxDecodedSize  uint64  // limit on total decoded output
	maxWindowSize   uint64  // limit on accepted frame window size
	dicts           []*dict // dictionaries registered via options; moved into Decoder.dicts
	ignoreChecksum  bool    // skip CRC verification when set
	limitToCap      bool    // DecodeAll: never decode beyond cap(dst)
	decodeBufsBelow int     // byte-buffer inputs below this size decode synchronously
}
// setDefault resets the options to their default values.
func (o *decoderOptions) setDefault() {
	// Default concurrency is GOMAXPROCS, capped at 4.
	concurrent := runtime.GOMAXPROCS(0)
	if concurrent > 4 {
		concurrent = 4
	}
	*o = decoderOptions{
		// use less ram: true for now, but may change.
		lowMem:          true,
		concurrent:      concurrent,
		maxWindowSize:   MaxWindowSize,
		decodeBufsBelow: 128 << 10,
		maxDecodedSize:  64 << 30,
	}
}
// WithDecoderLowmem will set whether to use a lower amount of memory,
// but possibly have to allocate more while running.
func WithDecoderLowmem(b bool) DOption {
	return func(o *decoderOptions) error {
		o.lowMem = b
		return nil
	}
}
// WithDecoderConcurrency sets the number of created decoders.
// When decoding block with DecodeAll, this will limit the number
// of possible concurrently running decodes.
// When decoding streams, this will limit the number of
// inflight blocks.
// When decoding streams and setting maximum to 1,
// no async decoding will be done.
// When a value of 0 is provided GOMAXPROCS will be used.
// By default this will be set to 4 or GOMAXPROCS, whatever is lower.
func WithDecoderConcurrency(n int) DOption {
	return func(o *decoderOptions) error {
		switch {
		case n < 0:
			return errors.New("concurrency must be at least 1")
		case n == 0:
			// 0 selects the current GOMAXPROCS.
			o.concurrent = runtime.GOMAXPROCS(0)
		default:
			o.concurrent = n
		}
		return nil
	}
}
// WithDecoderMaxMemory allows to set a maximum decoded size for in-memory
// non-streaming operations or maximum window size for streaming operations.
// This can be used to control memory usage of potentially hostile content.
// Maximum is 1 << 63 bytes. Default is 64GiB.
func WithDecoderMaxMemory(n uint64) DOption {
	return func(o *decoderOptions) error {
		switch {
		case n == 0:
			return errors.New("WithDecoderMaxMemory must be at least 1")
		case n > 1<<63:
			return errors.New("WithDecoderMaxmemory must be less than 1 << 63")
		}
		o.maxDecodedSize = n
		return nil
	}
}
// WithDecoderDicts allows to register one or more dictionaries for the decoder.
//
// Each slice in dict must be in the [dictionary format] produced by
// "zstd --train" from the Zstandard reference implementation.
//
// If several dictionaries with the same ID are provided, the last one will be used.
//
// [dictionary format]: https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary-format
func WithDecoderDicts(dicts ...[]byte) DOption {
	return func(o *decoderOptions) error {
		// Parse each dictionary; the first invalid one aborts.
		for _, raw := range dicts {
			parsed, err := loadDict(raw)
			if err != nil {
				return err
			}
			o.dicts = append(o.dicts, parsed)
		}
		return nil
	}
}
// WithDecoderDictRaw registers a dictionary that may be used by the decoder.
// The slice content can be arbitrary data.
func WithDecoderDictRaw(id uint32, content []byte) DOption {
	return func(o *decoderOptions) error {
		// On 64-bit platforms, reject dictionaries above the format limit.
		if bits.UintSize > 32 && uint(len(content)) > dictMaxLength {
			return fmt.Errorf("dictionary of size %d > 2GiB too large", len(content))
		}
		d := &dict{id: id, content: content, offsets: [3]int{1, 4, 8}}
		o.dicts = append(o.dicts, d)
		return nil
	}
}
// WithDecoderMaxWindow allows to set a maximum window size for decodes.
// This allows rejecting packets that will cause big memory usage.
// The Decoder will likely allocate more memory based on the WithDecoderLowmem setting.
// If WithDecoderMaxMemory is set to a lower value, that will be used.
// Default is 512MB, Maximum is ~3.75 TB as per zstandard spec.
func WithDecoderMaxWindow(size uint64) DOption {
	return func(o *decoderOptions) error {
		// Spec maximum window: (1<<41) + 7*(1<<38) bytes (~3.75 TB).
		const maxWindow = (1 << 41) + 7*(1 << 38)
		if size < MinWindowSize {
			return errors.New("WithMaxWindowSize must be at least 1KB, 1024 bytes")
		}
		if size > maxWindow {
			return errors.New("WithMaxWindowSize must be less than (1<<41) + 7*(1<<38) ~ 3.75TB")
		}
		o.maxWindowSize = size
		return nil
	}
}
// WithDecodeAllCapLimit will limit DecodeAll to decoding cap(dst)-len(dst) bytes,
// or any size set in WithDecoderMaxMemory.
// This can be used to limit decoding to a specific maximum output size.
// Disabled by default.
func WithDecodeAllCapLimit(b bool) DOption {
	return func(o *decoderOptions) error {
		// Stored flag is consulted by DecodeAll when sizing output.
		o.limitToCap = b
		return nil
	}
}
// WithDecodeBuffersBelow will fully decode readers that have a
// `Bytes() []byte` and `Len() int` interface similar to bytes.Buffer.
// This typically uses less allocations but will have the full decompressed object in memory.
// Note that DecodeAllCapLimit will disable this, as well as giving a size of 0 or less.
// Default is 128KiB.
func WithDecodeBuffersBelow(size int) DOption {
	return func(o *decoderOptions) error {
		// Inputs with Bytes()/Len() smaller than this decode synchronously.
		o.decodeBufsBelow = size
		return nil
	}
}
// IgnoreChecksum allows to forcibly ignore checksum checking.
func IgnoreChecksum(b bool) DOption {
return func(o *decoderOptions) error {
o.ignoreChecksum = b
return nil
}
}
// Copyright 2019+ Klaus Post. All rights reserved.
// License information can be found in the LICENSE file.
// Based on work by Yann Collet, released under BSD License.
package zstd
import (
"bufio"
"bytes"
"encoding/binary"
"encoding/hex"
"errors"
"fmt"
"io"
"log"
"math/rand"
"os"
"path/filepath"
"reflect"
"runtime"
"strconv"
"strings"
"sync"
"testing"
"time"
// "github.com/DataDog/zstd"
// zstd "github.com/valyala/gozstd"
"github.com/klauspost/compress/zstd/internal/xxhash"
)
func TestNewReaderMismatch(t *testing.T) {
	// To identify a potential decoding error, do the following steps:
	// 1) Place the compressed file in testdata, eg 'testdata/backup.bin.zst'
	// 2) Decompress the file to using zstd, so it will be named 'testdata/backup.bin'
	// 3) Run the test. A hash file will be generated 'testdata/backup.bin.hash'
	// 4) The decoder will also run and decode the file. It will stop as soon as a mismatch is found.
	// The hash file will be reused between runs if present.
	const baseFile = "testdata/backup.bin"
	const blockSize = 1024
	hashes, err := os.ReadFile(baseFile + ".hash")
	if os.IsNotExist(err) {
		// Create the hash file.
		f, err := os.Open(baseFile)
		if os.IsNotExist(err) {
			t.Skip("no decompressed file found")
			return
		}
		defer f.Close()
		br := bufio.NewReader(f)
		var tmp [8]byte
		xx := xxhash.New()
		for {
			xx.Reset()
			buf := make([]byte, blockSize)
			n, err := io.ReadFull(br, buf)
			if err != nil {
				if err != io.EOF && err != io.ErrUnexpectedEOF {
					t.Fatal(err)
				}
			}
			if n > 0 {
				// Only the low 4 bytes of each block's xxhash are stored.
				_, _ = xx.Write(buf[:n])
				binary.LittleEndian.PutUint64(tmp[:], xx.Sum64())
				hashes = append(hashes, tmp[4:]...)
			}
			if n != blockSize {
				break
			}
		}
		err = os.WriteFile(baseFile+".hash", hashes, os.ModePerm)
		if err != nil {
			// We can continue for now
			t.Error(err)
		}
		t.Log("Saved", len(hashes)/4, "hashes as", baseFile+".hash")
	}
	f, err := os.Open(baseFile + ".zst")
	if os.IsNotExist(err) {
		t.Skip("no compressed file found")
		return
	}
	defer f.Close()
	dec, err := NewReader(f, WithDecoderConcurrency(1))
	if err != nil {
		t.Fatal(err)
	}
	defer dec.Close()

	var tmp [8]byte
	xx := xxhash.New()
	var cHash int
	// Decode block by block, comparing against the stored hashes.
	for {
		xx.Reset()
		buf := make([]byte, blockSize)
		n, err := io.ReadFull(dec, buf)
		if err != nil {
			if err != io.EOF && err != io.ErrUnexpectedEOF {
				t.Fatal("block", cHash, "err:", err)
			}
		}
		if n > 0 {
			if cHash+4 > len(hashes) {
				extra, _ := io.Copy(io.Discard, dec)
				t.Fatal("not enough hashes (length mismatch). Only have", len(hashes)/4, "hashes. Got block of", n, "bytes and", extra, "bytes still on stream.")
			}
			_, _ = xx.Write(buf[:n])
			binary.LittleEndian.PutUint64(tmp[:], xx.Sum64())
			want, got := hashes[cHash:cHash+4], tmp[4:]
			if !bytes.Equal(want, got) {
				// On mismatch, dump the surrounding section of the
				// original file to aid debugging, then fail.
				org, err := os.Open(baseFile)
				if err == nil {
					const sizeBack = 8 << 20
					defer org.Close()
					start := int64(cHash)/4*blockSize - sizeBack
					if start < 0 {
						start = 0
					}
					_, err = org.Seek(start, io.SeekStart)
					if err != nil {
						t.Fatal(err)
					}
					buf2 := make([]byte, sizeBack+1<<20)
					n, _ := io.ReadFull(org, buf2)
					if n > 0 {
						err = os.WriteFile(baseFile+".section", buf2[:n], os.ModePerm)
						if err == nil {
							t.Log("Wrote problematic section to", baseFile+".section")
						}
					}
				}
				t.Fatal("block", cHash/4, "offset", cHash/4*blockSize, "hash mismatch, want:", hex.EncodeToString(want), "got:", hex.EncodeToString(got))
			}
			cHash += 4
		}
		if n != blockSize {
			break
		}
	}
	t.Log("Output matched")
}
type errorReader struct {
err error
}
func (r *errorReader) Read(p []byte) (int, error) {
return 0, r.err
}
// TestErrorReader verifies that an error raised by the underlying input
// stream is surfaced (directly or wrapped) from the decoder's Read path.
func TestErrorReader(t *testing.T) {
	wantErr := fmt.Errorf("i'm a failure")
	zr, err := NewReader(&errorReader{err: wantErr})
	if err != nil {
		t.Fatal(err)
	}
	defer zr.Close()
	if _, err = io.ReadAll(zr); !errors.Is(err, wantErr) {
		t.Errorf("want error %v, got %v", wantErr, err)
	}
}
type failingWriter struct {
err error
}
func (f failingWriter) Write(_ []byte) (n int, err error) {
return 0, f.err
}
// TestErrorWriter compresses random data, then checks that an error returned
// by the destination writer propagates out of the decoder's WriteTo path.
func TestErrorWriter(t *testing.T) {
	input := make([]byte, 100)
	cmp := bytes.Buffer{}
	w, err := NewWriter(&cmp)
	if err != nil {
		t.Fatal(err)
	}
	// Random payload; the content does not matter, only that it round-trips.
	_, _ = rand.Read(input)
	_, err = w.Write(input)
	if err != nil {
		t.Fatal(err)
	}
	err = w.Close()
	if err != nil {
		t.Fatal(err)
	}
	wantErr := fmt.Errorf("i'm a failure")
	zr, err := NewReader(&cmp)
	if err != nil {
		t.Fatal(err)
	}
	defer zr.Close()
	out := failingWriter{err: wantErr}
	// WriteTo must surface the writer's error, possibly wrapped.
	_, err = zr.WriteTo(out)
	if !errors.Is(err, wantErr) {
		t.Errorf("error: wanted: %v, got: %v", wantErr, err)
	}
}
// TestNewDecoder decodes the standard test corpus with 1 and 4 goroutines,
// both with and without checksum verification, via the streaming and
// DecodeAll code paths.
func TestNewDecoder(t *testing.T) {
	for _, n := range []int{1, 4} {
		for _, ignoreCRC := range []bool{false, true} {
			// Fix: the subtest name previously only encoded n, so the two
			// ignoreCRC variants produced duplicate names that were only
			// distinguishable by the automatic "#01" suffix.
			t.Run(fmt.Sprintf("cpu-%d-ignoreCRC-%t", n, ignoreCRC), func(t *testing.T) {
				newFn := func() (*Decoder, error) {
					return NewReader(nil, WithDecoderConcurrency(n), IgnoreChecksum(ignoreCRC))
				}
				testDecoderFile(t, "testdata/decoder.zip", newFn)
				dec, err := newFn()
				if err != nil {
					t.Fatal(err)
				}
				testDecoderDecodeAll(t, "testdata/decoder.zip", dec)
			})
		}
	}
}
// TestNewDecoderMemory creates many low-memory streaming decoders, partially
// reads each so its buffers stay live, and checks that the per-decoder heap
// footprint stays within roughly the expected budget.
func TestNewDecoderMemory(t *testing.T) {
	defer timeout(60 * time.Second)()
	var testdata bytes.Buffer
	enc, err := NewWriter(&testdata, WithWindowSize(32<<10), WithSingleSegment(false))
	if err != nil {
		t.Fatal(err)
	}
	// Write 256KB
	for i := 0; i < 256; i++ {
		tmp := strings.Repeat(string([]byte{byte(i)}), 1024)
		_, err := enc.Write([]byte(tmp))
		if err != nil {
			t.Fatal(err)
		}
	}
	err = enc.Close()
	if err != nil {
		t.Fatal(err)
	}
	var n = 5000
	if testing.Short() {
		n = 200
	}
	// 16K buffer
	var tmp [16 << 10]byte
	var before, after runtime.MemStats
	runtime.GC()
	runtime.ReadMemStats(&before)
	var decs = make([]*Decoder, n)
	for i := range decs {
		// Wrap in NopCloser to avoid shortcut.
		input := io.NopCloser(bytes.NewBuffer(testdata.Bytes()))
		decs[i], err = NewReader(input, WithDecoderConcurrency(1), WithDecoderLowmem(true))
		if err != nil {
			t.Fatal(err)
		}
	}
	// Read only 16K of each stream so every decoder keeps its state allocated.
	for i := range decs {
		_, err := io.ReadFull(decs[i], tmp[:])
		if err != nil {
			t.Fatal(err)
		}
	}
	runtime.GC()
	runtime.ReadMemStats(&after)
	size := (after.HeapInuse - before.HeapInuse) / uint64(n) / 1024
	// Approximate expected KiB per decoder in lowmem mode.
	const expect = 124
	t.Log(size, "KiB per decoder")
	// This is not exact science, but fail if we suddenly get more than 2x what we expect.
	if size > expect*2 && !testing.Short() {
		t.Errorf("expected < %dKB per decoder, got %d", expect, size)
	}
	for _, dec := range decs {
		dec.Close()
	}
}
// TestNewDecoderMemoryHighMem is the high-memory variant of
// TestNewDecoderMemory: fewer decoders, lowmem disabled, and a proportionally
// larger per-decoder heap budget.
func TestNewDecoderMemoryHighMem(t *testing.T) {
	defer timeout(60 * time.Second)()
	var testdata bytes.Buffer
	enc, err := NewWriter(&testdata, WithWindowSize(32<<10), WithSingleSegment(false))
	if err != nil {
		t.Fatal(err)
	}
	// Write 256KB
	for i := 0; i < 256; i++ {
		tmp := strings.Repeat(string([]byte{byte(i)}), 1024)
		_, err := enc.Write([]byte(tmp))
		if err != nil {
			t.Fatal(err)
		}
	}
	err = enc.Close()
	if err != nil {
		t.Fatal(err)
	}
	var n = 50
	if testing.Short() {
		n = 10
	}
	// 16K buffer
	var tmp [16 << 10]byte
	var before, after runtime.MemStats
	runtime.GC()
	runtime.ReadMemStats(&before)
	var decs = make([]*Decoder, n)
	for i := range decs {
		// Wrap in NopCloser to avoid shortcut.
		input := io.NopCloser(bytes.NewBuffer(testdata.Bytes()))
		decs[i], err = NewReader(input, WithDecoderConcurrency(1), WithDecoderLowmem(false))
		if err != nil {
			t.Fatal(err)
		}
	}
	// Read only 16K of each stream so every decoder keeps its state allocated.
	for i := range decs {
		_, err := io.ReadFull(decs[i], tmp[:])
		if err != nil {
			t.Fatal(err)
		}
	}
	runtime.GC()
	runtime.ReadMemStats(&after)
	size := (after.HeapInuse - before.HeapInuse) / uint64(n) / 1024
	// Approximate expected KiB per decoder with lowmem disabled.
	const expect = 3915
	t.Log(size, "KiB per decoder")
	// This is not exact science, but fail if we suddenly get more than 2x what we expect.
	if size > expect*2 && !testing.Short() {
		t.Errorf("expected < %dKB per decoder, got %d", expect, size)
	}
	for _, dec := range decs {
		dec.Close()
	}
}
// TestNewDecoderFrameSize checks that WithDecoderMaxWindow rejects streams
// encoded with a larger window than allowed and accepts streams within it.
func TestNewDecoderFrameSize(t *testing.T) {
	defer timeout(60 * time.Second)()
	var testdata bytes.Buffer
	enc, err := NewWriter(&testdata, WithWindowSize(64<<10))
	if err != nil {
		t.Fatal(err)
	}
	// Write 256KB
	for i := 0; i < 256; i++ {
		tmp := strings.Repeat(string([]byte{byte(i)}), 1024)
		_, err := enc.Write([]byte(tmp))
		if err != nil {
			t.Fatal(err)
		}
	}
	err = enc.Close()
	if err != nil {
		t.Fatal(err)
	}
	// Must fail: stream window (64K) exceeds the decoder limit (32K).
	dec, err := NewReader(bytes.NewReader(testdata.Bytes()), WithDecoderMaxWindow(32<<10))
	if err != nil {
		t.Fatal(err)
	}
	_, err = io.Copy(io.Discard, dec)
	if err == nil {
		dec.Close()
		t.Fatal("Wanted error, got none")
	}
	dec.Close()
	// Must succeed: decoder limit matches the stream's window size.
	dec, err = NewReader(bytes.NewReader(testdata.Bytes()), WithDecoderMaxWindow(64<<10))
	if err != nil {
		t.Fatal(err)
	}
	_, err = io.Copy(io.Discard, dec)
	if err != nil {
		dec.Close()
		t.Fatalf("Wanted no error, got %+v", err)
	}
	dec.Close()
}
// TestNewDecoderGood decodes every sample in testdata/good.zip with both a
// single-goroutine and a concurrent decoder, via streaming and DecodeAll.
func TestNewDecoderGood(t *testing.T) {
	for _, cpus := range []int{1, 4} {
		t.Run(fmt.Sprintf("cpu-%d", cpus), func(t *testing.T) {
			newFn := func() (*Decoder, error) {
				return NewReader(nil, WithDecoderConcurrency(cpus))
			}
			testDecoderFile(t, "testdata/good.zip", newFn)
			dec, err := newFn()
			if err != nil {
				t.Fatal(err)
			}
			testDecoderDecodeAll(t, "testdata/good.zip", dec)
		})
	}
}
// TestNewDecoderBad runs a corpus of intentionally corrupt streams through
// several decoder configurations. errMap is shared between subtests: the
// first configuration records each file's error message and later runs must
// reproduce it, so subtest order matters here.
func TestNewDecoderBad(t *testing.T) {
	var errMap = make(map[string]string)
	if true {
		t.Run("Reader-4", func(t *testing.T) {
			newFn := func() (*Decoder, error) {
				return NewReader(nil, WithDecoderConcurrency(4), WithDecoderMaxMemory(1<<30))
			}
			testDecoderFileBad(t, "testdata/bad.zip", newFn, errMap)
		})
		t.Run("Reader-1", func(t *testing.T) {
			newFn := func() (*Decoder, error) {
				return NewReader(nil, WithDecoderConcurrency(1), WithDecoderMaxMemory(1<<30))
			}
			testDecoderFileBad(t, "testdata/bad.zip", newFn, errMap)
		})
		t.Run("Reader-4-bigmem", func(t *testing.T) {
			newFn := func() (*Decoder, error) {
				return NewReader(nil, WithDecoderConcurrency(4), WithDecoderMaxMemory(1<<30), WithDecoderLowmem(false))
			}
			testDecoderFileBad(t, "testdata/bad.zip", newFn, errMap)
		})
		t.Run("Reader-1-bigmem", func(t *testing.T) {
			newFn := func() (*Decoder, error) {
				return NewReader(nil, WithDecoderConcurrency(1), WithDecoderMaxMemory(1<<30), WithDecoderLowmem(false))
			}
			testDecoderFileBad(t, "testdata/bad.zip", newFn, errMap)
		})
	}
	t.Run("DecodeAll", func(t *testing.T) {
		defer timeout(10 * time.Second)()
		dec, err := NewReader(nil, WithDecoderMaxMemory(1<<30))
		if err != nil {
			t.Fatal(err)
		}
		testDecoderDecodeAllError(t, "testdata/bad.zip", dec, errMap)
	})
	t.Run("DecodeAll-bigmem", func(t *testing.T) {
		defer timeout(10 * time.Second)()
		dec, err := NewReader(nil, WithDecoderMaxMemory(1<<30), WithDecoderLowmem(false))
		if err != nil {
			t.Fatal(err)
		}
		testDecoderDecodeAllError(t, "testdata/bad.zip", dec, errMap)
	})
}
func TestNewDecoderLarge(t *testing.T) {
newFn := func() (*Decoder, error) {
return NewReader(nil)
}
testDecoderFile(t, "testdata/large.zip", newFn)
dec, err := NewReader(nil)
if err != nil {
t.Fatal(err)
}
testDecoderDecodeAll(t, "testdata/large.zip", dec)
}
// TestNewReaderRead checks that reading from a decoder created without an
// input stream fails rather than returning data.
func TestNewReaderRead(t *testing.T) {
	dec, err := NewReader(nil)
	if err != nil {
		t.Fatal(err)
	}
	defer dec.Close()
	if _, err = dec.Read([]byte{0}); err == nil {
		t.Fatal("Wanted error on uninitialized read, got nil")
	}
	t.Log("correctly got error", err)
}
// TestNewDecoderBig runs an optional, very large external corpus through the
// streaming and DecodeAll paths. Skipped unless the file has been downloaded
// manually; see the Skip message for instructions.
func TestNewDecoderBig(t *testing.T) {
	if testing.Short() || isRaceTest {
		t.SkipNow()
	}
	file := "testdata/zstd-10kfiles.zip"
	if _, err := os.Stat(file); os.IsNotExist(err) {
		t.Skip("To run extended tests, download https://files.klauspost.com/compress/zstd-10kfiles.zip \n" +
			"and place it in " + file + "\n" + "Running it requires about 5GB of RAM")
	}
	newFn := func() (*Decoder, error) {
		return NewReader(nil)
	}
	testDecoderFile(t, file, newFn)
	dec, err := NewReader(nil)
	if err != nil {
		t.Fatal(err)
	}
	testDecoderDecodeAll(t, file, dec)
}
// TestNewDecoderBigFile decodes a 1 GB external test file (if present),
// verifies the decompressed size, and logs effective throughput.
func TestNewDecoderBigFile(t *testing.T) {
	if testing.Short() || isRaceTest {
		t.SkipNow()
	}
	file := "testdata/enwik9.zst"
	const wantSize = 1000000000
	if _, err := os.Stat(file); os.IsNotExist(err) {
		t.Skip("To run extended tests, download http://mattmahoney.net/dc/enwik9.zip unzip it \n" +
			"compress it with 'zstd -15 -T0 enwik9' and place it in " + file)
	}
	f, err := os.Open(file)
	if err != nil {
		t.Fatal(err)
	}
	defer f.Close()
	start := time.Now()
	dec, err := NewReader(f)
	if err != nil {
		t.Fatal(err)
	}
	defer dec.Close()
	n, err := io.Copy(io.Discard, dec)
	if err != nil {
		t.Fatal(err)
	}
	if n != wantSize {
		t.Errorf("want size %d, got size %d", wantSize, n)
	}
	elapsed := time.Since(start)
	mbpersec := (float64(n) / (1024 * 1024)) / (float64(elapsed) / (float64(time.Second)))
	// Fix: the verb was "%f.2", which prints the full-precision float
	// followed by a literal ".2"; "%.2f" is what was intended.
	t.Logf("Decoded %d bytes with %.2f MB/s", n, mbpersec)
}
// TestNewDecoderSmallFile decodes a small checked-in file, verifies the
// decompressed size, and logs effective throughput.
func TestNewDecoderSmallFile(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}
	file := "testdata/z000028.zst"
	const wantSize = 39807
	f, err := os.Open(file)
	if err != nil {
		t.Fatal(err)
	}
	defer f.Close()
	start := time.Now()
	dec, err := NewReader(f)
	if err != nil {
		t.Fatal(err)
	}
	defer dec.Close()
	n, err := io.Copy(io.Discard, dec)
	if err != nil {
		t.Fatal(err)
	}
	if n != wantSize {
		t.Errorf("want size %d, got size %d", wantSize, n)
	}
	mbpersec := (float64(n) / (1024 * 1024)) / (float64(time.Since(start)) / (float64(time.Second)))
	// Fix: the verb was "%f.2", which prints the full-precision float
	// followed by a literal ".2"; "%.2f" is what was intended.
	t.Logf("Decoded %d bytes with %.2f MB/s", n, mbpersec)
}
// cursedReader wraps a reader and returns zero bytes every other read.
// This is used to test the ability of the consumer to handle empty reads without EOF,
// which can happen when reading from a network connection.
type cursedReader struct {
io.Reader
numReads int
}
func (r *cursedReader) Read(p []byte) (n int, err error) {
r.numReads++
if r.numReads%2 == 0 {
return 0, nil
}
return r.Reader.Read(p)
}
// TestNewDecoderZeroLengthReads decodes a file through a cursedReader, which
// injects (0, nil) reads, and checks the decoder still produces the full
// output without treating an empty read as end of stream.
func TestNewDecoderZeroLengthReads(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}
	file := "testdata/z000028.zst"
	const wantSize = 39807
	f, err := os.Open(file)
	if err != nil {
		t.Fatal(err)
	}
	defer f.Close()
	dec, err := NewReader(&cursedReader{Reader: f})
	if err != nil {
		t.Fatal(err)
	}
	defer dec.Close()
	n, err := io.Copy(io.Discard, dec)
	if err != nil {
		t.Fatal(err)
	}
	if n != wantSize {
		t.Errorf("want size %d, got size %d", wantSize, n)
	}
}
type readAndBlock struct {
buf []byte
unblock chan struct{}
}
func (r *readAndBlock) Read(p []byte) (int, error) {
n := copy(p, r.buf)
if n == 0 {
<-r.unblock
return 0, io.EOF
}
r.buf = r.buf[n:]
return n, nil
}
// TestNewDecoderFlushed verifies that everything written before Flush can be
// read back even though the input source blocks after the flushed data: the
// decoder must not stall waiting for more input before delivering it.
func TestNewDecoderFlushed(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}
	file := "testdata/z000028.zst"
	payload, err := os.ReadFile(file)
	if err != nil {
		t.Fatal(err)
	}
	payload = append(payload, payload...) //2x
	payload = append(payload, payload...) //4x
	payload = append(payload, payload...) //8x
	rng := rand.New(rand.NewSource(0x1337))
	runs := 100
	if testing.Short() {
		runs = 5
	}
	enc, err := NewWriter(nil, WithWindowSize(128<<10))
	if err != nil {
		t.Fatal(err)
	}
	defer enc.Close()
	for i := 0; i < runs; i++ {
		// Random prefix length; at least 1 byte.
		wantSize := rng.Intn(len(payload)-1) + 1
		t.Run(fmt.Sprint("size-", wantSize), func(t *testing.T) {
			var encoded bytes.Buffer
			enc.Reset(&encoded)
			_, err := enc.Write(payload[:wantSize])
			if err != nil {
				t.Fatal(err)
			}
			err = enc.Flush()
			if err != nil {
				t.Fatal(err)
			}
			// We must be able to read back up until the flush...
			r := readAndBlock{
				buf:     encoded.Bytes(),
				unblock: make(chan struct{}),
			}
			defer timeout(5 * time.Second)()
			dec, err := NewReader(&r)
			if err != nil {
				t.Fatal(err)
			}
			defer dec.Close()
			// Unblock the reader at subtest end so the decoder can see EOF.
			defer close(r.unblock)
			readBack := 0
			dst := make([]byte, 1024)
			for readBack < wantSize {
				// Read until we have enough.
				n, err := dec.Read(dst)
				if err != nil {
					t.Fatal(err)
				}
				readBack += n
			}
		})
	}
}
// TestDecoderRegression replays a corpus of past problem inputs. For each
// file it compares three decode strategies — a reused streaming decoder, a
// fresh streaming decoder, and DecodeAll — requiring matching output bytes
// and matching error outcomes between them.
func TestDecoderRegression(t *testing.T) {
	defer timeout(160 * time.Second)()
	zr := testCreateZipReader("testdata/regression.zip", t)
	// Deliberately tight memory limit; many regression inputs probe limits.
	dec, err := NewReader(nil, WithDecoderConcurrency(1), WithDecoderLowmem(true), WithDecoderMaxMemory(1<<20))
	if err != nil {
		t.Error(err)
		return
	}
	defer dec.Close()
	for i, tt := range zr.File {
		if testing.Short() && i > 10 {
			continue
		}
		t.Run("Reader-"+tt.Name, func(t *testing.T) {
			r, err := tt.Open()
			if err != nil {
				t.Error(err)
				return
			}
			err = dec.Reset(r)
			if err != nil {
				t.Error(err)
				return
			}
			got, gotErr := io.ReadAll(dec)
			t.Log("Received:", len(got), gotErr)
			// Check a fresh instance
			r, err = tt.Open()
			if err != nil {
				t.Error(err)
				return
			}
			decL, err := NewReader(r, WithDecoderConcurrency(1), WithDecoderLowmem(true), WithDecoderMaxMemory(1<<20))
			if err != nil {
				t.Error(err)
				return
			}
			defer decL.Close()
			got2, gotErr2 := io.ReadAll(decL)
			t.Log("Fresh Reader received:", len(got2), gotErr2)
			// Errors must agree in both message and nil-ness.
			if gotErr != gotErr2 {
				if gotErr != nil && gotErr2 != nil && gotErr.Error() != gotErr2.Error() {
					t.Error(gotErr, "!=", gotErr2)
				}
				if (gotErr == nil) != (gotErr2 == nil) {
					t.Error(gotErr, "!=", gotErr2)
				}
			}
			// On error, output divergence is informational only.
			if !bytes.Equal(got2, got) {
				if gotErr != nil {
					t.Log("Buffer mismatch without Reset")
				} else {
					t.Error("Buffer mismatch without Reset")
				}
			}
		})
		t.Run("DecodeAll-"+tt.Name, func(t *testing.T) {
			r, err := tt.Open()
			if err != nil {
				t.Error(err)
				return
			}
			in, err := io.ReadAll(r)
			if err != nil {
				t.Error(err)
			}
			got, gotErr := dec.DecodeAll(in, make([]byte, 0, len(in)))
			t.Log("Received:", len(got), gotErr)
			// Check if we got the same:
			decL, err := NewReader(nil, WithDecoderConcurrency(1), WithDecoderLowmem(true), WithDecoderMaxMemory(1<<20))
			if err != nil {
				t.Error(err)
				return
			}
			defer decL.Close()
			got2, gotErr2 := decL.DecodeAll(in, make([]byte, 0, len(in)/2))
			t.Log("Fresh Reader received:", len(got2), gotErr2)
			if gotErr != gotErr2 {
				if gotErr != nil && gotErr2 != nil && gotErr.Error() != gotErr2.Error() {
					t.Error(gotErr, "!=", gotErr2)
				}
				if (gotErr == nil) != (gotErr2 == nil) {
					t.Error(gotErr, "!=", gotErr2)
				}
			}
			if !bytes.Equal(got2, got) {
				if gotErr != nil {
					t.Log("Buffer mismatch without Reset")
				} else {
					t.Error("Buffer mismatch without Reset")
				}
			}
		})
		t.Run("Match-"+tt.Name, func(t *testing.T) {
			r, err := tt.Open()
			if err != nil {
				t.Error(err)
				return
			}
			in, err := io.ReadAll(r)
			if err != nil {
				t.Error(err)
			}
			got, gotErr := dec.DecodeAll(in, make([]byte, 0, len(in)))
			t.Log("Received:", len(got), gotErr)
			// Check a fresh instance
			decL, err := NewReader(bytes.NewBuffer(in), WithDecoderConcurrency(1), WithDecoderLowmem(true), WithDecoderMaxMemory(1<<20))
			if err != nil {
				t.Error(err)
				return
			}
			defer decL.Close()
			got2, gotErr2 := io.ReadAll(decL)
			t.Log("Reader Reader received:", len(got2), gotErr2)
			if gotErr != gotErr2 {
				if gotErr != nil && gotErr2 != nil && gotErr.Error() != gotErr2.Error() {
					t.Error(gotErr, "!=", gotErr2)
				}
				if (gotErr == nil) != (gotErr2 == nil) {
					t.Error(gotErr, "!=", gotErr2)
				}
			}
			if !bytes.Equal(got2, got) {
				if gotErr != nil {
					t.Log("Buffer mismatch")
				} else {
					t.Error("Buffer mismatch")
				}
			}
		})
	}
}
// TestShort verifies that inputs too short to contain a valid frame are
// rejected by both DecodeAll and the streaming Reader path.
func TestShort(t *testing.T) {
	for _, in := range []string{"f", "fo", "foo"} {
		inb := []byte(in)
		dec, err := NewReader(nil)
		if err != nil {
			t.Fatal(err)
		}
		// Deferred to the end of the whole test, not the iteration; only
		// three decoders are created, so that is acceptable here.
		defer dec.Close()
		t.Run(fmt.Sprintf("DecodeAll-%d", len(in)), func(t *testing.T) {
			_, err := dec.DecodeAll(inb, nil)
			if err == nil {
				t.Error("want error, got nil")
			}
		})
		t.Run(fmt.Sprintf("Reader-%d", len(in)), func(t *testing.T) {
			// Fix: the error from Reset was previously discarded.
			if err := dec.Reset(bytes.NewReader(inb)); err != nil {
				t.Fatal(err)
			}
			_, err := io.Copy(io.Discard, dec)
			if err == nil {
				t.Error("want error, got nil")
			}
		})
	}
}
// TestDecoder_Reset round-trips a file through the encoder, then decodes it
// repeatedly via DecodeAll, Reset+io.Copy (WriterTo path), and Reset+ReadAll
// (plain Read path), checking each pass reproduces the original bytes.
func TestDecoder_Reset(t *testing.T) {
	in, err := os.ReadFile("testdata/z000028")
	if err != nil {
		t.Fatal(err)
	}
	in = append(in, in...)
	var e Encoder
	start := time.Now()
	dst := e.EncodeAll(in, nil)
	t.Log("Simple Encoder len", len(in), "-> zstd len", len(dst))
	mbpersec := (float64(len(in)) / (1024 * 1024)) / (float64(time.Since(start)) / (float64(time.Second)))
	t.Logf("Encoded %d bytes with %.2f MB/s", len(in), mbpersec)
	dec, err := NewReader(nil)
	if err != nil {
		t.Fatal(err)
	}
	defer dec.Close()
	decoded, err := dec.DecodeAll(dst, nil)
	if err != nil {
		t.Error(err, len(decoded))
	}
	if !bytes.Equal(decoded, in) {
		t.Logf("size = %d, got = %d", len(decoded), len(in))
		t.Fatal("Decoded does not match")
	}
	t.Log("Encoded content matched")
	// Decode using reset+copy
	for i := 0; i < 3; i++ {
		err = dec.Reset(bytes.NewBuffer(dst))
		if err != nil {
			t.Fatal(err)
		}
		var dBuf bytes.Buffer
		n, err := io.Copy(&dBuf, dec)
		if err != nil {
			t.Fatal(err)
		}
		decoded = dBuf.Bytes()
		if int(n) != len(decoded) {
			t.Fatalf("decoded reported length mismatch %d != %d", n, len(decoded))
		}
		if !bytes.Equal(decoded, in) {
			// Dump both sides for offline diffing before failing.
			os.WriteFile("testdata/"+t.Name()+"-z000028.got", decoded, os.ModePerm)
			os.WriteFile("testdata/"+t.Name()+"-z000028.want", in, os.ModePerm)
			t.Fatal("Decoded does not match")
		}
	}
	// Test without WriterTo interface support.
	for i := 0; i < 3; i++ {
		err = dec.Reset(bytes.NewBuffer(dst))
		if err != nil {
			t.Fatal(err)
		}
		decoded, err := io.ReadAll(io.NopCloser(dec))
		if err != nil {
			t.Fatal(err)
		}
		if !bytes.Equal(decoded, in) {
			os.WriteFile("testdata/"+t.Name()+"-z000028.got", decoded, os.ModePerm)
			os.WriteFile("testdata/"+t.Name()+"-z000028.want", in, os.ModePerm)
			t.Fatal("Decoded does not match")
		}
	}
}
// TestDecoderMultiFrame concatenates each sample with itself to create
// multi-frame streams and checks that two consecutive Reset+ReadAll passes
// produce identical output.
func TestDecoderMultiFrame(t *testing.T) {
	zr := testCreateZipReader("testdata/benchdecoder.zip", t)
	dec, err := NewReader(nil)
	if err != nil {
		t.Fatal(err)
		return
	}
	defer dec.Close()
	for _, tt := range zr.File {
		if !strings.HasSuffix(tt.Name, ".zst") {
			continue
		}
		t.Run(tt.Name, func(t *testing.T) {
			r, err := tt.Open()
			if err != nil {
				t.Fatal(err)
			}
			defer r.Close()
			in, err := io.ReadAll(r)
			if err != nil {
				t.Fatal(err)
			}
			// 2x
			in = append(in, in...)
			if !testing.Short() {
				// 4x
				in = append(in, in...)
				// 8x
				in = append(in, in...)
			}
			err = dec.Reset(bytes.NewBuffer(in))
			if err != nil {
				t.Fatal(err)
			}
			got, err := io.ReadAll(dec)
			if err != nil {
				t.Fatal(err)
			}
			err = dec.Reset(bytes.NewBuffer(in))
			if err != nil {
				t.Fatal(err)
			}
			got2, err := io.ReadAll(dec)
			if err != nil {
				t.Fatal(err)
			}
			if !bytes.Equal(got, got2) {
				t.Error("results mismatch")
			}
		})
	}
}
// TestDecoderMultiFrameReset checks that resetting a decoder mid-stream —
// after reading a random partial amount — does not corrupt a subsequent
// full decode of the same multi-frame input.
func TestDecoderMultiFrameReset(t *testing.T) {
	zr := testCreateZipReader("testdata/benchdecoder.zip", t)
	dec, err := NewReader(nil)
	if err != nil {
		t.Fatal(err)
		return
	}
	// Fixed seed keeps the partial-read sizes reproducible between runs.
	rng := rand.New(rand.NewSource(1337))
	defer dec.Close()
	for _, tt := range zr.File {
		if !strings.HasSuffix(tt.Name, ".zst") {
			continue
		}
		t.Run(tt.Name, func(t *testing.T) {
			r, err := tt.Open()
			if err != nil {
				t.Fatal(err)
			}
			defer r.Close()
			in, err := io.ReadAll(r)
			if err != nil {
				t.Fatal(err)
			}
			// 2x
			in = append(in, in...)
			if !testing.Short() {
				// 4x
				in = append(in, in...)
				// 8x
				in = append(in, in...)
			}
			err = dec.Reset(bytes.NewBuffer(in))
			if err != nil {
				t.Fatal(err)
			}
			got, err := io.ReadAll(dec)
			if err != nil {
				t.Fatal(err)
			}
			err = dec.Reset(bytes.NewBuffer(in))
			if err != nil {
				t.Fatal(err)
			}
			// Read a random number of bytes
			tmp := make([]byte, rng.Intn(len(got)))
			_, err = io.ReadAtLeast(dec, tmp, len(tmp))
			if err != nil {
				t.Fatal(err)
			}
			err = dec.Reset(bytes.NewBuffer(in))
			if err != nil {
				t.Fatal(err)
			}
			got2, err := io.ReadAll(dec)
			if err != nil {
				t.Fatal(err)
			}
			if !bytes.Equal(got, got2) {
				t.Error("results mismatch")
			}
		})
	}
}
// testDecoderFile decodes every .zst entry in the zip archive fn with a
// decoder from newDec and compares against the uncompressed sibling entry.
// It deliberately runs a streaming read and a concurrent DecodeAll on the
// same decoder to verify they do not interfere. On mismatch, large outputs
// are dumped to testdata/ for offline inspection.
func testDecoderFile(t *testing.T, fn string, newDec func() (*Decoder, error)) {
	zr := testCreateZipReader(fn, t)
	// Map uncompressed reference content by the name of its .zst sibling.
	var want = make(map[string][]byte)
	for _, tt := range zr.File {
		if strings.HasSuffix(tt.Name, ".zst") {
			continue
		}
		r, err := tt.Open()
		if err != nil {
			t.Fatal(err)
			return
		}
		want[tt.Name+".zst"], _ = io.ReadAll(r)
	}
	dec, err := newDec()
	if err != nil {
		t.Error(err)
		return
	}
	defer dec.Close()
	for i, tt := range zr.File {
		if !strings.HasSuffix(tt.Name, ".zst") || (testing.Short() && i > 20) {
			continue
		}
		t.Run("Reader-"+tt.Name, func(t *testing.T) {
			defer timeout(10 * time.Second)()
			r, err := tt.Open()
			if err != nil {
				t.Error(err)
				return
			}
			data, err := io.ReadAll(r)
			r.Close()
			if err != nil {
				t.Error(err)
				return
			}
			err = dec.Reset(io.NopCloser(bytes.NewBuffer(data)))
			if err != nil {
				t.Error(err)
				return
			}
			var got []byte
			var gotError error
			var wg sync.WaitGroup
			wg.Add(1)
			// Stream-decode in the background while DecodeAll runs below.
			go func() {
				got, gotError = io.ReadAll(dec)
				wg.Done()
			}()
			// This decode should not interfere with the stream...
			gotDecAll, err := dec.DecodeAll(data, nil)
			if err != nil {
				t.Error(err)
				// A CRC mismatch still produced output worth comparing.
				if err != ErrCRCMismatch {
					wg.Wait()
					return
				}
			}
			wg.Wait()
			if gotError != nil {
				t.Error(gotError, err)
				if err != ErrCRCMismatch {
					return
				}
			}
			wantB := want[tt.Name]
			// compareWith reports true (and fails the test) on mismatch,
			// dumping both buffers to files when they are too big to log.
			compareWith := func(got []byte, displayName, name string) bool {
				if bytes.Equal(wantB, got) {
					return false
				}
				if len(wantB)+len(got) < 1000 {
					t.Logf(" got: %v\nwant: %v", got, wantB)
				} else {
					fileName, _ := filepath.Abs(filepath.Join("testdata", t.Name()+"-want.bin"))
					_ = os.MkdirAll(filepath.Dir(fileName), os.ModePerm)
					err := os.WriteFile(fileName, wantB, os.ModePerm)
					t.Log("Wrote file", fileName, err)
					fileName, _ = filepath.Abs(filepath.Join("testdata", t.Name()+"-"+name+".bin"))
					_ = os.MkdirAll(filepath.Dir(fileName), os.ModePerm)
					err = os.WriteFile(fileName, got, os.ModePerm)
					t.Log("Wrote file", fileName, err)
				}
				t.Logf("Length, want: %d, got: %d", len(wantB), len(got))
				t.Errorf("%s mismatch", displayName)
				return true
			}
			if compareWith(got, "Output", "got") {
				return
			}
			if compareWith(gotDecAll, "DecodeAll Output", "decoded") {
				return
			}
			t.Log(len(got), "bytes returned, matches input, ok!")
		})
	}
}
// testDecoderFileBad decodes every entry in the zip archive fn and requires
// each to fail. errMap is a cross-call cache: the first invocation records
// each file's error message, and later invocations must reproduce it exactly.
func testDecoderFileBad(t *testing.T, fn string, newDec func() (*Decoder, error), errMap map[string]string) {
	zr := testCreateZipReader(fn, t)
	// Collected but unused for corrupt inputs; kept for parity with
	// testDecoderFile's setup.
	var want = make(map[string][]byte)
	for _, tt := range zr.File {
		if strings.HasSuffix(tt.Name, ".zst") {
			continue
		}
		r, err := tt.Open()
		if err != nil {
			t.Fatal(err)
			return
		}
		want[tt.Name+".zst"], _ = io.ReadAll(r)
	}
	dec, err := newDec()
	if err != nil {
		t.Error(err)
		return
	}
	defer dec.Close()
	for _, tt := range zr.File {
		t.Run(tt.Name, func(t *testing.T) {
			defer timeout(10 * time.Second)()
			r, err := tt.Open()
			if err != nil {
				t.Error(err)
				return
			}
			defer r.Close()
			err = dec.Reset(r)
			if err != nil {
				t.Error(err)
				return
			}
			got, err := io.ReadAll(dec)
			// Only files named "badsum" are allowed to fail with a CRC error.
			if err == ErrCRCMismatch && !strings.Contains(tt.Name, "badsum") {
				t.Error(err)
				return
			}
			if err == nil {
				want := errMap[tt.Name]
				if want == "" {
					want = "<error>"
				}
				t.Error("Did not get expected error", want, "- got", len(got), "bytes")
				return
			}
			if errMap[tt.Name] == "" {
				// First run: record the message for later comparisons.
				errMap[tt.Name] = err.Error()
			} else {
				want := errMap[tt.Name]
				if want != err.Error() {
					t.Errorf("error mismatch, prev run got %s, now got %s", want, err.Error())
				}
				return
			}
			t.Log("got error", err)
		})
	}
}
// BenchmarkDecoder_DecoderSmall benchmarks streaming decode of small inputs
// (8x-concatenated samples), comparing the buffered decode path
// (WithDecodeBuffersBelow high) against the unbuffered one (threshold 0).
func BenchmarkDecoder_DecoderSmall(b *testing.B) {
	zr := testCreateZipReader("testdata/benchdecoder.zip", b)
	dec, err := NewReader(nil, WithDecodeBuffersBelow(1<<30))
	if err != nil {
		b.Fatal(err)
		return
	}
	defer dec.Close()
	dec2, err := NewReader(nil, WithDecodeBuffersBelow(0))
	if err != nil {
		b.Fatal(err)
		return
	}
	defer dec2.Close()
	for _, tt := range zr.File {
		if !strings.HasSuffix(tt.Name, ".zst") {
			continue
		}
		b.Run(tt.Name, func(b *testing.B) {
			r, err := tt.Open()
			if err != nil {
				b.Fatal(err)
			}
			defer r.Close()
			in, err := io.ReadAll(r)
			if err != nil {
				b.Fatal(err)
			}
			// 2x
			in = append(in, in...)
			// 4x
			in = append(in, in...)
			// 8x
			in = append(in, in...)
			// One untimed decode establishes the expected output size.
			err = dec.Reset(bytes.NewBuffer(in))
			if err != nil {
				b.Fatal(err)
			}
			got, err := io.ReadAll(dec)
			if err != nil {
				b.Fatal(err)
			}
			b.Run("buffered", func(b *testing.B) {
				b.SetBytes(int64(len(got)))
				b.ReportAllocs()
				b.ResetTimer()
				for i := 0; i < b.N; i++ {
					err = dec.Reset(bytes.NewBuffer(in))
					if err != nil {
						b.Fatal(err)
					}
					n, err := io.Copy(io.Discard, dec)
					if err != nil {
						b.Fatal(err)
					}
					if int(n) != len(got) {
						b.Fatalf("want %d, got %d", len(got), n)
					}
				}
			})
			b.Run("unbuffered", func(b *testing.B) {
				b.SetBytes(int64(len(got)))
				b.ReportAllocs()
				b.ResetTimer()
				for i := 0; i < b.N; i++ {
					err = dec2.Reset(bytes.NewBuffer(in))
					if err != nil {
						b.Fatal(err)
					}
					n, err := io.Copy(io.Discard, dec2)
					if err != nil {
						b.Fatal(err)
					}
					if int(n) != len(got) {
						b.Fatalf("want %d, got %d", len(got), n)
					}
				}
			})
		})
	}
}
// BenchmarkDecoder_DecoderReset measures the cost of Decoder.Reset alone
// (no reads) across stream/buffer modes and concurrency settings.
func BenchmarkDecoder_DecoderReset(b *testing.B) {
	zr := testCreateZipReader("testdata/benchdecoder.zip", b)
	// Used only to produce the reference decode for each input.
	dec, err := NewReader(nil, WithDecodeBuffersBelow(0))
	if err != nil {
		b.Fatal(err)
		return
	}
	defer dec.Close()
	// bench times dec.Reset on a decoder built with opts; want is unused
	// here but keeps the signature parallel to the other benchmarks.
	bench := func(name string, b *testing.B, opts []DOption, in, want []byte) {
		b.Helper()
		buf := newBytesReader(in)
		dec, err := NewReader(nil, opts...)
		if err != nil {
			b.Fatal(err)
			return
		}
		defer dec.Close()
		b.Run(name, func(b *testing.B) {
			b.SetBytes(1)
			b.ReportAllocs()
			b.ResetTimer()
			for i := 0; i < b.N; i++ {
				buf.Reset(in)
				err = dec.Reset(buf)
				if err != nil {
					b.Fatal(err)
				}
			}
		})
	}
	for _, tt := range zr.File {
		if !strings.HasSuffix(tt.Name, ".zst") {
			continue
		}
		b.Run(tt.Name, func(b *testing.B) {
			r, err := tt.Open()
			if err != nil {
				b.Fatal(err)
			}
			defer r.Close()
			in, err := io.ReadAll(r)
			if err != nil {
				b.Fatal(err)
			}
			got, err := dec.DecodeAll(in, nil)
			if err != nil {
				b.Fatal(err)
			}
			// Disable buffers:
			bench("stream", b, []DOption{WithDecodeBuffersBelow(0)}, in, got)
			bench("stream-single", b, []DOption{WithDecodeBuffersBelow(0), WithDecoderConcurrency(1)}, in, got)
			// Force buffers:
			bench("buffer", b, []DOption{WithDecodeBuffersBelow(1 << 30)}, in, got)
			bench("buffer-single", b, []DOption{WithDecodeBuffersBelow(1 << 30), WithDecoderConcurrency(1)}, in, got)
		})
	}
}
// newBytesReader wraps b in a bytesReader, a *bytes.Reader that additionally
// supports Bytes() []byte for access to the unread remainder.
func newBytesReader(b []byte) *bytesReader {
	return &bytesReader{Reader: bytes.NewReader(b), buf: b}
}

// bytesReader augments an embedded *bytes.Reader with the original backing
// slice so the unread tail can be recovered.
type bytesReader struct {
	*bytes.Reader
	buf []byte
}

// Bytes returns the portion of the backing slice that has not been read yet.
// It panics if the reader reports more unread bytes than the slice holds,
// which would indicate the two were reset out of sync.
func (b *bytesReader) Bytes() []byte {
	remaining := b.Reader.Len()
	if remaining > len(b.buf) {
		panic("buffer mismatch")
	}
	return b.buf[len(b.buf)-remaining:]
}

// Reset makes the reader start over at the beginning of data.
func (b *bytesReader) Reset(data []byte) {
	b.Reader.Reset(data)
	b.buf = data
}
// BenchmarkDecoder_DecoderNewNoRead measures the cost of constructing and
// immediately closing a decoder (no reads) across option combinations.
func BenchmarkDecoder_DecoderNewNoRead(b *testing.B) {
	zr := testCreateZipReader("testdata/benchdecoder.zip", b)
	// Used only to produce the reference decode for each input.
	dec, err := NewReader(nil)
	if err != nil {
		b.Fatal(err)
		return
	}
	defer dec.Close()
	// want is unused; it keeps the signature parallel to sibling benchmarks.
	bench := func(name string, b *testing.B, opts []DOption, in, want []byte) {
		b.Helper()
		b.Run(name, func(b *testing.B) {
			buf := newBytesReader(in)
			b.SetBytes(1)
			b.ReportAllocs()
			b.ResetTimer()
			for i := 0; i < b.N; i++ {
				buf.Reset(in)
				dec, err := NewReader(buf, opts...)
				if err != nil {
					b.Fatal(err)
					return
				}
				dec.Close()
			}
		})
	}
	for _, tt := range zr.File {
		if !strings.HasSuffix(tt.Name, ".zst") {
			continue
		}
		b.Run(tt.Name, func(b *testing.B) {
			r, err := tt.Open()
			if err != nil {
				b.Fatal(err)
			}
			defer r.Close()
			in, err := io.ReadAll(r)
			if err != nil {
				b.Fatal(err)
			}
			got, err := dec.DecodeAll(in, nil)
			if err != nil {
				b.Fatal(err)
			}
			// Disable buffers:
			bench("stream", b, []DOption{WithDecodeBuffersBelow(0)}, in, got)
			bench("stream-single", b, []DOption{WithDecodeBuffersBelow(0), WithDecoderConcurrency(1)}, in, got)
			// Force buffers:
			bench("buffer", b, []DOption{WithDecodeBuffersBelow(1 << 30)}, in, got)
			bench("buffer-single", b, []DOption{WithDecodeBuffersBelow(1 << 30), WithDecoderConcurrency(1)}, in, got)
		})
	}
}
// BenchmarkDecoder_DecoderNewSomeRead constructs a decoder per iteration,
// reads 16 MB from it, and reports the approximate heap growth per operation
// for different window sizes and memory modes.
func BenchmarkDecoder_DecoderNewSomeRead(b *testing.B) {
	var buf [1 << 20]byte
	bench := func(name string, b *testing.B, opts []DOption, in *os.File) {
		b.Helper()
		b.Run(name, func(b *testing.B) {
			//b.ReportAllocs()
			b.ResetTimer()
			var heapTotal int64
			var m runtime.MemStats
			for i := 0; i < b.N; i++ {
				runtime.GC()
				runtime.ReadMemStats(&m)
				heapTotal -= int64(m.HeapInuse)
				// Fix: the arguments were swapped (Seek(io.SeekStart, 0));
				// it only behaved correctly because io.SeekStart == 0.
				_, err := in.Seek(0, io.SeekStart)
				if err != nil {
					b.Fatal(err)
				}
				dec, err := NewReader(in, opts...)
				if err != nil {
					b.Fatal(err)
				}
				// Read 16 MB
				_, err = io.CopyBuffer(io.Discard, io.LimitReader(dec, 16<<20), buf[:])
				if err != nil {
					b.Fatal(err)
				}
				runtime.GC()
				runtime.ReadMemStats(&m)
				heapTotal += int64(m.HeapInuse)
				dec.Close()
			}
			b.ReportMetric(float64(heapTotal)/float64(b.N), "b/op")
		})
	}
	files := []string{"testdata/000002.map.win32K.zst", "testdata/000002.map.win1MB.zst", "testdata/000002.map.win8MB.zst"}
	for _, file := range files {
		if !strings.HasSuffix(file, ".zst") {
			continue
		}
		r, err := os.Open(file)
		if err != nil {
			b.Fatal(err)
		}
		defer r.Close()
		b.Run(file, func(b *testing.B) {
			bench("stream-single", b, []DOption{WithDecodeBuffersBelow(0), WithDecoderConcurrency(1)}, r)
			bench("stream-single-himem", b, []DOption{WithDecodeBuffersBelow(0), WithDecoderConcurrency(1), WithDecoderLowmem(false)}, r)
		})
	}
}
// BenchmarkDecoder_DecodeAll benchmarks single-goroutine DecodeAll over each
// sample, reusing the output buffer between iterations.
func BenchmarkDecoder_DecodeAll(b *testing.B) {
	zr := testCreateZipReader("testdata/benchdecoder.zip", b)
	dec, err := NewReader(nil, WithDecoderConcurrency(1))
	if err != nil {
		b.Fatal(err)
		return
	}
	defer dec.Close()
	for _, tt := range zr.File {
		if !strings.HasSuffix(tt.Name, ".zst") {
			continue
		}
		b.Run(tt.Name, func(b *testing.B) {
			r, err := tt.Open()
			if err != nil {
				b.Fatal(err)
			}
			defer r.Close()
			in, err := io.ReadAll(r)
			if err != nil {
				b.Fatal(err)
			}
			// One untimed decode sizes the reusable output buffer.
			got, err := dec.DecodeAll(in, nil)
			if err != nil {
				b.Fatal(err)
			}
			b.SetBytes(int64(len(got)))
			b.ReportAllocs()
			b.ResetTimer()
			for i := 0; i < b.N; i++ {
				_, err = dec.DecodeAll(in, got[:0])
				if err != nil {
					b.Fatal(err)
				}
			}
		})
	}
}
// BenchmarkDecoder_DecodeAllFiles encodes every file in ../testdata/ at each
// encoder level and benchmarks single-goroutine DecodeAll on the result,
// reporting compressed size as a percentage of the input.
func BenchmarkDecoder_DecodeAllFiles(b *testing.B) {
	err := filepath.Walk("../testdata/", func(path string, info os.FileInfo, err error) error {
		// Fix: info is nil when err != nil, so the IsDir/Size calls below
		// would panic on a walk error; propagate it instead.
		if err != nil {
			return err
		}
		if info.IsDir() || info.Size() < 100 {
			return nil
		}
		b.Run(filepath.Base(path), func(b *testing.B) {
			raw, err := os.ReadFile(path)
			if err != nil {
				b.Error(err)
			}
			for i := SpeedFastest; i <= SpeedBestCompression; i++ {
				if testing.Short() && i > SpeedFastest {
					break
				}
				b.Run(i.String(), func(b *testing.B) {
					enc, err := NewWriter(nil, WithEncoderLevel(i), WithSingleSegment(true))
					if err != nil {
						b.Error(err)
					}
					// EncodeAll does not return an error; the previous code
					// re-checked the stale err from NewWriter here.
					encoded := enc.EncodeAll(raw, nil)
					dec, err := NewReader(nil, WithDecoderConcurrency(1))
					if err != nil {
						b.Error(err)
					}
					// One untimed decode sizes the reusable output buffer.
					decoded, err := dec.DecodeAll(encoded, nil)
					if err != nil {
						b.Error(err)
					}
					b.SetBytes(int64(len(raw)))
					b.ReportAllocs()
					b.ResetTimer()
					for i := 0; i < b.N; i++ {
						decoded, err = dec.DecodeAll(encoded, decoded[:0])
						if err != nil {
							b.Error(err)
						}
					}
					b.ReportMetric(100*float64(len(encoded))/float64(len(raw)), "pct")
				})
			}
		})
		return nil
	})
	// Fix: the Walk return value was previously discarded.
	if err != nil {
		b.Fatal(err)
	}
}
// BenchmarkDecoder_DecodeAllFilesP is the parallel variant of
// BenchmarkDecoder_DecodeAllFiles: it runs DecodeAll from RunParallel with a
// per-goroutine output buffer against a shared concurrent decoder.
func BenchmarkDecoder_DecodeAllFilesP(b *testing.B) {
	err := filepath.Walk("../testdata/", func(path string, info os.FileInfo, err error) error {
		// Fix: info is nil when err != nil, so the IsDir/Size calls below
		// would panic on a walk error; propagate it instead.
		if err != nil {
			return err
		}
		if info.IsDir() || info.Size() < 100 {
			return nil
		}
		b.Run(filepath.Base(path), func(b *testing.B) {
			raw, err := os.ReadFile(path)
			if err != nil {
				b.Error(err)
			}
			for i := SpeedFastest; i <= SpeedBestCompression; i++ {
				if testing.Short() && i > SpeedFastest {
					break
				}
				b.Run(i.String(), func(b *testing.B) {
					enc, err := NewWriter(nil, WithEncoderLevel(i), WithSingleSegment(true))
					if err != nil {
						b.Error(err)
					}
					// EncodeAll does not return an error; the previous code
					// re-checked the stale err from NewWriter here.
					encoded := enc.EncodeAll(raw, nil)
					dec, err := NewReader(nil, WithDecoderConcurrency(0))
					if err != nil {
						b.Error(err)
					}
					// Renamed from the confusingly shadowed "raw"; the
					// decoded output equals the input after a round trip.
					decoded, err := dec.DecodeAll(encoded, nil)
					if err != nil {
						b.Error(err)
					}
					b.SetBytes(int64(len(decoded)))
					b.ReportAllocs()
					b.ResetTimer()
					b.RunParallel(func(pb *testing.PB) {
						buf := make([]byte, cap(decoded))
						var err error
						for pb.Next() {
							buf, err = dec.DecodeAll(encoded, buf[:0])
							if err != nil {
								b.Error(err)
							}
						}
					})
					b.ReportMetric(100*float64(len(encoded))/float64(len(decoded)), "pct")
				})
			}
		})
		return nil
	})
	// Fix: the Walk return value was previously discarded.
	if err != nil {
		b.Fatal(err)
	}
}
// BenchmarkDecoder_DecodeAllParallel benchmarks concurrent DecodeAll calls on
// a single shared decoder, one output buffer per goroutine, and reports the
// compression ratio of each sample.
func BenchmarkDecoder_DecodeAllParallel(b *testing.B) {
	zr := testCreateZipReader("testdata/benchdecoder.zip", b)
	dec, err := NewReader(nil, WithDecoderConcurrency(runtime.GOMAXPROCS(0)))
	if err != nil {
		b.Fatal(err)
		return
	}
	defer dec.Close()
	for _, tt := range zr.File {
		if !strings.HasSuffix(tt.Name, ".zst") {
			continue
		}
		b.Run(tt.Name, func(b *testing.B) {
			r, err := tt.Open()
			if err != nil {
				b.Fatal(err)
			}
			defer r.Close()
			in, err := io.ReadAll(r)
			if err != nil {
				b.Fatal(err)
			}
			// One untimed decode establishes the output size for SetBytes.
			got, err := dec.DecodeAll(in, nil)
			if err != nil {
				b.Fatal(err)
			}
			b.SetBytes(int64(len(got)))
			b.ReportAllocs()
			b.ResetTimer()
			b.RunParallel(func(pb *testing.PB) {
				// Per-goroutine buffer; shadows the outer got deliberately.
				got := make([]byte, cap(got))
				for pb.Next() {
					_, err = dec.DecodeAll(in, got[:0])
					if err != nil {
						b.Fatal(err)
					}
				}
			})
			b.ReportMetric(100*float64(len(in))/float64(len(got)), "pct")
		})
	}
}
// benchmarkDecoderWithFile benchmarks streaming decompression of the zstd
// file at path using several decoder configurations (low/high memory,
// single/multi-threaded, io.Reader vs io.WriterTo paths).
// The file is decoded once up front to learn the uncompressed size n,
// which is used for SetBytes and CopyN limits. Skips if path is missing.
func benchmarkDecoderWithFile(path string, b *testing.B) {
	_, err := os.Stat(path)
	if err != nil {
		if os.IsNotExist(err) {
			b.Skipf("Missing %s", path)
			return
		}
		b.Fatal(err)
	}
	data, err := os.ReadFile(path)
	if err != nil {
		b.Fatal(err)
	}
	// Probe decode to determine the decompressed size.
	dec, err := NewReader(bytes.NewBuffer(data), WithDecoderLowmem(false), WithDecoderConcurrency(1))
	if err != nil {
		b.Fatal(err)
	}
	n, err := io.Copy(io.Discard, dec)
	if err != nil {
		b.Fatal(err)
	}
	// Release the probe decoder; it was previously leaked.
	dec.Close()

	b.Run("multithreaded-writer", func(b *testing.B) {
		dec, err := NewReader(nil, WithDecoderLowmem(true))
		if err != nil {
			b.Fatal(err)
		}
		defer dec.Close()
		b.SetBytes(n)
		b.ReportAllocs()
		b.ResetTimer()
		for i := 0; i < b.N; i++ {
			err = dec.Reset(bytes.NewBuffer(data))
			if err != nil {
				b.Fatal(err)
			}
			_, err := io.CopyN(io.Discard, dec, n)
			if err != nil {
				b.Fatal(err)
			}
		}
	})

	b.Run("multithreaded-writer-himem", func(b *testing.B) {
		dec, err := NewReader(nil, WithDecoderLowmem(false))
		if err != nil {
			b.Fatal(err)
		}
		defer dec.Close()
		b.SetBytes(n)
		b.ReportAllocs()
		b.ResetTimer()
		for i := 0; i < b.N; i++ {
			err = dec.Reset(bytes.NewBuffer(data))
			if err != nil {
				b.Fatal(err)
			}
			_, err := io.CopyN(io.Discard, dec, n)
			if err != nil {
				b.Fatal(err)
			}
		}
	})

	b.Run("singlethreaded-writer", func(b *testing.B) {
		dec, err := NewReader(nil, WithDecoderConcurrency(1), WithDecoderLowmem(true))
		if err != nil {
			b.Fatal(err)
		}
		defer dec.Close()
		b.SetBytes(n)
		b.ReportAllocs()
		b.ResetTimer()
		for i := 0; i < b.N; i++ {
			err = dec.Reset(bytes.NewBuffer(data))
			if err != nil {
				b.Fatal(err)
			}
			_, err := io.CopyN(io.Discard, dec, n)
			if err != nil {
				b.Fatal(err)
			}
		}
	})

	b.Run("singlethreaded-writerto", func(b *testing.B) {
		dec, err := NewReader(nil, WithDecoderConcurrency(1), WithDecoderLowmem(true))
		if err != nil {
			b.Fatal(err)
		}
		defer dec.Close()
		b.SetBytes(n)
		b.ReportAllocs()
		b.ResetTimer()
		for i := 0; i < b.N; i++ {
			err = dec.Reset(bytes.NewBuffer(data))
			if err != nil {
				b.Fatal(err)
			}
			// io.Copy will use io.WriterTo
			_, err := io.Copy(io.Discard, dec)
			if err != nil {
				b.Fatal(err)
			}
		}
	})

	b.Run("singlethreaded-himem", func(b *testing.B) {
		dec, err := NewReader(nil, WithDecoderConcurrency(1), WithDecoderLowmem(false))
		if err != nil {
			b.Fatal(err)
		}
		defer dec.Close()
		b.SetBytes(n)
		b.ReportAllocs()
		b.ResetTimer()
		for i := 0; i < b.N; i++ {
			err = dec.Reset(bytes.NewBuffer(data))
			if err != nil {
				b.Fatal(err)
			}
			// io.Copy will use io.WriterTo
			_, err := io.Copy(io.Discard, dec)
			if err != nil {
				b.Fatal(err)
			}
		}
	})
}
// BenchmarkDecoderSilesia benchmarks decoding of the Silesia corpus.
// Skipped when testdata/silesia.tar.zst is not present.
func BenchmarkDecoderSilesia(b *testing.B) {
	benchmarkDecoderWithFile("testdata/silesia.tar.zst", b)
}
// BenchmarkDecoderEnwik9 benchmarks decoding of the enwik9 corpus.
// Skipped when testdata/enwik9.zst is not present.
func BenchmarkDecoderEnwik9(b *testing.B) {
	benchmarkDecoderWithFile("testdata/enwik9.zst", b)
}
// BenchmarkDecoderWithCustomFiles benchmarks decoding of user-supplied
// .zst files placed in testdata/benchmark-custom, one sub-benchmark per file.
func BenchmarkDecoderWithCustomFiles(b *testing.B) {
	// Note: closing quote after <pattern> was missing in the original message.
	const info = "To run benchmark on custom .zst files, please place your files in subdirectory 'testdata/benchmark-custom'.\nEach file is tested in a separate benchmark, thus it is possible to select files with the standard command 'go test -bench BenchmarkDecoderWithCustomFiles/<pattern>'."
	const subdir = "testdata/benchmark-custom"
	if _, err := os.Stat(subdir); os.IsNotExist(err) {
		b.Skip(info)
	}
	files, err := filepath.Glob(filepath.Join(subdir, "*.zst"))
	if err != nil {
		// Glob failure is fatal; Error+return would leave the benchmark running.
		b.Fatal(err)
	}
	if len(files) == 0 {
		b.Skip(info)
	}
	for _, path := range files {
		name := filepath.Base(path)
		b.Run(name, func(b *testing.B) { benchmarkDecoderWithFile(path, b) })
	}
}
// testDecoderDecodeAll decodes every .zst entry in the zip file fn using
// dec and compares the result to the uncompressed sibling entry.
// dec is closed asynchronously once all (parallel) subtests finish.
func testDecoderDecodeAll(t *testing.T, fn string, dec *Decoder) {
	zr := testCreateZipReader(fn, t)
	var want = make(map[string][]byte)
	// Build expected outputs from the non-.zst entries, keyed by "<name>.zst".
	for _, tt := range zr.File {
		if strings.HasSuffix(tt.Name, ".zst") {
			continue
		}
		r, err := tt.Open()
		if err != nil {
			t.Fatal(err)
			return
		}
		wantB, err := io.ReadAll(r)
		r.Close()
		if err != nil {
			// Previously discarded; a short read here would surface later
			// as a confusing output mismatch.
			t.Fatal(err)
		}
		want[tt.Name+".zst"] = wantB
	}
	var wg sync.WaitGroup
	for i, tt := range zr.File {
		tt := tt
		if !strings.HasSuffix(tt.Name, ".zst") || (testing.Short() && i > 20) {
			continue
		}
		wg.Add(1)
		t.Run("DecodeAll-"+tt.Name, func(t *testing.T) {
			defer wg.Done()
			t.Parallel()
			r, err := tt.Open()
			if err != nil {
				t.Fatal(err)
			}
			in, err := io.ReadAll(r)
			r.Close()
			if err != nil {
				t.Fatal(err)
			}
			wantB := want[tt.Name]
			// make a buffer that is too small.
			got, err := dec.DecodeAll(in, make([]byte, 10, 200))
			if err != nil {
				t.Error(err)
			}
			if len(got) < 10 {
				t.Fatal("didn't get input back")
			}
			// Strip the 10-byte prefix DecodeAll appended to.
			got = got[10:]
			if !bytes.Equal(wantB, got) {
				if len(wantB)+len(got) < 1000 {
					t.Logf(" got: %v\nwant: %v", got, wantB)
				} else {
					// Too large to log inline; dump both sides for offline diffing.
					fileName, _ := filepath.Abs(filepath.Join("testdata", t.Name()+"-want.bin"))
					_ = os.MkdirAll(filepath.Dir(fileName), os.ModePerm)
					err := os.WriteFile(fileName, wantB, os.ModePerm)
					t.Log("Wrote file", fileName, err)
					fileName, _ = filepath.Abs(filepath.Join("testdata", t.Name()+"-got.bin"))
					_ = os.MkdirAll(filepath.Dir(fileName), os.ModePerm)
					err = os.WriteFile(fileName, got, os.ModePerm)
					t.Log("Wrote file", fileName, err)
				}
				t.Logf("Length, want: %d, got: %d", len(wantB), len(got))
				t.Error("Output mismatch")
				return
			}
			t.Log(len(got), "bytes returned, matches input, ok!")
		})
	}
	// Close the decoder only after every parallel subtest has completed.
	go func() {
		wg.Wait()
		dec.Close()
	}()
}
// testDecoderDecodeAllError decodes every .zst entry in fn into a tiny
// destination buffer and verifies DecodeAll fails with the error recorded
// in errMap. Size-exceeded errors are accepted in place of the recorded
// frame-size/window-size mismatches. dec is closed once all subtests finish.
func testDecoderDecodeAllError(t *testing.T, fn string, dec *Decoder, errMap map[string]string) {
	zr := testCreateZipReader(fn, t)
	var wg sync.WaitGroup
	for _, tt := range zr.File {
		tt := tt
		if !strings.HasSuffix(tt.Name, ".zst") {
			continue
		}
		wg.Add(1)
		t.Run(tt.Name, func(t *testing.T) {
			defer wg.Done()
			r, err := tt.Open()
			if err != nil {
				t.Fatal(err)
			}
			in, err := io.ReadAll(r)
			// Close immediately; the reader was previously leaked.
			r.Close()
			if err != nil {
				t.Fatal(err)
			}
			// make a buffer that is small.
			got, err := dec.DecodeAll(in, make([]byte, 0, 20))
			if err == nil {
				t.Error("Did not get expected error, got", len(got), "bytes")
				return
			}
			t.Log(err)
			if errMap[tt.Name] == "" {
				t.Error("cannot check error")
			} else {
				want := errMap[tt.Name]
				if want != err.Error() {
					// A size-exceeded error is an acceptable substitute for
					// these two recorded errors.
					if want == ErrFrameSizeMismatch.Error() && err == ErrDecoderSizeExceeded {
						return
					}
					if want == ErrWindowSizeExceeded.Error() && err == ErrDecoderSizeExceeded {
						return
					}
					t.Errorf("error mismatch, prev run got %s, now got %s", want, err.Error())
				}
				return
			}
		})
	}
	// Close the decoder only after every subtest has completed.
	go func() {
		wg.Wait()
		dec.Close()
	}()
}
// Test our predefined tables are correct.
// We don't predefine them, since this also tests our transformations.
// Reference from here: https://github.com/facebook/zstd/blob/ededcfca57366461021c922720878c81a5854a0a/lib/decompress/zstd_decompress_block.c#L234
func TestPredefTables(t *testing.T) {
	initPredefined()
	// x builds a decSymbol in the same argument order as the reference C
	// tables (nextState, nbAddBits, nbBits, baseVal); note newDecSymbol
	// takes them in a different order.
	x := func(nextState uint16, nbAddBits, nbBits uint8, baseVal uint32) decSymbol {
		return newDecSymbol(nbBits, nbAddBits, nextState, baseVal)
	}
	// Compare each generated predefined FSE table against the reference values.
	for i := range fsePredef[:] {
		var want []decSymbol
		switch tableIndex(i) {
		case tableLiteralLengths:
			want = []decSymbol{
				/* nextState, nbAddBits, nbBits, baseVal */
				x(0, 0, 4, 0), x(16, 0, 4, 0),
				x(32, 0, 5, 1), x(0, 0, 5, 3),
				x(0, 0, 5, 4), x(0, 0, 5, 6),
				x(0, 0, 5, 7), x(0, 0, 5, 9),
				x(0, 0, 5, 10), x(0, 0, 5, 12),
				x(0, 0, 6, 14), x(0, 1, 5, 16),
				x(0, 1, 5, 20), x(0, 1, 5, 22),
				x(0, 2, 5, 28), x(0, 3, 5, 32),
				x(0, 4, 5, 48), x(32, 6, 5, 64),
				x(0, 7, 5, 128), x(0, 8, 6, 256),
				x(0, 10, 6, 1024), x(0, 12, 6, 4096),
				x(32, 0, 4, 0), x(0, 0, 4, 1),
				x(0, 0, 5, 2), x(32, 0, 5, 4),
				x(0, 0, 5, 5), x(32, 0, 5, 7),
				x(0, 0, 5, 8), x(32, 0, 5, 10),
				x(0, 0, 5, 11), x(0, 0, 6, 13),
				x(32, 1, 5, 16), x(0, 1, 5, 18),
				x(32, 1, 5, 22), x(0, 2, 5, 24),
				x(32, 3, 5, 32), x(0, 3, 5, 40),
				x(0, 6, 4, 64), x(16, 6, 4, 64),
				x(32, 7, 5, 128), x(0, 9, 6, 512),
				x(0, 11, 6, 2048), x(48, 0, 4, 0),
				x(16, 0, 4, 1), x(32, 0, 5, 2),
				x(32, 0, 5, 3), x(32, 0, 5, 5),
				x(32, 0, 5, 6), x(32, 0, 5, 8),
				x(32, 0, 5, 9), x(32, 0, 5, 11),
				x(32, 0, 5, 12), x(0, 0, 6, 15),
				x(32, 1, 5, 18), x(32, 1, 5, 20),
				x(32, 2, 5, 24), x(32, 2, 5, 28),
				x(32, 3, 5, 40), x(32, 4, 5, 48),
				x(0, 16, 6, 65536), x(0, 15, 6, 32768),
				x(0, 14, 6, 16384), x(0, 13, 6, 8192),
			}
		case tableOffsets:
			want = []decSymbol{
				/* nextState, nbAddBits, nbBits, baseVal */
				x(0, 0, 5, 0), x(0, 6, 4, 61),
				x(0, 9, 5, 509), x(0, 15, 5, 32765),
				x(0, 21, 5, 2097149), x(0, 3, 5, 5),
				x(0, 7, 4, 125), x(0, 12, 5, 4093),
				x(0, 18, 5, 262141), x(0, 23, 5, 8388605),
				x(0, 5, 5, 29), x(0, 8, 4, 253),
				x(0, 14, 5, 16381), x(0, 20, 5, 1048573),
				x(0, 2, 5, 1), x(16, 7, 4, 125),
				x(0, 11, 5, 2045), x(0, 17, 5, 131069),
				x(0, 22, 5, 4194301), x(0, 4, 5, 13),
				x(16, 8, 4, 253), x(0, 13, 5, 8189),
				x(0, 19, 5, 524285), x(0, 1, 5, 1),
				x(16, 6, 4, 61), x(0, 10, 5, 1021),
				x(0, 16, 5, 65533), x(0, 28, 5, 268435453),
				x(0, 27, 5, 134217725), x(0, 26, 5, 67108861),
				x(0, 25, 5, 33554429), x(0, 24, 5, 16777213),
			}
		case tableMatchLengths:
			want = []decSymbol{
				/* nextState, nbAddBits, nbBits, baseVal */
				x(0, 0, 6, 3), x(0, 0, 4, 4),
				x(32, 0, 5, 5), x(0, 0, 5, 6),
				x(0, 0, 5, 8), x(0, 0, 5, 9),
				x(0, 0, 5, 11), x(0, 0, 6, 13),
				x(0, 0, 6, 16), x(0, 0, 6, 19),
				x(0, 0, 6, 22), x(0, 0, 6, 25),
				x(0, 0, 6, 28), x(0, 0, 6, 31),
				x(0, 0, 6, 34), x(0, 1, 6, 37),
				x(0, 1, 6, 41), x(0, 2, 6, 47),
				x(0, 3, 6, 59), x(0, 4, 6, 83),
				x(0, 7, 6, 131), x(0, 9, 6, 515),
				x(16, 0, 4, 4), x(0, 0, 4, 5),
				x(32, 0, 5, 6), x(0, 0, 5, 7),
				x(32, 0, 5, 9), x(0, 0, 5, 10),
				x(0, 0, 6, 12), x(0, 0, 6, 15),
				x(0, 0, 6, 18), x(0, 0, 6, 21),
				x(0, 0, 6, 24), x(0, 0, 6, 27),
				x(0, 0, 6, 30), x(0, 0, 6, 33),
				x(0, 1, 6, 35), x(0, 1, 6, 39),
				x(0, 2, 6, 43), x(0, 3, 6, 51),
				x(0, 4, 6, 67), x(0, 5, 6, 99),
				x(0, 8, 6, 259), x(32, 0, 4, 4),
				x(48, 0, 4, 4), x(16, 0, 4, 5),
				x(32, 0, 5, 7), x(32, 0, 5, 8),
				x(32, 0, 5, 10), x(32, 0, 5, 11),
				x(0, 0, 6, 14), x(0, 0, 6, 17),
				x(0, 0, 6, 20), x(0, 0, 6, 23),
				x(0, 0, 6, 26), x(0, 0, 6, 29),
				x(0, 0, 6, 32), x(0, 16, 6, 65539),
				x(0, 15, 6, 32771), x(0, 14, 6, 16387),
				x(0, 13, 6, 8195), x(0, 12, 6, 4099),
				x(0, 11, 6, 2051), x(0, 10, 6, 1027),
			}
		}
		// Compare only the populated part of the table (1<<actualTableLog entries).
		pre := fsePredef[i]
		got := pre.dt[:1<<pre.actualTableLog]
		if !reflect.DeepEqual(got, want) {
			t.Logf("want: %v", want)
			t.Logf("got : %v", got)
			t.Errorf("Predefined table %d incorrect, len(got) = %d, len(want) = %d", i, len(got), len(want))
		}
	}
}
// TestResetNil verifies reading from a nil-source decoder returns
// ErrDecoderNilInput, and that Reset with a valid (empty) frame recovers it,
// in both orders.
func TestResetNil(t *testing.T) {
	dec, err := NewReader(nil)
	if err != nil {
		t.Fatal(err)
	}
	defer dec.Close()
	_, err = io.ReadAll(dec)
	if err != ErrDecoderNilInput {
		t.Fatalf("Expected ErrDecoderNilInput when decoding from a nil reader, got %v", err)
	}
	// Smallest valid frame: an empty zstd stream.
	emptyZstdBlob := []byte{40, 181, 47, 253, 32, 0, 1, 0, 0}
	if err := dec.Reset(bytes.NewBuffer(emptyZstdBlob)); err != nil {
		// Previously ignored; a Reset failure would make the next check misleading.
		t.Fatal(err)
	}
	result, err := io.ReadAll(dec)
	if err != nil && err != io.EOF {
		t.Fatal(err)
	}
	if len(result) != 0 {
		t.Fatalf("Expected to read 0 bytes, actually read %d", len(result))
	}
	// Reset back to nil. The return value is intentionally not checked;
	// the following read asserts the expected nil-input error.
	dec.Reset(nil)
	_, err = io.ReadAll(dec)
	if err != ErrDecoderNilInput {
		t.Fatalf("Expected ErrDecoderNilInput when decoding from a nil reader, got %v", err)
	}
	if err := dec.Reset(bytes.NewBuffer(emptyZstdBlob)); err != nil {
		t.Fatal(err)
	}
	result, err = io.ReadAll(dec)
	if err != nil && err != io.EOF {
		t.Fatal(err)
	}
	if len(result) != 0 {
		t.Fatalf("Expected to read 0 bytes, actually read %d", len(result))
	}
}
// TestIgnoreChecksum verifies that a frame with a mismatching content
// checksum fails with ErrCRCMismatch by default, and decodes successfully
// when the decoder is created with IgnoreChecksum(true).
func TestIgnoreChecksum(t *testing.T) {
	// zstd file containing text "compress\n" and has an xxhash checksum
	zstdBlob := []byte{0x28, 0xb5, 0x2f, 0xfd, 0x24, 0x09, 0x49, 0x00, 0x00, 'C', 'o', 'm', 'p', 'r', 'e', 's', 's', '\n', 0x79, 0x6e, 0xe0, 0xd2}
	// replace letter 'c' with 'C', so decoding should fail.
	// NOTE(review): the literal above already contains 'C' at index 9, so
	// this write is a no-op kept to document how the blob was corrupted.
	zstdBlob[9] = 'C'
	{
		// Check if the file is indeed incorrect
		dec, err := NewReader(nil)
		if err != nil {
			t.Fatal(err)
		}
		defer dec.Close()
		if err := dec.Reset(bytes.NewBuffer(zstdBlob)); err != nil {
			// Previously ignored; fail fast if Reset itself errors.
			t.Fatal(err)
		}
		_, err = io.ReadAll(dec)
		if err == nil {
			t.Fatal("Expected decoding error")
		}
		if !errors.Is(err, ErrCRCMismatch) {
			t.Fatalf("Expected checksum error, got '%s'", err)
		}
	}
	{
		// Ignore CRC error and decompress the content
		dec, err := NewReader(nil, IgnoreChecksum(true))
		if err != nil {
			t.Fatal(err)
		}
		defer dec.Close()
		if err := dec.Reset(bytes.NewBuffer(zstdBlob)); err != nil {
			t.Fatal(err)
		}
		res, err := io.ReadAll(dec)
		if err != nil {
			t.Fatalf("Unexpected error: '%s'", err)
		}
		want := []byte{'C', 'o', 'm', 'p', 'r', 'e', 's', 's', '\n'}
		if !bytes.Equal(res, want) {
			t.Logf("want: %s", want)
			t.Logf("got: %s", res)
			t.Fatalf("Wrong output")
		}
	}
}
// timeout arms a watchdog: if the returned cancel function has not been
// called within the given duration, all goroutine stacks are dumped and
// the process exits with status 2, assuming a deadlock.
// Under the race detector the watchdog is disabled (tests run much slower).
func timeout(after time.Duration) (cancel func()) {
	if isRaceTest {
		return func() {}
	}
	deadline := time.After(after)
	done := make(chan struct{})
	go func() {
		select {
		case <-done:
			// Cancelled in time; nothing to do.
		case <-deadline:
			stack := make([]byte, 1<<20)
			n := runtime.Stack(stack, true)
			log.Printf("=== Timeout, assuming deadlock ===\n*** goroutine dump...\n%s\n*** end\n", string(stack[:n]))
			os.Exit(2)
		}
	}()
	return func() { close(done) }
}
// TestWithDecodeAllCapLimit verifies that decoders created with
// WithDecodeAllCapLimit fail with ErrDecoderSizeExceeded exactly when the
// destination capacity is one byte short of the decoded size, across a
// matrix of encoder/decoder configurations and payload sizes.
func TestWithDecodeAllCapLimit(t *testing.T) {
	var encs []*Encoder
	var decs []*Decoder
	// Constructor errors were previously discarded (_ error); fail fast instead.
	addEnc := func(e *Encoder, err error) {
		if err != nil {
			t.Fatal(err)
		}
		encs = append(encs, e)
	}
	addDec := func(d *Decoder, err error) {
		if err != nil {
			t.Fatal(err)
		}
		decs = append(decs, d)
	}
	addEnc(NewWriter(nil, WithZeroFrames(true), WithWindowSize(4<<10)))
	addEnc(NewWriter(nil, WithEncoderConcurrency(1), WithWindowSize(4<<10)))
	addEnc(NewWriter(nil, WithZeroFrames(false), WithWindowSize(4<<10)))
	addEnc(NewWriter(nil, WithWindowSize(128<<10)))
	addDec(NewReader(nil, WithDecodeAllCapLimit(true)))
	addDec(NewReader(nil, WithDecodeAllCapLimit(true), WithDecoderConcurrency(1)))
	addDec(NewReader(nil, WithDecodeAllCapLimit(true), WithDecoderLowmem(true)))
	addDec(NewReader(nil, WithDecodeAllCapLimit(true), WithDecoderMaxWindow(128<<10)))
	addDec(NewReader(nil, WithDecodeAllCapLimit(true), WithDecoderMaxMemory(1<<20)))
	// Roughly double the payload size each round: 0, 2, 6, 14, ...
	for sz := 0; sz < 1<<20; sz = (sz + 1) * 2 {
		sz := sz
		t.Run(strconv.Itoa(sz), func(t *testing.T) {
			t.Parallel()
			for ei, enc := range encs {
				for di, dec := range decs {
					t.Run(fmt.Sprintf("e%d:d%d", ei, di), func(t *testing.T) {
						encoded := enc.EncodeAll(make([]byte, sz), nil)
						// Probe capacities sz-1 (must fail) and sz (must succeed).
						for i := sz - 1; i < sz+1; i++ {
							if i < 0 {
								continue
							}
							const existinglen = 5
							got, err := dec.DecodeAll(encoded, make([]byte, existinglen, i+existinglen))
							if i < sz {
								if err != ErrDecoderSizeExceeded {
									t.Errorf("cap: %d, want %v, got %v", i, ErrDecoderSizeExceeded, err)
								}
							} else {
								if err != nil {
									t.Errorf("cap: %d, want %v, got %v", i, nil, err)
									continue
								}
								if len(got) != existinglen+i {
									t.Errorf("cap: %d, want output size %d, got %d", i, existinglen+i, len(got))
								}
							}
						}
					})
				}
			}
		})
	}
}
package zstd
import (
"bytes"
"encoding/binary"
"errors"
"fmt"
"io"
"math"
"sort"
"github.com/klauspost/compress/huff0"
)
// dict is a loaded zstd dictionary: the entropy tables, the initial repeat
// offsets and the raw content used as initial match history.
type dict struct {
	id uint32 // dictionary ID; 0 is invalid (see loadDict)

	litEnc              *huff0.Scratch // Huffman table for literals
	llDec, ofDec, mlDec sequenceDec    // FSE decoders: literal lengths, offsets, match lengths
	offsets             [3]int         // initial repeat offsets
	content             []byte         // raw dictionary content (history)
}
// dictMagic is the 4-byte magic number prefixing every zstd dictionary.
const dictMagic = "\x37\xa4\x30\xec"

// Maximum dictionary size for the reference implementation (1.5.3) is 2 GiB.
const dictMaxLength = 1 << 31
// ID returns the dictionary id or 0 if d is nil.
func (d *dict) ID() uint32 {
	if d != nil {
		return d.id
	}
	return 0
}
// ContentSize returns the dictionary content size or 0 if d is nil.
func (d *dict) ContentSize() int {
	if d != nil {
		return len(d.content)
	}
	return 0
}
// Content returns the dictionary content, or nil if d is nil.
func (d *dict) Content() []byte {
	if d != nil {
		return d.content
	}
	return nil
}
// Offsets returns the initial repeat offsets; zeroes if d is nil.
func (d *dict) Offsets() [3]int {
	if d != nil {
		return d.offsets
	}
	return [3]int{}
}
// LitEncoder returns the literal encoder, or nil if d is nil.
func (d *dict) LitEncoder() *huff0.Scratch {
	if d != nil {
		return d.litEnc
	}
	return nil
}
// Load a dictionary as described in
// https://github.com/facebook/zstd/blob/master/doc/zstd_compression_format.md#dictionary-format
//
// Layout: 4-byte magic, 4-byte little-endian ID, Huffman literal table,
// three FSE tables (offsets, match lengths, literal lengths), three 4-byte
// repeat offsets, then the raw content. Returns io.ErrUnexpectedEOF on
// truncated input and ErrMagicMismatch on a wrong magic number.
func loadDict(b []byte) (*dict, error) {
	// Check static field size.
	if len(b) <= 8+(3*4) {
		return nil, io.ErrUnexpectedEOF
	}
	d := dict{
		llDec: sequenceDec{fse: &fseDecoder{}},
		ofDec: sequenceDec{fse: &fseDecoder{}},
		mlDec: sequenceDec{fse: &fseDecoder{}},
	}
	if string(b[:4]) != dictMagic {
		return nil, ErrMagicMismatch
	}
	d.id = binary.LittleEndian.Uint32(b[4:8])
	if d.id == 0 {
		return nil, errors.New("dictionaries cannot have ID 0")
	}

	// Read literal table; ReadTable returns the remaining bytes.
	var err error
	d.litEnc, b, err = huff0.ReadTable(b[8:], nil)
	if err != nil {
		return nil, fmt.Errorf("loading literal table: %w", err)
	}
	// The dictionary table must be used as-is on every block.
	d.litEnc.Reuse = huff0.ReusePolicyMust

	br := byteReader{
		b:   b,
		off: 0,
	}
	// readDec reads one FSE table from br into dec and transforms it
	// with the extra-bits/baseline table for index i.
	readDec := func(i tableIndex, dec *fseDecoder) error {
		if err := dec.readNCount(&br, uint16(maxTableSymbol[i])); err != nil {
			return err
		}
		if br.overread() {
			return io.ErrUnexpectedEOF
		}
		err = dec.transform(symbolTableX[i])
		if err != nil {
			println("Transform table error:", err)
			return err
		}
		if debugDecoder || debugEncoder {
			println("Read table ok", "symbolLen:", dec.symbolLen)
		}
		// Set decoders as predefined so they aren't reused.
		dec.preDefined = true
		return nil
	}

	// Tables appear in this fixed order in the dictionary format.
	if err := readDec(tableOffsets, d.ofDec.fse); err != nil {
		return nil, err
	}
	if err := readDec(tableMatchLengths, d.mlDec.fse); err != nil {
		return nil, err
	}
	if err := readDec(tableLiteralLengths, d.llDec.fse); err != nil {
		return nil, err
	}
	if br.remain() < 12 {
		return nil, io.ErrUnexpectedEOF
	}
	// Three little-endian 4-byte initial repeat offsets.
	d.offsets[0] = int(br.Uint32())
	br.advance(4)
	d.offsets[1] = int(br.Uint32())
	br.advance(4)
	d.offsets[2] = int(br.Uint32())
	br.advance(4)
	if d.offsets[0] <= 0 || d.offsets[1] <= 0 || d.offsets[2] <= 0 {
		return nil, errors.New("invalid offset in dictionary")
	}
	// Everything that remains is the dictionary content.
	d.content = make([]byte, br.remain())
	copy(d.content, br.unread())
	if d.offsets[0] > len(d.content) || d.offsets[1] > len(d.content) || d.offsets[2] > len(d.content) {
		return nil, fmt.Errorf("initial offset bigger than dictionary content size %d, offsets: %v", len(d.content), d.offsets)
	}
	return &d, nil
}
// InspectDictionary loads a zstd dictionary and provides functions to inspect the content.
func InspectDictionary(b []byte) (interface {
	ID() uint32
	ContentSize() int
	Content() []byte
	Offsets() [3]int
	LitEncoder() *huff0.Scratch
}, error) {
	initPredefined()
	return loadDict(b)
}
// BuildDictOptions holds the inputs and settings for BuildDict.
type BuildDictOptions struct {
	// Dictionary ID.
	ID uint32

	// Content to use to create dictionary tables.
	Contents [][]byte

	// History to use for all blocks.
	History []byte

	// Offsets to use.
	Offsets [3]int

	// CompatV155 will make the dictionary compatible with Zstd v1.5.5 and earlier.
	// See https://github.com/facebook/zstd/issues/3724
	CompatV155 bool

	// Use the specified encoder level.
	// The dictionary will be built using the specified encoder level,
	// which will reflect speed and make the dictionary tailored for that level.
	// If not set SpeedBestCompression will be used.
	Level EncoderLevel

	// DebugOut will write stats and other details here if set.
	DebugOut io.Writer
}
// BuildDict builds a zstd dictionary from o.History (the content/history
// part) and o.Contents (samples used to derive the entropy tables and
// repeat offsets). It returns the serialized dictionary bytes, in the
// format consumed by loadDict.
func BuildDict(o BuildDictOptions) ([]byte, error) {
	initPredefined()
	hist := o.History
	contents := o.Contents
	debug := o.DebugOut != nil
	// Debug print helpers; these intentionally shadow the builtins and
	// only emit output when a DebugOut writer was supplied.
	println := func(args ...interface{}) {
		if o.DebugOut != nil {
			fmt.Fprintln(o.DebugOut, args...)
		}
	}
	printf := func(s string, args ...interface{}) {
		if o.DebugOut != nil {
			fmt.Fprintf(o.DebugOut, s, args...)
		}
	}
	print := func(args ...interface{}) {
		if o.DebugOut != nil {
			fmt.Fprint(o.DebugOut, args...)
		}
	}

	// Validate inputs.
	if int64(len(hist)) > dictMaxLength {
		return nil, fmt.Errorf("dictionary of size %d > %d", len(hist), int64(dictMaxLength))
	}
	if len(hist) < 8 {
		return nil, fmt.Errorf("dictionary of size %d < %d", len(hist), 8)
	}
	if len(contents) == 0 {
		return nil, errors.New("no content provided")
	}
	d := dict{
		id:      o.ID,
		litEnc:  nil,
		llDec:   sequenceDec{},
		ofDec:   sequenceDec{},
		mlDec:   sequenceDec{},
		offsets: o.Offsets,
		content: hist,
	}
	block := blockEnc{lowMem: false}
	block.init()
	// Default to the "best" encoder; replaced below if a level is set.
	enc := encoder(&bestFastEncoder{fastBase: fastBase{maxMatchOff: int32(maxMatchLen), bufferReset: math.MaxInt32 - int32(maxMatchLen*2), lowMem: false}})
	if o.Level != 0 {
		eOpts := encoderOptions{
			level:      o.Level,
			blockSize:  maxMatchLen,
			windowSize: maxMatchLen,
			dict:       &d,
			lowMem:     false,
		}
		enc = eOpts.encoder()
	} else {
		o.Level = SpeedBestCompression
	}
	// Histograms gathered from encoding the samples:
	// remain = literal bytes, ll/ml/of = sequence code histograms.
	var (
		remain [256]int
		ll     [256]int
		ml     [256]int
		of     [256]int
	)
	addValues := func(dst *[256]int, src []byte) {
		for _, v := range src {
			dst[v]++
		}
	}
	addHist := func(dst *[256]int, src *[256]uint32) {
		for i, v := range src {
			dst[i] += int(v)
		}
	}
	seqs := 0
	nUsed := 0
	litTotal := 0
	newOffsets := make(map[uint32]int, 1000)
	// Encode each sample against the dictionary history and accumulate stats.
	for _, b := range contents {
		block.reset(nil)
		if len(b) < 8 {
			continue
		}
		nUsed++
		enc.Reset(&d, true)
		enc.Encode(&block, b)
		addValues(&remain, block.literals)
		litTotal += len(block.literals)
		if len(block.sequences) == 0 {
			continue
		}
		seqs += len(block.sequences)
		block.genCodes()
		addHist(&ll, block.coders.llEnc.Histogram())
		addHist(&ml, block.coders.mlEnc.Histogram())
		addHist(&of, block.coders.ofEnc.Histogram())
		// Count the first few offsets of each block as candidate
		// initial repeat offsets.
		for i, seq := range block.sequences {
			if i > 3 {
				break
			}
			offset := seq.offset
			if offset == 0 {
				continue
			}
			if int(offset) >= len(o.History) {
				continue
			}
			if offset > 3 {
				newOffsets[offset-3]++
			} else {
				// Values 1-3 are repeat codes referencing the current offsets.
				newOffsets[uint32(o.Offsets[offset-1])]++
			}
		}
	}
	// Find most used offsets.
	var sortedOffsets []uint32
	for k := range newOffsets {
		sortedOffsets = append(sortedOffsets, k)
	}
	sort.Slice(sortedOffsets, func(i, j int) bool {
		a, b := sortedOffsets[i], sortedOffsets[j]
		if a == b {
			// Prefer the longer offset
			return sortedOffsets[i] > sortedOffsets[j]
		}
		return newOffsets[sortedOffsets[i]] > newOffsets[sortedOffsets[j]]
	})
	if len(sortedOffsets) > 3 {
		if debug {
			print("Offsets:")
			for i, v := range sortedOffsets {
				if i > 20 {
					break
				}
				printf("[%d: %d],", v, newOffsets[v])
			}
			println("")
		}
		sortedOffsets = sortedOffsets[:3]
	}
	for i, v := range sortedOffsets {
		o.Offsets[i] = int(v)
	}
	if debug {
		println("New repeat offsets", o.Offsets)
	}
	if nUsed == 0 || seqs == 0 {
		return nil, fmt.Errorf("%d blocks, %d sequences found", nUsed, seqs)
	}
	if debug {
		println("Sequences:", seqs, "Blocks:", nUsed, "Literals:", litTotal)
	}
	if seqs/nUsed < 512 {
		// Use 512 as minimum.
		nUsed = seqs / 512
		if nUsed == 0 {
			nUsed = 1
		}
	}
	// copyHist scales the accumulated histogram src down by nUsed,
	// installs it into dst and serializes the normalized count table.
	copyHist := func(dst *fseEncoder, src *[256]int) ([]byte, error) {
		hist := dst.Histogram()
		var maxSym uint8
		var maxCount int
		var fakeLength int
		for i, v := range src {
			if v > 0 {
				v = v / nUsed
				if v == 0 {
					v = 1
				}
			}
			if v > maxCount {
				maxCount = v
			}
			if v != 0 {
				maxSym = uint8(i)
			}
			fakeLength += v
			hist[i] = uint32(v)
		}

		// Ensure we aren't trying to represent RLE.
		// NOTE(review): this adds a second symbol when a single symbol
		// dominates; the exact index manipulation below is subtle — verify
		// against upstream before changing.
		if maxCount == fakeLength {
			for i := range hist {
				if uint8(i) == maxSym {
					fakeLength++
					maxSym++
					hist[i+1] = 1
					if maxSym > 1 {
						break
					}
				}
				if hist[0] == 0 {
					fakeLength++
					hist[i] = 1
					if maxSym > 1 {
						break
					}
				}
			}
		}

		dst.HistogramFinished(maxSym, maxCount)
		dst.reUsed = false
		dst.useRLE = false
		err := dst.normalizeCount(fakeLength)
		if err != nil {
			return nil, err
		}
		if debug {
			println("RAW:", dst.count[:maxSym+1], "NORM:", dst.norm[:maxSym+1], "LEN:", fakeLength)
		}
		return dst.writeCount(nil)
	}
	if debug {
		print("Literal lengths: ")
	}
	llTable, err := copyHist(block.coders.llEnc, &ll)
	if err != nil {
		return nil, err
	}
	if debug {
		print("Match lengths: ")
	}
	mlTable, err := copyHist(block.coders.mlEnc, &ml)
	if err != nil {
		return nil, err
	}
	if debug {
		print("Offsets: ")
	}
	ofTable, err := copyHist(block.coders.ofEnc, &of)
	if err != nil {
		return nil, err
	}

	// Literal table
	avgSize := litTotal
	if avgSize > huff0.BlockSizeMax/2 {
		avgSize = huff0.BlockSizeMax / 2
	}
	huffBuff := make([]byte, 0, avgSize)
	// Target size
	div := litTotal / avgSize
	if div < 1 {
		div = 1
	}
	if debug {
		println("Huffman weights:")
	}
	// Build a synthetic literal stream mirroring the observed byte
	// frequencies, then train the Huffman table on it.
	for i, n := range remain[:] {
		if n > 0 {
			n = n / div
			// Allow all entries to be represented.
			if n == 0 {
				n = 1
			}
			huffBuff = append(huffBuff, bytes.Repeat([]byte{byte(i)}, n)...)
			if debug {
				printf("[%d: %d], ", i, n)
			}
		}
	}
	if o.CompatV155 && remain[255]/div == 0 {
		huffBuff = append(huffBuff, 255)
	}
	scratch := &huff0.Scratch{TableLog: 11}
	// Retry table construction, adjusting the synthetic stream on
	// incompressible/RLE failures; bail to a generic stream near the limit.
	for tries := 0; tries < 255; tries++ {
		scratch = &huff0.Scratch{TableLog: 11}
		_, _, err = huff0.Compress1X(huffBuff, scratch)
		if err == nil {
			break
		}
		if debug {
			printf("Try %d: Huffman error: %v\n", tries+1, err)
		}
		huffBuff = huffBuff[:0]
		if tries == 250 {
			if debug {
				println("Huffman: Bailing out with predefined table")
			}
			// Bail out.... Just generate something
			huffBuff = append(huffBuff, bytes.Repeat([]byte{255}, 10000)...)
			for i := 0; i < 128; i++ {
				huffBuff = append(huffBuff, byte(i))
			}
			continue
		}
		if errors.Is(err, huff0.ErrIncompressible) {
			// Try truncating least common.
			for i, n := range remain[:] {
				if n > 0 {
					n = n / (div * (i + 1))
					if n > 0 {
						huffBuff = append(huffBuff, bytes.Repeat([]byte{byte(i)}, n)...)
					}
				}
			}
			if o.CompatV155 && len(huffBuff) > 0 && huffBuff[len(huffBuff)-1] != 255 {
				huffBuff = append(huffBuff, 255)
			}
			if len(huffBuff) == 0 {
				huffBuff = append(huffBuff, 0, 255)
			}
		}
		if errors.Is(err, huff0.ErrUseRLE) {
			for i, n := range remain[:] {
				n = n / (div * (i + 1))
				// Allow all entries to be represented.
				if n == 0 {
					n = 1
				}
				huffBuff = append(huffBuff, bytes.Repeat([]byte{byte(i)}, n)...)
			}
		}
	}

	// Serialize: magic, ID, Huffman table, FSE tables (of/ml/ll),
	// repeat offsets, then the raw history. Matches loadDict's layout.
	var out bytes.Buffer
	out.Write([]byte(dictMagic))
	out.Write(binary.LittleEndian.AppendUint32(nil, o.ID))
	out.Write(scratch.OutTable)
	if debug {
		println("huff table:", len(scratch.OutTable), "bytes")
		println("of table:", len(ofTable), "bytes")
		println("ml table:", len(mlTable), "bytes")
		println("ll table:", len(llTable), "bytes")
	}
	out.Write(ofTable)
	out.Write(mlTable)
	out.Write(llTable)
	out.Write(binary.LittleEndian.AppendUint32(nil, uint32(o.Offsets[0])))
	out.Write(binary.LittleEndian.AppendUint32(nil, uint32(o.Offsets[1])))
	out.Write(binary.LittleEndian.AppendUint32(nil, uint32(o.Offsets[2])))
	out.Write(hist)
	if debug {
		// Sanity-check the produced dictionary and report compression
		// savings on the training samples.
		_, err := loadDict(out.Bytes())
		if err != nil {
			panic(err)
		}
		i, err := InspectDictionary(out.Bytes())
		if err != nil {
			panic(err)
		}
		println("ID:", i.ID())
		println("Content size:", i.ContentSize())
		println("Encoder:", i.LitEncoder() != nil)
		println("Offsets:", i.Offsets())
		var totalSize int
		for _, b := range contents {
			totalSize += len(b)
		}

		encWith := func(opts ...EOption) int {
			enc, err := NewWriter(nil, opts...)
			if err != nil {
				panic(err)
			}
			defer enc.Close()
			var dst []byte
			var totalSize int
			for _, b := range contents {
				dst = enc.EncodeAll(b, dst[:0])
				totalSize += len(dst)
			}
			return totalSize
		}
		plain := encWith(WithEncoderLevel(o.Level))
		withDict := encWith(WithEncoderLevel(o.Level), WithEncoderDict(out.Bytes()))
		println("Input size:", totalSize)
		println("Plain Compressed:", plain)
		println("Dict Compressed:", withDict)
		println("Saved:", plain-withDict, (plain-withDict)/len(contents), "bytes per input (rounded down)")
	}
	return out.Bytes(), nil
}
package zstd
import (
"bytes"
"fmt"
"io"
"os"
"strings"
"testing"
"github.com/klauspost/compress/zip"
)
// TestDecoder_SmallDict decodes every .zst sample from the small
// dictionary test set, first into a fresh buffer and then again reusing
// that buffer's capacity.
func TestDecoder_SmallDict(t *testing.T) {
	// All files have CRC
	zr := testCreateZipReader("testdata/dict-tests-small.zip", t)
	dicts := readDicts(t, zr)
	dec, err := NewReader(nil, WithDecoderConcurrency(1), WithDecoderDicts(dicts...))
	if err != nil {
		t.Fatal(err)
		return
	}
	defer dec.Close()
	for _, file := range zr.File {
		if !strings.HasSuffix(file.Name, ".zst") {
			continue
		}
		file := file
		t.Run("decodeall-"+file.Name, func(t *testing.T) {
			rc, err := file.Open()
			if err != nil {
				t.Fatal(err)
			}
			defer rc.Close()
			compressed, err := io.ReadAll(rc)
			if err != nil {
				t.Fatal(err)
			}
			// Decode into a fresh destination...
			out, err := dec.DecodeAll(compressed, nil)
			if err != nil {
				t.Fatal(err)
			}
			// ...then decode again, reusing the previous output.
			if _, err = dec.DecodeAll(compressed, out[:0]); err != nil {
				t.Fatal(err)
			}
		})
	}
}
// TestEncoder_SmallDict encodes every decoded sample with every
// (level, dictionary) encoder combination, verifying that both the
// EncodeAll and streaming paths round-trip, and logging dictionary
// savings for the reference "dictplain"/"dict-1" samples.
func TestEncoder_SmallDict(t *testing.T) {
	// All files have CRC
	zr := testCreateZipReader("testdata/dict-tests-small.zip", t)
	var dicts [][]byte
	var encs []*Encoder
	var noDictEncs []*Encoder
	var encNames []string
	for _, tt := range zr.File {
		if !strings.HasSuffix(tt.Name, ".dict") {
			continue
		}
		func() {
			r, err := tt.Open()
			if err != nil {
				t.Fatal(err)
			}
			defer r.Close()
			in, err := io.ReadAll(r)
			if err != nil {
				t.Fatal(err)
			}
			dicts = append(dicts, in)
			for level := SpeedFastest; level < speedLast; level++ {
				if isRaceTest && level >= SpeedBestCompression {
					break
				}
				enc, err := NewWriter(nil, WithEncoderConcurrency(1), WithEncoderDict(in), WithEncoderLevel(level), WithWindowSize(1<<17))
				if err != nil {
					t.Fatal(err)
				}
				encs = append(encs, enc)
				encNames = append(encNames, fmt.Sprint("level-", level.String(), "-dict-", len(dicts)))
				enc, err = NewWriter(nil, WithEncoderConcurrency(1), WithEncoderLevel(level), WithWindowSize(1<<17))
				if err != nil {
					t.Fatal(err)
				}
				noDictEncs = append(noDictEncs, enc)
			}
		}()
	}
	dec, err := NewReader(nil, WithDecoderConcurrency(1), WithDecoderDicts(dicts...))
	if err != nil {
		t.Fatal(err)
		return
	}
	defer dec.Close()
	for i, tt := range zr.File {
		if testing.Short() && i > 100 {
			break
		}
		if !strings.HasSuffix(tt.Name, ".zst") {
			continue
		}
		r, err := tt.Open()
		if err != nil {
			t.Fatal(err)
		}
		in, err := io.ReadAll(r)
		// Close immediately: a defer here would keep every zip entry
		// open until the whole test function returns.
		r.Close()
		if err != nil {
			t.Fatal(err)
		}
		decoded, err := dec.DecodeAll(in, nil)
		if err != nil {
			t.Fatal(err)
		}
		if testing.Short() && len(decoded) > 1000 {
			continue
		}
		t.Run("encodeall-"+tt.Name, func(t *testing.T) {
			// Attempt to compress with all dicts
			var b []byte
			var tmp []byte
			for i := range encs {
				i := i
				t.Run(encNames[i], func(t *testing.T) {
					b = encs[i].EncodeAll(decoded, b[:0])
					tmp, err = dec.DecodeAll(in, tmp[:0])
					if err != nil {
						t.Fatal(err)
					}
					if !bytes.Equal(tmp, decoded) {
						t.Fatal("output mismatch")
					}
					tmp = noDictEncs[i].EncodeAll(decoded, tmp[:0])
					if strings.Contains(t.Name(), "dictplain") && strings.Contains(t.Name(), "dict-1") {
						t.Log("reference:", len(in), "no dict:", len(tmp), "with dict:", len(b), "SAVED:", len(tmp)-len(b))
						// Check that we reduced this significantly
						if len(b) > 250 {
							t.Error("output was bigger than expected")
						}
					}
				})
			}
		})
		t.Run("stream-"+tt.Name, func(t *testing.T) {
			// Attempt to compress with all dicts
			var tmp []byte
			for i := range encs {
				i := i
				enc := encs[i]
				t.Run(encNames[i], func(t *testing.T) {
					var buf bytes.Buffer
					enc.ResetContentSize(&buf, int64(len(decoded)))
					_, err := enc.Write(decoded)
					if err != nil {
						t.Fatal(err)
					}
					err = enc.Close()
					if err != nil {
						t.Fatal(err)
					}
					tmp, err = dec.DecodeAll(buf.Bytes(), tmp[:0])
					if err != nil {
						t.Fatal(err)
					}
					if !bytes.Equal(tmp, decoded) {
						t.Fatal("output mismatch")
					}
					var buf2 bytes.Buffer
					noDictEncs[i].Reset(&buf2)
					noDictEncs[i].Write(decoded)
					noDictEncs[i].Close()
					if strings.Contains(t.Name(), "dictplain") && strings.Contains(t.Name(), "dict-1") {
						t.Log("reference:", len(in), "no dict:", buf2.Len(), "with dict:", buf.Len(), "SAVED:", buf2.Len()-buf.Len())
						// Check that we reduced this significantly
						if buf.Len() > 250 {
							t.Error("output was bigger than expected")
						}
					}
				})
			}
		})
	}
}
// TestEncoder_SmallDictFresh is like TestEncoder_SmallDict, but constructs
// a fresh dictionary encoder for each subtest (exercising first-use paths)
// instead of reusing one encoder per (level, dictionary) pair.
func TestEncoder_SmallDictFresh(t *testing.T) {
	// All files have CRC
	zr := testCreateZipReader("testdata/dict-tests-small.zip", t)
	var dicts [][]byte
	var encs []func() *Encoder
	var noDictEncs []*Encoder
	var encNames []string
	for _, tt := range zr.File {
		if !strings.HasSuffix(tt.Name, ".dict") {
			continue
		}
		func() {
			r, err := tt.Open()
			if err != nil {
				t.Fatal(err)
			}
			defer r.Close()
			in, err := io.ReadAll(r)
			if err != nil {
				t.Fatal(err)
			}
			dicts = append(dicts, in)
			for level := SpeedFastest; level < speedLast; level++ {
				if isRaceTest && level >= SpeedBestCompression {
					break
				}
				level := level
				// Factory creating a fresh encoder per use.
				encs = append(encs, func() *Encoder {
					enc, err := NewWriter(nil, WithEncoderConcurrency(1), WithEncoderDict(in), WithEncoderLevel(level), WithWindowSize(1<<17))
					if err != nil {
						t.Fatal(err)
					}
					return enc
				})
				encNames = append(encNames, fmt.Sprint("level-", level.String(), "-dict-", len(dicts)))
				enc, err := NewWriter(nil, WithEncoderConcurrency(1), WithEncoderLevel(level), WithWindowSize(1<<17))
				if err != nil {
					t.Fatal(err)
				}
				noDictEncs = append(noDictEncs, enc)
			}
		}()
	}
	dec, err := NewReader(nil, WithDecoderConcurrency(1), WithDecoderDicts(dicts...))
	if err != nil {
		t.Fatal(err)
		return
	}
	defer dec.Close()
	for i, tt := range zr.File {
		if testing.Short() && i > 100 {
			break
		}
		if !strings.HasSuffix(tt.Name, ".zst") {
			continue
		}
		r, err := tt.Open()
		if err != nil {
			t.Fatal(err)
		}
		in, err := io.ReadAll(r)
		// Close immediately: a defer here would keep every zip entry
		// open until the whole test function returns.
		r.Close()
		if err != nil {
			t.Fatal(err)
		}
		decoded, err := dec.DecodeAll(in, nil)
		if err != nil {
			t.Fatal(err)
		}
		if testing.Short() && len(decoded) > 1000 {
			continue
		}
		t.Run("encodeall-"+tt.Name, func(t *testing.T) {
			// Attempt to compress with all dicts
			var b []byte
			var tmp []byte
			for i := range encs {
				i := i
				t.Run(encNames[i], func(t *testing.T) {
					enc := encs[i]()
					defer enc.Close()
					b = enc.EncodeAll(decoded, b[:0])
					tmp, err = dec.DecodeAll(in, tmp[:0])
					if err != nil {
						t.Fatal(err)
					}
					if !bytes.Equal(tmp, decoded) {
						t.Fatal("output mismatch")
					}
					tmp = noDictEncs[i].EncodeAll(decoded, tmp[:0])
					if strings.Contains(t.Name(), "dictplain") && strings.Contains(t.Name(), "dict-1") {
						t.Log("reference:", len(in), "no dict:", len(tmp), "with dict:", len(b), "SAVED:", len(tmp)-len(b))
						// Check that we reduced this significantly
						if len(b) > 250 {
							t.Error("output was bigger than expected")
						}
					}
				})
			}
		})
		t.Run("stream-"+tt.Name, func(t *testing.T) {
			// Attempt to compress with all dicts
			var tmp []byte
			for i := range encs {
				i := i
				t.Run(encNames[i], func(t *testing.T) {
					enc := encs[i]()
					defer enc.Close()
					var buf bytes.Buffer
					enc.ResetContentSize(&buf, int64(len(decoded)))
					_, err := enc.Write(decoded)
					if err != nil {
						t.Fatal(err)
					}
					err = enc.Close()
					if err != nil {
						t.Fatal(err)
					}
					tmp, err = dec.DecodeAll(buf.Bytes(), tmp[:0])
					if err != nil {
						t.Fatal(err)
					}
					if !bytes.Equal(tmp, decoded) {
						t.Fatal("output mismatch")
					}
					var buf2 bytes.Buffer
					noDictEncs[i].Reset(&buf2)
					noDictEncs[i].Write(decoded)
					noDictEncs[i].Close()
					if strings.Contains(t.Name(), "dictplain") && strings.Contains(t.Name(), "dict-1") {
						t.Log("reference:", len(in), "no dict:", buf2.Len(), "with dict:", buf.Len(), "SAVED:", buf2.Len()-buf.Len())
						// Check that we reduced this significantly
						if buf.Len() > 250 {
							t.Error("output was bigger than expected")
						}
					}
				})
			}
		})
	}
}
// benchmarkEncodeAllLimitedBySize benchmarks EncodeAll with dictionaries,
// restricted to inputs whose decoded size is within [lowerLimit, upperLimit].
// An upperLimit <= 0 means no upper bound. Each distinct decoded size is
// benchmarked only once.
func benchmarkEncodeAllLimitedBySize(b *testing.B, lowerLimit int, upperLimit int) {
	zr := testCreateZipReader("testdata/dict-tests-small.zip", b)
	t := testing.TB(b)
	var dicts [][]byte
	var encs []*Encoder
	var encNames []string
	// Load every dictionary and create one encoder per (dict, level) pair.
	for _, tt := range zr.File {
		if !strings.HasSuffix(tt.Name, ".dict") {
			continue
		}
		func() {
			r, err := tt.Open()
			if err != nil {
				t.Fatal(err)
			}
			defer r.Close()
			in, err := io.ReadAll(r)
			if err != nil {
				t.Fatal(err)
			}
			dicts = append(dicts, in)
			for level := SpeedFastest; level < speedLast; level++ {
				enc, err := NewWriter(nil, WithEncoderDict(in), WithEncoderLevel(level))
				if err != nil {
					t.Fatal(err)
				}
				encs = append(encs, enc)
				encNames = append(encNames, fmt.Sprint("level-", level.String(), "-dict-", len(dicts)))
			}
		}()
	}
	// nPer is the number of encoders created per dictionary (one per level).
	const nPer = int(speedLast - SpeedFastest)
	dec, err := NewReader(nil, WithDecoderConcurrency(1), WithDecoderDicts(dicts...))
	if err != nil {
		t.Fatal(err)
	}
	defer dec.Close()
	tested := make(map[int]struct{})
	for j, tt := range zr.File {
		if !strings.HasSuffix(tt.Name, ".zst") {
			continue
		}
		r, err := tt.Open()
		if err != nil {
			t.Fatal(err)
		}
		in, err := io.ReadAll(r)
		// Close immediately; a defer here would keep every archive entry
		// open until the whole benchmark function returns.
		r.Close()
		if err != nil {
			t.Fatal(err)
		}
		decoded, err := dec.DecodeAll(in, nil)
		if err != nil {
			t.Fatal(err)
		}
		// Only test each size once
		if _, ok := tested[len(decoded)]; ok {
			continue
		}
		tested[len(decoded)] = struct{}{}
		if len(decoded) < lowerLimit {
			continue
		}
		if upperLimit > 0 && len(decoded) > upperLimit {
			continue
		}
		for i := range encs {
			// Only do 1 dict (4 encoders) for now.
			if i == nPer-1 {
				break
			}
			// Attempt to compress with all dicts
			encIdx := (i + j*nPer) % len(encs)
			enc := encs[encIdx]
			b.Run(fmt.Sprintf("length-%d-%s", len(decoded), encNames[encIdx]), func(b *testing.B) {
				// Configure the benchmark once, before the workers start.
				// Calling SetBytes/ReportAllocs/ResetTimer from inside the
				// RunParallel body would race between worker goroutines.
				b.SetBytes(int64(len(decoded)))
				b.ReportAllocs()
				b.ResetTimer()
				b.RunParallel(func(pb *testing.PB) {
					dst := make([]byte, 0, len(decoded)+10)
					for pb.Next() {
						dst = enc.EncodeAll(decoded, dst[:0])
					}
				})
			})
		}
	}
}
// BenchmarkEncodeAllDict0_1024 benchmarks dictionary encoding of inputs up to 1 KiB.
func BenchmarkEncodeAllDict0_1024(b *testing.B) {
	benchmarkEncodeAllLimitedBySize(b, 0, 1024)
}
// BenchmarkEncodeAllDict1024_8192 benchmarks dictionary encoding of inputs between 1 KiB and 8 KiB.
func BenchmarkEncodeAllDict1024_8192(b *testing.B) {
	benchmarkEncodeAllLimitedBySize(b, 1024, 8192)
}
// BenchmarkEncodeAllDict8192_16384 benchmarks dictionary encoding of inputs between 8 KiB and 16 KiB.
func BenchmarkEncodeAllDict8192_16384(b *testing.B) {
	benchmarkEncodeAllLimitedBySize(b, 8192, 16384)
}
// BenchmarkEncodeAllDict16384_65536 benchmarks dictionary encoding of inputs between 16 KiB and 64 KiB.
func BenchmarkEncodeAllDict16384_65536(b *testing.B) {
	benchmarkEncodeAllLimitedBySize(b, 16384, 65536)
}
// BenchmarkEncodeAllDict65536_0 benchmarks dictionary encoding of inputs of 64 KiB and above (no upper bound).
func BenchmarkEncodeAllDict65536_0(b *testing.B) {
	benchmarkEncodeAllLimitedBySize(b, 65536, 0)
}
func TestDecoder_MoreDicts(t *testing.T) {
// All files have CRC
// https://files.klauspost.com/compress/zstd-dict-tests.zip
fn := "testdata/zstd-dict-tests.zip"
data, err := os.ReadFile(fn)
if err != nil {
t.Skip("extended dict test not found.")
}
zr, err := zip.NewReader(bytes.NewReader(data), int64(len(data)))
if err != nil {
t.Fatal(err)
}
var dicts [][]byte
for _, tt := range zr.File {
if !strings.HasSuffix(tt.Name, ".dict") {
continue
}
func() {
r, err := tt.Open()
if err != nil {
t.Fatal(err)
}
defer r.Close()
in, err := io.ReadAll(r)
if err != nil {
t.Fatal(err)
}
dicts = append(dicts, in)
}()
}
dec, err := NewReader(nil, WithDecoderConcurrency(1), WithDecoderDicts(dicts...))
if err != nil {
t.Fatal(err)
return
}
defer dec.Close()
for i, tt := range zr.File {
if !strings.HasSuffix(tt.Name, ".zst") {
continue
}
if testing.Short() && i > 50 {
continue
}
t.Run("decodeall-"+tt.Name, func(t *testing.T) {
r, err := tt.Open()
if err != nil {
t.Fatal(err)
}
defer r.Close()
in, err := io.ReadAll(r)
if err != nil {
t.Fatal(err)
}
got, err := dec.DecodeAll(in, nil)
if err != nil {
t.Fatal(err)
}
_, err = dec.DecodeAll(in, got[:0])
if err != nil {
t.Fatal(err)
}
})
}
}
func TestDecoder_MoreDicts2(t *testing.T) {
// All files have CRC
// https://files.klauspost.com/compress/zstd-dict-tests.zip
fn := "testdata/zstd-dict-tests.zip"
data, err := os.ReadFile(fn)
if err != nil {
t.Skip("extended dict test not found.")
}
zr, err := zip.NewReader(bytes.NewReader(data), int64(len(data)))
if err != nil {
t.Fatal(err)
}
var dicts [][]byte
for _, tt := range zr.File {
if !strings.HasSuffix(tt.Name, ".dict") {
continue
}
func() {
r, err := tt.Open()
if err != nil {
t.Fatal(err)
}
defer r.Close()
in, err := io.ReadAll(r)
if err != nil {
t.Fatal(err)
}
dicts = append(dicts, in)
}()
}
dec, err := NewReader(nil, WithDecoderConcurrency(2), WithDecoderDicts(dicts...))
if err != nil {
t.Fatal(err)
return
}
defer dec.Close()
for i, tt := range zr.File {
if !strings.HasSuffix(tt.Name, ".zst") {
continue
}
if testing.Short() && i > 50 {
continue
}
t.Run("decodeall-"+tt.Name, func(t *testing.T) {
r, err := tt.Open()
if err != nil {
t.Fatal(err)
}
defer r.Close()
in, err := io.ReadAll(r)
if err != nil {
t.Fatal(err)
}
got, err := dec.DecodeAll(in, nil)
if err != nil {
t.Fatal(err)
}
_, err = dec.DecodeAll(in, got[:0])
if err != nil {
t.Fatal(err)
}
})
}
}
func readDicts(tb testing.TB, zr *zip.Reader) [][]byte {
var dicts [][]byte
for _, tt := range zr.File {
if !strings.HasSuffix(tt.Name, ".dict") {
continue
}
func() {
r, err := tt.Open()
if err != nil {
tb.Fatal(err)
}
defer r.Close()
in, err := io.ReadAll(r)
if err != nil {
tb.Fatal(err)
}
dicts = append(dicts, in)
}()
}
return dicts
}
// Test decoding of zstd --patch-from output.
func TestDecoderRawDict(t *testing.T) {
	t.Parallel()
	source, err := os.ReadFile("testdata/delta/source.txt")
	if err != nil {
		t.Fatal(err)
	}
	patch, err := os.Open("testdata/delta/target.txt.zst")
	if err != nil {
		t.Fatal(err)
	}
	defer patch.Close()
	// The raw (content-only) dictionary is the patch source file.
	dec, err := NewReader(patch, WithDecoderDictRaw(0, source))
	if err != nil {
		t.Fatal(err)
	}
	got, err := io.ReadAll(dec)
	if err != nil {
		t.Fatal(err)
	}
	want, err := os.ReadFile("testdata/delta/target.txt")
	if err != nil {
		t.Fatal(err)
	}
	if !bytes.Equal(got, want) {
		t.Errorf("mismatch: got %q, wanted %q", got, want)
	}
}
package zstd
import (
"fmt"
"math/bits"
"github.com/klauspost/compress/zstd/internal/xxhash"
)
const (
	// dictShardBits is the log2 of the number of shards that
	// dictionary-backed tables are split into for dirty-tracking
	// (see the *ShardCnt constants in the dict-aware encoders).
	dictShardBits = 6
)
// fastBase holds the state shared by the encoder implementations:
// history buffer, offset bookkeeping, CRC and the current block.
type fastBase struct {
	// cur is the offset at the start of hist
	cur int32
	// maximum offset. Should be at least 2x block size.
	maxMatchOff int32
	// bufferReset is the cur value at which offsets are considered close
	// to wrapping and history/tables must be rebased (see addBlock/resetBase).
	bufferReset int32
	// hist holds recent input; match offsets index into this buffer.
	hist []byte
	// crc accumulates the xxhash checksum of the uncompressed input.
	crc *xxhash.Digest
	// tmp is scratch space used when emitting the CRC.
	tmp [8]byte
	// blk is the block currently being encoded.
	blk *blockEnc
	// lastDictID caches the most recently loaded dictionary ID so the
	// dictionary tables are only rebuilt when the dictionary changes.
	lastDictID uint32
	// lowMem reduces buffer allocations (see ensureHist) at some cost.
	lowMem bool
}
// CRC returns the underlying CRC writer (the xxhash digest of the
// uncompressed input seen so far).
func (e *fastBase) CRC() *xxhash.Digest {
	return e.crc
}
// AppendCRC will append the CRC to the destination slice and return it.
// Sum emits the 8-byte hash big-endian; appending bytes 7..4 in reverse
// yields the low 4 bytes in little-endian order — presumably to match the
// zstd frame content-checksum format (confirm against RFC 8878).
func (e *fastBase) AppendCRC(dst []byte) []byte {
	crc := e.crc.Sum(e.tmp[:0])
	dst = append(dst, crc[7], crc[6], crc[5], crc[4])
	return dst
}
// WindowSize returns the window size of the encoder,
// or a window size small enough to contain the input size, if > 0.
func (e *fastBase) WindowSize(size int64) int32 {
	// Unknown or large inputs get the full window.
	if size <= 0 || size >= int64(e.maxMatchOff) {
		return e.maxMatchOff
	}
	// Round up to the next power of two, but never below 1 KiB.
	w := int32(1) << uint(bits.Len(uint(size)))
	if w < 1024 {
		w = 1024
	}
	return w
}
// Block returns the current block being encoded.
func (e *fastBase) Block() *blockEnc {
	return e.blk
}
// addBlock appends src to the history buffer and returns the offset at
// which src starts within e.hist. If the buffer would overflow, the most
// recent maxMatchOff bytes are slid to the front and e.cur is advanced so
// absolute offsets (position + e.cur) remain valid.
func (e *fastBase) addBlock(src []byte) int32 {
	if debugAsserts && e.cur > e.bufferReset {
		panic(fmt.Sprintf("ecur (%d) > buffer reset (%d)", e.cur, e.bufferReset))
	}
	// check if we have space already
	if len(e.hist)+len(src) > cap(e.hist) {
		if cap(e.hist) == 0 {
			e.ensureHist(len(src))
		} else {
			if cap(e.hist) < int(e.maxMatchOff+maxCompressedBlockSize) {
				panic(fmt.Errorf("unexpected buffer cap %d, want at least %d with window %d", cap(e.hist), e.maxMatchOff+maxCompressedBlockSize, e.maxMatchOff))
			}
			// Move down
			offset := int32(len(e.hist)) - e.maxMatchOff
			copy(e.hist[0:e.maxMatchOff], e.hist[offset:])
			// Rebase cur so earlier absolute offsets stay consistent
			// after the slide.
			e.cur += offset
			e.hist = e.hist[:e.maxMatchOff]
		}
	}
	s := int32(len(e.hist))
	e.hist = append(e.hist, src...)
	return s
}
// ensureHist will ensure that history can keep at least this many bytes.
// The buffer is sized as one window plus slack: a single block of slack in
// lowMem mode (or when the window is small), otherwise a full extra window.
func (e *fastBase) ensureHist(n int) {
	if cap(e.hist) >= n {
		return
	}
	l := e.maxMatchOff
	if (e.lowMem && e.maxMatchOff > maxCompressedBlockSize) || e.maxMatchOff <= maxCompressedBlockSize {
		l += maxCompressedBlockSize
	} else {
		l += e.maxMatchOff
	}
	// Make it at least 1MB.
	if l < 1<<20 && !e.lowMem {
		l = 1 << 20
	}
	// Make it at least the requested size.
	if l < int32(n) {
		l = int32(n)
	}
	// Note: existing contents are discarded; callers only rely on capacity here.
	e.hist = make([]byte, 0, l)
}
// UseBlock will replace the block with the provided one,
// but transfer recent offsets from the previous.
func (e *fastBase) UseBlock(enc *blockEnc) {
	enc.reset(e.blk)
	e.blk = enc
}
// matchlen returns the length of the common prefix of src[s:] and src[t:].
// Under debugAsserts it validates that both positions are non-negative,
// within maxMatchOff of each other, and that s is not too close to the end.
func (e *fastBase) matchlen(s, t int32, src []byte) int32 {
	if debugAsserts {
		if s < 0 {
			err := fmt.Sprintf("s (%d) < 0", s)
			panic(err)
		}
		if t < 0 {
			err := fmt.Sprintf("t (%d) < 0", t)
			panic(err)
		}
		if s-t > e.maxMatchOff {
			err := fmt.Sprintf("s (%d) - t (%d) > maxMatchOff (%d)", s, t, e.maxMatchOff)
			panic(err)
		}
		if len(src)-int(s) > maxCompressedBlockSize {
			panic(fmt.Sprintf("len(src)-s (%d) > maxCompressedBlockSize (%d)", len(src)-int(s), maxCompressedBlockSize))
		}
	}
	return int32(matchLen(src[s:], src[t:]))
}
// resetBase resets the shared encoder state and, when d is non-nil, loads
// the dictionary content into history and transfers its recent offsets and
// literal encoder to the block. Callers (the concrete encoders' Reset
// methods) rebuild their own hash tables afterwards.
func (e *fastBase) resetBase(d *dict, singleBlock bool) {
	if e.blk == nil {
		e.blk = &blockEnc{lowMem: e.lowMem}
		e.blk.init()
	} else {
		e.blk.reset(nil)
	}
	e.blk.initNewEncode()
	if e.crc == nil {
		e.crc = xxhash.New()
	} else {
		e.crc.Reset()
	}
	e.blk.dictLitEnc = nil
	if d != nil {
		// Temporarily force lowMem sizing for single-block encodes so the
		// history allocation stays tight around the dictionary content.
		low := e.lowMem
		if singleBlock {
			e.lowMem = true
		}
		e.ensureHist(d.ContentSize() + maxCompressedBlockSize)
		e.lowMem = low
	}
	// We offset current position so everything will be out of reach.
	// If above reset line, history will be purged.
	if e.cur < e.bufferReset {
		e.cur += e.maxMatchOff + int32(len(e.hist))
	}
	e.hist = e.hist[:0]
	if d != nil {
		// Set offsets (currently not used)
		for i, off := range d.offsets {
			e.blk.recentOffsets[i] = uint32(off)
			e.blk.prevRecentOffsets[i] = e.blk.recentOffsets[i]
		}
		// Transfer litenc.
		e.blk.dictLitEnc = d.litEnc
		e.hist = append(e.hist, d.content...)
	}
}
// Copyright 2019+ Klaus Post. All rights reserved.
// License information can be found in the LICENSE file.
// Based on work by Yann Collet, released under BSD License.
package zstd
import (
"bytes"
"fmt"
"github.com/klauspost/compress"
)
// Hash table sizing for the "best" encoder.
const (
	bestLongTableBits = 22             // Bits used in the long match table
	bestLongTableSize = 1 << bestLongTableBits // Size of the table
	bestLongLen       = 8              // Bytes used for table hash

	// Note: Increasing the short table bits or making the hash shorter
	// can actually lead to compression degradation since it will 'steal' more from the
	// long match table and match offsets are quite big.
	// This greatly depends on the type of input.
	bestShortTableBits = 18             // Bits used in the short match table
	bestShortTableSize = 1 << bestShortTableBits // Size of the table
	bestShortLen       = 4              // Bytes used for table hash
)
// match describes a single match candidate found by the best encoder.
type match struct {
	// offset is the position in src where the matched bytes start.
	offset int32
	// s is the position in src where the match begins.
	s int32
	// length is the number of matching bytes.
	length int32
	// rep is the repeat-offset code: low 2 bits select the repeat slot,
	// bit 4 marks a no-literal repeat. Negative means a regular
	// (non-repeat) match.
	rep int32
	// est is the estimated cost in bits of emitting this match
	// (see estBits); lower is better.
	est int32
}
// highScore is a sentinel cost: a candidate holding this estimate is always
// replaced by any viable match (see the comparison in Encode's improve).
const highScore = maxMatchLen * 8
// estBits will estimate output bits from predefined tables.
// bitsPerByte is the estimated literal cost scaled by 10 bits (1024 = 1
// bit/byte); the result is stored in m.est, lower being better. Candidates
// with no expected gain are neutralized (length 0, est = highScore).
func (m *match) estBits(bitsPerByte int32) {
	mlc := mlCode(uint32(m.length - zstdMinMatch))
	var ofc uint8
	if m.rep < 0 {
		// Regular match: code the actual offset (+3 per the sequence format).
		ofc = ofCode(uint32(m.s-m.offset) + 3)
	} else {
		// Repeat match: code the repeat slot (low 2 bits of rep).
		ofc = ofCode(uint32(m.rep) & 3)
	}
	// Cost, excluding
	ofTT, mlTT := fsePredefEnc[tableOffsets].ct.symbolTT[ofc], fsePredefEnc[tableMatchLengths].ct.symbolTT[mlc]

	// Add cost of match encoding...
	m.est = int32(ofTT.outBits + mlTT.outBits)
	m.est += int32(ofTT.deltaNbBits>>16 + mlTT.deltaNbBits>>16)
	// Subtract savings compared to literal encoding...
	m.est -= (m.length * bitsPerByte) >> 10
	if m.est > 0 {
		// Unlikely gain..
		m.length = 0
		m.est = highScore
	}
}
// bestFastEncoder uses 2 tables, one for short matches (4 bytes) and one for long matches.
// The long match table contains the previous entry with the same hash,
// effectively making it a "chain" of length 2.
// When we find a long match we choose between the two values and select the longest.
// When we find a short match, after checking the long, we check if we can find a long at n+1
// and that it is longer (lazy matching).
type bestFastEncoder struct {
	fastBase
	// table holds short-match candidates (bestShortLen-byte hashes).
	table [bestShortTableSize]prevEntry
	// longTable holds long-match candidates (bestLongLen-byte hashes).
	longTable [bestLongTableSize]prevEntry
	// dictTable/dictLongTable are prebuilt copies of the tables for the
	// current dictionary, copied back in on Reset.
	dictTable     []prevEntry
	dictLongTable []prevEntry
}
// Encode compresses src into blk using exhaustive (best) match search:
// at each position multiple short/long/repeat candidates are scored with
// estBits and the cheapest is emitted. History is kept across calls; src
// is appended to e.hist and all offsets are relative to that buffer.
func (e *bestFastEncoder) Encode(blk *blockEnc, src []byte) {
	const (
		// Input margin is the number of bytes we read (8)
		// and the maximum we will read ahead (4).
		inputMargin            = 8 + 4
		minNonLiteralBlockSize = 16
	)

	// Protect against e.cur wraparound.
	for e.cur >= e.bufferReset-int32(len(e.hist)) {
		if len(e.hist) == 0 {
			e.table = [bestShortTableSize]prevEntry{}
			e.longTable = [bestLongTableSize]prevEntry{}
			e.cur = e.maxMatchOff
			break
		}
		// Shift down everything in the table that isn't already too far away.
		minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff
		for i := range e.table[:] {
			v := e.table[i].offset
			v2 := e.table[i].prev
			if v < minOff {
				v = 0
				v2 = 0
			} else {
				v = v - e.cur + e.maxMatchOff
				if v2 < minOff {
					v2 = 0
				} else {
					v2 = v2 - e.cur + e.maxMatchOff
				}
			}
			e.table[i] = prevEntry{
				offset: v,
				prev:   v2,
			}
		}
		for i := range e.longTable[:] {
			v := e.longTable[i].offset
			v2 := e.longTable[i].prev
			if v < minOff {
				v = 0
				v2 = 0
			} else {
				v = v - e.cur + e.maxMatchOff
				if v2 < minOff {
					v2 = 0
				} else {
					v2 = v2 - e.cur + e.maxMatchOff
				}
			}
			e.longTable[i] = prevEntry{
				offset: v,
				prev:   v2,
			}
		}
		e.cur = e.maxMatchOff
		break
	}

	// Add block to history
	s := e.addBlock(src)
	blk.size = len(src)

	// Check RLE first: a whole-block run is emitted as 1 literal + 1 repeat.
	if len(src) > zstdMinMatch {
		ml := matchLen(src[1:], src)
		if ml == len(src)-1 {
			blk.literals = append(blk.literals, src[0])
			blk.sequences = append(blk.sequences, seq{litLen: 1, matchLen: uint32(len(src)-1) - zstdMinMatch, offset: 1 + 3})
			return
		}
	}

	// Tiny blocks are emitted as raw literals; matching cannot pay off.
	if len(src) < minNonLiteralBlockSize {
		blk.extraLits = len(src)
		blk.literals = blk.literals[:len(src)]
		copy(blk.literals, src)
		return
	}

	// Use this to estimate literal cost.
	// Scaled by 10 bits.
	bitsPerByte := int32((compress.ShannonEntropyBits(src) * 1024) / len(src))
	// Huffman can never go < 1 bit/byte
	if bitsPerByte < 1024 {
		bitsPerByte = 1024
	}

	// Override src
	src = e.hist
	sLimit := int32(len(src)) - inputMargin
	const kSearchStrength = 10

	// nextEmit is where in src the next emitLiteral should start from.
	nextEmit := s

	// Relative offsets
	offset1 := int32(blk.recentOffsets[0])
	offset2 := int32(blk.recentOffsets[1])
	offset3 := int32(blk.recentOffsets[2])

	addLiterals := func(s *seq, until int32) {
		if until == nextEmit {
			return
		}
		blk.literals = append(blk.literals, src[nextEmit:until]...)
		s.litLen = uint32(until - nextEmit)
	}
	if debugEncoder {
		println("recent offsets:", blk.recentOffsets)
	}

encodeLoop:
	for {
		// We allow the encoder to optionally turn off repeat offsets across blocks
		canRepeat := len(blk.sequences) > 2

		if debugAsserts && canRepeat && offset1 == 0 {
			panic("offset0 was 0")
		}

		const goodEnough = 250

		cv := load6432(src, s)

		nextHashL := hashLen(cv, bestLongTableBits, bestLongLen)
		nextHashS := hashLen(cv, bestShortTableBits, bestShortLen)
		candidateL := e.longTable[nextHashL]
		candidateS := e.table[nextHashS]

		// Set m to a match at offset if it looks like that will improve compression.
		improve := func(m *match, offset int32, s int32, first uint32, rep int32) {
			delta := s - offset
			if delta >= e.maxMatchOff || delta <= 0 || load3232(src, offset) != first {
				return
			}
			// Try to quick reject if we already have a long match.
			if m.length > 16 {
				left := len(src) - int(m.s+m.length)
				// If we are too close to the end, keep as is.
				if left <= 0 {
					return
				}
				checkLen := m.length - (s - m.s) - 8
				if left > 2 && checkLen > 4 {
					// Check 4 bytes, 4 bytes from the end of the current match.
					a := load3232(src, offset+checkLen)
					b := load3232(src, s+checkLen)
					if a != b {
						return
					}
				}
			}
			l := 4 + e.matchlen(s+4, offset+4, src)
			if m.rep <= 0 {
				// Extend candidate match backwards as far as possible.
				// Do not extend repeats as we can assume they are optimal
				// and offsets change if s == nextEmit.
				tMin := s - e.maxMatchOff
				if tMin < 0 {
					tMin = 0
				}
				for offset > tMin && s > nextEmit && src[offset-1] == src[s-1] && l < maxMatchLength {
					s--
					offset--
					l++
				}
			}
			if debugAsserts {
				if offset >= s {
					panic(fmt.Sprintf("offset: %d - s:%d - rep: %d - cur :%d - max: %d", offset, s, rep, e.cur, e.maxMatchOff))
				}
				if !bytes.Equal(src[s:s+l], src[offset:offset+l]) {
					panic(fmt.Sprintf("second match mismatch: %v != %v, first: %08x", src[s:s+4], src[offset:offset+4], first))
				}
			}
			cand := match{offset: offset, s: s, length: l, rep: rep}
			cand.estBits(bitsPerByte)
			// Keep the candidate if the current best is the sentinel, or if
			// the candidate is cheaper after accounting for the literals
			// between the two start positions.
			if m.est >= highScore || cand.est-m.est+(cand.s-m.s)*bitsPerByte>>10 < 0 {
				*m = cand
			}
		}

		best := match{s: s, est: highScore}
		improve(&best, candidateL.offset-e.cur, s, uint32(cv), -1)
		improve(&best, candidateL.prev-e.cur, s, uint32(cv), -1)
		improve(&best, candidateS.offset-e.cur, s, uint32(cv), -1)
		improve(&best, candidateS.prev-e.cur, s, uint32(cv), -1)

		if canRepeat && best.length < goodEnough {
			if s == nextEmit {
				// Check repeats straight after a match.
				improve(&best, s-offset2, s, uint32(cv), 1|4)
				improve(&best, s-offset3, s, uint32(cv), 2|4)
				if offset1 > 1 {
					improve(&best, s-(offset1-1), s, uint32(cv), 3|4)
				}
			}

			// If either no match or a non-repeat match, check at + 1
			if best.rep <= 0 {
				cv32 := uint32(cv >> 8)
				spp := s + 1
				improve(&best, spp-offset1, spp, cv32, 1)
				improve(&best, spp-offset2, spp, cv32, 2)
				improve(&best, spp-offset3, spp, cv32, 3)
				if best.rep < 0 {
					cv32 = uint32(cv >> 24)
					spp += 2
					improve(&best, spp-offset1, spp, cv32, 1)
					improve(&best, spp-offset2, spp, cv32, 2)
					improve(&best, spp-offset3, spp, cv32, 3)
				}
			}
		}
		// Load next and check...
		e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: candidateL.offset}
		e.table[nextHashS] = prevEntry{offset: s + e.cur, prev: candidateS.offset}
		index0 := s + 1

		// Look far ahead, unless we have a really long match already...
		if best.length < goodEnough {
			// No match found, move forward on input, no need to check forward...
			if best.length < 4 {
				s += 1 + (s-nextEmit)>>(kSearchStrength-1)
				if s >= sLimit {
					break encodeLoop
				}
				continue
			}

			candidateS = e.table[hashLen(cv>>8, bestShortTableBits, bestShortLen)]
			cv = load6432(src, s+1)
			cv2 := load6432(src, s+2)
			candidateL = e.longTable[hashLen(cv, bestLongTableBits, bestLongLen)]
			candidateL2 := e.longTable[hashLen(cv2, bestLongTableBits, bestLongLen)]

			// Short at s+1
			improve(&best, candidateS.offset-e.cur, s+1, uint32(cv), -1)
			// Long at s+1, s+2
			improve(&best, candidateL.offset-e.cur, s+1, uint32(cv), -1)
			improve(&best, candidateL.prev-e.cur, s+1, uint32(cv), -1)
			improve(&best, candidateL2.offset-e.cur, s+2, uint32(cv2), -1)
			improve(&best, candidateL2.prev-e.cur, s+2, uint32(cv2), -1)
			if false {
				// Short at s+3.
				// Too often worse...
				improve(&best, e.table[hashLen(cv2>>8, bestShortTableBits, bestShortLen)].offset-e.cur, s+3, uint32(cv2>>8), -1)
			}

			// Start check at a fixed offset to allow for a few mismatches.
			// For this compression level 2 yields the best results.
			// We cannot do this if we have already indexed this position.
			const skipBeginning = 2
			if best.s > s-skipBeginning {
				// See if we can find a better match by checking where the current best ends.
				// Use that offset to see if we can find a better full match.
				if sAt := best.s + best.length; sAt < sLimit {
					nextHashL := hashLen(load6432(src, sAt), bestLongTableBits, bestLongLen)
					candidateEnd := e.longTable[nextHashL]

					if off := candidateEnd.offset - e.cur - best.length + skipBeginning; off >= 0 {
						improve(&best, off, best.s+skipBeginning, load3232(src, best.s+skipBeginning), -1)
						if off := candidateEnd.prev - e.cur - best.length + skipBeginning; off >= 0 {
							improve(&best, off, best.s+skipBeginning, load3232(src, best.s+skipBeginning), -1)
						}
					}
				}
			}
		}

		if debugAsserts {
			if best.offset >= best.s {
				panic(fmt.Sprintf("best.offset > s: %d >= %d", best.offset, best.s))
			}
			if best.s < nextEmit {
				panic(fmt.Sprintf("s %d < nextEmit %d", best.s, nextEmit))
			}
			if best.offset < s-e.maxMatchOff {
				panic(fmt.Sprintf("best.offset < s-e.maxMatchOff: %d < %d", best.offset, s-e.maxMatchOff))
			}
			if !bytes.Equal(src[best.s:best.s+best.length], src[best.offset:best.offset+best.length]) {
				panic(fmt.Sprintf("match mismatch: %v != %v", src[best.s:best.s+best.length], src[best.offset:best.offset+best.length]))
			}
		}

		// We have a match, we can store the forward value
		s = best.s
		if best.rep > 0 {
			var seq seq
			seq.matchLen = uint32(best.length - zstdMinMatch)
			addLiterals(&seq, best.s)

			// Repeat. If bit 4 is set, this is a non-lit repeat.
			seq.offset = uint32(best.rep & 3)
			if debugSequences {
				println("repeat sequence", seq, "next s:", best.s, "off:", best.s-best.offset)
			}
			blk.sequences = append(blk.sequences, seq)

			// Index old s + 1 -> s - 1
			s = best.s + best.length
			nextEmit = s

			// Index skipped...
			end := s
			if s > sLimit+4 {
				end = sLimit + 4
			}
			off := index0 + e.cur
			for index0 < end {
				cv0 := load6432(src, index0)
				h0 := hashLen(cv0, bestLongTableBits, bestLongLen)
				h1 := hashLen(cv0, bestShortTableBits, bestShortLen)
				e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset}
				e.table[h1] = prevEntry{offset: off, prev: e.table[h1].offset}
				off++
				index0++
			}

			// Rotate the recent-offset registers according to which repeat
			// slot was used (codes with bit 4 set came from the s==nextEmit
			// checks above).
			switch best.rep {
			case 2, 4 | 1:
				offset1, offset2 = offset2, offset1
			case 3, 4 | 2:
				offset1, offset2, offset3 = offset3, offset1, offset2
			case 4 | 3:
				offset1, offset2, offset3 = offset1-1, offset1, offset2
			}
			if s >= sLimit {
				if debugEncoder {
					println("repeat ended", s, best.length)
				}
				break encodeLoop
			}
			continue
		}

		// A 4-byte match has been found. Update recent offsets.
		// We'll later see if more than 4 bytes.
		t := best.offset
		offset1, offset2, offset3 = s-t, offset1, offset2

		if debugAsserts && s <= t {
			panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
		}

		if debugAsserts && int(offset1) > len(src) {
			panic("invalid offset")
		}

		// Write our sequence
		var seq seq
		l := best.length
		seq.litLen = uint32(s - nextEmit)
		seq.matchLen = uint32(l - zstdMinMatch)
		if seq.litLen > 0 {
			blk.literals = append(blk.literals, src[nextEmit:s]...)
		}
		seq.offset = uint32(s-t) + 3
		s += l
		if debugSequences {
			println("sequence", seq, "next s:", s)
		}
		blk.sequences = append(blk.sequences, seq)
		nextEmit = s

		// Index old s + 1 -> s - 1 or sLimit
		end := s
		if s > sLimit-4 {
			end = sLimit - 4
		}

		off := index0 + e.cur
		for index0 < end {
			cv0 := load6432(src, index0)
			h0 := hashLen(cv0, bestLongTableBits, bestLongLen)
			h1 := hashLen(cv0, bestShortTableBits, bestShortLen)
			e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset}
			e.table[h1] = prevEntry{offset: off, prev: e.table[h1].offset}
			index0++
			off++
		}
		if s >= sLimit {
			break encodeLoop
		}
	}

	// Flush any trailing literals and persist recent offsets for the next block.
	if int(nextEmit) < len(src) {
		blk.literals = append(blk.literals, src[nextEmit:]...)
		blk.extraLits = len(src) - int(nextEmit)
	}
	blk.recentOffsets[0] = uint32(offset1)
	blk.recentOffsets[1] = uint32(offset2)
	blk.recentOffsets[2] = uint32(offset3)
	if debugEncoder {
		println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits)
	}
}
// EncodeNoHist will encode a block with no history and no following blocks.
// Most notable difference is that src will not be copied for history and
// we do not need to check for max match length.
func (e *bestFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) {
	// For the best encoder this simply pre-sizes the history buffer and
	// delegates to Encode (which still copies src via addBlock).
	e.ensureHist(len(src))
	e.Encode(blk, src)
}
// Reset will reset and set a dictionary if not nil.
// The dictionary content is pre-hashed into dictTable/dictLongTable once per
// dictionary ID, and both live tables are restored from those copies.
func (e *bestFastEncoder) Reset(d *dict, singleBlock bool) {
	e.resetBase(d, singleBlock)
	if d == nil {
		return
	}
	// Determine whether the dictionary changed BEFORE touching lastDictID.
	// Previously the short-table rebuild updated lastDictID immediately,
	// which made the long-table condition below evaluate false for a new
	// dictionary (once both tables were allocated), leaving stale long-table
	// entries from the previous dictionary.
	dictChanged := d.id != e.lastDictID

	// Init or copy dict table
	if len(e.dictTable) != len(e.table) || dictChanged {
		if len(e.dictTable) != len(e.table) {
			e.dictTable = make([]prevEntry, len(e.table))
		}
		end := int32(len(d.content)) - 8 + e.maxMatchOff
		for i := e.maxMatchOff; i < end; i += 4 {
			const hashLog = bestShortTableBits

			cv := load6432(d.content, i-e.maxMatchOff)
			nextHash := hashLen(cv, hashLog, bestShortLen)      // 0 -> 4
			nextHash1 := hashLen(cv>>8, hashLog, bestShortLen)  // 1 -> 5
			nextHash2 := hashLen(cv>>16, hashLog, bestShortLen) // 2 -> 6
			nextHash3 := hashLen(cv>>24, hashLog, bestShortLen) // 3 -> 7
			e.dictTable[nextHash] = prevEntry{
				prev:   e.dictTable[nextHash].offset,
				offset: i,
			}
			e.dictTable[nextHash1] = prevEntry{
				prev:   e.dictTable[nextHash1].offset,
				offset: i + 1,
			}
			e.dictTable[nextHash2] = prevEntry{
				prev:   e.dictTable[nextHash2].offset,
				offset: i + 2,
			}
			e.dictTable[nextHash3] = prevEntry{
				prev:   e.dictTable[nextHash3].offset,
				offset: i + 3,
			}
		}
	}

	// Init or copy dict long table
	if len(e.dictLongTable) != len(e.longTable) || dictChanged {
		if len(e.dictLongTable) != len(e.longTable) {
			e.dictLongTable = make([]prevEntry, len(e.longTable))
		}
		if len(d.content) >= 8 {
			cv := load6432(d.content, 0)
			h := hashLen(cv, bestLongTableBits, bestLongLen)
			e.dictLongTable[h] = prevEntry{
				offset: e.maxMatchOff,
				prev:   e.dictLongTable[h].offset,
			}

			end := int32(len(d.content)) - 8 + e.maxMatchOff
			off := 8 // First to read
			for i := e.maxMatchOff + 1; i < end; i++ {
				// Slide one byte into the rolling 8-byte window.
				cv = cv>>8 | (uint64(d.content[off]) << 56)
				h := hashLen(cv, bestLongTableBits, bestLongLen)
				e.dictLongTable[h] = prevEntry{
					offset: i,
					prev:   e.dictLongTable[h].offset,
				}
				off++
			}
		}
	}
	// Record the dictionary only after both tables reflect it.
	e.lastDictID = d.id

	// Reset table to initial state
	copy(e.longTable[:], e.dictLongTable)

	e.cur = e.maxMatchOff
	// Reset table to initial state
	copy(e.table[:], e.dictTable)
}
// Copyright 2019+ Klaus Post. All rights reserved.
// License information can be found in the LICENSE file.
// Based on work by Yann Collet, released under BSD License.
package zstd
import "fmt"
// Hash table sizing for the "better" encoder, including the shard layout
// used by the dictionary-aware variant for dirty-tracking.
const (
	betterLongTableBits = 19                       // Bits used in the long match table
	betterLongTableSize = 1 << betterLongTableBits // Size of the table
	betterLongLen       = 8                        // Bytes used for table hash

	// Note: Increasing the short table bits or making the hash shorter
	// can actually lead to compression degradation since it will 'steal' more from the
	// long match table and match offsets are quite big.
	// This greatly depends on the type of input.
	betterShortTableBits = 13                        // Bits used in the short match table
	betterShortTableSize = 1 << betterShortTableBits // Size of the table
	betterShortLen       = 5                         // Bytes used for table hash

	betterLongTableShardCnt  = 1 << (betterLongTableBits - dictShardBits)    // Number of shards in the table
	betterLongTableShardSize = betterLongTableSize / betterLongTableShardCnt // Size of an individual shard

	betterShortTableShardCnt  = 1 << (betterShortTableBits - dictShardBits)     // Number of shards in the table
	betterShortTableShardSize = betterShortTableSize / betterShortTableShardCnt // Size of an individual shard
)
// prevEntry is a hash table entry holding the current and the previous
// offset that hashed to the same slot — a chain of length 2.
type prevEntry struct {
	offset int32
	prev   int32
}
// betterFastEncoder uses 2 tables, one for short matches (5 bytes) and one for long matches.
// The long match table contains the previous entry with the same hash,
// effectively making it a "chain" of length 2.
// When we find a long match we choose between the two values and select the longest.
// When we find a short match, after checking the long, we check if we can find a long at n+1
// and that it is longer (lazy matching).
type betterFastEncoder struct {
	fastBase
	table     [betterShortTableSize]tableEntry
	longTable [betterLongTableSize]prevEntry
}

// betterFastEncoderDict adds dictionary support to betterFastEncoder.
// The shard-dirty flags track which table shards were modified since the
// dictionary tables were loaded, so Reset can restore only those shards —
// presumably to avoid copying the full tables on every reset (confirm
// against the dict-reset implementation, outside this view).
type betterFastEncoderDict struct {
	betterFastEncoder
	dictTable            []tableEntry
	dictLongTable        []prevEntry
	shortTableShardDirty [betterShortTableShardCnt]bool
	longTableShardDirty  [betterLongTableShardCnt]bool
	allDirty             bool
}
// Encode improves compression...
func (e *betterFastEncoder) Encode(blk *blockEnc, src []byte) {
const (
// Input margin is the number of bytes we read (8)
// and the maximum we will read ahead (2)
inputMargin = 8 + 2
minNonLiteralBlockSize = 16
)
// Protect against e.cur wraparound.
for e.cur >= e.bufferReset-int32(len(e.hist)) {
if len(e.hist) == 0 {
e.table = [betterShortTableSize]tableEntry{}
e.longTable = [betterLongTableSize]prevEntry{}
e.cur = e.maxMatchOff
break
}
// Shift down everything in the table that isn't already too far away.
minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff
for i := range e.table[:] {
v := e.table[i].offset
if v < minOff {
v = 0
} else {
v = v - e.cur + e.maxMatchOff
}
e.table[i].offset = v
}
for i := range e.longTable[:] {
v := e.longTable[i].offset
v2 := e.longTable[i].prev
if v < minOff {
v = 0
v2 = 0
} else {
v = v - e.cur + e.maxMatchOff
if v2 < minOff {
v2 = 0
} else {
v2 = v2 - e.cur + e.maxMatchOff
}
}
e.longTable[i] = prevEntry{
offset: v,
prev: v2,
}
}
e.cur = e.maxMatchOff
break
}
// Add block to history
s := e.addBlock(src)
blk.size = len(src)
// Check RLE first
if len(src) > zstdMinMatch {
ml := matchLen(src[1:], src)
if ml == len(src)-1 {
blk.literals = append(blk.literals, src[0])
blk.sequences = append(blk.sequences, seq{litLen: 1, matchLen: uint32(len(src)-1) - zstdMinMatch, offset: 1 + 3})
return
}
}
if len(src) < minNonLiteralBlockSize {
blk.extraLits = len(src)
blk.literals = blk.literals[:len(src)]
copy(blk.literals, src)
return
}
// Override src
src = e.hist
sLimit := int32(len(src)) - inputMargin
// stepSize is the number of bytes to skip on every main loop iteration.
// It should be >= 1.
const stepSize = 1
const kSearchStrength = 9
// nextEmit is where in src the next emitLiteral should start from.
nextEmit := s
cv := load6432(src, s)
// Relative offsets
offset1 := int32(blk.recentOffsets[0])
offset2 := int32(blk.recentOffsets[1])
addLiterals := func(s *seq, until int32) {
if until == nextEmit {
return
}
blk.literals = append(blk.literals, src[nextEmit:until]...)
s.litLen = uint32(until - nextEmit)
}
if debugEncoder {
println("recent offsets:", blk.recentOffsets)
}
encodeLoop:
for {
var t int32
// We allow the encoder to optionally turn off repeat offsets across blocks
canRepeat := len(blk.sequences) > 2
var matched, index0 int32
for {
if debugAsserts && canRepeat && offset1 == 0 {
panic("offset0 was 0")
}
nextHashL := hashLen(cv, betterLongTableBits, betterLongLen)
nextHashS := hashLen(cv, betterShortTableBits, betterShortLen)
candidateL := e.longTable[nextHashL]
candidateS := e.table[nextHashS]
const repOff = 1
repIndex := s - offset1 + repOff
off := s + e.cur
e.longTable[nextHashL] = prevEntry{offset: off, prev: candidateL.offset}
e.table[nextHashS] = tableEntry{offset: off, val: uint32(cv)}
index0 = s + 1
if canRepeat {
if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) {
// Consider history as well.
var seq seq
length := 4 + e.matchlen(s+4+repOff, repIndex+4, src)
seq.matchLen = uint32(length - zstdMinMatch)
// We might be able to match backwards.
// Extend as long as we can.
start := s + repOff
// We end the search early, so we don't risk 0 literals
// and have to do special offset treatment.
startLimit := nextEmit + 1
tMin := s - e.maxMatchOff
if tMin < 0 {
tMin = 0
}
for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 {
repIndex--
start--
seq.matchLen++
}
addLiterals(&seq, start)
// rep 0
seq.offset = 1
if debugSequences {
println("repeat sequence", seq, "next s:", s)
}
blk.sequences = append(blk.sequences, seq)
// Index match start+1 (long) -> s - 1
index0 := s + repOff
s += length + repOff
nextEmit = s
if s >= sLimit {
if debugEncoder {
println("repeat ended", s, length)
}
break encodeLoop
}
// Index skipped...
for index0 < s-1 {
cv0 := load6432(src, index0)
cv1 := cv0 >> 8
h0 := hashLen(cv0, betterLongTableBits, betterLongLen)
off := index0 + e.cur
e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset}
e.table[hashLen(cv1, betterShortTableBits, betterShortLen)] = tableEntry{offset: off + 1, val: uint32(cv1)}
index0 += 2
}
cv = load6432(src, s)
continue
}
const repOff2 = 1
// We deviate from the reference encoder and also check offset 2.
// Still slower and not much better, so disabled.
// repIndex = s - offset2 + repOff2
if false && repIndex >= 0 && load6432(src, repIndex) == load6432(src, s+repOff) {
// Consider history as well.
var seq seq
length := 8 + e.matchlen(s+8+repOff2, repIndex+8, src)
seq.matchLen = uint32(length - zstdMinMatch)
// We might be able to match backwards.
// Extend as long as we can.
start := s + repOff2
// We end the search early, so we don't risk 0 literals
// and have to do special offset treatment.
startLimit := nextEmit + 1
tMin := s - e.maxMatchOff
if tMin < 0 {
tMin = 0
}
for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 {
repIndex--
start--
seq.matchLen++
}
addLiterals(&seq, start)
// rep 2
seq.offset = 2
if debugSequences {
println("repeat sequence 2", seq, "next s:", s)
}
blk.sequences = append(blk.sequences, seq)
s += length + repOff2
nextEmit = s
if s >= sLimit {
if debugEncoder {
println("repeat ended", s, length)
}
break encodeLoop
}
// Index skipped...
for index0 < s-1 {
cv0 := load6432(src, index0)
cv1 := cv0 >> 8
h0 := hashLen(cv0, betterLongTableBits, betterLongLen)
off := index0 + e.cur
e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset}
e.table[hashLen(cv1, betterShortTableBits, betterShortLen)] = tableEntry{offset: off + 1, val: uint32(cv1)}
index0 += 2
}
cv = load6432(src, s)
// Swap offsets
offset1, offset2 = offset2, offset1
continue
}
}
// Find the offsets of our two matches.
coffsetL := candidateL.offset - e.cur
coffsetLP := candidateL.prev - e.cur
// Check if we have a long match.
if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) {
// Found a long match, at least 8 bytes.
matched = e.matchlen(s+8, coffsetL+8, src) + 8
t = coffsetL
if debugAsserts && s <= t {
panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
}
if debugAsserts && s-t > e.maxMatchOff {
panic("s - t >e.maxMatchOff")
}
if debugMatches {
println("long match")
}
if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) {
// Found a long match, at least 8 bytes.
prevMatch := e.matchlen(s+8, coffsetLP+8, src) + 8
if prevMatch > matched {
matched = prevMatch
t = coffsetLP
}
if debugAsserts && s <= t {
panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
}
if debugAsserts && s-t > e.maxMatchOff {
panic("s - t >e.maxMatchOff")
}
if debugMatches {
println("long match")
}
}
break
}
// Check if we have a long match on prev.
if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) {
// Found a long match, at least 8 bytes.
matched = e.matchlen(s+8, coffsetLP+8, src) + 8
t = coffsetLP
if debugAsserts && s <= t {
panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
}
if debugAsserts && s-t > e.maxMatchOff {
panic("s - t >e.maxMatchOff")
}
if debugMatches {
println("long match")
}
break
}
coffsetS := candidateS.offset - e.cur
// Check if we have a short match.
if s-coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val {
// found a regular match
matched = e.matchlen(s+4, coffsetS+4, src) + 4
// See if we can find a long match at s+1
const checkAt = 1
cv := load6432(src, s+checkAt)
nextHashL = hashLen(cv, betterLongTableBits, betterLongLen)
candidateL = e.longTable[nextHashL]
coffsetL = candidateL.offset - e.cur
// We can store it, since we have at least a 4 byte match.
e.longTable[nextHashL] = prevEntry{offset: s + checkAt + e.cur, prev: candidateL.offset}
if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) {
// Found a long match, at least 8 bytes.
matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8
if matchedNext > matched {
t = coffsetL
s += checkAt
matched = matchedNext
if debugMatches {
println("long match (after short)")
}
break
}
}
// Check prev long...
coffsetL = candidateL.prev - e.cur
if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) {
// Found a long match, at least 8 bytes.
matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8
if matchedNext > matched {
t = coffsetL
s += checkAt
matched = matchedNext
if debugMatches {
println("prev long match (after short)")
}
break
}
}
t = coffsetS
if debugAsserts && s <= t {
panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
}
if debugAsserts && s-t > e.maxMatchOff {
panic("s - t >e.maxMatchOff")
}
if debugAsserts && t < 0 {
panic("t<0")
}
if debugMatches {
println("short match")
}
break
}
// No match found, move forward in input.
s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1))
if s >= sLimit {
break encodeLoop
}
cv = load6432(src, s)
}
// Try to find a better match by searching for a long match at the end of the current best match
if s+matched < sLimit {
// Allow some bytes at the beginning to mismatch.
// Sweet spot is around 3 bytes, but depends on input.
// The skipped bytes are tested in Extend backwards,
// and still picked up as part of the match if they do.
const skipBeginning = 3
nextHashL := hashLen(load6432(src, s+matched), betterLongTableBits, betterLongLen)
s2 := s + skipBeginning
cv := load3232(src, s2)
candidateL := e.longTable[nextHashL]
coffsetL := candidateL.offset - e.cur - matched + skipBeginning
if coffsetL >= 0 && coffsetL < s2 && s2-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) {
// Found a long match, at least 4 bytes.
matchedNext := e.matchlen(s2+4, coffsetL+4, src) + 4
if matchedNext > matched {
t = coffsetL
s = s2
matched = matchedNext
if debugMatches {
println("long match at end-of-match")
}
}
}
// Check prev long...
if true {
coffsetL = candidateL.prev - e.cur - matched + skipBeginning
if coffsetL >= 0 && coffsetL < s2 && s2-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) {
// Found a long match, at least 4 bytes.
matchedNext := e.matchlen(s2+4, coffsetL+4, src) + 4
if matchedNext > matched {
t = coffsetL
s = s2
matched = matchedNext
if debugMatches {
println("prev long match at end-of-match")
}
}
}
}
}
// A match has been found. Update recent offsets.
offset2 = offset1
offset1 = s - t
if debugAsserts && s <= t {
panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
}
if debugAsserts && canRepeat && int(offset1) > len(src) {
panic("invalid offset")
}
// Extend the n-byte match as long as possible.
l := matched
// Extend backwards
tMin := s - e.maxMatchOff
if tMin < 0 {
tMin = 0
}
for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength {
s--
t--
l++
}
// Write our sequence
var seq seq
seq.litLen = uint32(s - nextEmit)
seq.matchLen = uint32(l - zstdMinMatch)
if seq.litLen > 0 {
blk.literals = append(blk.literals, src[nextEmit:s]...)
}
seq.offset = uint32(s-t) + 3
s += l
if debugSequences {
println("sequence", seq, "next s:", s)
}
blk.sequences = append(blk.sequences, seq)
nextEmit = s
if s >= sLimit {
break encodeLoop
}
// Index match start+1 (long) -> s - 1
off := index0 + e.cur
for index0 < s-1 {
cv0 := load6432(src, index0)
cv1 := cv0 >> 8
h0 := hashLen(cv0, betterLongTableBits, betterLongLen)
e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset}
e.table[hashLen(cv1, betterShortTableBits, betterShortLen)] = tableEntry{offset: off + 1, val: uint32(cv1)}
index0 += 2
off += 2
}
cv = load6432(src, s)
if !canRepeat {
continue
}
// Check offset 2
for {
o2 := s - offset2
if load3232(src, o2) != uint32(cv) {
// Do regular search
break
}
// Store this, since we have it.
nextHashL := hashLen(cv, betterLongTableBits, betterLongLen)
nextHashS := hashLen(cv, betterShortTableBits, betterShortLen)
// We have at least 4 byte match.
// No need to check backwards. We come straight from a match
l := 4 + e.matchlen(s+4, o2+4, src)
e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: e.longTable[nextHashL].offset}
e.table[nextHashS] = tableEntry{offset: s + e.cur, val: uint32(cv)}
seq.matchLen = uint32(l) - zstdMinMatch
seq.litLen = 0
// Since litlen is always 0, this is offset 1.
seq.offset = 1
s += l
nextEmit = s
if debugSequences {
println("sequence", seq, "next s:", s)
}
blk.sequences = append(blk.sequences, seq)
// Swap offset 1 and 2.
offset1, offset2 = offset2, offset1
if s >= sLimit {
// Finished
break encodeLoop
}
cv = load6432(src, s)
}
}
if int(nextEmit) < len(src) {
blk.literals = append(blk.literals, src[nextEmit:]...)
blk.extraLits = len(src) - int(nextEmit)
}
blk.recentOffsets[0] = uint32(offset1)
blk.recentOffsets[1] = uint32(offset2)
if debugEncoder {
println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits)
}
}
// EncodeNoHist will encode a block with no history and no following blocks.
// Most notable difference is that src will not be copied for history and
// we do not need to check for max match length.
func (e *betterFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) {
	// Ensure the history buffer can hold src, then delegate to the regular
	// Encode path; this encoder level has no separate no-history fast path.
	e.ensureHist(len(src))
	e.Encode(blk, src)
}
// Encode encodes the block using the "better" match strategy while tracking
// which hash-table shards are modified (via markLongShardDirty /
// markShortShardDirty) so that Reset can cheaply restore dictionary state.
// Matches found are appended to blk as sequences; unmatched bytes are
// appended as literals.
func (e *betterFastEncoderDict) Encode(blk *blockEnc, src []byte) {
	const (
		// Input margin is the number of bytes we read (8)
		// and the maximum we will read ahead (2)
		inputMargin            = 8 + 2
		minNonLiteralBlockSize = 16
	)

	// Protect against e.cur wraparound.
	for e.cur >= e.bufferReset-int32(len(e.hist)) {
		if len(e.hist) == 0 {
			// No history: simply clear both tables and restart offsets.
			for i := range e.table[:] {
				e.table[i] = tableEntry{}
			}
			for i := range e.longTable[:] {
				e.longTable[i] = prevEntry{}
			}
			e.cur = e.maxMatchOff
			e.allDirty = true
			break
		}
		// Shift down everything in the table that isn't already too far away.
		minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff
		for i := range e.table[:] {
			v := e.table[i].offset
			if v < minOff {
				v = 0
			} else {
				v = v - e.cur + e.maxMatchOff
			}
			e.table[i].offset = v
		}
		for i := range e.longTable[:] {
			v := e.longTable[i].offset
			v2 := e.longTable[i].prev
			if v < minOff {
				v = 0
				v2 = 0
			} else {
				v = v - e.cur + e.maxMatchOff
				if v2 < minOff {
					v2 = 0
				} else {
					v2 = v2 - e.cur + e.maxMatchOff
				}
			}
			e.longTable[i] = prevEntry{
				offset: v,
				prev:   v2,
			}
		}
		e.allDirty = true
		e.cur = e.maxMatchOff
		break
	}

	s := e.addBlock(src)
	blk.size = len(src)
	if len(src) < minNonLiteralBlockSize {
		// Too small to be worth matching; emit everything as literals.
		blk.extraLits = len(src)
		blk.literals = blk.literals[:len(src)]
		copy(blk.literals, src)
		return
	}

	// Override src
	src = e.hist
	sLimit := int32(len(src)) - inputMargin
	// stepSize is the number of bytes to skip on every main loop iteration.
	// It should be >= 1.
	const stepSize = 1

	const kSearchStrength = 9

	// nextEmit is where in src the next emitLiteral should start from.
	nextEmit := s
	cv := load6432(src, s)

	// Relative offsets
	offset1 := int32(blk.recentOffsets[0])
	offset2 := int32(blk.recentOffsets[1])

	// addLiterals appends src[nextEmit:until] as literals and records the
	// literal length on the sequence.
	addLiterals := func(s *seq, until int32) {
		if until == nextEmit {
			return
		}
		blk.literals = append(blk.literals, src[nextEmit:until]...)
		s.litLen = uint32(until - nextEmit)
	}
	if debugEncoder {
		println("recent offsets:", blk.recentOffsets)
	}

encodeLoop:
	for {
		var t int32
		// We allow the encoder to optionally turn off repeat offsets across blocks
		canRepeat := len(blk.sequences) > 2
		var matched, index0 int32

		for {
			if debugAsserts && canRepeat && offset1 == 0 {
				panic("offset0 was 0")
			}

			nextHashL := hashLen(cv, betterLongTableBits, betterLongLen)
			nextHashS := hashLen(cv, betterShortTableBits, betterShortLen)
			candidateL := e.longTable[nextHashL]
			candidateS := e.table[nextHashS]

			const repOff = 1
			repIndex := s - offset1 + repOff
			off := s + e.cur
			e.longTable[nextHashL] = prevEntry{offset: off, prev: candidateL.offset}
			e.markLongShardDirty(nextHashL)
			e.table[nextHashS] = tableEntry{offset: off, val: uint32(cv)}
			e.markShortShardDirty(nextHashS)
			index0 = s + 1

			if canRepeat {
				// Try repeat offset 1 first.
				if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) {
					// Consider history as well.
					var seq seq
					length := 4 + e.matchlen(s+4+repOff, repIndex+4, src)

					seq.matchLen = uint32(length - zstdMinMatch)

					// We might be able to match backwards.
					// Extend as long as we can.
					start := s + repOff
					// We end the search early, so we don't risk 0 literals
					// and have to do special offset treatment.
					startLimit := nextEmit + 1

					tMin := s - e.maxMatchOff
					if tMin < 0 {
						tMin = 0
					}
					for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 {
						repIndex--
						start--
						seq.matchLen++
					}
					addLiterals(&seq, start)

					// rep 0
					seq.offset = 1
					if debugSequences {
						println("repeat sequence", seq, "next s:", s)
					}
					blk.sequences = append(blk.sequences, seq)

					// Index match start+1 (long) -> s - 1
					s += length + repOff
					nextEmit = s
					if s >= sLimit {
						if debugEncoder {
							println("repeat ended", s, length)
						}
						break encodeLoop
					}
					// Index skipped...
					for index0 < s-1 {
						cv0 := load6432(src, index0)
						cv1 := cv0 >> 8
						h0 := hashLen(cv0, betterLongTableBits, betterLongLen)
						off := index0 + e.cur
						e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset}
						e.markLongShardDirty(h0)
						h1 := hashLen(cv1, betterShortTableBits, betterShortLen)
						e.table[h1] = tableEntry{offset: off + 1, val: uint32(cv1)}
						e.markShortShardDirty(h1)
						index0 += 2
					}
					cv = load6432(src, s)
					continue
				}
				const repOff2 = 1

				// We deviate from the reference encoder and also check offset 2.
				// Still slower and not much better, so disabled.
				// repIndex = s - offset2 + repOff2
				if false && repIndex >= 0 && load6432(src, repIndex) == load6432(src, s+repOff) {
					// Consider history as well.
					var seq seq
					length := 8 + e.matchlen(s+8+repOff2, repIndex+8, src)

					seq.matchLen = uint32(length - zstdMinMatch)

					// We might be able to match backwards.
					// Extend as long as we can.
					start := s + repOff2
					// We end the search early, so we don't risk 0 literals
					// and have to do special offset treatment.
					startLimit := nextEmit + 1

					tMin := s - e.maxMatchOff
					if tMin < 0 {
						tMin = 0
					}
					for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 {
						repIndex--
						start--
						seq.matchLen++
					}
					addLiterals(&seq, start)

					// rep 2
					seq.offset = 2
					if debugSequences {
						println("repeat sequence 2", seq, "next s:", s)
					}
					blk.sequences = append(blk.sequences, seq)

					s += length + repOff2
					nextEmit = s
					if s >= sLimit {
						if debugEncoder {
							println("repeat ended", s, length)
						}
						break encodeLoop
					}

					// Index skipped...
					for index0 < s-1 {
						cv0 := load6432(src, index0)
						cv1 := cv0 >> 8
						h0 := hashLen(cv0, betterLongTableBits, betterLongLen)
						off := index0 + e.cur
						e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset}
						e.markLongShardDirty(h0)
						h1 := hashLen(cv1, betterShortTableBits, betterShortLen)
						e.table[h1] = tableEntry{offset: off + 1, val: uint32(cv1)}
						e.markShortShardDirty(h1)
						index0 += 2
					}
					cv = load6432(src, s)
					// Swap offsets
					offset1, offset2 = offset2, offset1
					continue
				}
			}
			// Find the offsets of our two matches.
			coffsetL := candidateL.offset - e.cur
			coffsetLP := candidateL.prev - e.cur

			// Check if we have a long match.
			if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) {
				// Found a long match, at least 8 bytes.
				matched = e.matchlen(s+8, coffsetL+8, src) + 8
				t = coffsetL
				if debugAsserts && s <= t {
					panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
				}
				if debugAsserts && s-t > e.maxMatchOff {
					panic("s - t >e.maxMatchOff")
				}
				if debugMatches {
					println("long match")
				}

				// The previous chain entry may give a longer match.
				if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) {
					// Found a long match, at least 8 bytes.
					prevMatch := e.matchlen(s+8, coffsetLP+8, src) + 8
					if prevMatch > matched {
						matched = prevMatch
						t = coffsetLP
					}
					if debugAsserts && s <= t {
						panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
					}
					if debugAsserts && s-t > e.maxMatchOff {
						panic("s - t >e.maxMatchOff")
					}
					if debugMatches {
						println("long match")
					}
				}
				break
			}

			// Check if we have a long match on prev.
			if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) {
				// Found a long match, at least 8 bytes.
				matched = e.matchlen(s+8, coffsetLP+8, src) + 8
				t = coffsetLP
				if debugAsserts && s <= t {
					panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
				}
				if debugAsserts && s-t > e.maxMatchOff {
					panic("s - t >e.maxMatchOff")
				}
				if debugMatches {
					println("long match")
				}
				break
			}

			coffsetS := candidateS.offset - e.cur

			// Check if we have a short match.
			if s-coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val {
				// found a regular match
				matched = e.matchlen(s+4, coffsetS+4, src) + 4

				// See if we can find a long match at s+1
				const checkAt = 1
				cv := load6432(src, s+checkAt)
				nextHashL = hashLen(cv, betterLongTableBits, betterLongLen)
				candidateL = e.longTable[nextHashL]
				coffsetL = candidateL.offset - e.cur

				// We can store it, since we have at least a 4 byte match.
				e.longTable[nextHashL] = prevEntry{offset: s + checkAt + e.cur, prev: candidateL.offset}
				e.markLongShardDirty(nextHashL)
				if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) {
					// Found a long match, at least 8 bytes.
					matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8
					if matchedNext > matched {
						t = coffsetL
						s += checkAt
						matched = matchedNext
						if debugMatches {
							println("long match (after short)")
						}
						break
					}
				}

				// Check prev long...
				coffsetL = candidateL.prev - e.cur
				if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) {
					// Found a long match, at least 8 bytes.
					matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8
					if matchedNext > matched {
						t = coffsetL
						s += checkAt
						matched = matchedNext
						if debugMatches {
							println("prev long match (after short)")
						}
						break
					}
				}
				t = coffsetS
				if debugAsserts && s <= t {
					panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
				}
				if debugAsserts && s-t > e.maxMatchOff {
					panic("s - t >e.maxMatchOff")
				}
				if debugAsserts && t < 0 {
					panic("t<0")
				}
				if debugMatches {
					println("short match")
				}
				break
			}

			// No match found, move forward in input.
			s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1))
			if s >= sLimit {
				break encodeLoop
			}
			cv = load6432(src, s)
		}
		// Try to find a better match by searching for a long match at the end of the current best match
		if s+matched < sLimit {
			nextHashL := hashLen(load6432(src, s+matched), betterLongTableBits, betterLongLen)
			cv := load3232(src, s)
			candidateL := e.longTable[nextHashL]
			coffsetL := candidateL.offset - e.cur - matched
			if coffsetL >= 0 && coffsetL < s && s-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) {
				// Found a long match, at least 4 bytes.
				matchedNext := e.matchlen(s+4, coffsetL+4, src) + 4
				if matchedNext > matched {
					t = coffsetL
					matched = matchedNext
					if debugMatches {
						println("long match at end-of-match")
					}
				}
			}

			// Check prev long...
			if true {
				coffsetL = candidateL.prev - e.cur - matched
				if coffsetL >= 0 && coffsetL < s && s-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) {
					// Found a long match, at least 4 bytes.
					matchedNext := e.matchlen(s+4, coffsetL+4, src) + 4
					if matchedNext > matched {
						t = coffsetL
						matched = matchedNext
						if debugMatches {
							println("prev long match at end-of-match")
						}
					}
				}
			}
		}
		// A match has been found. Update recent offsets.
		offset2 = offset1
		offset1 = s - t

		if debugAsserts && s <= t {
			panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
		}

		if debugAsserts && canRepeat && int(offset1) > len(src) {
			panic("invalid offset")
		}

		// Extend the n-byte match as long as possible.
		l := matched

		// Extend backwards
		tMin := s - e.maxMatchOff
		if tMin < 0 {
			tMin = 0
		}
		for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength {
			s--
			t--
			l++
		}

		// Write our sequence
		var seq seq
		seq.litLen = uint32(s - nextEmit)
		seq.matchLen = uint32(l - zstdMinMatch)
		if seq.litLen > 0 {
			blk.literals = append(blk.literals, src[nextEmit:s]...)
		}
		seq.offset = uint32(s-t) + 3
		s += l
		if debugSequences {
			println("sequence", seq, "next s:", s)
		}
		blk.sequences = append(blk.sequences, seq)
		nextEmit = s
		if s >= sLimit {
			break encodeLoop
		}

		// Index match start+1 (long) -> s - 1
		off := index0 + e.cur
		for index0 < s-1 {
			cv0 := load6432(src, index0)
			cv1 := cv0 >> 8
			h0 := hashLen(cv0, betterLongTableBits, betterLongLen)
			e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset}
			e.markLongShardDirty(h0)
			h1 := hashLen(cv1, betterShortTableBits, betterShortLen)
			e.table[h1] = tableEntry{offset: off + 1, val: uint32(cv1)}
			e.markShortShardDirty(h1)
			index0 += 2
			off += 2
		}

		cv = load6432(src, s)
		if !canRepeat {
			continue
		}

		// Check offset 2
		for {
			o2 := s - offset2
			if load3232(src, o2) != uint32(cv) {
				// Do regular search
				break
			}

			// Store this, since we have it.
			nextHashL := hashLen(cv, betterLongTableBits, betterLongLen)
			nextHashS := hashLen(cv, betterShortTableBits, betterShortLen)

			// We have at least 4 byte match.
			// No need to check backwards. We come straight from a match
			l := 4 + e.matchlen(s+4, o2+4, src)

			e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: e.longTable[nextHashL].offset}
			e.markLongShardDirty(nextHashL)
			e.table[nextHashS] = tableEntry{offset: s + e.cur, val: uint32(cv)}
			e.markShortShardDirty(nextHashS)
			seq.matchLen = uint32(l) - zstdMinMatch
			seq.litLen = 0

			// Since litlen is always 0, this is offset 1.
			seq.offset = 1
			s += l
			nextEmit = s
			if debugSequences {
				println("sequence", seq, "next s:", s)
			}
			blk.sequences = append(blk.sequences, seq)

			// Swap offset 1 and 2.
			offset1, offset2 = offset2, offset1
			if s >= sLimit {
				// Finished
				break encodeLoop
			}
			cv = load6432(src, s)
		}
	}

	// Flush any trailing literals after the last match.
	if int(nextEmit) < len(src) {
		blk.literals = append(blk.literals, src[nextEmit:]...)
		blk.extraLits = len(src) - int(nextEmit)
	}
	blk.recentOffsets[0] = uint32(offset1)
	blk.recentOffsets[1] = uint32(offset2)
	if debugEncoder {
		println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits)
	}
}
// Reset will reset and set a dictionary if not nil.
// The dictionary-less betterFastEncoder does not support dictionaries,
// so passing a non-nil dict is a programmer error and panics.
func (e *betterFastEncoder) Reset(d *dict, singleBlock bool) {
	e.resetBase(d, singleBlock)
	if d != nil {
		panic("betterFastEncoder: Reset with dict")
	}
}
// Reset will reset and set a dictionary if not nil.
//
// The cached dictionary hash tables (dictTable/dictLongTable) are rebuilt
// when the dictionary changed or the table sizes differ. The working tables
// are then restored from the cached tables: either wholesale, or shard by
// shard when only a few shards were dirtied since the previous Reset.
func (e *betterFastEncoderDict) Reset(d *dict, singleBlock bool) {
	e.resetBase(d, singleBlock)
	if d == nil {
		return
	}
	// Evaluate the dictionary-change condition once, up front.
	// Previously e.lastDictID was updated inside the short-table rebuild
	// block, so the long-table rebuild condition below would see the
	// already-updated ID and skip rebuilding when table lengths matched,
	// leaving a stale long dict table.
	dictChanged := d.id != e.lastDictID

	// Init or copy short-hash dict table.
	if len(e.dictTable) != len(e.table) || dictChanged {
		if len(e.dictTable) != len(e.table) {
			e.dictTable = make([]tableEntry, len(e.table))
		}
		end := int32(len(d.content)) - 8 + e.maxMatchOff
		// Hash four positions per 8-byte load.
		for i := e.maxMatchOff; i < end; i += 4 {
			const hashLog = betterShortTableBits

			cv := load6432(d.content, i-e.maxMatchOff)
			nextHash := hashLen(cv, hashLog, betterShortLen)      // 0 -> 4
			nextHash1 := hashLen(cv>>8, hashLog, betterShortLen)  // 1 -> 5
			nextHash2 := hashLen(cv>>16, hashLog, betterShortLen) // 2 -> 6
			nextHash3 := hashLen(cv>>24, hashLog, betterShortLen) // 3 -> 7
			e.dictTable[nextHash] = tableEntry{
				val:    uint32(cv),
				offset: i,
			}
			e.dictTable[nextHash1] = tableEntry{
				val:    uint32(cv >> 8),
				offset: i + 1,
			}
			e.dictTable[nextHash2] = tableEntry{
				val:    uint32(cv >> 16),
				offset: i + 2,
			}
			e.dictTable[nextHash3] = tableEntry{
				val:    uint32(cv >> 24),
				offset: i + 3,
			}
		}
		e.allDirty = true
	}

	// Init or copy long-hash dict table.
	if len(e.dictLongTable) != len(e.longTable) || dictChanged {
		if len(e.dictLongTable) != len(e.longTable) {
			e.dictLongTable = make([]prevEntry, len(e.longTable))
		}
		if len(d.content) >= 8 {
			cv := load6432(d.content, 0)
			h := hashLen(cv, betterLongTableBits, betterLongLen)
			e.dictLongTable[h] = prevEntry{
				offset: e.maxMatchOff,
				prev:   e.dictLongTable[h].offset,
			}

			end := int32(len(d.content)) - 8 + e.maxMatchOff
			off := 8 // First to read
			for i := e.maxMatchOff + 1; i < end; i++ {
				cv = cv>>8 | (uint64(d.content[off]) << 56)
				h := hashLen(cv, betterLongTableBits, betterLongLen)
				e.dictLongTable[h] = prevEntry{
					offset: i,
					prev:   e.dictLongTable[h].offset,
				}
				off++
			}
		}
		e.allDirty = true
	}
	// Record the dictionary only after both tables are up to date.
	e.lastDictID = d.id

	// Restore short table from dict table.
	{
		dirtyShardCnt := 0
		if !e.allDirty {
			for i := range e.shortTableShardDirty {
				if e.shortTableShardDirty[i] {
					dirtyShardCnt++
				}
			}
		}
		const shardCnt = betterShortTableShardCnt
		const shardSize = betterShortTableShardSize
		// Full copy is cheaper than shard copies when most shards are dirty.
		if e.allDirty || dirtyShardCnt > shardCnt*4/6 {
			copy(e.table[:], e.dictTable)
			for i := range e.shortTableShardDirty {
				e.shortTableShardDirty[i] = false
			}
		} else {
			for i := range e.shortTableShardDirty {
				if !e.shortTableShardDirty[i] {
					continue
				}

				copy(e.table[i*shardSize:(i+1)*shardSize], e.dictTable[i*shardSize:(i+1)*shardSize])
				e.shortTableShardDirty[i] = false
			}
		}
	}
	// Restore long table from dict table.
	{
		dirtyShardCnt := 0
		if !e.allDirty {
			// Count the LONG table's dirty shards (previously this counted
			// shortTableShardDirty, skewing the heuristic below).
			for i := range e.longTableShardDirty {
				if e.longTableShardDirty[i] {
					dirtyShardCnt++
				}
			}
		}
		const shardCnt = betterLongTableShardCnt
		const shardSize = betterLongTableShardSize
		if e.allDirty || dirtyShardCnt > shardCnt*4/6 {
			copy(e.longTable[:], e.dictLongTable)
			for i := range e.longTableShardDirty {
				e.longTableShardDirty[i] = false
			}
		} else {
			for i := range e.longTableShardDirty {
				if !e.longTableShardDirty[i] {
					continue
				}

				copy(e.longTable[i*shardSize:(i+1)*shardSize], e.dictLongTable[i*shardSize:(i+1)*shardSize])
				e.longTableShardDirty[i] = false
			}
		}
	}
	e.cur = e.maxMatchOff
	e.allDirty = false
}
// markLongShardDirty records that the long-table shard containing entryNum
// was modified, so Reset can restore only the dirty shards from the cached
// dictionary table.
func (e *betterFastEncoderDict) markLongShardDirty(entryNum uint32) {
	e.longTableShardDirty[entryNum/betterLongTableShardSize] = true
}
// markShortShardDirty records that the short-table shard containing entryNum
// was modified, so Reset can restore only the dirty shards from the cached
// dictionary table.
func (e *betterFastEncoderDict) markShortShardDirty(entryNum uint32) {
	e.shortTableShardDirty[entryNum/betterShortTableShardSize] = true
}
// Copyright 2019+ Klaus Post. All rights reserved.
// License information can be found in the LICENSE file.
// Based on work by Yann Collet, released under BSD License.
package zstd
import "fmt"
const (
	dFastLongTableBits = 17                      // Bits used in the long match table
	dFastLongTableSize = 1 << dFastLongTableBits // Size of the table
	dFastLongTableMask = dFastLongTableSize - 1  // Mask for table indices. Redundant, but can eliminate bounds checks.
	dFastLongLen       = 8                       // Bytes used for table hash

	dLongTableShardCnt = 1 << (dFastLongTableBits - dictShardBits) // Number of shards in the table
	// NOTE(review): shard size is derived from tableShardCnt (the short
	// table's shard count), not dLongTableShardCnt, so shardCnt*shardSize
	// need not equal dFastLongTableSize — verify this is intentional against
	// the users of these constants (markLongShardDirty / dict Reset).
	dLongTableShardSize = dFastLongTableSize / tableShardCnt // Size of an individual shard

	dFastShortTableBits = tableBits                // Bits used in the short match table
	dFastShortTableSize = 1 << dFastShortTableBits // Size of the table
	dFastShortTableMask = dFastShortTableSize - 1  // Mask for table indices. Redundant, but can eliminate bounds checks.
	dFastShortLen       = 5                        // Bytes used for table hash
)
// doubleFastEncoder is the "double fast" encoder level. It extends
// fastEncoder with a second hash table keyed on a longer (8 byte) hash,
// letting it prefer longer matches while keeping fast lookups.
type doubleFastEncoder struct {
	fastEncoder
	longTable [dFastLongTableSize]tableEntry // Long-hash match candidates.
}
// doubleFastEncoderDict is the dictionary-aware variant of doubleFastEncoder.
// It keeps a cached copy of the long table built from the dictionary and
// tracks which shards of the working long table were modified, so Reset can
// restore state cheaply between uses.
type doubleFastEncoderDict struct {
	fastEncoderDict
	longTable           [dFastLongTableSize]tableEntry // Working long-hash table.
	dictLongTable       []tableEntry                   // Long table as built from the dictionary.
	longTableShardDirty [dLongTableShardCnt]bool       // Which longTable shards differ from dictLongTable.
}
// Encode mimics functionality in zstd_dfast.c.
// Matches found in src are appended to blk as sequences; unmatched bytes are
// appended as literals. Recent offsets are carried in and out via
// blk.recentOffsets.
func (e *doubleFastEncoder) Encode(blk *blockEnc, src []byte) {
	const (
		// Input margin is the number of bytes we read (8)
		// and the maximum we will read ahead (2)
		inputMargin            = 8 + 2
		minNonLiteralBlockSize = 16
	)

	// Protect against e.cur wraparound.
	for e.cur >= e.bufferReset-int32(len(e.hist)) {
		if len(e.hist) == 0 {
			// No history: clear both tables and restart offsets.
			e.table = [dFastShortTableSize]tableEntry{}
			e.longTable = [dFastLongTableSize]tableEntry{}
			e.cur = e.maxMatchOff
			break
		}
		// Shift down everything in the table that isn't already too far away.
		minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff
		for i := range e.table[:] {
			v := e.table[i].offset
			if v < minOff {
				v = 0
			} else {
				v = v - e.cur + e.maxMatchOff
			}
			e.table[i].offset = v
		}
		for i := range e.longTable[:] {
			v := e.longTable[i].offset
			if v < minOff {
				v = 0
			} else {
				v = v - e.cur + e.maxMatchOff
			}
			e.longTable[i].offset = v
		}
		e.cur = e.maxMatchOff
		break
	}

	s := e.addBlock(src)
	blk.size = len(src)
	if len(src) < minNonLiteralBlockSize {
		// Too small to be worth matching; emit everything as literals.
		blk.extraLits = len(src)
		blk.literals = blk.literals[:len(src)]
		copy(blk.literals, src)
		return
	}

	// Override src
	src = e.hist
	sLimit := int32(len(src)) - inputMargin
	// stepSize is the number of bytes to skip on every main loop iteration.
	// It should be >= 1.
	const stepSize = 1

	const kSearchStrength = 8

	// nextEmit is where in src the next emitLiteral should start from.
	nextEmit := s
	cv := load6432(src, s)

	// Relative offsets
	offset1 := int32(blk.recentOffsets[0])
	offset2 := int32(blk.recentOffsets[1])

	// addLiterals appends src[nextEmit:until] as literals and records the
	// literal length on the sequence.
	addLiterals := func(s *seq, until int32) {
		if until == nextEmit {
			return
		}
		blk.literals = append(blk.literals, src[nextEmit:until]...)
		s.litLen = uint32(until - nextEmit)
	}
	if debugEncoder {
		println("recent offsets:", blk.recentOffsets)
	}

encodeLoop:
	for {
		var t int32
		// We allow the encoder to optionally turn off repeat offsets across blocks
		canRepeat := len(blk.sequences) > 2

		for {
			if debugAsserts && canRepeat && offset1 == 0 {
				panic("offset0 was 0")
			}

			nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen)
			nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen)
			candidateL := e.longTable[nextHashL]
			candidateS := e.table[nextHashS]

			const repOff = 1
			repIndex := s - offset1 + repOff
			entry := tableEntry{offset: s + e.cur, val: uint32(cv)}
			e.longTable[nextHashL] = entry
			e.table[nextHashS] = entry

			if canRepeat {
				// Try repeat offset 1 first.
				if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) {
					// Consider history as well.
					var seq seq
					length := 4 + e.matchlen(s+4+repOff, repIndex+4, src)

					seq.matchLen = uint32(length - zstdMinMatch)

					// We might be able to match backwards.
					// Extend as long as we can.
					start := s + repOff
					// We end the search early, so we don't risk 0 literals
					// and have to do special offset treatment.
					startLimit := nextEmit + 1

					tMin := s - e.maxMatchOff
					if tMin < 0 {
						tMin = 0
					}
					for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 {
						repIndex--
						start--
						seq.matchLen++
					}
					addLiterals(&seq, start)

					// rep 0
					seq.offset = 1
					if debugSequences {
						println("repeat sequence", seq, "next s:", s)
					}
					blk.sequences = append(blk.sequences, seq)
					s += length + repOff
					nextEmit = s
					if s >= sLimit {
						if debugEncoder {
							println("repeat ended", s, length)
						}
						break encodeLoop
					}
					cv = load6432(src, s)
					continue
				}
			}
			// Find the offsets of our two matches.
			coffsetL := s - (candidateL.offset - e.cur)
			coffsetS := s - (candidateS.offset - e.cur)

			// Check if we have a long match.
			if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val {
				// Found a long match, likely at least 8 bytes.
				// Reference encoder checks all 8 bytes, we only check 4,
				// but the likelihood of both the first 4 bytes and the hash matching should be enough.
				t = candidateL.offset - e.cur
				if debugAsserts && s <= t {
					panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
				}
				if debugAsserts && s-t > e.maxMatchOff {
					panic("s - t >e.maxMatchOff")
				}
				if debugMatches {
					println("long match")
				}
				break
			}

			// Check if we have a short match.
			if coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val {
				// found a regular match
				// See if we can find a long match at s+1
				const checkAt = 1
				cv := load6432(src, s+checkAt)
				nextHashL = hashLen(cv, dFastLongTableBits, dFastLongLen)
				candidateL = e.longTable[nextHashL]
				coffsetL = s - (candidateL.offset - e.cur) + checkAt

				// We can store it, since we have at least a 4 byte match.
				e.longTable[nextHashL] = tableEntry{offset: s + checkAt + e.cur, val: uint32(cv)}
				if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val {
					// Found a long match, likely at least 8 bytes.
					// Reference encoder checks all 8 bytes, we only check 4,
					// but the likelihood of both the first 4 bytes and the hash matching should be enough.
					t = candidateL.offset - e.cur
					s += checkAt
					if debugMatches {
						println("long match (after short)")
					}
					break
				}

				t = candidateS.offset - e.cur
				if debugAsserts && s <= t {
					panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
				}
				if debugAsserts && s-t > e.maxMatchOff {
					panic("s - t >e.maxMatchOff")
				}
				if debugAsserts && t < 0 {
					panic("t<0")
				}
				if debugMatches {
					println("short match")
				}
				break
			}

			// No match found, move forward in input.
			s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1))
			if s >= sLimit {
				break encodeLoop
			}
			cv = load6432(src, s)
		}

		// A 4-byte match has been found. Update recent offsets.
		// We'll later see if more than 4 bytes.
		offset2 = offset1
		offset1 = s - t

		if debugAsserts && s <= t {
			panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
		}

		if debugAsserts && canRepeat && int(offset1) > len(src) {
			panic("invalid offset")
		}

		// Extend the 4-byte match as long as possible.
		l := e.matchlen(s+4, t+4, src) + 4

		// Extend backwards
		tMin := s - e.maxMatchOff
		if tMin < 0 {
			tMin = 0
		}
		for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength {
			s--
			t--
			l++
		}

		// Write our sequence
		var seq seq
		seq.litLen = uint32(s - nextEmit)
		seq.matchLen = uint32(l - zstdMinMatch)
		if seq.litLen > 0 {
			blk.literals = append(blk.literals, src[nextEmit:s]...)
		}
		seq.offset = uint32(s-t) + 3
		s += l
		if debugSequences {
			println("sequence", seq, "next s:", s)
		}
		blk.sequences = append(blk.sequences, seq)
		nextEmit = s
		if s >= sLimit {
			break encodeLoop
		}

		// Index match start+1 (long) and start+2 (short)
		index0 := s - l + 1
		// Index match end-2 (long) and end-1 (short)
		index1 := s - 2

		cv0 := load6432(src, index0)
		cv1 := load6432(src, index1)
		te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)}
		te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)}
		e.longTable[hashLen(cv0, dFastLongTableBits, dFastLongLen)] = te0
		e.longTable[hashLen(cv1, dFastLongTableBits, dFastLongLen)] = te1
		cv0 >>= 8
		cv1 >>= 8
		te0.offset++
		te1.offset++
		te0.val = uint32(cv0)
		te1.val = uint32(cv1)
		e.table[hashLen(cv0, dFastShortTableBits, dFastShortLen)] = te0
		e.table[hashLen(cv1, dFastShortTableBits, dFastShortLen)] = te1

		cv = load6432(src, s)

		if !canRepeat {
			continue
		}

		// Check offset 2
		for {
			o2 := s - offset2
			if load3232(src, o2) != uint32(cv) {
				// Do regular search
				break
			}

			// Store this, since we have it.
			nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen)
			nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen)

			// We have at least 4 byte match.
			// No need to check backwards. We come straight from a match
			l := 4 + e.matchlen(s+4, o2+4, src)

			entry := tableEntry{offset: s + e.cur, val: uint32(cv)}
			e.longTable[nextHashL] = entry
			e.table[nextHashS] = entry
			seq.matchLen = uint32(l) - zstdMinMatch
			seq.litLen = 0

			// Since litlen is always 0, this is offset 1.
			seq.offset = 1
			s += l
			nextEmit = s
			if debugSequences {
				println("sequence", seq, "next s:", s)
			}
			blk.sequences = append(blk.sequences, seq)

			// Swap offset 1 and 2.
			offset1, offset2 = offset2, offset1
			if s >= sLimit {
				// Finished
				break encodeLoop
			}
			cv = load6432(src, s)
		}
	}

	// Flush any trailing literals after the last match.
	if int(nextEmit) < len(src) {
		blk.literals = append(blk.literals, src[nextEmit:]...)
		blk.extraLits = len(src) - int(nextEmit)
	}
	blk.recentOffsets[0] = uint32(offset1)
	blk.recentOffsets[1] = uint32(offset2)
	if debugEncoder {
		println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits)
	}
}
// EncodeNoHist will encode a block with no history and no following blocks.
// Most notable difference is that src will not be copied for history and
// we do not need to check for max match length.
// Literals and sequences are appended to blk; on return e.cur is advanced
// past the block so stale table entries cannot match for the next user.
func (e *doubleFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) {
	const (
		// Input margin is the number of bytes we read (8)
		// and the maximum we will read ahead (2)
		inputMargin            = 8 + 2
		minNonLiteralBlockSize = 16
	)

	// Protect against e.cur wraparound.
	// No history is kept, so a plain clear of both tables is sufficient.
	if e.cur >= e.bufferReset {
		for i := range e.table[:] {
			e.table[i] = tableEntry{}
		}
		for i := range e.longTable[:] {
			e.longTable[i] = tableEntry{}
		}
		e.cur = e.maxMatchOff
	}

	s := int32(0)
	blk.size = len(src)
	if len(src) < minNonLiteralBlockSize {
		// Too small to be worth match searching; emit everything as literals.
		blk.extraLits = len(src)
		blk.literals = blk.literals[:len(src)]
		copy(blk.literals, src)
		return
	}

	// Override src
	sLimit := int32(len(src)) - inputMargin
	// stepSize is the number of bytes to skip on every main loop iteration.
	// It should be >= 1.
	const stepSize = 1

	const kSearchStrength = 8

	// nextEmit is where in src the next emitLiteral should start from.
	nextEmit := s
	cv := load6432(src, s)

	// Relative offsets
	offset1 := int32(blk.recentOffsets[0])
	offset2 := int32(blk.recentOffsets[1])

	// addLiterals copies src[nextEmit:until] into blk.literals and records
	// the literal run length on the sequence.
	addLiterals := func(s *seq, until int32) {
		if until == nextEmit {
			return
		}
		blk.literals = append(blk.literals, src[nextEmit:until]...)
		s.litLen = uint32(until - nextEmit)
	}
	if debugEncoder {
		println("recent offsets:", blk.recentOffsets)
	}

encodeLoop:
	for {
		var t int32
		for {
			nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen)
			nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen)
			candidateL := e.longTable[nextHashL]
			candidateS := e.table[nextHashS]

			const repOff = 1
			repIndex := s - offset1 + repOff
			entry := tableEntry{offset: s + e.cur, val: uint32(cv)}
			e.longTable[nextHashL] = entry
			e.table[nextHashS] = entry

			// Repeat offsets are only trusted after a few sequences exist.
			if len(blk.sequences) > 2 {
				if load3232(src, repIndex) == uint32(cv>>(repOff*8)) {
					// Consider history as well.
					var seq seq
					//length := 4 + e.matchlen(s+4+repOff, repIndex+4, src)
					length := 4 + int32(matchLen(src[s+4+repOff:], src[repIndex+4:]))

					seq.matchLen = uint32(length - zstdMinMatch)

					// We might be able to match backwards.
					// Extend as long as we can.
					start := s + repOff
					// We end the search early, so we don't risk 0 literals
					// and have to do special offset treatment.
					startLimit := nextEmit + 1

					tMin := s - e.maxMatchOff
					if tMin < 0 {
						tMin = 0
					}
					for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] {
						repIndex--
						start--
						seq.matchLen++
					}
					addLiterals(&seq, start)

					// rep 0
					seq.offset = 1
					if debugSequences {
						println("repeat sequence", seq, "next s:", s)
					}
					blk.sequences = append(blk.sequences, seq)
					s += length + repOff
					nextEmit = s
					if s >= sLimit {
						if debugEncoder {
							println("repeat ended", s, length)
						}
						break encodeLoop
					}
					cv = load6432(src, s)
					continue
				}
			}
			// Find the offsets of our two matches.
			coffsetL := s - (candidateL.offset - e.cur)
			coffsetS := s - (candidateS.offset - e.cur)

			// Check if we have a long match.
			if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val {
				// Found a long match, likely at least 8 bytes.
				// Reference encoder checks all 8 bytes, we only check 4,
				// but the likelihood of both the first 4 bytes and the hash matching should be enough.
				t = candidateL.offset - e.cur
				if debugAsserts && s <= t {
					panic(fmt.Sprintf("s (%d) <= t (%d). cur: %d", s, t, e.cur))
				}
				if debugAsserts && s-t > e.maxMatchOff {
					panic("s - t >e.maxMatchOff")
				}
				if debugMatches {
					println("long match")
				}
				break
			}

			// Check if we have a short match.
			if coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val {
				// found a regular match
				// See if we can find a long match at s+1
				const checkAt = 1
				cv := load6432(src, s+checkAt)
				nextHashL = hashLen(cv, dFastLongTableBits, dFastLongLen)
				candidateL = e.longTable[nextHashL]
				coffsetL = s - (candidateL.offset - e.cur) + checkAt

				// We can store it, since we have at least a 4 byte match.
				e.longTable[nextHashL] = tableEntry{offset: s + checkAt + e.cur, val: uint32(cv)}
				if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val {
					// Found a long match, likely at least 8 bytes.
					// Reference encoder checks all 8 bytes, we only check 4,
					// but the likelihood of both the first 4 bytes and the hash matching should be enough.
					t = candidateL.offset - e.cur
					s += checkAt
					if debugMatches {
						println("long match (after short)")
					}
					break
				}

				t = candidateS.offset - e.cur
				if debugAsserts && s <= t {
					panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
				}
				if debugAsserts && s-t > e.maxMatchOff {
					panic("s - t >e.maxMatchOff")
				}
				if debugAsserts && t < 0 {
					panic("t<0")
				}
				if debugMatches {
					println("short match")
				}
				break
			}

			// No match found, move forward in input.
			s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1))
			if s >= sLimit {
				break encodeLoop
			}
			cv = load6432(src, s)
		}

		// A 4-byte match has been found. Update recent offsets.
		// We'll later see if more than 4 bytes.
		offset2 = offset1
		offset1 = s - t

		if debugAsserts && s <= t {
			panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
		}

		// Extend the 4-byte match as long as possible.
		//l := e.matchlen(s+4, t+4, src) + 4
		l := int32(matchLen(src[s+4:], src[t+4:])) + 4

		// Extend backwards
		tMin := s - e.maxMatchOff
		if tMin < 0 {
			tMin = 0
		}
		for t > tMin && s > nextEmit && src[t-1] == src[s-1] {
			s--
			t--
			l++
		}

		// Write our sequence
		var seq seq
		seq.litLen = uint32(s - nextEmit)
		seq.matchLen = uint32(l - zstdMinMatch)
		if seq.litLen > 0 {
			blk.literals = append(blk.literals, src[nextEmit:s]...)
		}
		// +3 skips the three repeat-offset codes in the sequence encoding.
		seq.offset = uint32(s-t) + 3
		s += l
		if debugSequences {
			println("sequence", seq, "next s:", s)
		}
		blk.sequences = append(blk.sequences, seq)
		nextEmit = s
		if s >= sLimit {
			break encodeLoop
		}

		// Index match start+1 (long) and start+2 (short)
		index0 := s - l + 1
		// Index match end-2 (long) and end-1 (short)
		index1 := s - 2

		cv0 := load6432(src, index0)
		cv1 := load6432(src, index1)
		te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)}
		te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)}
		e.longTable[hashLen(cv0, dFastLongTableBits, dFastLongLen)] = te0
		e.longTable[hashLen(cv1, dFastLongTableBits, dFastLongLen)] = te1
		cv0 >>= 8
		cv1 >>= 8
		te0.offset++
		te1.offset++
		te0.val = uint32(cv0)
		te1.val = uint32(cv1)
		e.table[hashLen(cv0, dFastShortTableBits, dFastShortLen)] = te0
		e.table[hashLen(cv1, dFastShortTableBits, dFastShortLen)] = te1

		cv = load6432(src, s)

		if len(blk.sequences) <= 2 {
			continue
		}

		// Check offset 2
		for {
			o2 := s - offset2
			if load3232(src, o2) != uint32(cv) {
				// Do regular search
				break
			}

			// Store this, since we have it.
			// NOTE(review): cv1 was loaded at s-2 and already shifted by 8 above,
			// so cv1>>8 shares its low bytes with cv; this appears to hash the
			// same value as hashLen(cv, ...) — confirm against dFastShortLen.
			nextHashS := hashLen(cv1>>8, dFastShortTableBits, dFastShortLen)
			nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen)

			// We have at least 4 byte match.
			// No need to check backwards. We come straight from a match
			//l := 4 + e.matchlen(s+4, o2+4, src)
			l := 4 + int32(matchLen(src[s+4:], src[o2+4:]))

			entry := tableEntry{offset: s + e.cur, val: uint32(cv)}
			e.longTable[nextHashL] = entry
			e.table[nextHashS] = entry
			seq.matchLen = uint32(l) - zstdMinMatch
			seq.litLen = 0

			// Since litlen is always 0, this is offset 1.
			seq.offset = 1
			s += l
			nextEmit = s
			if debugSequences {
				println("sequence", seq, "next s:", s)
			}
			blk.sequences = append(blk.sequences, seq)

			// Swap offset 1 and 2.
			offset1, offset2 = offset2, offset1
			if s >= sLimit {
				// Finished
				break encodeLoop
			}
			cv = load6432(src, s)
		}
	}

	if int(nextEmit) < len(src) {
		blk.literals = append(blk.literals, src[nextEmit:]...)
		blk.extraLits = len(src) - int(nextEmit)
	}
	if debugEncoder {
		println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits)
	}

	// We do not store history, so we must offset e.cur to avoid false matches for next user.
	if e.cur < e.bufferReset {
		e.cur += int32(len(src))
	}
}
// Encode will encode the content, with a dictionary if initialized for it.
// It appends src to the encoder history (e.hist), emits literals and
// sequences into blk, and marks every table shard it touches dirty so that
// Reset can restore those shards from the dictionary tables.
func (e *doubleFastEncoderDict) Encode(blk *blockEnc, src []byte) {
	const (
		// Input margin is the number of bytes we read (8)
		// and the maximum we will read ahead (2)
		inputMargin            = 8 + 2
		minNonLiteralBlockSize = 16
	)

	// Protect against e.cur wraparound.
	for e.cur >= e.bufferReset-int32(len(e.hist)) {
		if len(e.hist) == 0 {
			// No history to preserve; clear both tables outright.
			for i := range e.table[:] {
				e.table[i] = tableEntry{}
			}
			for i := range e.longTable[:] {
				e.longTable[i] = tableEntry{}
			}
			e.markAllShardsDirty()
			e.cur = e.maxMatchOff
			break
		}
		// Shift down everything in the table that isn't already too far away.
		minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff
		for i := range e.table[:] {
			v := e.table[i].offset
			if v < minOff {
				v = 0
			} else {
				v = v - e.cur + e.maxMatchOff
			}
			e.table[i].offset = v
		}
		for i := range e.longTable[:] {
			v := e.longTable[i].offset
			if v < minOff {
				v = 0
			} else {
				v = v - e.cur + e.maxMatchOff
			}
			e.longTable[i].offset = v
		}
		e.markAllShardsDirty()
		e.cur = e.maxMatchOff
		break
	}

	s := e.addBlock(src)
	blk.size = len(src)
	if len(src) < minNonLiteralBlockSize {
		// Too small for match searching; emit as literals only.
		blk.extraLits = len(src)
		blk.literals = blk.literals[:len(src)]
		copy(blk.literals, src)
		return
	}

	// Override src
	src = e.hist
	sLimit := int32(len(src)) - inputMargin
	// stepSize is the number of bytes to skip on every main loop iteration.
	// It should be >= 1.
	const stepSize = 1

	const kSearchStrength = 8

	// nextEmit is where in src the next emitLiteral should start from.
	nextEmit := s
	cv := load6432(src, s)

	// Relative offsets
	offset1 := int32(blk.recentOffsets[0])
	offset2 := int32(blk.recentOffsets[1])

	// addLiterals copies src[nextEmit:until] into blk.literals and records
	// the literal run length on the sequence.
	addLiterals := func(s *seq, until int32) {
		if until == nextEmit {
			return
		}
		blk.literals = append(blk.literals, src[nextEmit:until]...)
		s.litLen = uint32(until - nextEmit)
	}
	if debugEncoder {
		println("recent offsets:", blk.recentOffsets)
	}

encodeLoop:
	for {
		var t int32
		// We allow the encoder to optionally turn off repeat offsets across blocks
		canRepeat := len(blk.sequences) > 2

		for {
			if debugAsserts && canRepeat && offset1 == 0 {
				panic("offset0 was 0")
			}

			nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen)
			nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen)
			candidateL := e.longTable[nextHashL]
			candidateS := e.table[nextHashS]

			const repOff = 1
			repIndex := s - offset1 + repOff
			entry := tableEntry{offset: s + e.cur, val: uint32(cv)}
			e.longTable[nextHashL] = entry
			e.markLongShardDirty(nextHashL)
			e.table[nextHashS] = entry
			e.markShardDirty(nextHashS)

			if canRepeat {
				if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) {
					// Consider history as well.
					var seq seq
					length := 4 + e.matchlen(s+4+repOff, repIndex+4, src)

					seq.matchLen = uint32(length - zstdMinMatch)

					// We might be able to match backwards.
					// Extend as long as we can.
					start := s + repOff
					// We end the search early, so we don't risk 0 literals
					// and have to do special offset treatment.
					startLimit := nextEmit + 1

					tMin := s - e.maxMatchOff
					if tMin < 0 {
						tMin = 0
					}
					for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 {
						repIndex--
						start--
						seq.matchLen++
					}
					addLiterals(&seq, start)

					// rep 0
					seq.offset = 1
					if debugSequences {
						println("repeat sequence", seq, "next s:", s)
					}
					blk.sequences = append(blk.sequences, seq)
					s += length + repOff
					nextEmit = s
					if s >= sLimit {
						if debugEncoder {
							println("repeat ended", s, length)
						}
						break encodeLoop
					}
					cv = load6432(src, s)
					continue
				}
			}
			// Find the offsets of our two matches.
			coffsetL := s - (candidateL.offset - e.cur)
			coffsetS := s - (candidateS.offset - e.cur)

			// Check if we have a long match.
			if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val {
				// Found a long match, likely at least 8 bytes.
				// Reference encoder checks all 8 bytes, we only check 4,
				// but the likelihood of both the first 4 bytes and the hash matching should be enough.
				t = candidateL.offset - e.cur
				if debugAsserts && s <= t {
					panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
				}
				if debugAsserts && s-t > e.maxMatchOff {
					panic("s - t >e.maxMatchOff")
				}
				if debugMatches {
					println("long match")
				}
				break
			}

			// Check if we have a short match.
			if coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val {
				// found a regular match
				// See if we can find a long match at s+1
				const checkAt = 1
				cv := load6432(src, s+checkAt)
				nextHashL = hashLen(cv, dFastLongTableBits, dFastLongLen)
				candidateL = e.longTable[nextHashL]
				coffsetL = s - (candidateL.offset - e.cur) + checkAt

				// We can store it, since we have at least a 4 byte match.
				e.longTable[nextHashL] = tableEntry{offset: s + checkAt + e.cur, val: uint32(cv)}
				e.markLongShardDirty(nextHashL)
				if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val {
					// Found a long match, likely at least 8 bytes.
					// Reference encoder checks all 8 bytes, we only check 4,
					// but the likelihood of both the first 4 bytes and the hash matching should be enough.
					t = candidateL.offset - e.cur
					s += checkAt
					if debugMatches {
						println("long match (after short)")
					}
					break
				}

				t = candidateS.offset - e.cur
				if debugAsserts && s <= t {
					panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
				}
				if debugAsserts && s-t > e.maxMatchOff {
					panic("s - t >e.maxMatchOff")
				}
				if debugAsserts && t < 0 {
					panic("t<0")
				}
				if debugMatches {
					println("short match")
				}
				break
			}

			// No match found, move forward in input.
			s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1))
			if s >= sLimit {
				break encodeLoop
			}
			cv = load6432(src, s)
		}

		// A 4-byte match has been found. Update recent offsets.
		// We'll later see if more than 4 bytes.
		offset2 = offset1
		offset1 = s - t

		if debugAsserts && s <= t {
			panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
		}

		if debugAsserts && canRepeat && int(offset1) > len(src) {
			panic("invalid offset")
		}

		// Extend the 4-byte match as long as possible.
		l := e.matchlen(s+4, t+4, src) + 4

		// Extend backwards
		tMin := s - e.maxMatchOff
		if tMin < 0 {
			tMin = 0
		}
		for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength {
			s--
			t--
			l++
		}

		// Write our sequence
		var seq seq
		seq.litLen = uint32(s - nextEmit)
		seq.matchLen = uint32(l - zstdMinMatch)
		if seq.litLen > 0 {
			blk.literals = append(blk.literals, src[nextEmit:s]...)
		}
		// +3 skips the three repeat-offset codes in the sequence encoding.
		seq.offset = uint32(s-t) + 3
		s += l
		if debugSequences {
			println("sequence", seq, "next s:", s)
		}
		blk.sequences = append(blk.sequences, seq)
		nextEmit = s
		if s >= sLimit {
			break encodeLoop
		}

		// Index match start+1 (long) and start+2 (short)
		index0 := s - l + 1
		// Index match end-2 (long) and end-1 (short)
		index1 := s - 2

		cv0 := load6432(src, index0)
		cv1 := load6432(src, index1)
		te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)}
		te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)}
		longHash1 := hashLen(cv0, dFastLongTableBits, dFastLongLen)
		longHash2 := hashLen(cv1, dFastLongTableBits, dFastLongLen)
		e.longTable[longHash1] = te0
		e.longTable[longHash2] = te1
		e.markLongShardDirty(longHash1)
		e.markLongShardDirty(longHash2)
		cv0 >>= 8
		cv1 >>= 8
		te0.offset++
		te1.offset++
		te0.val = uint32(cv0)
		te1.val = uint32(cv1)
		hashVal1 := hashLen(cv0, dFastShortTableBits, dFastShortLen)
		hashVal2 := hashLen(cv1, dFastShortTableBits, dFastShortLen)
		e.table[hashVal1] = te0
		e.markShardDirty(hashVal1)
		e.table[hashVal2] = te1
		e.markShardDirty(hashVal2)

		cv = load6432(src, s)

		if !canRepeat {
			continue
		}

		// Check offset 2
		for {
			o2 := s - offset2
			if load3232(src, o2) != uint32(cv) {
				// Do regular search
				break
			}

			// Store this, since we have it.
			nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen)
			nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen)

			// We have at least 4 byte match.
			// No need to check backwards. We come straight from a match
			l := 4 + e.matchlen(s+4, o2+4, src)

			entry := tableEntry{offset: s + e.cur, val: uint32(cv)}
			e.longTable[nextHashL] = entry
			e.markLongShardDirty(nextHashL)
			e.table[nextHashS] = entry
			e.markShardDirty(nextHashS)
			seq.matchLen = uint32(l) - zstdMinMatch
			seq.litLen = 0

			// Since litlen is always 0, this is offset 1.
			seq.offset = 1
			s += l
			nextEmit = s
			if debugSequences {
				println("sequence", seq, "next s:", s)
			}
			blk.sequences = append(blk.sequences, seq)

			// Swap offset 1 and 2.
			offset1, offset2 = offset2, offset1
			if s >= sLimit {
				// Finished
				break encodeLoop
			}
			cv = load6432(src, s)
		}
	}

	if int(nextEmit) < len(src) {
		blk.literals = append(blk.literals, src[nextEmit:]...)
		blk.extraLits = len(src) - int(nextEmit)
	}
	blk.recentOffsets[0] = uint32(offset1)
	blk.recentOffsets[1] = uint32(offset2)
	if debugEncoder {
		println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits)
	}
	// If we encoded more than 64K mark all dirty.
	if len(src) > 64<<10 {
		e.markAllShardsDirty()
	}
}
// Reset will reset the encoder and set a dictionary if not nil.
// The plain double-fast encoder has no dictionary support; passing a
// non-nil dict panics after the embedded fastEncoder has been reset.
func (e *doubleFastEncoder) Reset(d *dict, singleBlock bool) {
	e.fastEncoder.Reset(d, singleBlock)
	if d == nil {
		return
	}
	panic("doubleFastEncoder: Reset with dict not supported")
}
// ResetDict will reset and set a dictionary if not nil
// The long table derived from the dictionary is cached in e.dictLongTable
// and rebuilt only when the dictionary changes; dirty shards of e.longTable
// are then restored from that cache.
func (e *doubleFastEncoderDict) Reset(d *dict, singleBlock bool) {
	allDirty := e.allDirty
	e.fastEncoderDict.Reset(d, singleBlock)
	if d == nil {
		return
	}

	// Init or copy dict table
	if len(e.dictLongTable) != len(e.longTable) || d.id != e.lastDictID {
		if len(e.dictLongTable) != len(e.longTable) {
			e.dictLongTable = make([]tableEntry, len(e.longTable))
		}

		if len(d.content) >= 8 {
			// Seed the long table with every position of the dictionary
			// content, offset by e.maxMatchOff. cv is maintained as a
			// rolling 8-byte window over d.content.
			cv := load6432(d.content, 0)
			e.dictLongTable[hashLen(cv, dFastLongTableBits, dFastLongLen)] = tableEntry{
				val:    uint32(cv),
				offset: e.maxMatchOff,
			}
			end := int32(len(d.content)) - 8 + e.maxMatchOff
			for i := e.maxMatchOff + 1; i < end; i++ {
				// Slide the window one byte: drop the low byte, append the next.
				cv = cv>>8 | (uint64(d.content[i-e.maxMatchOff+7]) << 56)
				e.dictLongTable[hashLen(cv, dFastLongTableBits, dFastLongLen)] = tableEntry{
					val:    uint32(cv),
					offset: i,
				}
			}
		}
		e.lastDictID = d.id
		allDirty = true
	}

	// Reset table to initial state
	e.cur = e.maxMatchOff

	dirtyShardCnt := 0
	if !allDirty {
		for i := range e.longTableShardDirty {
			if e.longTableShardDirty[i] {
				dirtyShardCnt++
			}
		}
	}

	if allDirty || dirtyShardCnt > dLongTableShardCnt/2 {
		// More than half the shards are dirty: restore the whole table in one
		// copy via a slice-to-array-pointer conversion (cheaper than per-shard).
		//copy(e.longTable[:], e.dictLongTable)
		e.longTable = *(*[dFastLongTableSize]tableEntry)(e.dictLongTable)
		for i := range e.longTableShardDirty {
			e.longTableShardDirty[i] = false
		}
		return
	}
	// Otherwise restore only the shards that were actually modified.
	for i := range e.longTableShardDirty {
		if !e.longTableShardDirty[i] {
			continue
		}

		// copy(e.longTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize], e.dictLongTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize])
		*(*[dLongTableShardSize]tableEntry)(e.longTable[i*dLongTableShardSize:]) = *(*[dLongTableShardSize]tableEntry)(e.dictLongTable[i*dLongTableShardSize:])
		e.longTableShardDirty[i] = false
	}
}
// markLongShardDirty flags the long-table shard containing entryNum as
// modified, so Reset knows to restore it from the dictionary table.
func (e *doubleFastEncoderDict) markLongShardDirty(entryNum uint32) {
	shard := entryNum / dLongTableShardSize
	e.longTableShardDirty[shard] = true
}
// Copyright 2019+ Klaus Post. All rights reserved.
// License information can be found in the LICENSE file.
// Based on work by Yann Collet, released under BSD License.
package zstd
import (
"fmt"
)
const (
	tableBits        = 15                               // Bits used in the table
	tableSize        = 1 << tableBits                   // Size of the table
	tableShardCnt    = 1 << (tableBits - dictShardBits) // Number of shards in the table
	tableShardSize   = tableSize / tableShardCnt        // Size of an individual shard
	tableFastHashLen = 6                                // Hash input length passed to hashLen for table lookups
	tableMask        = tableSize - 1                    // Mask for table indices. Redundant, but can eliminate bounds checks.
	maxMatchLength   = 131074                           // Upper bound used when extending matches (see l < maxMatchLength loops)
)
// tableEntry is a single hash-table slot.
type tableEntry struct {
	val    uint32 // First 4 bytes at the stored position; used to reject hash collisions.
	offset int32  // Position the entry was stored at, biased by e.cur.
}
// fastEncoder is the fastest encoder level, using a single hash table.
type fastEncoder struct {
	fastBase
	table [tableSize]tableEntry // Hash table of candidate match positions.
}
// fastEncoderDict is a fastEncoder with dictionary support.
type fastEncoderDict struct {
	fastEncoder
	dictTable       []tableEntry        // Table state derived from the dictionary; used to restore e.table on Reset.
	tableShardDirty [tableShardCnt]bool // Which table shards diverged from dictTable since the last Reset.
	allDirty        bool                // Set when the whole table must be considered dirty.
}
// Encode mimics functionality in zstd_fast.c.
// src is appended to the encoder history; literals and sequences are
// appended to blk, and recent offsets are written back on return.
func (e *fastEncoder) Encode(blk *blockEnc, src []byte) {
	const (
		inputMargin            = 8
		minNonLiteralBlockSize = 1 + 1 + inputMargin
	)

	// Protect against e.cur wraparound.
	for e.cur >= e.bufferReset-int32(len(e.hist)) {
		if len(e.hist) == 0 {
			// No history to preserve; clear the table outright.
			for i := range e.table[:] {
				e.table[i] = tableEntry{}
			}
			e.cur = e.maxMatchOff
			break
		}
		// Shift down everything in the table that isn't already too far away.
		minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff
		for i := range e.table[:] {
			v := e.table[i].offset
			if v < minOff {
				v = 0
			} else {
				v = v - e.cur + e.maxMatchOff
			}
			e.table[i].offset = v
		}
		e.cur = e.maxMatchOff
		break
	}

	s := e.addBlock(src)
	blk.size = len(src)
	if len(src) < minNonLiteralBlockSize {
		// Too small for match searching; emit as literals only.
		blk.extraLits = len(src)
		blk.literals = blk.literals[:len(src)]
		copy(blk.literals, src)
		return
	}

	// Override src
	src = e.hist
	sLimit := int32(len(src)) - inputMargin
	// stepSize is the number of bytes to skip on every main loop iteration.
	// It should be >= 2.
	const stepSize = 2

	// TEMPLATE
	const hashLog = tableBits
	// seems global, but would be nice to tweak.
	const kSearchStrength = 6

	// nextEmit is where in src the next emitLiteral should start from.
	nextEmit := s
	cv := load6432(src, s)

	// Relative offsets
	offset1 := int32(blk.recentOffsets[0])
	offset2 := int32(blk.recentOffsets[1])

	// addLiterals copies src[nextEmit:until] into blk.literals and records
	// the literal run length on the sequence.
	addLiterals := func(s *seq, until int32) {
		if until == nextEmit {
			return
		}
		blk.literals = append(blk.literals, src[nextEmit:until]...)
		s.litLen = uint32(until - nextEmit)
	}
	if debugEncoder {
		println("recent offsets:", blk.recentOffsets)
	}

encodeLoop:
	for {
		// t will contain the match offset when we find one.
		// When exiting the search loop, we have already checked 4 bytes.
		var t int32

		// We will not use repeat offsets across blocks.
		// By not using them for the first 3 matches
		canRepeat := len(blk.sequences) > 2

		for {
			if debugAsserts && canRepeat && offset1 == 0 {
				panic("offset0 was 0")
			}

			nextHash := hashLen(cv, hashLog, tableFastHashLen)
			nextHash2 := hashLen(cv>>8, hashLog, tableFastHashLen)
			candidate := e.table[nextHash]
			candidate2 := e.table[nextHash2]
			repIndex := s - offset1 + 2

			e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)}
			e.table[nextHash2] = tableEntry{offset: s + e.cur + 1, val: uint32(cv >> 8)}

			if canRepeat && repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>16) {
				// Consider history as well.
				var seq seq
				length := 4 + e.matchlen(s+6, repIndex+4, src)
				seq.matchLen = uint32(length - zstdMinMatch)

				// We might be able to match backwards.
				// Extend as long as we can.
				start := s + 2
				// We end the search early, so we don't risk 0 literals
				// and have to do special offset treatment.
				startLimit := nextEmit + 1

				sMin := s - e.maxMatchOff
				if sMin < 0 {
					sMin = 0
				}
				for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch {
					repIndex--
					start--
					seq.matchLen++
				}
				addLiterals(&seq, start)

				// rep 0
				seq.offset = 1
				if debugSequences {
					println("repeat sequence", seq, "next s:", s)
				}
				blk.sequences = append(blk.sequences, seq)
				s += length + 2
				nextEmit = s
				if s >= sLimit {
					if debugEncoder {
						println("repeat ended", s, length)
					}
					break encodeLoop
				}
				cv = load6432(src, s)
				continue
			}
			coffset0 := s - (candidate.offset - e.cur)
			coffset1 := s - (candidate2.offset - e.cur) + 1
			if coffset0 < e.maxMatchOff && uint32(cv) == candidate.val {
				// found a regular match
				t = candidate.offset - e.cur
				if debugAsserts && s <= t {
					panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
				}
				if debugAsserts && s-t > e.maxMatchOff {
					panic("s - t >e.maxMatchOff")
				}
				break
			}

			if coffset1 < e.maxMatchOff && uint32(cv>>8) == candidate2.val {
				// found a regular match
				t = candidate2.offset - e.cur
				s++
				if debugAsserts && s <= t {
					panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
				}
				if debugAsserts && s-t > e.maxMatchOff {
					panic("s - t >e.maxMatchOff")
				}
				if debugAsserts && t < 0 {
					panic("t<0")
				}
				break
			}
			s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1))
			if s >= sLimit {
				break encodeLoop
			}
			cv = load6432(src, s)
		}
		// A 4-byte match has been found. We'll later see if more than 4 bytes.
		offset2 = offset1
		offset1 = s - t

		if debugAsserts && s <= t {
			panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
		}

		if debugAsserts && canRepeat && int(offset1) > len(src) {
			panic("invalid offset")
		}

		// Extend the 4-byte match as long as possible.
		l := e.matchlen(s+4, t+4, src) + 4

		// Extend backwards
		tMin := s - e.maxMatchOff
		if tMin < 0 {
			tMin = 0
		}
		for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength {
			s--
			t--
			l++
		}

		// Write our sequence.
		var seq seq
		seq.litLen = uint32(s - nextEmit)
		seq.matchLen = uint32(l - zstdMinMatch)
		if seq.litLen > 0 {
			blk.literals = append(blk.literals, src[nextEmit:s]...)
		}
		// Don't use repeat offsets
		seq.offset = uint32(s-t) + 3
		s += l
		if debugSequences {
			println("sequence", seq, "next s:", s)
		}
		blk.sequences = append(blk.sequences, seq)
		nextEmit = s
		if s >= sLimit {
			break encodeLoop
		}
		cv = load6432(src, s)

		// Check offset 2
		if o2 := s - offset2; canRepeat && load3232(src, o2) == uint32(cv) {
			// We have at least 4 byte match.
			// No need to check backwards. We come straight from a match
			l := 4 + e.matchlen(s+4, o2+4, src)

			// Store this, since we have it.
			nextHash := hashLen(cv, hashLog, tableFastHashLen)
			e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)}
			seq.matchLen = uint32(l) - zstdMinMatch
			seq.litLen = 0
			// Since litlen is always 0, this is offset 1.
			seq.offset = 1
			s += l
			nextEmit = s
			if debugSequences {
				println("sequence", seq, "next s:", s)
			}
			blk.sequences = append(blk.sequences, seq)

			// Swap offset 1 and 2.
			offset1, offset2 = offset2, offset1
			if s >= sLimit {
				break encodeLoop
			}
			// Prepare next loop.
			cv = load6432(src, s)
		}
	}

	if int(nextEmit) < len(src) {
		blk.literals = append(blk.literals, src[nextEmit:]...)
		blk.extraLits = len(src) - int(nextEmit)
	}
	blk.recentOffsets[0] = uint32(offset1)
	blk.recentOffsets[1] = uint32(offset2)
	if debugEncoder {
		println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits)
	}
}
// EncodeNoHist will encode a block with no history and no following blocks.
// Most notable difference is that src will not be copied for history and
// we do not need to check for max match length.
// On return e.cur is advanced past the block so stale table entries cannot
// match for the next user.
func (e *fastEncoder) EncodeNoHist(blk *blockEnc, src []byte) {
	const (
		inputMargin            = 8
		minNonLiteralBlockSize = 1 + 1 + inputMargin
	)
	if debugEncoder {
		if len(src) > maxCompressedBlockSize {
			panic("src too big")
		}
	}

	// Protect against e.cur wraparound.
	// No history is kept, so a plain clear of the table is sufficient.
	if e.cur >= e.bufferReset {
		for i := range e.table[:] {
			e.table[i] = tableEntry{}
		}
		e.cur = e.maxMatchOff
	}

	s := int32(0)
	blk.size = len(src)
	if len(src) < minNonLiteralBlockSize {
		// Too small for match searching; emit as literals only.
		blk.extraLits = len(src)
		blk.literals = blk.literals[:len(src)]
		copy(blk.literals, src)
		return
	}

	sLimit := int32(len(src)) - inputMargin
	// stepSize is the number of bytes to skip on every main loop iteration.
	// It should be >= 2.
	const stepSize = 2

	// TEMPLATE
	const hashLog = tableBits
	// seems global, but would be nice to tweak.
	const kSearchStrength = 6

	// nextEmit is where in src the next emitLiteral should start from.
	nextEmit := s
	cv := load6432(src, s)

	// Relative offsets
	offset1 := int32(blk.recentOffsets[0])
	offset2 := int32(blk.recentOffsets[1])

	// addLiterals copies src[nextEmit:until] into blk.literals and records
	// the literal run length on the sequence.
	addLiterals := func(s *seq, until int32) {
		if until == nextEmit {
			return
		}
		blk.literals = append(blk.literals, src[nextEmit:until]...)
		s.litLen = uint32(until - nextEmit)
	}
	if debugEncoder {
		println("recent offsets:", blk.recentOffsets)
	}

encodeLoop:
	for {
		// t will contain the match offset when we find one.
		// When exiting the search loop, we have already checked 4 bytes.
		var t int32

		// We will not use repeat offsets across blocks.
		// By not using them for the first 3 matches
		for {
			nextHash := hashLen(cv, hashLog, tableFastHashLen)
			nextHash2 := hashLen(cv>>8, hashLog, tableFastHashLen)
			candidate := e.table[nextHash]
			candidate2 := e.table[nextHash2]
			repIndex := s - offset1 + 2

			e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)}
			e.table[nextHash2] = tableEntry{offset: s + e.cur + 1, val: uint32(cv >> 8)}

			if len(blk.sequences) > 2 && load3232(src, repIndex) == uint32(cv>>16) {
				// Consider history as well.
				var seq seq
				length := 4 + e.matchlen(s+6, repIndex+4, src)

				seq.matchLen = uint32(length - zstdMinMatch)

				// We might be able to match backwards.
				// Extend as long as we can.
				start := s + 2
				// We end the search early, so we don't risk 0 literals
				// and have to do special offset treatment.
				startLimit := nextEmit + 1

				sMin := s - e.maxMatchOff
				if sMin < 0 {
					sMin = 0
				}
				for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] {
					repIndex--
					start--
					seq.matchLen++
				}
				addLiterals(&seq, start)

				// rep 0
				seq.offset = 1
				if debugSequences {
					println("repeat sequence", seq, "next s:", s)
				}
				blk.sequences = append(blk.sequences, seq)
				s += length + 2
				nextEmit = s
				if s >= sLimit {
					if debugEncoder {
						println("repeat ended", s, length)
					}
					break encodeLoop
				}
				cv = load6432(src, s)
				continue
			}
			coffset0 := s - (candidate.offset - e.cur)
			coffset1 := s - (candidate2.offset - e.cur) + 1
			if coffset0 < e.maxMatchOff && uint32(cv) == candidate.val {
				// found a regular match
				t = candidate.offset - e.cur
				if debugAsserts && s <= t {
					panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
				}
				if debugAsserts && s-t > e.maxMatchOff {
					panic("s - t >e.maxMatchOff")
				}
				if debugAsserts && t < 0 {
					panic(fmt.Sprintf("t (%d) < 0, candidate.offset: %d, e.cur: %d, coffset0: %d, e.maxMatchOff: %d", t, candidate.offset, e.cur, coffset0, e.maxMatchOff))
				}
				break
			}

			if coffset1 < e.maxMatchOff && uint32(cv>>8) == candidate2.val {
				// found a regular match
				t = candidate2.offset - e.cur
				s++
				if debugAsserts && s <= t {
					panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
				}
				if debugAsserts && s-t > e.maxMatchOff {
					panic("s - t >e.maxMatchOff")
				}
				if debugAsserts && t < 0 {
					panic("t<0")
				}
				break
			}
			s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1))
			if s >= sLimit {
				break encodeLoop
			}
			cv = load6432(src, s)
		}
		// A 4-byte match has been found. We'll later see if more than 4 bytes.
		offset2 = offset1
		offset1 = s - t

		if debugAsserts && s <= t {
			panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
		}

		if debugAsserts && t < 0 {
			panic(fmt.Sprintf("t (%d) < 0 ", t))
		}
		// Extend the 4-byte match as long as possible.
		l := e.matchlen(s+4, t+4, src) + 4

		// Extend backwards
		tMin := s - e.maxMatchOff
		if tMin < 0 {
			tMin = 0
		}
		for t > tMin && s > nextEmit && src[t-1] == src[s-1] {
			s--
			t--
			l++
		}

		// Write our sequence.
		var seq seq
		seq.litLen = uint32(s - nextEmit)
		seq.matchLen = uint32(l - zstdMinMatch)
		if seq.litLen > 0 {
			blk.literals = append(blk.literals, src[nextEmit:s]...)
		}
		// Don't use repeat offsets
		seq.offset = uint32(s-t) + 3
		s += l
		if debugSequences {
			println("sequence", seq, "next s:", s)
		}
		blk.sequences = append(blk.sequences, seq)
		nextEmit = s
		if s >= sLimit {
			break encodeLoop
		}
		cv = load6432(src, s)

		// Check offset 2
		if o2 := s - offset2; len(blk.sequences) > 2 && load3232(src, o2) == uint32(cv) {
			// We have at least 4 byte match.
			// No need to check backwards. We come straight from a match
			l := 4 + e.matchlen(s+4, o2+4, src)

			// Store this, since we have it.
			nextHash := hashLen(cv, hashLog, tableFastHashLen)
			e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)}
			seq.matchLen = uint32(l) - zstdMinMatch
			seq.litLen = 0
			// Since litlen is always 0, this is offset 1.
			seq.offset = 1
			s += l
			nextEmit = s
			if debugSequences {
				println("sequence", seq, "next s:", s)
			}
			blk.sequences = append(blk.sequences, seq)

			// Swap offset 1 and 2.
			offset1, offset2 = offset2, offset1
			if s >= sLimit {
				break encodeLoop
			}
			// Prepare next loop.
			cv = load6432(src, s)
		}
	}

	if int(nextEmit) < len(src) {
		blk.literals = append(blk.literals, src[nextEmit:]...)
		blk.extraLits = len(src) - int(nextEmit)
	}
	if debugEncoder {
		println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits)
	}
	// We do not store history, so we must offset e.cur to avoid false matches for next user.
	if e.cur < e.bufferReset {
		e.cur += int32(len(src))
	}
}
// Encode will encode the content, with a dictionary if initialized for it.
//
// Matches are found against e.hist (which includes the dictionary content);
// shard dirty-tracking is updated for every table write so Reset can later
// restore only the shards that were touched.
func (e *fastEncoderDict) Encode(blk *blockEnc, src []byte) {
	const (
		inputMargin            = 8
		minNonLiteralBlockSize = 1 + 1 + inputMargin
	)
	// Large inputs, or a fully dirty table, go through the plain encoder;
	// the table is then considered entirely dirty.
	if e.allDirty || len(src) > 32<<10 {
		e.fastEncoder.Encode(blk, src)
		e.allDirty = true
		return
	}
	// Protect against e.cur wraparound.
	for e.cur >= e.bufferReset-int32(len(e.hist)) {
		if len(e.hist) == 0 {
			e.table = [tableSize]tableEntry{}
			e.cur = e.maxMatchOff
			break
		}
		// Shift down everything in the table that isn't already too far away.
		minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff
		for i := range e.table[:] {
			v := e.table[i].offset
			if v < minOff {
				v = 0
			} else {
				v = v - e.cur + e.maxMatchOff
			}
			e.table[i].offset = v
		}
		e.cur = e.maxMatchOff
		break
	}

	s := e.addBlock(src)
	blk.size = len(src)
	// Tiny blocks are emitted as raw literals; no matching is attempted.
	if len(src) < minNonLiteralBlockSize {
		blk.extraLits = len(src)
		blk.literals = blk.literals[:len(src)]
		copy(blk.literals, src)
		return
	}

	// Override src; matching is performed against the full history buffer.
	src = e.hist
	sLimit := int32(len(src)) - inputMargin
	// stepSize is the number of bytes to skip on every main loop iteration.
	// It should be >= 2.
	const stepSize = 2

	// TEMPLATE
	const hashLog = tableBits
	// seems global, but would be nice to tweak.
	const kSearchStrength = 7

	// nextEmit is where in src the next emitLiteral should start from.
	nextEmit := s
	cv := load6432(src, s)

	// Relative offsets
	offset1 := int32(blk.recentOffsets[0])
	offset2 := int32(blk.recentOffsets[1])

	// addLiterals appends src[nextEmit:until] to the block literals and
	// records the literal length in the sequence.
	addLiterals := func(s *seq, until int32) {
		if until == nextEmit {
			return
		}
		blk.literals = append(blk.literals, src[nextEmit:until]...)
		s.litLen = uint32(until - nextEmit)
	}
	if debugEncoder {
		println("recent offsets:", blk.recentOffsets)
	}

encodeLoop:
	for {
		// t will contain the match offset when we find one.
		// When exiting the search loop, we have already checked 4 bytes.
		var t int32

		// We will not use repeat offsets across blocks.
		// By not using them for the first 3 matches
		canRepeat := len(blk.sequences) > 2

		for {
			if debugAsserts && canRepeat && offset1 == 0 {
				panic("offset0 was 0")
			}

			nextHash := hashLen(cv, hashLog, tableFastHashLen)
			nextHash2 := hashLen(cv>>8, hashLog, tableFastHashLen)
			candidate := e.table[nextHash]
			candidate2 := e.table[nextHash2]
			repIndex := s - offset1 + 2

			e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)}
			e.markShardDirty(nextHash)
			e.table[nextHash2] = tableEntry{offset: s + e.cur + 1, val: uint32(cv >> 8)}
			e.markShardDirty(nextHash2)

			// First try a repeat match at offset1, two bytes ahead.
			if canRepeat && repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>16) {
				// Consider history as well.
				var seq seq
				length := 4 + e.matchlen(s+6, repIndex+4, src)

				seq.matchLen = uint32(length - zstdMinMatch)

				// We might be able to match backwards.
				// Extend as long as we can.
				start := s + 2
				// We end the search early, so we don't risk 0 literals
				// and have to do special offset treatment.
				startLimit := nextEmit + 1

				sMin := s - e.maxMatchOff
				if sMin < 0 {
					sMin = 0
				}
				for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch {
					repIndex--
					start--
					seq.matchLen++
				}
				addLiterals(&seq, start)

				// rep 0
				seq.offset = 1
				if debugSequences {
					println("repeat sequence", seq, "next s:", s)
				}
				blk.sequences = append(blk.sequences, seq)
				s += length + 2
				nextEmit = s
				if s >= sLimit {
					if debugEncoder {
						println("repeat ended", s, length)
					}
					break encodeLoop
				}
				cv = load6432(src, s)
				continue
			}
			coffset0 := s - (candidate.offset - e.cur)
			coffset1 := s - (candidate2.offset - e.cur) + 1
			if coffset0 < e.maxMatchOff && uint32(cv) == candidate.val {
				// found a regular match
				t = candidate.offset - e.cur
				if debugAsserts && s <= t {
					panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
				}
				if debugAsserts && s-t > e.maxMatchOff {
					panic("s - t >e.maxMatchOff")
				}
				break
			}

			if coffset1 < e.maxMatchOff && uint32(cv>>8) == candidate2.val {
				// found a regular match at s+1
				t = candidate2.offset - e.cur
				s++
				if debugAsserts && s <= t {
					panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
				}
				if debugAsserts && s-t > e.maxMatchOff {
					panic("s - t >e.maxMatchOff")
				}
				if debugAsserts && t < 0 {
					panic("t<0")
				}
				break
			}
			// No match; skip ahead, accelerating as the literal run grows.
			s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1))
			if s >= sLimit {
				break encodeLoop
			}
			cv = load6432(src, s)
		}
		// A 4-byte match has been found. We'll later see if more than 4 bytes.
		offset2 = offset1
		offset1 = s - t

		if debugAsserts && s <= t {
			panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
		}

		if debugAsserts && canRepeat && int(offset1) > len(src) {
			panic("invalid offset")
		}

		// Extend the 4-byte match as long as possible.
		l := e.matchlen(s+4, t+4, src) + 4

		// Extend backwards
		tMin := s - e.maxMatchOff
		if tMin < 0 {
			tMin = 0
		}
		for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength {
			s--
			t--
			l++
		}

		// Write our sequence.
		var seq seq
		seq.litLen = uint32(s - nextEmit)
		seq.matchLen = uint32(l - zstdMinMatch)
		if seq.litLen > 0 {
			blk.literals = append(blk.literals, src[nextEmit:s]...)
		}
		// Don't use repeat offsets
		seq.offset = uint32(s-t) + 3
		s += l
		if debugSequences {
			println("sequence", seq, "next s:", s)
		}
		blk.sequences = append(blk.sequences, seq)
		nextEmit = s
		if s >= sLimit {
			break encodeLoop
		}
		cv = load6432(src, s)

		// Check offset 2
		if o2 := s - offset2; canRepeat && load3232(src, o2) == uint32(cv) {
			// We have at least 4 byte match.
			// No need to check backwards. We come straight from a match
			l := 4 + e.matchlen(s+4, o2+4, src)

			// Store this, since we have it.
			nextHash := hashLen(cv, hashLog, tableFastHashLen)
			e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)}
			e.markShardDirty(nextHash)
			seq.matchLen = uint32(l) - zstdMinMatch
			seq.litLen = 0
			// Since litlen is always 0, this is offset 1.
			seq.offset = 1
			s += l
			nextEmit = s
			if debugSequences {
				println("sequence", seq, "next s:", s)
			}
			blk.sequences = append(blk.sequences, seq)

			// Swap offset 1 and 2.
			offset1, offset2 = offset2, offset1
			if s >= sLimit {
				break encodeLoop
			}
			// Prepare next loop.
			cv = load6432(src, s)
		}
	}

	// Flush any trailing literals.
	if int(nextEmit) < len(src) {
		blk.literals = append(blk.literals, src[nextEmit:]...)
		blk.extraLits = len(src) - int(nextEmit)
	}
	// Persist recent offsets for the next block.
	blk.recentOffsets[0] = uint32(offset1)
	blk.recentOffsets[1] = uint32(offset2)
	if debugEncoder {
		println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits)
	}
}
// Reset will reset the encoder state and set a dictionary if not nil.
// fastEncoder does not support dictionaries; use fastEncoderDict instead.
func (e *fastEncoder) Reset(d *dict, singleBlock bool) {
	e.resetBase(d, singleBlock)
	if d != nil {
		// Programmer error: the dictionary variant must be used.
		panic("fastEncoder: Reset with dict")
	}
}
// Reset will reset the encoder state and set a dictionary if not nil.
//
// The dictionary hash table is built once per dictionary ID and cached in
// e.dictTable; subsequent resets restore only the table shards that were
// dirtied by encoding, unless most shards are dirty, in which case the whole
// table is copied at once.
func (e *fastEncoderDict) Reset(d *dict, singleBlock bool) {
	e.resetBase(d, singleBlock)
	if d == nil {
		return
	}

	// Init or copy dict table
	if len(e.dictTable) != len(e.table) || d.id != e.lastDictID {
		if len(e.dictTable) != len(e.table) {
			e.dictTable = make([]tableEntry, len(e.table))
		}
		if true {
			// Index every other position of the dictionary content,
			// stopping 8 bytes from the end so load6432 stays in bounds.
			end := e.maxMatchOff + int32(len(d.content)) - 8
			for i := e.maxMatchOff; i < end; i += 2 {
				const hashLog = tableBits

				cv := load6432(d.content, i-e.maxMatchOff)
				nextHash := hashLen(cv, hashLog, tableFastHashLen)     // 0 -> 6
				nextHash1 := hashLen(cv>>8, hashLog, tableFastHashLen) // 1 -> 7
				e.dictTable[nextHash] = tableEntry{
					val:    uint32(cv),
					offset: i,
				}
				e.dictTable[nextHash1] = tableEntry{
					val:    uint32(cv >> 8),
					offset: i + 1,
				}
			}
		}
		e.lastDictID = d.id
		// A new dictionary invalidates every shard.
		e.allDirty = true
	}

	e.cur = e.maxMatchOff
	// Count dirty shards to decide between a full or partial restore.
	dirtyShardCnt := 0
	if !e.allDirty {
		for i := range e.tableShardDirty {
			if e.tableShardDirty[i] {
				dirtyShardCnt++
			}
		}
	}

	const shardCnt = tableShardCnt
	const shardSize = tableShardSize
	// If more than 2/3 of shards are dirty, restoring everything is cheaper.
	if e.allDirty || dirtyShardCnt > shardCnt*4/6 {
		//copy(e.table[:], e.dictTable)
		e.table = *(*[tableSize]tableEntry)(e.dictTable)
		for i := range e.tableShardDirty {
			e.tableShardDirty[i] = false
		}
		e.allDirty = false
		return
	}
	// Otherwise restore only the shards that were written to.
	for i := range e.tableShardDirty {
		if !e.tableShardDirty[i] {
			continue
		}

		//copy(e.table[i*shardSize:(i+1)*shardSize], e.dictTable[i*shardSize:(i+1)*shardSize])
		*(*[shardSize]tableEntry)(e.table[i*shardSize:]) = *(*[shardSize]tableEntry)(e.dictTable[i*shardSize:])
		e.tableShardDirty[i] = false
	}
	e.allDirty = false
}
// markAllShardsDirty marks the whole hash table as needing restoration
// from the dictionary table on the next Reset.
func (e *fastEncoderDict) markAllShardsDirty() {
	e.allDirty = true
}
// markShardDirty records that the table shard containing entryNum has been
// written to, so Reset can restore it from the dictionary table.
func (e *fastEncoderDict) markShardDirty(entryNum uint32) {
	e.tableShardDirty[entryNum/tableShardSize] = true
}
// Copyright 2019+ Klaus Post. All rights reserved.
// License information can be found in the LICENSE file.
// Based on work by Yann Collet, released under BSD License.
package zstd
import (
"crypto/rand"
"errors"
"fmt"
"io"
"math"
rdebug "runtime/debug"
"sync"
"github.com/klauspost/compress/zstd/internal/xxhash"
)
// Encoder provides encoding to Zstandard.
// An Encoder can be used for either compressing a stream via the
// io.WriteCloser interface supported by the Encoder or as multiple independent
// tasks via the EncodeAll function.
// Smaller encodes are encouraged to use the EncodeAll function.
// Use NewWriter to create a new instance.
type Encoder struct {
	o        encoderOptions // configuration accumulated from EOption values
	encoders chan encoder   // pool of reusable block encoders for EncodeAll
	state    encoderState   // state for the streaming Write/Flush/Close path
	init     sync.Once      // guards one-time initialization of the pool
}
// encoder is the internal interface implemented by all compression-level
// specific block encoders.
type encoder interface {
	// Encode compresses src into blk, using accumulated history.
	Encode(blk *blockEnc, src []byte)
	// EncodeNoHist compresses src into blk without using history.
	EncodeNoHist(blk *blockEnc, src []byte)
	// Block returns the encoder's current block.
	Block() *blockEnc
	// CRC returns the running checksum of the input.
	CRC() *xxhash.Digest
	// AppendCRC appends the finished checksum to b.
	AppendCRC([]byte) []byte
	// WindowSize returns the window size to use for the given content size.
	WindowSize(size int64) int32
	// UseBlock switches the encoder to the supplied block.
	UseBlock(*blockEnc)
	// Reset prepares the encoder for a new stream, optionally with a dictionary.
	Reset(d *dict, singleBlock bool)
}
// encoderState holds the mutable state of a streaming encode
// (the Write/Flush/Close path).
type encoderState struct {
	w                io.Writer // destination writer
	filling          []byte    // buffer being filled by Write, up to blockSize
	current          []byte    // block currently being encoded (async mode)
	previous         []byte    // previously encoded block (async mode)
	encoder          encoder   // block encoder for this stream
	writing          *blockEnc // block currently being written out (async mode)
	err              error     // first encode error; sticky
	writeErr         error     // first write error from the writer goroutine
	nWritten         int64     // compressed bytes written
	nInput           int64     // uncompressed bytes consumed
	frameContentSize int64     // expected content size; checked on Close if > 0
	headerWritten    bool      // frame header has been emitted
	eofWritten       bool      // final block has been emitted
	fullFrameWritten bool      // a complete frame was written synchronously

	// This waitgroup indicates an encode is running.
	wg sync.WaitGroup
	// This waitgroup indicates we have a block encoding/writing.
	wWg sync.WaitGroup
}
// NewWriter will create a new Zstandard encoder.
// If the encoder will be used for encoding blocks a nil writer can be used.
func NewWriter(w io.Writer, opts ...EOption) (*Encoder, error) {
	initPredefined()
	e := &Encoder{}
	e.o.setDefault()
	// Apply options in order; the first failure aborts construction.
	for _, opt := range opts {
		if err := opt(&e.o); err != nil {
			return nil, err
		}
	}
	if w != nil {
		e.Reset(w)
	}
	return e, nil
}
// initialize fills the encoder pool. Called once via e.init.
func (e *Encoder) initialize() {
	// If the encoder was created without NewWriter, options may be unset.
	if e.o.concurrent == 0 {
		e.o.setDefault()
	}
	e.encoders = make(chan encoder, e.o.concurrent)
	for i := 0; i < e.o.concurrent; i++ {
		e.encoders <- e.o.encoder()
	}
}
// Reset will re-initialize the writer and new writes will encode to the supplied writer
// as a new, independent stream.
func (e *Encoder) Reset(w io.Writer) {
	s := &e.state
	// Wait for any in-flight encode and write to finish before reusing state.
	s.wg.Wait()
	s.wWg.Wait()
	if cap(s.filling) == 0 {
		s.filling = make([]byte, 0, e.o.blockSize)
	}
	// The current/previous/writing buffers are only needed for async encoding.
	if e.o.concurrent > 1 {
		if cap(s.current) == 0 {
			s.current = make([]byte, 0, e.o.blockSize)
		}
		if cap(s.previous) == 0 {
			s.previous = make([]byte, 0, e.o.blockSize)
		}
		s.current = s.current[:0]
		s.previous = s.previous[:0]
		if s.writing == nil {
			s.writing = &blockEnc{lowMem: e.o.lowMem}
			s.writing.init()
		}
		s.writing.initNewEncode()
	}
	if s.encoder == nil {
		s.encoder = e.o.encoder()
	}
	s.filling = s.filling[:0]
	s.encoder.Reset(e.o.dict, false)
	// Clear all per-stream state and error flags.
	s.headerWritten = false
	s.eofWritten = false
	s.fullFrameWritten = false
	s.w = w
	s.err = nil
	s.nWritten = 0
	s.nInput = 0
	s.writeErr = nil
	s.frameContentSize = 0
}
// ResetContentSize will reset and set a content size for the next stream.
// If the bytes written does not match the size given an error will be returned
// when calling Close().
// This is removed when Reset is called.
// Sizes <= 0 results in no content size set.
func (e *Encoder) ResetContentSize(w io.Writer, size int64) {
	e.Reset(w)
	if size < 0 {
		// Negative sizes mean "unknown"; leave the default (0) from Reset.
		return
	}
	e.state.frameContentSize = size
}
// Write data to the encoder.
// Input data will be buffered and as the buffer fills up
// content will be compressed and written to the output.
// When done writing, use Close to flush the remaining output
// and write CRC if requested.
func (e *Encoder) Write(p []byte) (n int, err error) {
	s := &e.state
	if s.eofWritten {
		return 0, ErrEncoderClosed
	}
	for len(p) > 0 {
		// If everything fits in the buffer without completing a block,
		// just accumulate and return.
		if len(p)+len(s.filling) < e.o.blockSize {
			if e.o.crc {
				_, _ = s.encoder.CRC().Write(p)
			}
			s.filling = append(s.filling, p...)
			return n + len(p), nil
		}
		// Take only enough to fill one block.
		add := p
		if len(p)+len(s.filling) > e.o.blockSize {
			add = add[:e.o.blockSize-len(s.filling)]
		}
		if e.o.crc {
			_, _ = s.encoder.CRC().Write(add)
		}
		s.filling = append(s.filling, add...)
		p = p[len(add):]
		n += len(add)
		if len(s.filling) < e.o.blockSize {
			return n, nil
		}
		// Buffer is full: hand the block to the encoder.
		err := e.nextBlock(false)
		if err != nil {
			return n, err
		}
		if debugAsserts && len(s.filling) > 0 {
			panic(len(s.filling))
		}
	}
	return n, nil
}
// nextBlock will synchronize and start compressing input in e.state.filling.
// If an error has occurred during encoding it will be returned.
//
// With concurrency > 1 the encode and the write each run on their own
// goroutine, pipelined via s.wg (encode) and s.wWg (write).
func (e *Encoder) nextBlock(final bool) error {
	s := &e.state
	// Wait for current block.
	s.wg.Wait()
	if s.err != nil {
		return s.err
	}
	if len(s.filling) > e.o.blockSize {
		return fmt.Errorf("block > maxStoreBlockSize")
	}
	if !s.headerWritten {
		// If we have a single block encode, do a sync compression.
		if final && len(s.filling) == 0 && !e.o.fullZero {
			// Nothing to write at all; mark the stream complete.
			s.headerWritten = true
			s.fullFrameWritten = true
			s.eofWritten = true
			return nil
		}
		if final && len(s.filling) > 0 {
			// Everything fits in one frame; encode synchronously.
			s.current = e.encodeAll(s.encoder, s.filling, s.current[:0])
			var n2 int
			n2, s.err = s.w.Write(s.current)
			if s.err != nil {
				return s.err
			}
			s.nWritten += int64(n2)
			s.nInput += int64(len(s.filling))
			s.current = s.current[:0]
			s.filling = s.filling[:0]
			s.headerWritten = true
			s.fullFrameWritten = true
			s.eofWritten = true
			return nil
		}

		// Emit the frame header before the first block.
		var tmp [maxHeaderSize]byte
		fh := frameHeader{
			ContentSize:   uint64(s.frameContentSize),
			WindowSize:    uint32(s.encoder.WindowSize(s.frameContentSize)),
			SingleSegment: false,
			Checksum:      e.o.crc,
			DictID:        e.o.dict.ID(),
		}

		dst := fh.appendTo(tmp[:0])
		s.headerWritten = true
		s.wWg.Wait()
		var n2 int
		n2, s.err = s.w.Write(dst)
		if s.err != nil {
			return s.err
		}
		s.nWritten += int64(n2)
	}
	if s.eofWritten {
		// Ensure we only write it once.
		final = false
	}

	if len(s.filling) == 0 {
		// Final block, but no data.
		if final {
			enc := s.encoder
			blk := enc.Block()
			blk.reset(nil)
			blk.last = true
			blk.encodeRaw(nil)
			s.wWg.Wait()
			_, s.err = s.w.Write(blk.output)
			s.nWritten += int64(len(blk.output))
			s.eofWritten = true
		}
		return s.err
	}

	// SYNC:
	if e.o.concurrent == 1 {
		src := s.filling
		s.nInput += int64(len(s.filling))
		if debugEncoder {
			println("Adding sync block,", len(src), "bytes, final:", final)
		}
		enc := s.encoder
		blk := enc.Block()
		blk.reset(nil)
		enc.Encode(blk, src)
		blk.last = final
		if final {
			s.eofWritten = true
		}

		s.err = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy)
		if s.err != nil {
			return s.err
		}
		_, s.err = s.w.Write(blk.output)
		s.nWritten += int64(len(blk.output))
		s.filling = s.filling[:0]
		return s.err
	}

	// Move blocks forward.
	s.filling, s.current, s.previous = s.previous[:0], s.filling, s.current
	s.nInput += int64(len(s.current))
	s.wg.Add(1)
	if final {
		s.eofWritten = true
	}
	go func(src []byte) {
		if debugEncoder {
			println("Adding block,", len(src), "bytes, final:", final)
		}
		defer func() {
			if r := recover(); r != nil {
				s.err = fmt.Errorf("panic while encoding: %v", r)
				rdebug.PrintStack()
			}
			s.wg.Done()
		}()
		enc := s.encoder
		blk := enc.Block()
		enc.Encode(blk, src)
		blk.last = final
		// Wait for pending writes.
		s.wWg.Wait()
		if s.writeErr != nil {
			s.err = s.writeErr
			return
		}
		// Transfer encoders from previous write block.
		blk.swapEncoders(s.writing)
		// Transfer recent offsets to next.
		enc.UseBlock(s.writing)
		s.writing = blk
		s.wWg.Add(1)
		go func() {
			defer func() {
				if r := recover(); r != nil {
					s.writeErr = fmt.Errorf("panic while encoding/writing: %v", r)
					rdebug.PrintStack()
				}
				s.wWg.Done()
			}()

			s.writeErr = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy)
			if s.writeErr != nil {
				return
			}
			_, s.writeErr = s.w.Write(blk.output)
			s.nWritten += int64(len(blk.output))
		}()
	}(s.current)
	return nil
}
// ReadFrom reads data from r until EOF or error.
// The return value n is the number of bytes read.
// Any error except io.EOF encountered during the read is also returned.
//
// The Copy function uses ReaderFrom if available.
func (e *Encoder) ReadFrom(r io.Reader) (n int64, err error) {
	if debugEncoder {
		println("Using ReadFrom")
	}

	// Flush any current writes.
	if len(e.state.filling) > 0 {
		if err := e.nextBlock(false); err != nil {
			return 0, err
		}
	}
	// Read directly into the block buffer, one block at a time.
	e.state.filling = e.state.filling[:e.o.blockSize]
	src := e.state.filling
	for {
		n2, err := r.Read(src)
		if e.o.crc {
			_, _ = e.state.encoder.CRC().Write(src[:n2])
		}
		// src is now the unfilled part...
		src = src[n2:]
		n += int64(n2)
		switch err {
		case io.EOF:
			// Trim the buffer to what was actually read; the caller
			// will flush it via Flush/Close.
			e.state.filling = e.state.filling[:len(e.state.filling)-len(src)]
			if debugEncoder {
				println("ReadFrom: got EOF final block:", len(e.state.filling))
			}
			return n, nil
		case nil:
		default:
			if debugEncoder {
				println("ReadFrom: got error:", err)
			}
			e.state.err = err
			return n, err
		}
		if len(src) > 0 {
			// Block not full yet; keep reading into the remainder.
			if debugEncoder {
				println("ReadFrom: got space left in source:", len(src))
			}
			continue
		}
		err = e.nextBlock(false)
		if err != nil {
			return n, err
		}
		e.state.filling = e.state.filling[:e.o.blockSize]
		src = e.state.filling
	}
}
// Flush will send the currently written data to output
// and block until everything has been written.
// This should only be used on rare occasions where pushing the currently queued data is critical.
func (e *Encoder) Flush() error {
	s := &e.state
	if len(s.filling) > 0 {
		err := e.nextBlock(false)
		if err != nil {
			// Ignore Flush after Close.
			if errors.Is(s.err, ErrEncoderClosed) {
				return nil
			}
			return err
		}
	}
	// Wait for both the encode and write goroutines to finish.
	s.wg.Wait()
	s.wWg.Wait()
	if s.err != nil {
		// Ignore Flush after Close.
		if errors.Is(s.err, ErrEncoderClosed) {
			return nil
		}
		return s.err
	}
	return s.writeErr
}
// Close will flush the final output and close the stream.
// The function will block until everything has been written.
// The Encoder can still be re-used after calling this.
func (e *Encoder) Close() error {
	s := &e.state
	if s.encoder == nil {
		return nil
	}
	// Emit the final block.
	err := e.nextBlock(true)
	if err != nil {
		if errors.Is(s.err, ErrEncoderClosed) {
			return nil
		}
		return err
	}
	// Verify the promised content size, if one was set.
	if s.frameContentSize > 0 {
		if s.nInput != s.frameContentSize {
			return fmt.Errorf("frame content size %d given, but %d bytes was written", s.frameContentSize, s.nInput)
		}
	}
	if e.state.fullFrameWritten {
		// A complete frame was already written synchronously (CRC included).
		return s.err
	}
	s.wg.Wait()
	s.wWg.Wait()

	if s.err != nil {
		return s.err
	}
	if s.writeErr != nil {
		return s.writeErr
	}

	// Write CRC
	if e.o.crc && s.err == nil {
		// heap alloc.
		var tmp [4]byte
		_, s.err = s.w.Write(s.encoder.AppendCRC(tmp[:0]))
		s.nWritten += 4
	}

	// Add padding with content from crypto/rand.Reader
	if s.err == nil && e.o.pad > 0 {
		add := calcSkippableFrame(s.nWritten, int64(e.o.pad))
		frame, err := skippableFrame(s.filling[:0], add, rand.Reader)
		if err != nil {
			return err
		}
		_, s.err = s.w.Write(frame)
	}
	// Mark the stream closed so further Write/Flush calls are rejected.
	if s.err == nil {
		s.err = ErrEncoderClosed
		return nil
	}
	return s.err
}
// EncodeAll will encode all input in src and append it to dst.
// This function can be called concurrently, but each call will only run on a single goroutine.
// If empty input is given, nothing is returned, unless WithZeroFrames is specified.
// Encoded blocks can be concatenated and the result will be the combined input stream.
// Data compressed with EncodeAll can be decoded with the Decoder,
// using either a stream or DecodeAll.
func (e *Encoder) EncodeAll(src, dst []byte) []byte {
	e.init.Do(e.initialize)
	// Borrow an encoder from the pool and return it when done.
	enc := <-e.encoders
	defer func() { e.encoders <- enc }()
	return e.encodeAll(enc, src, dst)
}
// encodeAll compresses all of src into a single frame appended to dst,
// using the supplied block encoder. It is the synchronous worker behind
// EncodeAll and the final-block fast path in nextBlock.
func (e *Encoder) encodeAll(enc encoder, src, dst []byte) []byte {
	if len(src) == 0 {
		if e.o.fullZero {
			// Add frame header.
			fh := frameHeader{
				ContentSize:   0,
				WindowSize:    MinWindowSize,
				SingleSegment: true,
				// Adding a checksum would be a waste of space.
				Checksum: false,
				DictID:   0,
			}
			dst = fh.appendTo(dst)

			// Write raw block as last one only.
			var blk blockHeader
			blk.setSize(0)
			blk.setType(blockTypeRaw)
			blk.setLast(true)
			dst = blk.appendTo(dst)
		}
		return dst
	}

	// Use single segments when above minimum window and below window size.
	single := len(src) <= e.o.windowSize && len(src) > MinWindowSize
	if e.o.single != nil {
		single = *e.o.single
	}
	fh := frameHeader{
		ContentSize:   uint64(len(src)),
		WindowSize:    uint32(enc.WindowSize(int64(len(src)))),
		SingleSegment: single,
		Checksum:      e.o.crc,
		DictID:        e.o.dict.ID(),
	}

	// If less than 1MB, allocate a buffer up front.
	if len(dst) == 0 && cap(dst) == 0 && len(src) < 1<<20 && !e.o.lowMem {
		dst = make([]byte, 0, len(src))
	}
	dst = fh.appendTo(dst)

	// If we can do everything in one block, prefer that.
	if len(src) <= e.o.blockSize {
		enc.Reset(e.o.dict, true)
		// Slightly faster with no history and everything in one block.
		if e.o.crc {
			_, _ = enc.CRC().Write(src)
		}
		blk := enc.Block()
		blk.last = true
		if e.o.dict == nil {
			enc.EncodeNoHist(blk, src)
		} else {
			enc.Encode(blk, src)
		}

		// If we got the exact same number of literals as input,
		// assume the literals cannot be compressed.
		oldout := blk.output
		// Output directly to dst
		blk.output = dst

		err := blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy)
		if err != nil {
			panic(err)
		}
		dst = blk.output
		blk.output = oldout
	} else {
		// Multi-block: encode blockSize chunks in sequence, keeping history.
		enc.Reset(e.o.dict, false)
		blk := enc.Block()
		for len(src) > 0 {
			todo := src
			if len(todo) > e.o.blockSize {
				todo = todo[:e.o.blockSize]
			}
			src = src[len(todo):]
			if e.o.crc {
				_, _ = enc.CRC().Write(todo)
			}
			blk.pushOffsets()
			enc.Encode(blk, todo)
			if len(src) == 0 {
				blk.last = true
			}
			err := blk.encode(todo, e.o.noEntropy, !e.o.allLitEntropy)
			if err != nil {
				panic(err)
			}
			dst = append(dst, blk.output...)
			blk.reset(nil)
		}
	}
	if e.o.crc {
		dst = enc.AppendCRC(dst)
	}
	// Add padding with content from crypto/rand.Reader
	if e.o.pad > 0 {
		add := calcSkippableFrame(int64(len(dst)), int64(e.o.pad))
		var err error
		dst, err = skippableFrame(dst, add, rand.Reader)
		if err != nil {
			panic(err)
		}
	}
	return dst
}
// MaxEncodedSize returns the expected maximum
// size of an encoded block or stream.
func (e *Encoder) MaxEncodedSize(size int) int {
	// Frame overhead: magic + frame header & window descriptor.
	// (Renamed from "frameHeader" to avoid shadowing the frameHeader type.)
	overhead := 4 + 2
	if e.o.dict != nil {
		overhead += 4
	}
	// Width of the frame content size field depends on the size.
	switch {
	case size < 256:
		overhead++
	case size < 65536+256:
		overhead += 2
	case size < math.MaxInt32:
		overhead += 4
	default:
		overhead += 8
	}
	// Final crc
	if e.o.crc {
		overhead += 4
	}
	// Max overhead is 3 bytes/block.
	// There cannot be 0 blocks.
	blocks := (size + e.o.blockSize) / e.o.blockSize
	// Combine, add padding.
	maxSz := overhead + 3*blocks + size
	if e.o.pad > 1 {
		maxSz += calcSkippableFrame(int64(maxSz), int64(e.o.pad))
	}
	return maxSz
}
package zstd
import (
"errors"
"fmt"
"math"
"math/bits"
"runtime"
"strings"
)
// EOption is an option for creating an encoder.
type EOption func(*encoderOptions) error
// encoderOptions retains the accumulated state of multiple options.
type encoderOptions struct {
	concurrent      int          // max concurrent encodes; 1 disables async streaming
	level           EncoderLevel // compression level
	single          *bool        // explicit single-segment override; nil = automatic
	pad             int          // pad output to a multiple of this, 0 = off
	blockSize       int          // max uncompressed block size
	windowSize      int          // back-reference window size
	crc             bool         // append xxhash checksum
	fullZero        bool         // emit a full frame for zero-length input
	noEntropy       bool         // always skip entropy compression of literals
	allLitEntropy   bool         // apply entropy compression even with no matches
	customWindow    bool         // windowSize was set explicitly
	customALEntropy bool         // allLitEntropy was set explicitly
	customBlockSize bool         // blockSize was set explicitly
	lowMem          bool         // trade speed for lower memory use
	dict            *dict        // optional encoding dictionary
}
// setDefault resets o to the package defaults, discarding any previous values.
func (o *encoderOptions) setDefault() {
	*o = encoderOptions{
		concurrent:    runtime.GOMAXPROCS(0),
		crc:           true,
		single:        nil,
		blockSize:     maxCompressedBlockSize,
		windowSize:    8 << 20,
		level:         SpeedDefault,
		allLitEntropy: false,
		lowMem:        false,
	}
}
// encoder returns an encoder with the selected options.
func (o encoderOptions) encoder() encoder {
	// All level-specific encoders share the same base configuration.
	base := fastBase{
		maxMatchOff: int32(o.windowSize),
		bufferReset: math.MaxInt32 - int32(o.windowSize*2),
		lowMem:      o.lowMem,
	}
	switch o.level {
	case SpeedFastest:
		if o.dict != nil {
			return &fastEncoderDict{fastEncoder: fastEncoder{fastBase: base}}
		}
		return &fastEncoder{fastBase: base}
	case SpeedDefault:
		if o.dict != nil {
			return &doubleFastEncoderDict{fastEncoderDict: fastEncoderDict{fastEncoder: fastEncoder{fastBase: base}}}
		}
		return &doubleFastEncoder{fastEncoder: fastEncoder{fastBase: base}}
	case SpeedBetterCompression:
		if o.dict != nil {
			return &betterFastEncoderDict{betterFastEncoder: betterFastEncoder{fastBase: base}}
		}
		return &betterFastEncoder{fastBase: base}
	case SpeedBestCompression:
		return &bestFastEncoder{fastBase: base}
	}
	panic("unknown compression level")
}
// WithEncoderCRC will add CRC value to output.
// Output will be 4 bytes larger.
func WithEncoderCRC(b bool) EOption {
	return func(o *encoderOptions) error {
		o.crc = b
		return nil
	}
}
// WithEncoderConcurrency will set the concurrency,
// meaning the maximum number of encoders to run concurrently.
// The value supplied must be at least 1.
// For streams, setting a value of 1 will disable async compression.
// By default this will be set to GOMAXPROCS.
func WithEncoderConcurrency(n int) EOption {
	return func(o *encoderOptions) error {
		if n < 1 {
			return fmt.Errorf("concurrency must be at least 1")
		}
		o.concurrent = n
		return nil
	}
}
// WithWindowSize will set the maximum allowed back-reference distance.
// The value must be a power of two between MinWindowSize and MaxWindowSize.
// A larger value will enable better compression but allocate more memory and,
// for above-default values, take considerably longer.
// The default value is determined by the compression level and max 8MB.
func WithWindowSize(n int) EOption {
	return func(o *encoderOptions) error {
		// Validate with early returns instead of a switch.
		if n < MinWindowSize {
			return fmt.Errorf("window size must be at least %d", MinWindowSize)
		}
		if n > MaxWindowSize {
			return fmt.Errorf("window size must be at most %d", MaxWindowSize)
		}
		if n&(n-1) != 0 {
			return errors.New("window size must be a power of 2")
		}

		o.windowSize = n
		o.customWindow = true
		// Blocks may never exceed the window.
		if o.blockSize > o.windowSize {
			o.blockSize = o.windowSize
			o.customBlockSize = true
		}
		return nil
	}
}
// WithEncoderPadding will add padding to all output so the size will be a multiple of n.
// This can be used to obfuscate the exact output size or make blocks of a certain size.
// The contents will be a skippable frame, so it will be invisible by the decoder.
// n must be > 0 and <= 1GB, 1<<30 bytes.
// The padded area will be filled with data from crypto/rand.Reader.
// If `EncodeAll` is used with data already in the destination, the total size will be multiple of this.
func WithEncoderPadding(n int) EOption {
	return func(o *encoderOptions) error {
		if n <= 0 {
			return errors.New("padding must be at least 1")
		}
		// Validate the upper bound before normalizing, so all range checks
		// are grouped together. Message fixed: was "must less than ... ) "
		// (broken grammar plus a trailing space).
		if n > 1<<30 {
			return errors.New("padding must be less than 1GB (1<<30 bytes)")
		}
		// Padding to a multiple of 1 is a no-op; disable it.
		if n == 1 {
			n = 0
		}
		o.pad = n
		return nil
	}
}
// EncoderLevel predefines encoder compression levels.
// Only use the constants made available, since the actual mapping
// of these values is very likely to change and your compression could change
// unpredictably when upgrading the library.
type EncoderLevel int
const (
	// speedNotSet is the zero value, indicating no level was chosen.
	speedNotSet EncoderLevel = iota

	// SpeedFastest will choose the fastest reasonable compression.
	// This is roughly equivalent to the fastest Zstandard mode.
	SpeedFastest

	// SpeedDefault is the default "pretty fast" compression option.
	// This is roughly equivalent to the default Zstandard mode (level 3).
	SpeedDefault

	// SpeedBetterCompression will yield better compression than the default.
	// Currently it is about zstd level 7-8 with ~ 2x-3x the default CPU usage.
	// By using this, notice that CPU usage may go up in the future.
	SpeedBetterCompression

	// SpeedBestCompression will choose the best available compression option.
	// This will offer the best compression no matter the CPU cost.
	SpeedBestCompression

	// speedLast should be kept as the last actual compression option.
	// This is not for external usage, but is used to keep track of the valid options.
	speedLast
)
// EncoderLevelFromString will convert a string representation of an encoding level back
// to a compression level. The compare is not case sensitive.
// If the string wasn't recognized, (false, SpeedDefault) will be returned.
func EncoderLevelFromString(s string) (bool, EncoderLevel) {
	// Scan every valid level and compare against its canonical name.
	for lvl := speedNotSet + 1; lvl < speedLast; lvl++ {
		if !strings.EqualFold(s, lvl.String()) {
			continue
		}
		return true, lvl
	}
	return false, SpeedDefault
}
// EncoderLevelFromZstd will return an encoder level that closest matches the compression
// ratio of a specific zstd compression level.
// Many input values will provide the same compression level.
func EncoderLevelFromZstd(level int) EncoderLevel {
	// Cases are evaluated in order, so each bound implies the previous ones.
	switch {
	case level < 3:
		return SpeedFastest
	case level < 6:
		return SpeedDefault
	case level < 10:
		return SpeedBetterCompression
	}
	return SpeedBestCompression
}
// String provides a string representation of the compression level.
func (e EncoderLevel) String() string {
	// Names indexed by level value; anything outside the valid
	// (speedNotSet, speedLast) range is reported as invalid.
	names := [...]string{
		SpeedFastest:           "fastest",
		SpeedDefault:           "default",
		SpeedBetterCompression: "better",
		SpeedBestCompression:   "best",
	}
	if e > speedNotSet && e < speedLast {
		return names[e]
	}
	return "invalid"
}
// WithEncoderLevel specifies a predefined compression level.
//
// Unless overridden by other options, selecting a level also picks a
// matching window size, and enables all-literal entropy compression for
// levels above SpeedDefault.
func WithEncoderLevel(l EncoderLevel) EOption {
	return func(o *encoderOptions) error {
		// A single-case switch is non-idiomatic; use a plain if.
		if l <= speedNotSet || l >= speedLast {
			return fmt.Errorf("unknown encoder level")
		}
		o.level = l
		if !o.customWindow {
			switch o.level {
			case SpeedFastest:
				o.windowSize = 4 << 20
				if !o.customBlockSize {
					o.blockSize = 1 << 16
				}
			case SpeedDefault:
				o.windowSize = 8 << 20
			case SpeedBetterCompression:
				o.windowSize = 8 << 20
			case SpeedBestCompression:
				o.windowSize = 8 << 20
			}
		}
		if !o.customALEntropy {
			o.allLitEntropy = l > SpeedDefault
		}

		return nil
	}
}
// WithZeroFrames will encode 0 length input as full frames.
// This can be needed for compatibility with zstandard usage,
// but is not needed for this package.
func WithZeroFrames(b bool) EOption {
	return func(o *encoderOptions) error { o.fullZero = b; return nil }
}
// WithAllLitEntropyCompression will apply entropy compression if no matches are found.
// Disabling this will skip incompressible data faster, but in cases with no matches but
// skewed character distribution compression is lost.
// Default value depends on the compression level selected.
func WithAllLitEntropyCompression(b bool) EOption {
	return func(o *encoderOptions) error {
		// Remember that the user chose explicitly, so level selection
		// will not override it.
		o.customALEntropy = true
		o.allLitEntropy = b
		return nil
	}
}
// WithNoEntropyCompression will always skip entropy compression of literals.
// This can be useful if content has matches, but unlikely to benefit from entropy
// compression. Usually the slight speed improvement is not worth enabling this.
func WithNoEntropyCompression(b bool) EOption {
	return func(o *encoderOptions) error { o.noEntropy = b; return nil }
}
// WithSingleSegment will set the "single segment" flag when EncodeAll is used.
// If this flag is set, data must be regenerated within a single continuous memory segment.
// In this case, Window_Descriptor byte is skipped, but Frame_Content_Size is necessarily present.
// As a consequence, the decoder must allocate a memory segment of size equal or larger than size of your content.
// In order to preserve the decoder from unreasonable memory requirements,
// a decoder is allowed to reject a compressed frame which requests a memory size beyond decoder's authorized range.
// For broader compatibility, decoders are recommended to support memory sizes of at least 8 MB.
// This is only a recommendation, each decoder is free to support higher or lower limits, depending on local limitations.
// If this is not specified, block encodes will automatically choose this based on the input size and the window size.
// This setting has no effect on streamed encodes.
func WithSingleSegment(b bool) EOption {
	return func(o *encoderOptions) error {
		// Store a pointer so "not set" (nil) is distinguishable from false.
		o.single = &b
		return nil
	}
}
// WithLowerEncoderMem will trade less memory usage for
// slower encoding speed in some cases.
// This will not change the window size which is the primary function for reducing
// memory usage. See WithWindowSize.
func WithLowerEncoderMem(b bool) EOption {
	opt := func(o *encoderOptions) error {
		o.lowMem = b
		return nil
	}
	return opt
}
// WithEncoderDict allows to register a dictionary that will be used for the encode.
//
// The slice dict must be in the [dictionary format] produced by
// "zstd --train" from the Zstandard reference implementation.
//
// The encoder *may* choose to use no dictionary instead for certain payloads.
//
// [dictionary format]: https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary-format
func WithEncoderDict(dict []byte) EOption {
	return func(o *encoderOptions) error {
		// Parse and validate the dictionary up front so errors surface
		// at option time, not at encode time.
		parsed, err := loadDict(dict)
		if err != nil {
			return err
		}
		o.dict = parsed
		return nil
	}
}
// WithEncoderDictRaw registers a dictionary that may be used by the encoder.
//
// The slice content may contain arbitrary data. It will be used as an initial
// history.
func WithEncoderDictRaw(id uint32, content []byte) EOption {
	return func(o *encoderOptions) error {
		// On 64-bit platforms reject dictionaries larger than the format allows.
		if bits.UintSize > 32 && uint(len(content)) > dictMaxLength {
			return fmt.Errorf("dictionary of size %d > 2GiB too large", len(content))
		}
		o.dict = &dict{
			id:      id,
			content: content,
			offsets: [3]int{1, 4, 8},
		}
		return nil
	}
}
// Copyright 2019+ Klaus Post. All rights reserved.
// License information can be found in the LICENSE file.
// Based on work by Yann Collet, released under BSD License.
package zstd
import (
"encoding/binary"
"encoding/hex"
"errors"
"io"
"github.com/klauspost/compress/zstd/internal/xxhash"
)
// frameDec holds the state needed to decode a single zstd frame:
// the parsed frame header fields plus the history shared between
// the frame's blocks.
type frameDec struct {
	o decoderOptions
	// crc accumulates the xxhash64 content checksum.
	// Allocated lazily; only used when HasCheckSum is set.
	crc *xxhash.Digest
	// WindowSize is the decoded window size in bytes, from the frame header.
	WindowSize uint64
	// Frame history passed between blocks
	history  history
	rawInput byteBuffer
	// Byte buffer that can be reused for small input blocks.
	bBuf byteBuf
	// FrameContentSize is the uncompressed size from the header
	// (fcsUnknown when the header does not carry one — see reset).
	FrameContentSize uint64
	// DictionaryID read from the header; 0 when absent.
	DictionaryID uint32
	// HasCheckSum reports whether the frame ends with a 4-byte checksum.
	HasCheckSum bool
	// SingleSegment is the header flag: no window descriptor, content in one segment.
	SingleSegment bool
}
const (
	// MinWindowSize is the minimum Window Size, which is 1 KB.
	MinWindowSize = 1 << 10
	// MaxWindowSize is the maximum encoder window size
	// and the default decoder maximum window size.
	MaxWindowSize = 1 << 29
)
const (
	// frameMagic is the zstd frame magic number, little endian (0xFD2FB528).
	frameMagic = "\x28\xb5\x2f\xfd"
	// skippableFrameMagic is the upper three bytes of a skippable frame's
	// magic; the first byte is 0x5? (low nibble free) — see reset.
	skippableFrameMagic = "\x2a\x4d\x18"
)
// newFrameDec creates a frame decoder with the supplied options.
// The window limit is clamped so it never exceeds the maximum decoded size.
func newFrameDec(o decoderOptions) *frameDec {
	if o.maxWindowSize > o.maxDecodedSize {
		o.maxWindowSize = o.maxDecodedSize
	}
	return &frameDec{o: o}
}
// reset will read the frame header and prepare for block decoding.
// If nothing can be read from the input, io.EOF will be returned.
// Any other error indicated that the stream contained data, but
// there was a problem.
//
// Skippable frames are consumed transparently before the real frame header
// is parsed. On success d.WindowSize, d.FrameContentSize, d.DictionaryID,
// d.HasCheckSum and d.SingleSegment reflect the header, and d.rawInput is
// set to br for block decoding.
func (d *frameDec) reset(br byteBuffer) error {
	d.HasCheckSum = false
	d.WindowSize = 0
	var signature [4]byte
	for {
		var err error
		// Check if we can read more...
		b, err := br.readSmall(1)
		switch err {
		case io.EOF, io.ErrUnexpectedEOF:
			return io.EOF
		case nil:
			signature[0] = b[0]
		default:
			return err
		}
		// Read the rest, don't allow io.ErrUnexpectedEOF
		b, err = br.readSmall(3)
		switch err {
		case io.EOF:
			return io.EOF
		case nil:
			copy(signature[1:], b)
		default:
			return err
		}
		// Skippable frames have magic 0x184D2A5? — any low nibble in byte 0.
		if string(signature[1:4]) != skippableFrameMagic || signature[0]&0xf0 != 0x50 {
			if debugDecoder {
				println("Not skippable", hex.EncodeToString(signature[:]), hex.EncodeToString([]byte(skippableFrameMagic)))
			}
			// Break if not skippable frame.
			break
		}
		// Read size to skip
		b, err = br.readSmall(4)
		if err != nil {
			if debugDecoder {
				println("Reading Frame Size", err)
			}
			return err
		}
		n := uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24)
		// Fix: debug output was printed unconditionally; guard like the rest.
		if debugDecoder {
			println("Skipping frame with", n, "bytes.")
		}
		err = br.skipN(int64(n))
		if err != nil {
			if debugDecoder {
				println("Reading discarded frame", err)
			}
			return err
		}
	}
	if string(signature[:]) != frameMagic {
		if debugDecoder {
			println("Got magic numbers: ", signature, "want:", []byte(frameMagic))
		}
		return ErrMagicMismatch
	}
	// Read Frame_Header_Descriptor
	fhd, err := br.readByte()
	if err != nil {
		if debugDecoder {
			println("Reading Frame_Header_Descriptor", err)
		}
		return err
	}
	d.SingleSegment = fhd&(1<<5) != 0
	if fhd&(1<<3) != 0 {
		return errors.New("reserved bit set on frame header")
	}
	// Read Window_Descriptor
	// https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#window_descriptor
	d.WindowSize = 0
	if !d.SingleSegment {
		wd, err := br.readByte()
		if err != nil {
			if debugDecoder {
				println("Reading Window_Descriptor", err)
			}
			return err
		}
		if debugDecoder {
			printf("raw: %x, mantissa: %d, exponent: %d\n", wd, wd&7, wd>>3)
		}
		// windowSize = 2^(10+exponent) + mantissa/8 of that base.
		windowLog := 10 + (wd >> 3)
		windowBase := uint64(1) << windowLog
		windowAdd := (windowBase / 8) * uint64(wd&0x7)
		d.WindowSize = windowBase + windowAdd
	}
	// Read Dictionary_ID
	// https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary_id
	d.DictionaryID = 0
	if size := fhd & 3; size != 0 {
		if size == 3 {
			size = 4
		}
		b, err := br.readSmall(int(size))
		if err != nil {
			// Fix: debug output was printed unconditionally; guard like the rest.
			if debugDecoder {
				println("Reading Dictionary_ID", err)
			}
			return err
		}
		var id uint32
		switch len(b) {
		case 1:
			id = uint32(b[0])
		case 2:
			id = uint32(b[0]) | (uint32(b[1]) << 8)
		case 4:
			id = uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24)
		}
		if debugDecoder {
			println("Dict size", size, "ID:", id)
		}
		d.DictionaryID = id
	}
	// Read Frame_Content_Size
	// https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#frame_content_size
	var fcsSize int
	v := fhd >> 6
	switch v {
	case 0:
		// Field size 0 means 1 byte, but only when SingleSegment is set.
		if d.SingleSegment {
			fcsSize = 1
		}
	default:
		fcsSize = 1 << v
	}
	d.FrameContentSize = fcsUnknown
	if fcsSize > 0 {
		b, err := br.readSmall(fcsSize)
		if err != nil {
			// Fix: debug output was printed unconditionally; guard like the rest.
			if debugDecoder {
				println("Reading Frame content", err)
			}
			return err
		}
		switch len(b) {
		case 1:
			d.FrameContentSize = uint64(b[0])
		case 2:
			// When FCS_Field_Size is 2, the offset of 256 is added.
			d.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) + 256
		case 4:
			d.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) | (uint64(b[2]) << 16) | (uint64(b[3]) << 24)
		case 8:
			d1 := uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24)
			d2 := uint32(b[4]) | (uint32(b[5]) << 8) | (uint32(b[6]) << 16) | (uint32(b[7]) << 24)
			d.FrameContentSize = uint64(d1) | (uint64(d2) << 32)
		}
		if debugDecoder {
			println("Read FCS:", d.FrameContentSize)
		}
	}
	// Move this to shared.
	d.HasCheckSum = fhd&(1<<2) != 0
	if d.HasCheckSum {
		if d.crc == nil {
			d.crc = xxhash.New()
		}
		d.crc.Reset()
	}
	if d.WindowSize > d.o.maxWindowSize {
		if debugDecoder {
			printf("window size %d > max %d\n", d.WindowSize, d.o.maxWindowSize)
		}
		return ErrWindowSizeExceeded
	}
	if d.WindowSize == 0 && d.SingleSegment {
		// We may not need window in this case.
		d.WindowSize = d.FrameContentSize
		if d.WindowSize < MinWindowSize {
			d.WindowSize = MinWindowSize
		}
		if d.WindowSize > d.o.maxDecodedSize {
			if debugDecoder {
				// Fix: message previously printed maxWindowSize, but the
				// comparison is against maxDecodedSize.
				printf("window size %d > max %d\n", d.WindowSize, d.o.maxDecodedSize)
			}
			return ErrDecoderSizeExceeded
		}
	}
	// The minimum Window_Size is 1 KB.
	if d.WindowSize < MinWindowSize {
		if debugDecoder {
			println("got window size: ", d.WindowSize)
		}
		return ErrWindowSizeTooSmall
	}
	d.history.windowSize = int(d.WindowSize)
	if !d.o.lowMem || d.history.windowSize < maxBlockSize {
		// Alloc 2x window size if not low-mem, or window size below 2MB.
		d.history.allocFrameBuffer = d.history.windowSize * 2
	} else {
		// NOTE(review): this branch is only reached when lowMem is set, so
		// the inner else below is unreachable — confirm intent.
		if d.o.lowMem {
			// Alloc with 1MB extra.
			d.history.allocFrameBuffer = d.history.windowSize + maxBlockSize/2
		} else {
			// Alloc with 2MB extra.
			d.history.allocFrameBuffer = d.history.windowSize + maxBlockSize
		}
	}
	if debugDecoder {
		println("Frame: Dict:", d.DictionaryID, "FrameContentSize:", d.FrameContentSize, "singleseg:", d.SingleSegment, "window:", d.WindowSize, "crc:", d.HasCheckSum)
	}
	// history contains input - maybe we do something
	d.rawInput = br
	return nil
}
// next will start decoding the next block from stream.
// On error the problem is also signaled to the block via sendErr.
func (d *frameDec) next(block *blockDec) error {
	if debugDecoder {
		println("decoding new block")
	}
	err := block.reset(d.rawInput, d.WindowSize)
	if err != nil {
		// Fix: debug output was printed unconditionally; guard like the rest.
		if debugDecoder {
			println("block error:", err)
		}
		// Signal the frame decoder we have a problem.
		block.sendErr(err)
		return err
	}
	return nil
}
// checkCRC will check the checksum, assuming the frame has one.
// Will return ErrCRCMismatch if crc check failed, otherwise nil.
// The stored value is the low 32 bits of xxhash64, little endian.
func (d *frameDec) checkCRC() error {
	// We can overwrite upper tmp now
	buf, err := d.rawInput.readSmall(4)
	if err != nil {
		// Fix: debug output was printed unconditionally; guard like the rest.
		if debugDecoder {
			println("CRC missing?", err)
		}
		return err
	}
	want := binary.LittleEndian.Uint32(buf[:4])
	got := uint32(d.crc.Sum64())
	if got != want {
		if debugDecoder {
			printf("CRC check failed: got %08x, want %08x\n", got, want)
		}
		return ErrCRCMismatch
	}
	if debugDecoder {
		printf("CRC ok %08x\n", got)
	}
	return nil
}
// consumeCRC skips over the checksum, assuming the frame has one.
// Returns any read error encountered while discarding the 4 bytes.
func (d *frameDec) consumeCRC() error {
	_, err := d.rawInput.readSmall(4)
	if err != nil {
		// Fix: debug output was printed unconditionally; guard like the rest.
		if debugDecoder {
			println("CRC missing?", err)
		}
	}
	return err
}
// runDecoder will run the decoder for the remainder of the frame.
// Decoded data is appended to dst (which may be reallocated) and returned.
// d.history.b is borrowed as the output buffer and restored before returning.
func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) {
	saved := d.history.b
	// We use the history for output to avoid copying it.
	d.history.b = dst
	d.history.ignoreBuffer = len(dst)
	// Store input length, so we only check new data.
	crcStart := len(dst)
	d.history.decoders.maxSyncLen = 0
	if d.o.limitToCap {
		d.history.decoders.maxSyncLen = uint64(cap(dst) - len(dst))
	}
	if d.FrameContentSize != fcsUnknown {
		if !d.o.limitToCap || d.FrameContentSize+uint64(len(dst)) < d.history.decoders.maxSyncLen {
			d.history.decoders.maxSyncLen = d.FrameContentSize + uint64(len(dst))
		}
		if d.history.decoders.maxSyncLen > d.o.maxDecodedSize {
			if debugDecoder {
				println("maxSyncLen:", d.history.decoders.maxSyncLen, "> maxDecodedSize:", d.o.maxDecodedSize)
			}
			return dst, ErrDecoderSizeExceeded
		}
		if debugDecoder {
			println("maxSyncLen:", d.history.decoders.maxSyncLen)
		}
		if !d.o.limitToCap && uint64(cap(dst)) < d.history.decoders.maxSyncLen {
			// Alloc for output
			dst2 := make([]byte, len(dst), d.history.decoders.maxSyncLen+compressedBlockOverAlloc)
			copy(dst2, dst)
			dst = dst2
		}
	}
	var err error
	for {
		err = dec.reset(d.rawInput, d.WindowSize)
		if err != nil {
			break
		}
		if debugDecoder {
			println("next block:", dec)
		}
		err = dec.decodeBuf(&d.history)
		if err != nil {
			break
		}
		if uint64(len(d.history.b)-crcStart) > d.o.maxDecodedSize {
			// Fix: debug output was printed unconditionally; guard like the rest.
			if debugDecoder {
				println("runDecoder: maxDecodedSize exceeded", uint64(len(d.history.b)-crcStart), ">", d.o.maxDecodedSize)
			}
			err = ErrDecoderSizeExceeded
			break
		}
		if d.o.limitToCap && len(d.history.b) > cap(dst) {
			// Fix: debug output was printed unconditionally; guard like the rest.
			if debugDecoder {
				println("runDecoder: cap exceeded", uint64(len(d.history.b)), ">", cap(dst))
			}
			err = ErrDecoderSizeExceeded
			break
		}
		if uint64(len(d.history.b)-crcStart) > d.FrameContentSize {
			// Fix: debug output was printed unconditionally; guard like the rest.
			if debugDecoder {
				println("runDecoder: FrameContentSize exceeded", uint64(len(d.history.b)-crcStart), ">", d.FrameContentSize)
			}
			err = ErrFrameSizeExceeded
			break
		}
		if dec.Last {
			break
		}
		if debugDecoder {
			println("runDecoder: FrameContentSize", uint64(len(d.history.b)-crcStart), "<=", d.FrameContentSize)
		}
	}
	dst = d.history.b
	if err == nil {
		if d.FrameContentSize != fcsUnknown && uint64(len(d.history.b)-crcStart) != d.FrameContentSize {
			err = ErrFrameSizeMismatch
		} else if d.HasCheckSum {
			if d.o.ignoreChecksum {
				err = d.consumeCRC()
			} else {
				d.crc.Write(dst[crcStart:])
				err = d.checkCRC()
			}
		}
	}
	d.history.b = saved
	return dst, err
}
// Copyright 2019+ Klaus Post. All rights reserved.
// License information can be found in the LICENSE file.
// Based on work by Yann Collet, released under BSD License.
package zstd
import (
"encoding/binary"
"fmt"
"io"
"math"
"math/bits"
)
// frameHeader describes the fields serialized by frameHeader.appendTo.
type frameHeader struct {
	ContentSize   uint64 // uncompressed size; field width chosen in appendTo
	WindowSize    uint32 // encoder window; only written when !SingleSegment
	SingleSegment bool   // sets descriptor bit 5, skips the window byte
	Checksum      bool   // sets descriptor bit 2
	DictID        uint32 // 0 means "no dictionary field"
}

// maxHeaderSize is the assumed maximum size of an encoded frame header.
// NOTE(review): 4 (magic) + 1 (descriptor) + 1 (window) + 8 (content size)
// is 14, but a 4-byte DictID could add more — confirm callers never combine
// a large DictID with an 8-byte content size when relying on this bound.
const maxHeaderSize = 14
// appendTo serializes the frame header (magic, frame header descriptor,
// window descriptor, dictionary ID and frame content size) to dst and
// returns the extended slice.
func (f frameHeader) appendTo(dst []byte) []byte {
	dst = append(dst, frameMagic...)
	var fhd uint8
	if f.Checksum {
		fhd |= 1 << 2
	}
	if f.SingleSegment {
		fhd |= 1 << 5
	}
	// Encode the dictionary ID into the smallest field that fits it;
	// the chosen width is recorded in the low two descriptor bits.
	var dictIDContent []byte
	if f.DictID > 0 {
		var tmp [4]byte
		if f.DictID < 256 {
			fhd |= 1
			tmp[0] = uint8(f.DictID)
			dictIDContent = tmp[:1]
		} else if f.DictID < 1<<16 {
			fhd |= 2
			binary.LittleEndian.PutUint16(tmp[:2], uint16(f.DictID))
			dictIDContent = tmp[:2]
		} else {
			fhd |= 3
			binary.LittleEndian.PutUint32(tmp[:4], f.DictID)
			dictIDContent = tmp[:4]
		}
	}
	// Select FCS_Field_Size: 0 -> 1 byte (SingleSegment only),
	// 1 -> 2 bytes (stored minus 256), 2 -> 4 bytes, 3 -> 8 bytes.
	var fcs uint8
	if f.ContentSize >= 256 {
		fcs++
	}
	if f.ContentSize >= 65536+256 {
		fcs++
	}
	if f.ContentSize >= 0xffffffff {
		fcs++
	}
	fhd |= fcs << 6
	dst = append(dst, fhd)
	if !f.SingleSegment {
		const winLogMin = 10
		// Only the exponent is stored; the low 3 mantissa bits are zero.
		windowLog := (bits.Len32(f.WindowSize-1) - winLogMin) << 3
		dst = append(dst, uint8(windowLog))
	}
	if f.DictID > 0 {
		dst = append(dst, dictIDContent...)
	}
	switch fcs {
	case 0:
		if f.SingleSegment {
			dst = append(dst, uint8(f.ContentSize))
		}
		// Unless SingleSegment is set, frame sizes < 256 are not stored.
	case 1:
		// When FCS_Field_Size is 2, the stored value is ContentSize-256.
		f.ContentSize -= 256
		dst = append(dst, uint8(f.ContentSize), uint8(f.ContentSize>>8))
	case 2:
		dst = append(dst, uint8(f.ContentSize), uint8(f.ContentSize>>8), uint8(f.ContentSize>>16), uint8(f.ContentSize>>24))
	case 3:
		dst = append(dst, uint8(f.ContentSize), uint8(f.ContentSize>>8), uint8(f.ContentSize>>16), uint8(f.ContentSize>>24),
			uint8(f.ContentSize>>32), uint8(f.ContentSize>>40), uint8(f.ContentSize>>48), uint8(f.ContentSize>>56))
	default:
		panic("invalid fcs")
	}
	return dst
}
// skippableFrameHeader is the fixed overhead of a skippable frame:
// 4 magic bytes plus a 4-byte length field.
const skippableFrameHeader = 4 + 4

// calcSkippableFrame returns how many padding bytes must be appended so
// that written becomes divisible by wantMultiple.
// The result is either 0 or at least skippableFrameHeader bytes.
// The function will panic if written < 0 or wantMultiple <= 0.
func calcSkippableFrame(written, wantMultiple int64) int {
	switch {
	case wantMultiple <= 0:
		panic("wantMultiple <= 0")
	case written < 0:
		panic("written < 0")
	}
	rem := written % wantMultiple
	if rem == 0 {
		// Already aligned; no frame needed.
		return 0
	}
	pad := wantMultiple - rem
	// Grow until the padding can hold at least the frame header.
	for pad < skippableFrameHeader {
		pad += wantMultiple
	}
	return int(pad)
}
// skippableFrame appends a skippable frame whose total size is total bytes,
// filling its payload with bytes read from r.
// total should be >= skippableFrameHeader and < math.MaxUint32.
// A total of 0 is a no-op.
func skippableFrame(dst []byte, total int, r io.Reader) ([]byte, error) {
	if total == 0 {
		return dst, nil
	}
	if total < skippableFrameHeader {
		return dst, fmt.Errorf("requested skippable frame (%d) < 8", total)
	}
	if int64(total) > math.MaxUint32 {
		return dst, fmt.Errorf("requested skippable frame (%d) > max uint32", total)
	}
	// Skippable frame magic 0x184D2A50, little endian.
	dst = append(dst, 0x50, 0x2a, 0x4d, 0x18)
	payload := uint32(total - skippableFrameHeader)
	var lenField [4]byte
	binary.LittleEndian.PutUint32(lenField[:], payload)
	dst = append(dst, lenField[:]...)
	// Reserve the payload and fill it from r.
	start := len(dst)
	dst = append(dst, make([]byte, payload)...)
	_, err := io.ReadFull(r, dst[start:])
	return dst, err
}
// Copyright 2019+ Klaus Post. All rights reserved.
// License information can be found in the LICENSE file.
// Based on work by Yann Collet, released under BSD License.
package zstd
import (
"encoding/binary"
"errors"
"fmt"
"io"
)
const (
	// tablelogAbsoluteMax is the largest table log accepted by readNCount.
	tablelogAbsoluteMax = 9
)
const (
	/*!MEMORY_USAGE :
	 * Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.)
	 * Increasing memory usage improves compression ratio
	 * Reduced memory usage can improve speed, due to cache effect
	 * Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */
	maxMemoryUsage = tablelogAbsoluteMax + 2
	// Derived table sizing: maxTableLog = 9, maxTablesize = 512 entries.
	maxTableLog    = maxMemoryUsage - 2
	maxTablesize   = 1 << maxTableLog
	maxTableMask   = (1 << maxTableLog) - 1
	minTablelog    = 5
	maxSymbolValue = 255
)
// fseDecoder provides temporary storage for compression and decompression.
type fseDecoder struct {
	dt             [maxTablesize]decSymbol // Decompression table.
	symbolLen      uint16                  // Length of active part of the symbol table.
	actualTableLog uint8                   // Selected tablelog.
	maxBits        uint8                   // Maximum number of additional bits
	// stateTable and norm are used for table creation to avoid allocations.
	stateTable [256]uint16
	norm       [maxSymbolValue + 1]int16 // normalized counts filled by readNCount
	preDefined bool                      // presumably marks a predefined table — confirm at call sites
}
// tableStep returns the FSE spread step for a table of the given size:
// tableSize*5/8 + 3, used to advance the position when spreading symbols.
func tableStep(tableSize uint32) uint32 {
	return tableSize/2 + tableSize/8 + 3
}
// readNCount will read the symbol distribution so decoding tables can be constructed.
// It fills s.norm and s.symbolLen, selects s.actualTableLog and, on success,
// builds the decoding table via buildDtable.
// b must have at least 4 bytes remaining; maxSymbol bounds the symbols read.
func (s *fseDecoder) readNCount(b *byteReader, maxSymbol uint16) error {
	var (
		charnum   uint16
		previous0 bool
	)
	if b.remain() < 4 {
		return errors.New("input too small")
	}
	bitStream := b.Uint32NC()
	nbBits := uint((bitStream & 0xF) + minTablelog) // extract tableLog
	if nbBits > tablelogAbsoluteMax {
		// Fix: debug output was printed unconditionally; guard it.
		if debugDecoder {
			println("Invalid tablelog:", nbBits)
		}
		return errors.New("tableLog too large")
	}
	bitStream >>= 4
	bitCount := uint(4)
	s.actualTableLog = uint8(nbBits)
	remaining := int32((1 << nbBits) + 1)
	threshold := int32(1 << nbBits)
	gotTotal := int32(0)
	nbBits++
	for remaining > 1 && charnum <= maxSymbol {
		if previous0 {
			// A zero count is followed by a run-length of further zero symbols.
			//println("prev0")
			n0 := charnum
			for (bitStream & 0xFFFF) == 0xFFFF {
				//println("24 x 0")
				n0 += 24
				if r := b.remain(); r > 5 {
					b.advance(2)
					// The check above should make sure we can read 32 bits
					bitStream = b.Uint32NC() >> bitCount
				} else {
					// end of bit stream
					bitStream >>= 16
					bitCount += 16
				}
			}
			//printf("bitstream: %d, 0b%b", bitStream&3, bitStream)
			for (bitStream & 3) == 3 {
				n0 += 3
				bitStream >>= 2
				bitCount += 2
			}
			n0 += uint16(bitStream & 3)
			bitCount += 2
			if n0 > maxSymbolValue {
				return errors.New("maxSymbolValue too small")
			}
			//println("inserting ", n0-charnum, "zeroes from idx", charnum, "ending before", n0)
			for charnum < n0 {
				s.norm[uint8(charnum)] = 0
				charnum++
			}
			if r := b.remain(); r >= 7 || r-int(bitCount>>3) >= 4 {
				b.advance(bitCount >> 3)
				bitCount &= 7
				// The check above should make sure we can read 32 bits
				bitStream = b.Uint32NC() >> bitCount
			} else {
				bitStream >>= 2
			}
		}
		// Decode one count with variable bit width (nbBits-1 or nbBits).
		max := (2*threshold - 1) - remaining
		var count int32
		if int32(bitStream)&(threshold-1) < max {
			count = int32(bitStream) & (threshold - 1)
			if debugAsserts && nbBits < 1 {
				panic("nbBits underflow")
			}
			bitCount += nbBits - 1
		} else {
			count = int32(bitStream) & (2*threshold - 1)
			if count >= threshold {
				count -= max
			}
			bitCount += nbBits
		}
		// extra accuracy
		count--
		if count < 0 {
			// -1 means +1
			remaining += count
			gotTotal -= count
		} else {
			remaining -= count
			gotTotal += count
		}
		s.norm[charnum&0xff] = int16(count)
		charnum++
		previous0 = count == 0
		// Shrink the field width as the remaining probability mass drops.
		for remaining < threshold {
			nbBits--
			threshold >>= 1
		}
		if r := b.remain(); r >= 7 || r-int(bitCount>>3) >= 4 {
			b.advance(bitCount >> 3)
			bitCount &= 7
			// The check above should make sure we can read 32 bits
			bitStream = b.Uint32NC() >> (bitCount & 31)
		} else {
			// Near the end of input: clamp to the final 4 bytes.
			bitCount -= (uint)(8 * (len(b.b) - 4 - b.off))
			b.off = len(b.b) - 4
			bitStream = b.Uint32() >> (bitCount & 31)
		}
	}
	s.symbolLen = charnum
	if s.symbolLen <= 1 {
		return fmt.Errorf("symbolLen (%d) too small", s.symbolLen)
	}
	if s.symbolLen > maxSymbolValue+1 {
		return fmt.Errorf("symbolLen (%d) too big", s.symbolLen)
	}
	if remaining != 1 {
		return fmt.Errorf("corruption detected (remaining %d != 1)", remaining)
	}
	if bitCount > 32 {
		return fmt.Errorf("corruption detected (bitCount %d > 32)", bitCount)
	}
	if gotTotal != 1<<s.actualTableLog {
		return fmt.Errorf("corruption detected (total %d != %d)", gotTotal, 1<<s.actualTableLog)
	}
	b.advance((bitCount + 7) >> 3)
	return s.buildDtable()
}
// mustReadFrom refills every field of s from r in declaration order using
// little-endian layout, panicking on any read error.
func (s *fseDecoder) mustReadFrom(r io.Reader) {
	// Fields, in the exact order they are serialized:
	// dt, symbolLen, actualTableLog, maxBits, stateTable, norm, preDefined.
	for _, field := range []interface{}{
		&s.dt,
		&s.symbolLen,
		&s.actualTableLog,
		&s.maxBits,
		&s.stateTable,
		&s.norm,
		&s.preDefined,
	} {
		if err := binary.Read(r, binary.LittleEndian, field); err != nil {
			panic(err)
		}
	}
}
// decSymbol packs one decoding-table entry into a uint64:
//
//	bits  0-7:  nbBits   — low bits to read for the next state
//	bits  8-15: addBits  — additional bits for the destination state
//	bits 16-31: newState — state offset base
//	bits 32-63: baseline — output symbol baseline
//
// Using a composite uint64 is faster than a struct with separate members.
type decSymbol uint64

// newDecSymbol assembles an entry from its four fields.
func newDecSymbol(nbits, addBits uint8, newState uint16, baseline uint32) decSymbol {
	return decSymbol(nbits) | decSymbol(addBits)<<8 | decSymbol(newState)<<16 | decSymbol(baseline)<<32
}

// nbBits returns the number of low state bits to read.
func (d decSymbol) nbBits() uint8 {
	return uint8(d & 0xff)
}

// addBits returns the additional bits for the destination state.
func (d decSymbol) addBits() uint8 {
	return uint8(d >> 8 & 0xff)
}

// newState returns the state offset base.
func (d decSymbol) newState() uint16 {
	return uint16(d >> 16 & 0xffff)
}

// baselineInt returns the baseline as an int.
func (d decSymbol) baselineInt() int {
	return int(d >> 32)
}

// setNBits replaces only the nbBits field.
func (d *decSymbol) setNBits(nBits uint8) {
	*d = *d&^0xff | decSymbol(nBits)
}

// setAddBits replaces only the addBits field.
func (d *decSymbol) setAddBits(addBits uint8) {
	*d = *d&^(0xff<<8) | decSymbol(addBits)<<8
}

// setNewState replaces only the newState field.
func (d *decSymbol) setNewState(state uint16) {
	*d = *d&^(0xffff<<16) | decSymbol(state)<<16
}

// setExt replaces the addBits and baseline fields in one store.
func (d *decSymbol) setExt(addBits uint8, baseline uint32) {
	*d = *d&0xffff00ff | decSymbol(addBits)<<8 | decSymbol(baseline)<<32
}
// decSymbolValue returns the transformed decSymbol for the given symbol.
// It fails if symb is outside the transform table t.
func decSymbolValue(symb uint8, t []baseOffset) (decSymbol, error) {
	if int(symb) >= len(t) {
		return 0, fmt.Errorf("rle symbol %d >= max %d", symb, len(t))
	}
	bo := t[symb]
	return newDecSymbol(0, bo.addBits, 0, bo.baseLine), nil
}
// setRLE will set the decoder to RLE mode: a zero table log and a single
// table entry holding the repeated symbol.
func (s *fseDecoder) setRLE(symbol decSymbol) {
	s.dt[0] = symbol
	s.maxBits = symbol.addBits()
	s.actualTableLog = 0
}
// transform rewrites the decoder table so that each entry directly carries
// its base value and the number of bits to read, removing the per-symbol
// lookup from the decode loop. s.maxBits is updated to the largest addBits.
func (s *fseDecoder) transform(t []baseOffset) error {
	tableSize := uint16(1 << s.actualTableLog)
	s.maxBits = 0
	for i := range s.dt[:tableSize] {
		v := s.dt[i]
		add := v.addBits()
		if int(add) >= len(t) {
			return fmt.Errorf("invalid decoding table entry %d, symbol %d >= max (%d)", i, v.addBits(), len(t))
		}
		lu := t[add]
		if lu.addBits > s.maxBits {
			s.maxBits = lu.addBits
		}
		v.setExt(lu.addBits, lu.baseLine)
		s.dt[i] = v
	}
	return nil
}
// fseState tracks the current position in an FSE decoding table.
type fseState struct {
	dt    []decSymbol // active decoding table
	state decSymbol   // current table entry
}
// init loads the decoding table and reads the first state from the bit reader.
func (s *fseState) init(br *bitReader, tableLog uint8, dt []decSymbol) {
	br.fill()
	s.dt = dt
	s.state = dt[br.getBits(tableLog)]
}
// final returns the baseline and additional-bit count of the current state
// without decoding the next one.
func (s decSymbol) final() (int, uint8) {
	base, add := s.baselineInt(), s.addBits()
	return base, add
}
//go:build amd64 && !appengine && !noasm && gc
// +build amd64,!appengine,!noasm,gc
package zstd
import (
"fmt"
)
// buildDtableAsmContext carries the pointers buildDtable_asm needs and
// receives its error details.
type buildDtableAsmContext struct {
	// inputs
	stateTable *uint16
	norm       *int16
	dt         *uint64
	// outputs --- set by the procedure in the case of error;
	// for interpretation please see the error handling part below
	errParam1 uint64
	errParam2 uint64
}
// buildDtable_asm is an x86 assembly implementation of fseDecoder.buildDtable.
// Function returns non-zero exit code on error.
//
//go:noescape
func buildDtable_asm(s *fseDecoder, ctx *buildDtableAsmContext) int

// Error codes reported by buildDtable_asm; mapped to messages in buildDtable.
// please keep in sync with _generate/gen_fse.go
const (
	errorCorruptedNormalizedCounter = 1 // a table position was not fully consumed
	errorNewStateTooBig             = 2 // computed state exceeds table size
	errorNewStateNoBits             = 3 // state unchanged with zero bits to read
)
// buildDtable will build the decoding table using the assembly
// implementation, translating its numeric error codes into Go errors.
func (s *fseDecoder) buildDtable() error {
	ctx := buildDtableAsmContext{
		stateTable: &s.stateTable[0],
		norm:       &s.norm[0],
		dt:         (*uint64)(&s.dt[0]),
	}
	code := buildDtable_asm(s, &ctx)
	if code == 0 {
		return nil
	}
	switch code {
	case errorCorruptedNormalizedCounter:
		return fmt.Errorf("corrupted input (position=%d, expected 0)", ctx.errParam1)
	case errorNewStateTooBig:
		return fmt.Errorf("newState (%d) outside table size (%d)", decSymbol(ctx.errParam1), ctx.errParam2)
	case errorNewStateNoBits:
		return fmt.Errorf("newState (%d) == oldState (%d) and no bits", decSymbol(ctx.errParam1), decSymbol(ctx.errParam2))
	default:
		return fmt.Errorf("buildDtable_asm returned unhandled nonzero code = %d", code)
	}
}
// Copyright 2019+ Klaus Post. All rights reserved.
// License information can be found in the LICENSE file.
// Based on work by Yann Collet, released under BSD License.
package zstd
import (
"errors"
"fmt"
"math"
)
const (
	// For encoding we only support up to
	maxEncTableLog = 8
	// NOTE(review): the sizes below derive from the decoder's maxTableLog,
	// not maxEncTableLog — confirm this is intentional.
	maxEncTablesize   = 1 << maxTableLog
	maxEncTableMask   = (1 << maxTableLog) - 1
	minEncTablelog    = 5
	maxEncSymbolValue = maxMatchLengthSymbol
)
// fseEncoder provides temporary storage for FSE compression and the
// resulting compression tables.
type fseEncoder struct {
	symbolLen      uint16 // Length of active part of the symbol table.
	actualTableLog uint8  // Selected tablelog.
	ct             cTable // Compression tables.
	maxCount       int    // count of the most probable symbol
	zeroBits       bool   // no bits has prob > 50%.
	clearCount     bool   // clear count
	useRLE         bool   // This encoder is for RLE
	preDefined     bool   // This encoder is predefined.
	reUsed         bool   // Set to know when the encoder has been reused.
	rleVal         uint8  // RLE Symbol
	maxBits        uint8  // Maximum output bits after transform.
	// TODO: Technically zstd should be fine with 64 bytes.
	count [256]uint32 // symbol histogram (see Histogram / HistogramFinished)
	norm  [256]int16  // normalized counts; sum equals 1 << actualTableLog
}
// cTable contains tables used for compression.
type cTable struct {
	tableSymbol []byte            // symbol at each table position
	stateTable  []uint16          // next-state values, sorted by symbol order
	symbolTT    []symbolTransform // per-symbol state transform
}
// symbolTransform contains the state transform for a symbol.
type symbolTransform struct {
	deltaNbBits    uint32
	deltaFindState int16
	outBits        uint8
}

// String renders the transform in a human readable form for debugging.
func (s symbolTransform) String() string {
	return fmt.Sprintf("{deltabits: %08x, findstate:%d outbits:%d}",
		s.deltaNbBits, s.deltaFindState, s.outBits)
}
// Histogram returns the internal histogram so callers can populate it
// themselves and skip that step during compression, or inspect it once
// compression is done. After populating it, call HistogramFinished with
// the highest populated symbol and the count of the most populated entry;
// both are accepted at face value.
func (s *fseEncoder) Histogram() *[256]uint32 {
	return &s.count
}
// HistogramFinished can be called to indicate that the histogram has been populated.
// maxSymbol is the index of the highest set symbol of the next data segment.
// maxCount is the number of entries in the most populated entry.
// These are accepted at face value.
func (s *fseEncoder) HistogramFinished(maxSymbol uint8, maxCount int) {
	s.clearCount = maxCount != 0
	s.symbolLen = uint16(maxSymbol) + 1
	s.maxCount = maxCount
}
// allocCtable ensures the compression tables are sized for the current
// actualTableLog. Existing backing arrays are reused when big enough.
func (s *fseEncoder) allocCtable() {
	tableSize := 1 << s.actualTableLog
	// Symbol table, one byte per table position.
	if cap(s.ct.tableSymbol) < tableSize {
		s.ct.tableSymbol = make([]byte, tableSize)
	}
	s.ct.tableSymbol = s.ct.tableSymbol[:tableSize]
	// State table, one uint16 per table position.
	if cap(s.ct.stateTable) < tableSize {
		s.ct.stateTable = make([]uint16, tableSize)
	}
	s.ct.stateTable = s.ct.stateTable[:tableSize]
	// Symbol transforms always span the full byte range.
	if cap(s.ct.symbolTT) < 256 {
		s.ct.symbolTT = make([]symbolTransform, 256)
	}
	s.ct.symbolTT = s.ct.symbolTT[:256]
}
// buildCTable will populate the compression table so it is ready to be used.
// It consumes the normalized counts in s.norm (readied by normalizeCount)
// and s.actualTableLog, producing tableSymbol, stateTable and symbolTT.
func (s *fseEncoder) buildCTable() error {
	tableSize := uint32(1 << s.actualTableLog)
	highThreshold := tableSize - 1
	var cumul [256]int16
	s.allocCtable()
	tableSymbol := s.ct.tableSymbol[:tableSize]
	// symbol start positions: cumul[u] is where symbol u's states begin.
	{
		cumul[0] = 0
		for ui, v := range s.norm[:s.symbolLen-1] {
			u := byte(ui) // one less than reference
			if v == -1 {
				// Low proba symbol: placed at the top of the table.
				cumul[u+1] = cumul[u] + 1
				tableSymbol[highThreshold] = u
				highThreshold--
			} else {
				cumul[u+1] = cumul[u] + v
			}
		}
		// Encode last symbol separately to avoid overflowing u
		u := int(s.symbolLen - 1)
		v := s.norm[s.symbolLen-1]
		if v == -1 {
			// Low proba symbol
			cumul[u+1] = cumul[u] + 1
			tableSymbol[highThreshold] = byte(u)
			highThreshold--
		} else {
			cumul[u+1] = cumul[u] + v
		}
		if uint32(cumul[s.symbolLen]) != tableSize {
			return fmt.Errorf("internal error: expected cumul[s.symbolLen] (%d) == tableSize (%d)", cumul[s.symbolLen], tableSize)
		}
		// Sentinel value past the last symbol.
		cumul[s.symbolLen] = int16(tableSize) + 1
	}
	// Spread symbols across the table using the co-prime step from tableStep.
	s.zeroBits = false
	{
		step := tableStep(tableSize)
		tableMask := tableSize - 1
		var position uint32
		// if any symbol > largeLimit, we may have 0 bits output.
		largeLimit := int16(1 << (s.actualTableLog - 1))
		for ui, v := range s.norm[:s.symbolLen] {
			symbol := byte(ui)
			if v > largeLimit {
				s.zeroBits = true
			}
			for nbOccurrences := int16(0); nbOccurrences < v; nbOccurrences++ {
				tableSymbol[position] = symbol
				position = (position + step) & tableMask
				for position > highThreshold {
					position = (position + step) & tableMask
				} /* Low proba area */
			}
		}
		// Check if we have gone through all positions
		if position != 0 {
			return errors.New("position!=0")
		}
	}
	// Build table: next-state values, grouped per symbol via cumul.
	table := s.ct.stateTable
	{
		tsi := int(tableSize)
		for u, v := range tableSymbol {
			// TableU16 : sorted by symbol order; gives next state value
			table[cumul[v]] = uint16(tsi + u)
			cumul[v]++
		}
	}
	// Build Symbol Transformation Table
	{
		total := int16(0)
		symbolTT := s.ct.symbolTT[:s.symbolLen]
		tableLog := s.actualTableLog
		tl := (uint32(tableLog) << 16) - (1 << tableLog)
		for i, v := range s.norm[:s.symbolLen] {
			switch v {
			case 0:
			case -1, 1:
				symbolTT[i].deltaNbBits = tl
				symbolTT[i].deltaFindState = total - 1
				total++
			default:
				maxBitsOut := uint32(tableLog) - highBit(uint32(v-1))
				minStatePlus := uint32(v) << maxBitsOut
				symbolTT[i].deltaNbBits = (maxBitsOut << 16) - minStatePlus
				symbolTT[i].deltaFindState = total - v
				total += v
			}
		}
		if total != int16(tableSize) {
			return fmt.Errorf("total mismatch %d (got) != %d (want)", total, tableSize)
		}
	}
	return nil
}
// rtbTable holds the "rest to beat" thresholds used by normalizeCount when
// deciding whether a small probability should be rounded up (indexed by the
// candidate probability, 0-7).
var rtbTable = [...]uint32{0, 473195, 504333, 520860, 550000, 700000, 750000, 830000}
// setRLE configures the encoder for RLE: a single repeated symbol val
// with a zero table log and a one-entry state table.
func (s *fseEncoder) setRLE(val byte) {
	s.allocCtable()
	s.actualTableLog = 0
	s.ct.stateTable = s.ct.stateTable[:1]
	s.ct.symbolTT[val] = symbolTransform{
		deltaFindState: 0,
		deltaNbBits:    0,
	}
	if debugEncoder {
		println("setRLE: val", val, "symbolTT", s.ct.symbolTT[val])
	}
	s.rleVal = val
	s.useRLE = true
}
// setBits will set output bits for the transform.
// if nil is provided, the number of bits is equal to the index.
// No-op for reused or predefined encoders.
func (s *fseEncoder) setBits(transform []byte) {
	if s.reUsed || s.preDefined {
		return
	}
	if s.useRLE {
		// Only the RLE symbol's entry matters.
		if transform == nil {
			s.maxBits = s.rleVal
		} else {
			s.maxBits = transform[s.rleVal]
		}
		s.ct.symbolTT[s.rleVal].outBits = s.maxBits
		return
	}
	if transform == nil {
		// Identity transform: symbol i outputs i bits.
		for i := range s.ct.symbolTT[:s.symbolLen] {
			s.ct.symbolTT[i].outBits = uint8(i)
		}
		s.maxBits = uint8(s.symbolLen - 1)
		return
	}
	// Copy the supplied transform, tracking the maximum output bits.
	s.maxBits = 0
	for i, v := range transform[:s.symbolLen] {
		s.ct.symbolTT[i].outBits = v
		if v > s.maxBits {
			// We could assume bits always going up, but we play safe.
			s.maxBits = v
		}
	}
}
// normalizeCount will normalize the count of the symbols so
// the total is equal to the table size (1 << actualTableLog).
// If successful, compression tables will also be made ready.
// A single-symbol input switches the encoder to RLE mode instead.
func (s *fseEncoder) normalizeCount(length int) error {
	if s.reUsed {
		return nil
	}
	s.optimalTableLog(length)
	var (
		tableLog = s.actualTableLog
		// Fixed-point scaling: step maps raw counts into [0, 1<<62),
		// scale shifts them back down to table-size units.
		scale             = 62 - uint64(tableLog)
		step              = (1 << 62) / uint64(length)
		vStep             = uint64(1) << (scale - 20)
		stillToDistribute = int16(1 << tableLog)
		largest           int
		largestP          int16
		lowThreshold      = (uint32)(length >> tableLog)
	)
	if s.maxCount == length {
		s.useRLE = true
		return nil
	}
	s.useRLE = false
	for i, cnt := range s.count[:s.symbolLen] {
		// already handled
		// if (count[s] == s.length) return 0; /* rle special case */
		if cnt == 0 {
			s.norm[i] = 0
			continue
		}
		if cnt <= lowThreshold {
			// Rare symbol: -1 encodes the minimum probability of 1.
			s.norm[i] = -1
			stillToDistribute--
		} else {
			proba := (int16)((uint64(cnt) * step) >> scale)
			if proba < 8 {
				// Round small probabilities up when the remainder
				// beats the rtbTable threshold.
				restToBeat := vStep * uint64(rtbTable[proba])
				v := uint64(cnt)*step - (uint64(proba) << scale)
				if v > restToBeat {
					proba++
				}
			}
			if proba > largestP {
				largestP = proba
				largest = i
			}
			s.norm[i] = proba
			stillToDistribute -= proba
		}
	}
	if -stillToDistribute >= (s.norm[largest] >> 1) {
		// corner case, need another normalization method
		err := s.normalizeCount2(length)
		if err != nil {
			return err
		}
		if debugAsserts {
			err = s.validateNorm()
			if err != nil {
				return err
			}
		}
		return s.buildCTable()
	}
	// Give the rounding remainder to the most probable symbol.
	s.norm[largest] += stillToDistribute
	if debugAsserts {
		err := s.validateNorm()
		if err != nil {
			return err
		}
	}
	return s.buildCTable()
}
// Secondary normalization method.
// To be used when primary method fails.
func (s *fseEncoder) normalizeCount2(length int) error {
	const notYetAssigned = -2
	var (
		distributed uint32
		total       = uint32(length)
		tableLog    = s.actualTableLog
		// Counts at or below lowThreshold get the minimum probability (-1);
		// counts at or below lowOne get probability exactly 1.
		lowThreshold = total >> tableLog
		lowOne       = (total * 3) >> (tableLog + 1)
	)
	// First pass: settle the low-count symbols, mark the rest unassigned.
	for i, cnt := range s.count[:s.symbolLen] {
		if cnt == 0 {
			s.norm[i] = 0
			continue
		}
		if cnt <= lowThreshold {
			s.norm[i] = -1
			distributed++
			total -= cnt
			continue
		}
		if cnt <= lowOne {
			s.norm[i] = 1
			distributed++
			total -= cnt
			continue
		}
		s.norm[i] = notYetAssigned
	}
	toDistribute := (1 << tableLog) - distributed

	if (total / toDistribute) > lowOne {
		// risk of rounding to zero
		// Raise lowOne and sweep again so more symbols get probability 1.
		lowOne = (total * 3) / (toDistribute * 2)
		for i, cnt := range s.count[:s.symbolLen] {
			if (s.norm[i] == notYetAssigned) && (cnt <= lowOne) {
				s.norm[i] = 1
				distributed++
				total -= cnt
				continue
			}
		}
		toDistribute = (1 << tableLog) - distributed
	}
	if distributed == uint32(s.symbolLen)+1 {
		// all values are pretty poor;
		// probably incompressible data (should have already been detected);
		// find max, then give all remaining points to max
		var maxV int
		var maxC uint32
		for i, cnt := range s.count[:s.symbolLen] {
			if cnt > maxC {
				maxV = i
				maxC = cnt
			}
		}
		s.norm[maxV] += int16(toDistribute)
		return nil
	}

	if total == 0 {
		// all of the symbols were low enough for the lowOne or lowThreshold
		// Spread the remaining points round-robin over positive entries.
		for i := uint32(0); toDistribute > 0; i = (i + 1) % (uint32(s.symbolLen)) {
			if s.norm[i] > 0 {
				toDistribute--
				s.norm[i]++
			}
		}
		return nil
	}

	var (
		// Fixed-point setup for proportional distribution of the remaining
		// probability points over the not-yet-assigned symbols.
		vStepLog = 62 - uint64(tableLog)
		mid      = uint64((1 << (vStepLog - 1)) - 1)
		rStep    = (((1 << vStepLog) * uint64(toDistribute)) + mid) / uint64(total) // scale on remaining
		tmpTotal = mid
	)
	for i, cnt := range s.count[:s.symbolLen] {
		if s.norm[i] == notYetAssigned {
			var (
				end    = tmpTotal + uint64(cnt)*rStep
				sStart = uint32(tmpTotal >> vStepLog)
				sEnd   = uint32(end >> vStepLog)
				weight = sEnd - sStart
			)
			if weight < 1 {
				return errors.New("weight < 1")
			}
			s.norm[i] = int16(weight)
			tmpTotal = end
		}
	}
	return nil
}
// optimalTableLog calculates and sets the optimal tableLog in s.actualTableLog
func (s *fseEncoder) optimalTableLog(length int) {
	// Lower bound: enough accuracy for the source size or the symbol count,
	// whichever demands less.
	srcBits := highBit(uint32(length)) + 1
	symBits := highBit(uint32(s.symbolLen-1)) + 2
	minBits := uint8(symBits)
	if srcBits < symBits {
		minBits = uint8(srcBits)
	}

	// Upper bound: accuracy beyond what the input can use is wasted.
	tableLog := uint8(maxEncTableLog)
	if reduced := uint8(highBit(uint32(length-1))) - 2; reduced < tableLog {
		// Accuracy can be reduced
		tableLog = reduced
	}
	if tableLog < minBits {
		tableLog = minBits
	}

	// Clamp into the range the encoder supports.
	switch {
	case tableLog < minEncTablelog:
		tableLog = minEncTablelog
	case tableLog > maxEncTableLog:
		tableLog = maxEncTableLog
	}
	s.actualTableLog = tableLog
}
// validateNorm validates the normalized histogram table.
func (s *fseEncoder) validateNorm() (err error) {
	// Sum the absolute values; negative entries mark minimum-probability slots.
	var total int
	for _, n := range s.norm[:s.symbolLen] {
		if n < 0 {
			total -= int(n)
		} else {
			total += int(n)
		}
	}
	// On any failure below, dump the full table to aid debugging.
	defer func() {
		if err == nil {
			return
		}
		fmt.Printf("selected TableLog: %d, Symbol length: %d\n", s.actualTableLog, s.symbolLen)
		for i, n := range s.norm[:s.symbolLen] {
			fmt.Printf("%3d: %5d -> %4d \n", i, s.count[i], n)
		}
	}()
	if want := 1 << s.actualTableLog; total != want {
		return fmt.Errorf("warning: Total == %d != %d", total, want)
	}
	// No counts may exist past the declared symbol length.
	for i, cnt := range s.count[s.symbolLen:] {
		if cnt != 0 {
			return fmt.Errorf("warning: Found symbol out of range, %d after cut", i)
		}
	}
	return nil
}
// writeCount will write the normalized histogram count to header.
// This is read back by readNCount.
func (s *fseEncoder) writeCount(out []byte) ([]byte, error) {
	if s.useRLE {
		// RLE block: the header is just the repeated symbol value.
		return append(out, s.rleVal), nil
	}
	if s.preDefined || s.reUsed {
		// Never write predefined.
		return out, nil
	}

	var (
		tableLog  = s.actualTableLog
		tableSize = 1 << tableLog
		previous0 bool
		charnum   uint16

		// maximum header size plus 2 extra bytes for final output if bitCount == 0.
		maxHeaderSize = ((int(s.symbolLen) * int(tableLog)) >> 3) + 3 + 2

		// Write Table Size
		bitStream = uint32(tableLog - minEncTablelog)
		bitCount  = uint(4)
		remaining = int16(tableSize + 1) /* +1 for extra accuracy */
		threshold = int16(tableSize)
		nbBits    = uint(tableLog + 1)
		outP      = len(out)
	)
	if cap(out) < outP+maxHeaderSize {
		// Grow capacity without changing the visible length.
		out = append(out, make([]byte, maxHeaderSize*3)...)
		out = out[:len(out)-maxHeaderSize*3]
	}
	out = out[:outP+maxHeaderSize]

	// stops at 1
	for remaining > 1 {
		if previous0 {
			// Encode a run of zero-probability symbols.
			start := charnum
			for s.norm[charnum] == 0 {
				charnum++
			}
			// Runs of 24+ zeros are flagged with repeated 16-bit 0xFFFF markers.
			for charnum >= start+24 {
				start += 24
				bitStream += uint32(0xFFFF) << bitCount
				out[outP] = byte(bitStream)
				out[outP+1] = byte(bitStream >> 8)
				outP += 2
				bitStream >>= 16
			}
			// Remaining zeros in groups of three, two bits per group.
			for charnum >= start+3 {
				start += 3
				bitStream += 3 << bitCount
				bitCount += 2
			}
			// Final partial group (0-2 zeros), also two bits.
			bitStream += uint32(charnum-start) << bitCount
			bitCount += 2
			if bitCount > 16 {
				// Flush two whole bytes from the accumulator.
				out[outP] = byte(bitStream)
				out[outP+1] = byte(bitStream >> 8)
				outP += 2
				bitStream >>= 16
				bitCount -= 16
			}
		}

		count := s.norm[charnum]
		charnum++
		max := (2*threshold - 1) - remaining
		if count < 0 {
			remaining += count
		} else {
			remaining -= count
		}
		count++ // +1 for extra accuracy
		if count >= threshold {
			count += max // [0..max[ [max..threshold[ (...) [threshold+max 2*threshold[
		}
		bitStream += uint32(count) << bitCount
		bitCount += nbBits
		if count < max {
			// Small values are encodable in one bit fewer.
			bitCount--
		}
		previous0 = count == 1
		if remaining < 1 {
			return nil, errors.New("internal error: remaining < 1")
		}
		// Shrink the field width as the remaining probability mass drops.
		for remaining < threshold {
			nbBits--
			threshold >>= 1
		}

		if bitCount > 16 {
			// Flush two whole bytes from the accumulator.
			out[outP] = byte(bitStream)
			out[outP+1] = byte(bitStream >> 8)
			outP += 2
			bitStream >>= 16
			bitCount -= 16
		}
	}

	if outP+2 > len(out) {
		return nil, fmt.Errorf("internal error: %d > %d, maxheader: %d, sl: %d, tl: %d, normcount: %v", outP+2, len(out), maxHeaderSize, s.symbolLen, int(tableLog), s.norm[:s.symbolLen])
	}
	// Flush the remaining bits, rounding bitCount up to whole bytes.
	out[outP] = byte(bitStream)
	out[outP+1] = byte(bitStream >> 8)
	outP += int((bitCount + 7) / 8)

	if charnum > s.symbolLen {
		return nil, errors.New("internal error: charnum > s.symbolLen")
	}
	return out[:outP], nil
}
// Approximate symbol cost, as fractional value, using fixed-point format (accuracyLog fractional bits)
// note 1 : assume symbolValue is valid (<= maxSymbolValue)
// note 2 : if freq[symbolValue]==0, @return a fake cost of tableLog+1 bits *
func (s *fseEncoder) bitCost(symbolValue uint8, accuracyLog uint32) uint32 {
	// The top 16 bits of deltaNbBits hold the minimum whole-bit cost.
	minNbBits := s.ct.symbolTT[symbolValue].deltaNbBits >> 16
	threshold := (minNbBits + 1) << 16
	if debugAsserts {
		if !(s.actualTableLog < 16) {
			panic("!s.actualTableLog < 16")
		}
		// ensure enough room for renormalization double shift
		if !(uint8(accuracyLog) < 31-s.actualTableLog) {
			panic("!uint8(accuracyLog) < 31-s.actualTableLog")
		}
	}
	tableSize := uint32(1) << s.actualTableLog
	// Distance from the point where the cost steps to minNbBits+1 bits.
	deltaFromThreshold := threshold - (s.ct.symbolTT[symbolValue].deltaNbBits + tableSize)
	// linear interpolation (very approximate)
	normalizedDeltaFromThreshold := (deltaFromThreshold << accuracyLog) >> s.actualTableLog
	bitMultiplier := uint32(1) << accuracyLog
	if debugAsserts {
		if s.ct.symbolTT[symbolValue].deltaNbBits+tableSize > threshold {
			panic("s.ct.symbolTT[symbolValue].deltaNbBits+tableSize > threshold")
		}
		if normalizedDeltaFromThreshold > bitMultiplier {
			panic("normalizedDeltaFromThreshold > bitMultiplier")
		}
	}
	// Result is (minNbBits+1) bits minus the interpolated fractional saving,
	// in fixed point with accuracyLog fractional bits.
	return (minNbBits+1)*bitMultiplier - normalizedDeltaFromThreshold
}
// approxSize returns the approximate cost in bits of encoding the
// distribution in hist using the current compression table.
// Histogram should only be up to the last non-zero symbol.
// Returns math.MaxUint32 if the table cannot represent all the symbols in hist.
func (s *fseEncoder) approxSize(hist []uint32) uint32 {
	if len(hist) > int(s.symbolLen) {
		// More symbols than we have.
		return math.MaxUint32
	}
	if s.useRLE {
		// We will never reuse RLE encoders.
		return math.MaxUint32
	}
	const kAccuracyLog = 8
	// A per-symbol cost above this means the symbol is effectively
	// unrepresentable by this table.
	badCost := (uint32(s.actualTableLog) + 1) << kAccuracyLog
	var total uint32
	for sym, freq := range hist {
		if freq == 0 {
			continue
		}
		if s.norm[sym] == 0 {
			// Symbol occurs in the input but has no probability in the table.
			return math.MaxUint32
		}
		cost := s.bitCost(uint8(sym), kAccuracyLog)
		if cost > badCost {
			return math.MaxUint32
		}
		total += freq * cost
	}
	// Drop the fixed-point fractional bits.
	return total >> kAccuracyLog
}
// maxHeaderSize returns the maximum header size in bits.
// This is not exact size, but we want a penalty for new tables anyway.
func (s *fseEncoder) maxHeaderSize() uint32 {
	switch {
	case s.preDefined:
		// Predefined tables are never written to the stream.
		return 0
	case s.useRLE:
		// RLE headers are a single byte.
		return 8
	default:
		hdrBytes := ((uint32(s.symbolLen) * uint32(s.actualTableLog)) >> 3) + 3
		return hdrBytes * 8
	}
}
// cState contains the compression state of a stream.
type cState struct {
	bw         *bitWriter // destination for emitted bits
	stateTable []uint16   // next-state lookup table from the compression table
	state      uint16     // current FSE state
}
// init will initialize the compression state to the first symbol of the stream.
func (c *cState) init(bw *bitWriter, ct *cTable, first symbolTransform) {
	c.bw = bw
	c.stateTable = ct.stateTable
	if len(c.stateTable) == 1 {
		// RLE
		c.stateTable[0] = uint16(0)
		c.state = 0
		return
	}
	// Derive the initial state from the first symbol's transform:
	// round deltaNbBits up to a whole bit count (the +1<<15 before the
	// 16-bit shift), then use the remainder to index the state table.
	nbBitsOut := (first.deltaNbBits + (1 << 15)) >> 16
	im := int32((nbBitsOut << 16) - first.deltaNbBits)
	lu := (im >> nbBitsOut) + int32(first.deltaFindState)
	c.state = c.stateTable[lu]
}
// flush will write the tablelog to the output and flush the remaining full bytes.
func (c *cState) flush(tableLog uint8) {
	c.bw.flush32()
	// The final state is emitted using exactly tableLog bits.
	c.bw.addBits16NC(c.state, tableLog)
}
// Copyright 2019+ Klaus Post. All rights reserved.
// License information can be found in the LICENSE file.
// Based on work by Yann Collet, released under BSD License.
package zstd
import (
"fmt"
"math"
"sync"
)
var (
	// fsePredef are the predefined fse tables as defined here:
	// https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#default-distributions
	// These values are already transformed.
	fsePredef [3]fseDecoder

	// fsePredefEnc are the predefined encoder based on fse tables as defined here:
	// https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#default-distributions
	// These values are already transformed.
	fsePredefEnc [3]fseEncoder

	// symbolTableX contain the transformations needed for each type as defined in
	// https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#the-codes-for-literals-lengths-match-lengths-and-offsets
	symbolTableX [3][]baseOffset

	// maxTableSymbol is the biggest supported symbol for each table type
	// https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#the-codes-for-literals-lengths-match-lengths-and-offsets
	maxTableSymbol = [3]uint8{tableLiteralLengths: maxLiteralLengthSymbol, tableOffsets: maxOffsetLengthSymbol, tableMatchLengths: maxMatchLengthSymbol}

	// bitTables is the bits table for each table.
	// The nil entry for tableOffsets makes setBits fall back to the
	// identity mapping (symbol value == bit count).
	bitTables = [3][]byte{tableLiteralLengths: llBitsTable[:], tableOffsets: nil, tableMatchLengths: mlBitsTable[:]}
)
// tableIndex selects one of the three sequence table kinds
// (literal lengths, offsets, match lengths).
type tableIndex uint8

const (
	// indexes for fsePredef and symbolTableX
	tableLiteralLengths tableIndex = 0
	tableOffsets        tableIndex = 1
	tableMatchLengths   tableIndex = 2

	// Highest symbol value supported by each table kind.
	maxLiteralLengthSymbol = 35
	maxOffsetLengthSymbol  = 30
	maxMatchLengthSymbol   = 52
)
// baseOffset is used for calculating transformations.
type baseOffset struct {
	baseLine uint32 // first value represented by this symbol
	addBits  uint8  // extra bits to read; the symbol spans 1<<addBits values
}
// fillBase will precalculate base offsets with the given bit distributions.
// Entry i receives the running base value plus bits[i] extra bits; the base
// then advances by 1<<bits[i]. Panics when dst and bits differ in length or
// the base would overflow int32.
func fillBase(dst []baseOffset, base uint32, bits ...uint8) {
	if len(dst) != len(bits) {
		panic(fmt.Sprintf("len(dst) (%d) != len(bits) (%d)", len(dst), len(bits)))
	}
	for i := range dst {
		if base > math.MaxInt32 {
			panic("invalid decoding table, base overflows int32")
		}
		nBits := bits[i]
		dst[i] = baseOffset{
			baseLine: base,
			addBits:  nBits,
		}
		base += 1 << nBits
	}
}
// predef guards one-time initialization of the predefined tables.
var predef sync.Once

// initPredefined initializes the predefined decoder/encoder tables and the
// symbol transformations exactly once. Safe to call multiple times.
func initPredefined() {
	predef.Do(func() {
		// Literals length codes
		tmp := make([]baseOffset, 36)
		// Symbols 0-15 are direct lengths with no extra bits.
		for i := range tmp[:16] {
			tmp[i] = baseOffset{
				baseLine: uint32(i),
				addBits:  0,
			}
		}
		fillBase(tmp[16:], 16, 1, 1, 1, 1, 2, 2, 3, 3, 4, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16)
		symbolTableX[tableLiteralLengths] = tmp

		// Match length codes
		tmp = make([]baseOffset, 53)
		for i := range tmp[:32] {
			tmp[i] = baseOffset{
				// The transformation adds the 3 length.
				baseLine: uint32(i) + 3,
				addBits:  0,
			}
		}
		fillBase(tmp[32:], 35, 1, 1, 1, 1, 2, 2, 3, 3, 4, 4, 5, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16)
		symbolTableX[tableMatchLengths] = tmp

		// Offset codes
		tmp = make([]baseOffset, maxOffsetBits+1)
		tmp[1] = baseOffset{
			baseLine: 1,
			addBits:  1,
		}
		fillBase(tmp[2:], 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30)
		symbolTableX[tableOffsets] = tmp

		// Fill predefined tables and transform them.
		// https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#default-distributions
		for i := range fsePredef[:] {
			f := &fsePredef[i]
			switch tableIndex(i) {
			case tableLiteralLengths:
				// https://github.com/facebook/zstd/blob/ededcfca57366461021c922720878c81a5854a0a/lib/decompress/zstd_decompress_block.c#L243
				f.actualTableLog = 6
				copy(f.norm[:], []int16{4, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1,
					2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 2, 1, 1, 1, 1, 1,
					-1, -1, -1, -1})
				f.symbolLen = 36
			case tableOffsets:
				// https://github.com/facebook/zstd/blob/ededcfca57366461021c922720878c81a5854a0a/lib/decompress/zstd_decompress_block.c#L281
				f.actualTableLog = 5
				copy(f.norm[:], []int16{
					1, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1,
					1, 1, 1, 1, 1, 1, 1, 1, -1, -1, -1, -1, -1})
				f.symbolLen = 29
			case tableMatchLengths:
				//https://github.com/facebook/zstd/blob/ededcfca57366461021c922720878c81a5854a0a/lib/decompress/zstd_decompress_block.c#L304
				f.actualTableLog = 6
				copy(f.norm[:], []int16{
					1, 4, 3, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1,
					1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
					1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -1, -1,
					-1, -1, -1, -1, -1})
				f.symbolLen = 53
			}
			// Build the decoder table and apply the symbol transformation.
			if err := f.buildDtable(); err != nil {
				panic(fmt.Errorf("building table %v: %v", tableIndex(i), err))
			}
			if err := f.transform(symbolTableX[i]); err != nil {
				panic(fmt.Errorf("building table %v: %v", tableIndex(i), err))
			}
			f.preDefined = true

			// Create encoder as well
			enc := &fsePredefEnc[i]
			copy(enc.norm[:], f.norm[:])
			enc.symbolLen = f.symbolLen
			enc.actualTableLog = f.actualTableLog
			if err := enc.buildCTable(); err != nil {
				panic(fmt.Errorf("building encoding table %v: %v", tableIndex(i), err))
			}
			enc.setBits(bitTables[i])
			enc.preDefined = true
		}
	})
}
package zstd
// fuzzDicts holds embedded dictionary blobs; entries are appended in init below.
var fuzzDicts = make([][]byte, 0)
func init() {
fuzzDicts = append(fuzzDicts, []byte{55, 164, 48, 236, 32, 132, 11, 63, 74, 16, 64, 45, 57, 5, 168, 108, 99, 136, 49, 210, 17, 200, 50, 21, 185, 44, 155, 180, 170, 172, 47, 161, 245, 239, 100, 205, 115, 164, 195, 34, 58, 193, 102, 57, 1, 85, 222, 52, 126, 47, 15, 87, 167, 59, 186, 185, 244, 0, 65, 75, 226, 107, 210, 86, 82, 38, 62, 19, 82, 196, 23, 176, 32, 158, 74, 34, 75, 146, 146, 146, 146, 146, 72, 51, 1, 32, 12, 12, 9, 198, 163, 194, 33, 85, 42, 142, 227, 3, 4, 128, 193, 43, 166, 147, 27, 18, 70, 98, 18, 196, 40, 133, 144, 49, 134, 16, 2, 8, 0, 0, 0, 0, 0, 0, 1, 16, 129, 0, 0, 0, 212, 107, 74, 5, 196, 185, 72, 28, 70, 65, 14, 195, 144, 98, 8, 25, 101, 106, 136, 0, 0, 0, 0, 1, 0, 0, 0, 4, 0, 0, 0, 8, 0, 0, 0, 37, 158, 217, 217, 127, 113, 172, 217, 172, 217, 217, 172, 217, 127, 127, 172, 127, 217, 143, 91, 13, 205, 168, 88, 203, 61, 96, 4, 106, 194, 20, 222, 136, 104, 120, 99, 220, 217, 172, 217, 158, 68, 217, 217, 158, 172, 217, 37, 172, 217, 217, 217, 127, 113, 172, 217, 172, 150, 34, 157, 178, 229, 109, 54, 170, 249, 156, 208, 63, 247, 110, 67, 23, 134, 185, 182, 201, 184, 138, 222, 70, 167, 60, 139, 147, 128, 33, 106, 159, 212, 12, 73, 203, 217, 217, 172, 127, 217, 127, 217, 37, 23, 172, 248, 172, 127, 217, 127, 172, 172, 217, 172, 172, 127, 217, 217, 217, 127, 127, 172, 127, 217, 127, 248, 217, 82, 172, 172, 172, 203, 217, 102, 144, 47, 196, 156, 207, 195, 209, 66, 191, 3, 158, 145, 234, 82, 228, 245, 137, 185, 120, 181, 14, 13, 72, 97, 0, 105, 23, 138, 106, 235, 106, 201, 20, 25, 17, 13, 249, 188, 134, 174, 192, 60, 143, 14, 8, 89, 168, 211, 47, 62, 12, 252, 15, 176, 232, 223, 27, 115, 138, 109, 244, 236, 149, 157, 174, 17, 124, 75, 31, 48, 170, 79, 12, 126, 40, 170, 206, 172, 127, 217, 217, 217, 127, 82, 217, 217, 127, 217, 203, 217, 217, 217, 172, 172, 203, 217, 217, 172, 172, 172, 127, 217, 127, 217, 217, 248, 217, 82, 172, 172, 172, 203, 217, 217, 172, 172, 217, 172, 127, 217, 217, 113, 172, 217, 172, 217, 203, 217, 217, 172, 172, 217, 172, 127, 217, 
244, 128, 80, 200, 26, 64, 67, 102, 172, 172, 127, 217, 220, 3, 254, 184, 51, 222, 156, 220, 195, 130, 40, 221, 76, 172, 217, 172, 127, 217, 217, 113, 172, 217, 172, 217, 203, 217, 217, 172, 172, 217, 172, 127, 217, 244, 128, 80, 200, 26, 130, 26, 120, 217, 217, 113, 172, 217, 172, 217, 203, 217, 217, 172, 172, 217, 172, 217, 172, 172, 217, 217, 127, 127, 217, 172, 217, 172, 217, 217, 172, 217, 127, 127, 172, 210, 86, 61, 236, 100, 96, 7, 6, 13, 147, 158, 184, 26, 94, 101, 112, 145, 101, 177, 191, 22, 126, 1, 36, 36, 36, 36, 36, 6, 7, 1, 15, 8, 36, 36, 36, 36, 36, 8, 0, 36, 181, 96, 205, 36, 36, 36, 209, 36, 36, 13, 3, 4, 14, 126, 1, 36, 36, 36, 36, 36, 6, 7, 1, 15, 7, 11, 2, 10, 15, 164, 36, 36, 36, 126, 36, 177, 15, 15, 1, 3, 0, 3, 7, 6, 9, 0, 36, 36, 36, 36, 36, 36, 36, 36, 209, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 181, 96, 205, 36, 36, 36, 209, 36, 36, 13, 3, 8, 0, 10, 9, 15, 4, 9, 36, 36, 36, 36, 36, 209, 36, 36, 12, 3, 8, 9, 13, 2, 12, 12, 13, 5, 6, 6, 0, 5, 3, 8, 12, 1, 5, 12, 12, 0, 15, 6, 11, 4, 2, 8, 36, 36, 36, 209, 209, 36, 36, 209, 10, 36, 36, 209, 36, 6, 6, 13, 15, 14, 11, 5, 7, 5, 4, 15, 8, 36, 36, 36, 36, 36, 36, 209, 36, 36, 1, 4, 11, 7, 36, 36, 36, 3, 14, 7, 12, 216, 36, 36, 36, 6, 36, 36, 36, 36, 36, 209, 7, 2, 10, 15, 6, 3, 0, 1, 10, 11, 14, 8, 7, 0, 14, 6, 13, 5, 13, 12, 36, 36, 36, 2, 2, 12, 126, 209, 13, 11, 3, 4, 1, 5, 12, 12, 0, 15, 0, 15, 164, 36, 36, 36, 126, 36, 177, 15, 2, 197, 96, 149, 36, 36, 36, 36, 36, 36, 36, 36, 209, 209, 36, 36, 36, 6, 9, 15, 2, 36, 36, 209, 209, 36, 36, 0, 14, 0, 8, 11, 7, 5, 11, 36, 36, 36, 4, 36, 209, 36, 10, 6, 209, 36, 36, 1, 5, 5, 8, 209, 36, 126, 36, 9, 10, 9, 15, 15, 14, 9, 14, 36, 6, 36, 36, 8, 1, 13, 3, 12, 3, 101, 34, 36, 10, 7, 7, 3, 0, 3, 5, 8, 3, 7, 12, 13, 5, 8, 11, 4, 6, 3, 2, 8, 2, 1, 5, 15, 7, 0, 2, 0, 6, 12, 13, 10, 126, 36, 36, 209, 36, 209, 36, 36, 36, 209, 36, 36, 209, 36, 36, 36, 36, 36, 36, 209, 36, 209, 36, 36, 36, 36, 36, 4, 15, 15, 14, 209, 36, 36, 9, 6, 15, 1, 7, 2, 209, 36, 
36, 36, 4, 9, 36, 36, 209, 209, 36, 36, 209, 36, 4, 7, 36, 36, 36, 36, 36, 209, 36, 36, 36, 36, 36, 13, 6, 15, 1, 5, 12, 6, 36, 36, 209, 209, 36, 36, 36, 36, 36, 89, 190, 167, 181, 101, 34, 36, 36, 36, 36, 6, 11, 3, 13, 6, 10, 11, 15, 13, 36, 36, 36, 36, 36, 181, 96, 205, 12, 12, 5, 7, 13, 10, 13, 7, 8, 11, 3, 4, 14, 10, 12, 10, 11, 5, 0, 10, 209, 36, 36, 36, 36, 36, 4, 2, 6, 10, 5, 7, 36, 36, 209, 36, 36, 1, 4, 11, 7, 36, 36, 36, 3, 14, 7, 12, 216, 36, 36, 36, 7, 1, 6, 1, 7, 1, 1, 6, 7, 13, 3, 4, 13, 8, 11, 200, 6, 11, 3, 74, 36, 36, 36, 36, 10, 209, 36, 36, 209, 36, 36, 36, 36, 9, 200, 200, 26, 94, 101, 200, 200, 96, 147, 158, 200, 209, 36, 36, 36, 209, 9, 36, 36, 6, 9, 1, 36, 36, 36, 36, 36, 36, 36, 209, 209, 36, 36, 36, 200, 36, 9, 9, 2, 6, 13, 7, 8, 11, 3, 5, 13, 12, 36, 200, 36, 177, 72, 164, 36, 9, 10, 9, 36, 36, 36, 209, 3, 14, 10, 4, 0, 14, 36, 36, 36, 209, 200, 10, 11, 14, 8, 200, 209, 36, 36, 36, 36, 36, 36, 36, 209, 36, 36, 36, 36, 4, 14, 126, 1, 200, 200, 36, 36, 36, 36, 36, 36, 200, 26, 36, 36, 36, 139, 36, 36, 36, 6, 7, 13, 5, 10, 6, 10, 200, 6, 7, 13, 5, 9, 14, 15, 13, 4, 200, 36, 36, 1, 4, 11, 7, 36, 12, 12, 0, 15, 209, 36, 36, 209, 10, 11, 3, 13, 15, 209, 209, 209, 36, 200, 5, 12, 12, 0, 15, 9, 3, 8, 0, 10, 205, 12, 12, 5, 200, 36, 36, 209, 6, 13, 147, 158, 9, 200, 9, 36, 36, 200, 2, 7, 36, 36, 36, 36, 36, 200, 36, 177, 15, 15, 15, 14, 9, 14, 36, 36, 36, 36, 36, 36, 36, 209, 10, 9, 10, 9, 36, 36, 36, 36, 209, 36, 36, 3, 14, 5, 7, 10, 6, 7, 13, 5, 10, 6, 10, 36, 36, 6, 7, 126, 36, 36, 209, 200, 200, 6, 7, 13, 200, 9, 14, 15, 13, 200, 200, 5, 12, 12, 0, 200, 36, 36, 36, 36, 36, 36, 36, 36, 209, 36, 36, 36, 36, 36, 5, 200, 4, 0, 14, 36, 36, 4, 15, 8, 36, 11, 15, 12, 6, 0, 1, 5, 5, 13, 12, 36, 36, 2, 2, 12, 200, 200, 200, 200, 11, 3, 4, 1, 12, 12, 0, 36, 36, 36, 36, 209, 36, 36, 36, 36, 36, 200, 2, 197, 96, 200, 200, 200, 8, 14, 1, 7, 1, 1, 6, 13, 3, 4, 8, 11, 200, 12, 5, 7, 13, 10, 74, 7, 8, 11, 1, 3, 0, 3, 7, 6, 9, 200, 9, 200, 200, 13, 11, 36, 209, 
36, 36, 10, 9, 200, 200, 200, 200, 5, 4, 6, 3, 2, 8, 14, 1, 7, 1, 200, 15, 14, 11, 5, 9, 200, 4, 15, 8, 200, 209, 36, 36, 13, 15, 209, 209, 36, 200, 5, 200, 36, 177, 72, 164, 36, 3, 8, 12, 1, 200, 200, 3, 14, 7, 3, 14, 7, 12, 167, 181, 101, 1, 6, 1, 200, 36, 36, 13, 3, 13, 15, 3, 96, 205, 36, 200, 209, 36, 36, 10, 36, 200, 2, 7, 3, 4, 14, 126, 200, 2, 8, 14, 1, 13, 6, 10, 11, 15, 9, 5, 9, 200, 4, 200, 36, 36, 209, 10, 11, 200, 7, 0, 14, 6, 13, 15, 14, 11, 200, 15, 209, 209, 209, 36, 200, 12, 12, 0, 6, 3, 2, 200, 36, 209, 36, 36, 164, 36, 3, 36, 14, 4, 1, 15, 8, 36, 36, 89, 190, 36, 36, 36, 36, 36, 36, 209, 36, 36, 13, 200, 0, 2, 0, 209, 209, 36, 36, 36, 36, 89, 190, 167, 181, 101, 34, 36, 209, 209, 36, 1, 7, 1, 1, 6, 6, 3, 2, 8, 36, 36, 36, 7, 1, 200, 15, 200, 36, 209, 10, 36, 36, 4, 1, 15, 7, 11, 12, 6, 2, 181, 101, 1, 6, 36, 1, 4, 11, 36, 209, 209, 36, 36, 209, 200, 36, 36, 36, 4, 36, 200, 15, 15, 14, 9, 36, 36, 6, 7, 13, 11, 36, 96, 7, 6, 13, 101, 177, 191, 22, 126, 9, 1, 7, 2, 209, 36, 36, 4, 9, 36, 36, 209, 209, 9, 9, 2, 6, 3, 7, 12, 13, 200, 209, 36, 36, 0, 14, 11, 5, 11, 7, 5, 11, 36, 200, 36, 36, 36, 209, 209, 36, 200, 36, 36, 36, 209, 36, 6, 7, 1, 200, 0, 5, 3, 209, 36, 126, 36, 9, 200, 4, 14, 126, 200, 2, 8, 14, 36, 36, 4, 9, 36, 36, 209, 209, 36, 200, 5, 8, 3, 200, 200, 200, 4, 14, 36, 6, 7, 1, 126, 36, 36, 36, 36, 36, 200, 0, 36, 36, 36, 9, 9, 14, 36, 11, 7, 36, 12, 12, 0, 15, 200, 209, 36, 10, 6, 200, 36, 36, 36, 12, 13, 5, 200, 200, 89, 190, 167, 181, 36, 36, 209, 209, 200, 209, 36, 36, 36, 13, 200, 200, 200, 200, 200, 8, 36, 36, 36, 36, 200, 12, 5, 7, 36, 36, 9, 9, 2, 10, 15, 167, 181, 101, 200, 4, 14, 126, 15, 209, 36, 36, 209, 3, 0, 3, 5, 8, 8, 11, 7, 5, 36, 36, 36, 6, 12, 13, 10, 139, 200, 36, 36, 36, 209, 36, 36, 8, 7, 0, 14, 6, 13, 5, 200, 11, 4, 6, 200, 177, 72, 164, 200, 2, 8, 14, 36, 4, 9, 36, 36, 200, 36, 13, 6, 15, 36, 36, 36, 126, 36, 200, 200, 5, 12, 12, 7, 36, 36, 9, 8, 0, 10, 205, 4, 14, 126, 200, 200, 9, 36, 36, 36, 3, 5, 8, 200, 36, 
36, 36, 200, 7, 13, 200, 9, 14, 36, 36, 36, 6, 200, 36, 36, 36, 9, 36, 36, 36, 36, 209, 36, 36, 209, 101, 34, 36, 126, 36, 177, 15, 200, 200, 10, 126, 36, 36, 200, 9, 12, 12, 0, 15, 209, 36, 200, 209, 36, 36, 36, 209, 200, 9, 200, 101, 177, 191, 36, 209, 36, 6, 7, 1, 200, 216, 36, 36, 36, 200, 200, 12, 5, 7, 36, 1, 6, 7, 13, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 78, 195, 245, 59, 217, 14, 252, 44, 26, 12, 204, 59, 197, 142, 29, 197, 226, 175, 153, 217, 212, 98, 72, 19, 172, 89, 208, 43, 201, 108, 168, 138, 233, 88, 222, 65, 209, 31, 219, 186, 171, 126, 201, 32, 28, 155, 152, 86, 47, 225, 214, 5, 41, 87, 218, 40, 219, 12, 182, 245, 144, 149, 218, 88, 138, 18, 23, 157, 207, 218, 63, 171, 193, 220, 201, 220, 193, 66, 238, 192, 83, 47, 65, 43, 37, 76, 151, 159, 213, 58, 37, 142, 81, 216, 39, 134, 47, 167, 133, 158, 180, 122, 162, 227, 84, 83, 104, 58, 235, 206, 64, 122, 174, 54, 91, 80, 157, 33, 21, 120, 18, 118, 61, 122, 48, 22, 122, 144, 76, 233, 62, 147, 232, 105, 138, 217, 233, 
34, 205, 1, 210, 151, 77, 116, 61, 101, 184, 187, 203, 254, 62, 66, 234, 95, 67, 203, 31, 0, 6, 116, 8, 5, 0, 172, 228, 252, 22, 1, 242, 247, 126, 255, 13, 38, 145, 189, 161, 254, 56, 54, 70, 189, 110, 93, 52, 210, 113, 108, 149, 244, 54, 218, 169, 194, 160, 86, 227, 129, 134, 5, 107, 215, 232, 14, 253, 136, 195, 147, 239, 137, 214, 91, 174, 242, 172, 88, 7, 210, 14, 143, 136, 24, 154, 75, 33, 176, 210, 70, 183, 31, 146, 243, 123, 21, 158, 57, 149, 174, 191, 143, 169, 216, 31, 127, 98, 246, 166, 161, 32, 139, 196, 150, 150, 233, 124, 156, 67, 90, 169, 246, 181, 149, 119, 46, 103, 140, 15, 131, 209, 103, 172, 27, 198, 170, 71, 36, 56, 251, 205, 158, 106, 154, 9, 15, 48, 104, 237, 222, 140, 34, 198, 60, 198, 206, 247, 49, 238, 93, 65, 170, 87, 188, 183, 166, 154, 43, 49, 44, 83, 44, 26, 31, 92, 234, 67, 174, 144, 124, 121, 73, 143, 176, 3, 197, 50, 239, 110, 84, 164, 161, 253, 22, 226, 89, 138, 67, 205, 63, 128, 230, 207, 115, 81, 221, 70, 2, 196, 162, 214, 11, 89, 150, 202, 169, 247, 60, 117, 142, 243, 203, 85, 96, 41, 16, 47, 121, 182, 51, 172, 125, 180, 120, 79, 86, 233, 209, 186, 81, 154, 174, 9, 60, 57, 166, 190, 107, 179, 121, 100, 150, 119, 181, 198, 45, 36, 184, 66, 30, 54, 239, 14, 196, 71, 248, 184, 166, 252, 8, 132, 40, 235, 170, 99, 124, 174, 165, 24, 171, 186, 186, 191, 94, 44, 188, 204, 62, 219, 192, 30, 143, 20, 58, 67, 14, 86, 52, 15, 253, 142, 137, 61, 24, 171, 220, 177, 194, 133, 56, 202, 110, 202, 120, 66, 22, 97, 110, 169, 180, 172, 47, 96, 12, 102, 122, 108, 214, 183, 41, 82, 108, 192, 10, 159, 104, 26, 152, 127, 122, 112, 217, 180, 2, 186, 60, 107, 206, 108, 78, 78, 16, 113, 232, 230, 68, 17, 153, 65, 90, 243, 116, 169, 69, 112, 24, 220, 170, 146, 19, 208, 89, 129, 30, 50, 138, 81, 223, 3, 98, 114, 203, 91, 32, 143, 28, 186, 234, 116, 62, 81, 142, 177, 54, 9, 189, 132, 14, 102, 72, 203, 1, 27, 55, 91, 212, 180, 209, 187, 201, 206, 64, 219, 24, 143, 74, 197, 247, 220, 21, 137, 80, 64, 225, 174, 57, 189, 78, 198, 143, 133, 216, 148, 115, 7, 197, 
144, 102, 167, 163, 153, 182, 77, 141, 238, 82, 220, 1, 115, 153, 153, 26, 118, 50, 158, 189, 61, 111, 182, 52, 0, 246, 213, 245, 7, 123, 25, 118, 148, 224, 206, 243, 34, 204, 206, 128, 154, 116, 235, 229, 255, 193, 254, 111, 130, 0, 27, 55, 94, 251, 78, 70, 28, 63, 170, 14, 92, 159, 199, 218, 131, 89, 124, 242, 87, 184, 158, 222, 48, 208, 246, 49, 78, 33, 143, 251, 211, 195, 1, 239, 206, 47, 37, 12, 65, 56, 142, 238, 112, 172, 107, 188, 139, 122, 64, 54, 11, 71, 220, 255, 223, 63, 27, 206, 129, 204, 56, 32, 34, 63, 84, 78, 245, 199, 48, 143, 181, 186, 45, 94, 191, 157, 19, 58, 22, 214, 244, 209, 106, 82, 68, 225, 65, 134, 253, 7, 230, 159, 233, 58, 48, 173, 136, 28, 73, 116, 245, 37, 179, 20, 16, 121, 42, 124, 171, 214, 109, 244, 117, 149, 244, 19, 222, 121, 26, 175, 115, 188, 213, 190, 142, 190, 137, 188, 22, 30, 127, 227, 46, 147, 128, 195, 230, 207, 76, 47, 182, 172, 171, 47, 202, 172, 37, 207, 93, 0, 226, 20, 79, 121, 103, 129, 196, 103, 57, 170, 91, 124, 22, 133, 69, 231, 34, 99, 249, 99, 245, 21, 47, 249, 194, 148, 91, 201, 104, 162, 167, 171, 169, 200, 67, 160, 91, 170, 144, 92, 240, 71, 243, 255, 137, 169, 179, 92, 6, 147, 91, 73, 177, 72, 111, 16, 111, 118, 153, 5, 234, 238, 60, 164, 140, 122, 131, 86, 27, 105, 140, 212, 106, 180, 235, 45, 89, 148, 161, 73, 168, 12, 124, 72, 110, 3, 202, 247, 1, 195, 183, 79, 200, 56, 41, 46, 175, 35, 54, 91, 28, 179, 104, 109, 10, 227, 252, 211, 182, 118, 100, 165, 25, 202, 101, 64, 113, 202, 103, 96, 7, 95, 107, 242, 203, 184, 239, 190, 152, 109, 149, 84, 64, 60, 40, 238, 30, 109, 0, 116, 45, 47, 101, 236, 170, 39, 210, 219, 139, 79, 44, 240, 215, 198, 3, 143, 128, 218, 121, 130, 34, 207, 138, 224, 129, 35, 49, 123, 222, 56, 203, 110, 218, 250, 142, 70, 50, 213, 139, 49, 236, 74, 8, 16, 149, 157, 143, 154, 131, 133, 193, 103, 225, 241, 251, 85, 187, 170, 195, 92, 119, 91, 90, 114, 191, 215, 79, 177, 109, 161, 214, 43, 32, 31, 126, 74, 214, 160, 48, 11, 48, 99, 10, 195, 4, 179, 82, 237, 188, 194, 136, 79, 41, 90, 190, 
18, 166, 54, 139, 93, 16, 81, 32, 193, 142, 162, 93, 75, 48, 182, 94, 77, 73, 114, 219, 229, 241, 49, 121, 253, 145, 102, 200, 227, 179, 51, 251, 157, 77, 121, 243, 54, 157, 135, 197, 231, 133, 126, 153, 118, 114, 183, 64, 205, 88, 218, 90, 34, 200, 234, 176, 196, 218, 125, 108, 214, 161, 81, 194, 225, 32, 96, 180, 3, 163, 94, 54, 146, 115, 130, 93, 71, 16, 23, 161, 121, 71, 68, 39, 31, 194, 141, 241, 24, 254, 217, 189, 66, 148, 202, 14, 96, 226, 101, 238, 131, 116, 38, 160, 251, 98, 96, 136, 195, 178, 88, 217, 18, 38, 221, 81, 66, 193, 24, 204, 170, 151, 86, 193, 225, 236, 134, 71, 167, 83, 205, 94, 160, 143, 93, 187, 210, 228, 215, 55, 209, 239, 48, 203, 125, 101, 173, 8, 92, 81, 54, 74, 157, 213, 69, 85, 63, 221, 158, 169, 58, 171, 104, 60, 148, 243, 74, 161, 48, 65, 154, 56, 186, 220, 192, 48, 15, 74, 70, 45, 255, 157, 237, 130, 36, 247, 202, 27, 0, 169, 76, 76, 16, 199, 57, 14, 206, 47, 182, 45, 109, 245, 50, 25, 61, 200, 240, 198, 85, 246, 173, 109, 188, 66, 53, 47, 61, 48, 18, 174, 185, 77, 43, 89, 172, 20, 85, 112, 129, 220, 29, 23, 103, 202, 202, 202, 202, 202, 202, 202, 202, 218, 141, 141, 15, 206, 141, 141, 81, 81, 81, 81, 81, 81, 81, 81, 81, 81, 81, 81, 81, 141, 15, 141, 141, 141, 206, 141, 206, 141, 15, 81, 81, 81, 15, 141, 171, 145, 63, 141, 206, 195, 134, 207, 9, 141, 141, 141, 87, 101, 60, 163, 213, 58, 17, 206, 141, 129, 8, 195, 134, 207, 9, 143, 92, 248, 19, 181, 253, 45, 179, 118, 108, 252, 81, 102, 204, 141, 141, 141, 141, 81, 81, 81, 81, 141, 141, 141, 206, 200, 193, 141, 141, 141, 154, 141, 141, 206, 206, 206, 141, 141, 141, 141, 141, 141, 206, 141, 141, 81, 81, 81, 81, 81, 81, 81, 81, 81, 141, 141, 141, 15, 206, 180, 154, 94, 184, 141, 206, 141, 81, 81, 81, 81, 81, 81, 81, 141, 81, 81, 174, 231, 156, 81, 81, 81, 141, 141, 141, 15, 141, 206, 206, 206, 141, 141, 141, 141, 81, 81, 81, 81, 206, 141, 141, 206, 141, 141, 141, 141, 206, 141, 141, 206, 34, 215, 151, 182, 29, 237, 141, 141, 141, 206, 81, 81, 81, 81, 81, 81, 81, 206, 206, 141, 141, 
141, 141, 141, 141, 81, 81, 81, 81, 206, 141, 81, 81, 81, 206, 206, 206, 141, 141, 81, 81, 81, 81, 206, 81, 81, 81, 81, 141, 141, 141, 141, 141, 141, 141, 141, 141, 15, 206, 141, 81, 81, 81, 81, 141, 141, 206, 141, 141, 141, 206, 141, 141, 81, 81, 81, 81, 141, 81, 141, 141, 81, 81, 81, 81, 81, 81, 141, 141, 81, 81, 81, 81, 81, 141, 206, 81, 81, 81, 81, 81, 81, 141, 141, 141, 141, 81, 141, 81, 81, 141, 206, 206, 14, 233, 141, 141, 141, 141, 81, 174, 231, 141, 206, 206, 141, 206, 81, 81, 81, 81, 141, 81, 81, 81, 81, 81, 81, 81, 206, 141, 81, 81, 81, 81, 81, 81, 81, 81, 81, 81, 141, 141, 15, 141, 141, 206, 141, 141, 15, 141, 141, 81, 81, 81, 81, 81, 141, 206, 81, 141, 141, 141, 141, 141, 141, 206, 141, 141, 141, 141, 141, 141, 81, 81, 81, 81, 206, 81, 81, 81, 81, 81, 81, 81, 81, 81, 206, 141, 141, 81, 81, 81, 81, 81, 81, 81, 81, 81, 81, 81, 81, 81, 141, 206, 81, 81, 81, 81, 81, 81, 81, 81, 141, 206, 141, 141, 141, 81, 206, 15, 206, 206, 141, 81, 81, 81, 206, 206, 141, 141, 141, 81, 81, 81, 81, 141, 81, 81, 81, 81, 156, 81, 81, 206, 141, 141, 141, 141, 141, 206, 81, 81, 81, 81, 81, 81, 81, 81, 81, 81, 81, 141, 141, 81, 141, 81, 81, 81, 141, 206, 141, 141, 206, 141, 81, 141, 141, 155, 15, 141, 206, 141, 141, 81, 206, 206, 141, 141, 141, 141, 141, 141, 81, 141, 141, 141, 81, 81, 81, 141, 141, 141, 81, 81, 81, 81, 81, 81, 141, 81, 81, 81, 81, 81, 141, 141, 141, 206, 206, 81, 81, 81, 206, 206, 206, 14, 141, 206, 206, 141, 141, 141, 141, 81, 81, 81, 81, 206, 81, 81, 81, 81, 81, 81, 81, 141, 81, 81, 141, 206, 141, 141, 141, 141, 141, 141, 141, 206, 141, 188, 102, 121, 141, 141, 141, 141, 81, 81, 81, 81, 141, 141, 206, 141, 141, 141, 141, 154, 141, 141, 141, 253, 45, 179, 118, 108, 81, 81, 81, 141, 141, 141, 81, 81, 81, 81, 206, 81, 81, 81, 81, 81, 81, 81, 141, 34, 215, 151, 182, 29, 237, 141, 141, 81, 81, 81, 81, 81, 81, 141, 206, 206, 141, 141, 141, 206, 141, 81, 141, 206, 141, 141, 206, 141, 141, 15, 206, 195, 134, 207, 9, 141, 141, 141, 141, 141, 141, 15, 206, 141, 141, 
81, 81, 81, 81, 81, 81, 141, 206, 14, 141, 206, 206, 141, 141, 141, 206, 141, 141, 141, 141, 81, 81, 81, 141, 15, 141, 141, 81, 81, 141, 141, 141, 141, 141, 141, 141, 206, 141, 141, 81, 206, 206, 141, 141, 141, 141, 141, 141, 141, 141, 81, 81, 81, 81, 81, 81, 81, 81, 141, 206, 141, 141, 141, 81, 206, 15, 206, 206, 141, 81, 141, 81, 81, 81, 141, 71, 52, 224, 141, 206, 206, 141, 141, 141, 141, 206, 141, 141, 141, 141, 81, 81, 81, 81, 206, 81, 81, 81, 81, 141, 141, 81, 81, 81, 81, 81, 81, 141, 141, 141, 141, 141, 81, 81, 81, 81, 141, 34, 215, 151, 182, 29, 141, 141, 141, 141, 141, 141, 141, 141, 206, 206, 206, 141, 141, 81, 81, 81, 81, 206, 81, 81, 81, 81, 141, 206, 145, 48, 188, 102, 179, 125, 86, 50, 141, 206, 206, 141, 80, 141, 141, 141, 141, 81, 81, 206, 81, 206, 206, 206, 141, 141, 141, 206, 141, 81, 141, 206, 15, 141, 206, 141, 141, 141, 141, 141, 206, 81, 81, 81, 206, 141, 81, 81, 81, 81, 141, 141, 141, 206, 141, 206, 141, 141, 141, 81, 141, 141, 141, 206, 206, 81, 81, 81, 206, 141, 141, 141, 87, 141, 141, 141, 141, 141, 81, 81, 81, 81, 81, 81, 141, 141, 141, 141, 206, 206, 141, 141, 22, 108, 141, 81, 81, 206, 219, 206, 141, 141, 141, 81, 81, 81, 81, 141, 206, 141, 141, 141, 141, 141, 206, 141, 141, 141, 141, 141, 81, 81, 81, 81, 81, 81, 206, 80, 141, 141, 141, 154, 141, 141, 141, 141, 141, 206, 108, 141, 81, 81, 81, 81, 81, 141, 141, 141, 81, 81, 81, 81, 141, 141, 15, 141, 141, 206, 141, 141, 15, 141, 141, 141, 210, 141, 146, 205, 141, 141, 141, 81, 81, 81, 81, 81, 81, 141, 141, 141, 141, 206, 141, 141, 141, 141, 81, 81, 81, 81, 141, 141, 81, 26, 2, 74, 22, 141, 141, 141, 141, 206, 141, 141, 141, 206, 141, 81, 81, 81, 206, 81, 141, 141, 81, 81, 81, 81, 2, 74, 22, 141, 141, 141, 141, 141, 141, 141, 81, 81, 81, 81, 206, 141, 141, 206, 141, 206, 206, 141, 141, 206, 141, 141, 141, 141, 15, 206, 141, 141, 206, 195, 134, 207, 81, 81, 81, 81, 81, 141, 141, 141, 80, 141, 141, 141, 141, 141, 141, 15, 206, 141, 81, 81, 81, 141, 141, 141, 141, 141, 15, 141, 206, 141, 
141, 206, 141, 141, 81, 141, 141, 81, 15, 141, 141, 81, 81, 141, 141, 141, 206, 81, 141, 141, 206, 141, 141, 141, 81, 81, 81, 81, 81, 141, 141, 141, 141, 80, 141, 81, 81, 141, 141, 81, 26, 81, 81, 81, 81, 81, 81, 206, 206, 15, 141, 206, 206, 206, 141, 141, 141, 141, 81, 141, 141, 81, 81, 81, 206, 141, 141, 141, 141, 141, 15, 141, 141, 141, 206, 141, 141, 141, 141, 141, 141, 141, 141, 206, 141, 141, 141, 141, 141, 15, 206, 141, 141, 141, 141, 141, 141, 141, 141, 141, 206, 206, 206, 141, 206, 206, 141, 206, 206, 141, 141, 15, 206, 141, 141, 141, 141, 141, 141, 141, 141, 141, 141, 141, 141, 206, 81, 141, 141, 206, 81, 141, 206, 15, 206, 141, 141, 141, 141, 141, 206, 141, 141, 141, 81, 81, 81, 206, 141, 141, 141, 141, 141, 141, 141, 206, 206, 141, 145, 141, 206, 141, 141, 141, 141, 141, 141, 141, 15, 141, 141, 141, 15, 141, 141, 141, 206, 15, 141, 15, 141, 141, 81, 206, 206, 141, 141, 206, 206, 141, 141, 141, 141, 141, 141, 141, 81, 81, 81, 81, 81, 141, 71, 52, 206, 141, 141, 141, 141, 141, 141, 141, 141, 141, 141, 141, 141, 15, 141, 206, 141, 15, 141, 206, 15, 141, 145, 141, 206, 141, 15, 206, 206, 141, 141, 206, 206, 15, 141, 90, 85, 85, 172, 177, 85, 85, 3, 85, 85, 172, 85, 172, 85, 172, 95, 3, 85, 90, 85, 85, 85, 85, 85, 90, 3, 3, 85, 172, 85, 90, 85, 172, 85, 3, 90, 177, 172, 172, 85, 172, 172, 3, 85, 172, 3, 3, 85, 85, 85, 3, 172, 85, 85, 85, 85, 85, 3, 172, 85, 85, 85, 90, 8, 85, 177, 172, 8, 3, 85, 85, 85, 177, 85, 90, 90, 3, 177, 13, 90, 172, 85, 85, 177, 85, 85, 85, 85, 172, 85, 85, 85, 85, 85, 3, 85, 85, 85, 85, 172, 172, 3, 85, 177, 90, 85, 3, 172, 3, 85, 90, 90, 85, 172, 85, 172, 3, 85, 172, 85, 85, 85, 85, 85, 8, 172, 177, 85, 172, 90, 172, 172, 177, 172, 172, 85, 85, 172, 90, 85, 85, 90, 172, 90, 90, 172, 172, 85, 3, 85, 172, 172, 3, 172, 3, 172, 3, 172, 182, 3, 177, 85, 177, 85, 90, 182, 182, 206, 81, 206, 206, 206, 141, 141, 81, 81, 141, 141, 141, 206, 81, 141, 141, 206, 141, 141, 141, 81, 81, 81, 81, 81, 141, 141, 141, 141, 80, 141, 81, 149, 149, 149, 
149, 149, 149, 149, 149, 161, 186, 57, 3, 8, 2, 14, 1, 109, 109, 109, 109, 109, 109, 96, 109, 109, 109, 109, 96, 96, 57, 109, 1, 4, 109, 109, 109, 15, 5, 2, 7, 244, 41, 184, 212, 126, 130, 250, 10, 3, 5, 3, 1, 8, 109, 96, 96, 140, 208, 184, 95, 96, 151, 195, 120, 127, 85, 55, 131, 176, 253, 109, 96, 2, 10, 4, 127, 109, 55, 223, 93, 142, 4, 15, 0, 15, 73, 70, 54, 110, 125, 63, 242, 234, 245, 9, 8, 15, 3, 151, 130, 38, 233, 9, 6, 10, 3, 12, 6, 13, 161, 186, 57, 96, 109, 109, 109, 96, 4, 96, 96, 109, 96, 96, 109, 96, 5, 13, 13, 5, 96, 83, 109, 109, 55, 223, 13, 109, 109, 96, 12, 15, 14, 5, 96, 109, 109, 109, 109, 96, 3, 225, 47, 211, 140, 171, 5, 14, 1, 109, 109, 109, 109, 15, 3, 168, 91, 221, 159, 130, 208, 13, 242, 234, 245, 9, 8, 15, 3, 109, 109, 109, 109, 4, 9, 109, 109, 109, 109, 96, 3, 225, 57, 96, 109, 109, 109, 109, 109, 1, 83, 245, 6, 5, 4, 109, 7, 85, 55, 131, 176, 253, 109, 14, 1, 15, 13, 109, 109, 96, 11, 93, 142, 43, 51, 204, 240, 202, 11, 41, 11, 34, 109, 96, 109, 83, 96, 109, 96, 83, 13, 109, 109, 96, 109, 109, 109, 109, 4, 109, 8, 0, 23, 45, 150, 1, 5, 12, 11, 9, 15, 1, 91, 83, 109, 109, 109, 109, 96, 96, 96, 125, 14, 0, 6, 9, 11, 12, 130, 168, 91, 221, 159, 130, 208, 13, 161, 186, 57, 9, 109, 14, 15, 14, 10, 7, 12, 2, 6, 6, 0, 248, 62, 120, 243, 12, 13, 10, 109, 0, 1, 4, 86, 59, 105, 109, 7, 85, 55, 131, 176, 253, 109, 96, 151, 195, 120, 127, 109, 13, 245, 31, 120, 47, 226, 174, 94, 252, 238, 138, 30, 248, 62, 120, 5, 28, 187, 109, 9, 0, 109, 109, 184, 212, 192, 11, 4, 109, 109, 109, 12, 19, 19, 19, 7, 0, 12, 6, 10, 10, 5, 11, 6, 13, 161, 186, 57, 96, 109, 109, 12, 6, 113, 218, 96, 109, 109, 109, 109, 109, 109, 196, 149, 13, 161, 186, 170, 242, 4, 5, 3, 167, 70, 35, 159, 172, 89, 28, 3, 6, 96, 96, 109, 109, 96, 96, 109, 96, 14, 96, 109, 109, 109, 109, 15, 13, 4, 2, 1, 19, 130, 168, 91, 221, 159, 130, 208, 13, 161, 186, 5, 96, 5, 13, 100, 217, 231, 12, 234, 245, 238, 130, 109, 109, 109, 80, 77, 233, 208, 13, 161, 186, 57, 14, 19, 7, 0, 12, 6, 10, 10, 
5, 11, 6, 13, 161, 186, 57, 96, 109, 109, 12, 6, 113, 218, 96, 109, 109, 109, 109, 109, 109, 196, 149, 13, 161, 186, 2, 96, 83, 109, 109, 109, 131, 176, 253, 109, 109, 109, 109, 109, 70, 200, 47, 13, 14, 63, 4, 0, 109, 109, 83, 70, 109, 109, 109, 109, 109, 109, 109, 109, 100, 210, 174, 49, 32, 55, 223, 93, 142, 43, 4, 109, 109, 109, 100, 210, 174, 49, 32, 15, 13, 3, 11, 1, 15, 14, 8, 85, 55, 131, 176, 253, 3, 77, 31, 43, 62, 108, 2, 196, 30, 109, 14, 237, 167, 70, 35, 159, 172, 89, 28, 187, 109, 11, 3, 12, 8, 69, 109, 109, 96, 109, 70, 109, 109, 32, 245, 31, 120, 47, 226, 149, 13, 161, 186, 57, 3, 8, 70, 54, 110, 125, 6, 7, 8, 6, 109, 96, 109, 109, 5, 5, 13, 96, 151, 195, 120, 109, 109, 109, 83, 70, 109, 109, 109, 109, 109, 109, 109, 109, 100, 210, 174, 49, 32, 55, 223, 93, 142, 43, 51, 204, 240, 202, 5, 9, 109, 83, 96, 9, 2, 109, 109, 15, 3, 168, 91, 221, 11, 4, 109, 7, 85, 55, 131, 176, 253, 109, 14, 5, 13, 100, 217, 231, 12, 234, 245, 238, 130, 109, 109, 109, 83, 70, 109, 109, 109, 109, 109, 109, 83, 109, 109, 109, 83, 109, 109, 109, 96, 109, 72, 222, 151, 130, 38, 233, 107, 2, 8, 9, 4, 109, 74, 126, 109, 176, 4, 0, 9, 129, 40, 7, 6, 2, 13, 62, 108, 56, 7, 85, 55, 131, 109, 83, 70, 186, 60, 15, 131, 109, 83, 70, 186, 60, 91, 1, 109, 109, 109, 109, 9, 12, 5, 207, 19, 19, 62, 108, 56, 7, 47, 226, 174, 109, 109, 109, 109, 109, 109, 109, 196, 0, 15, 85, 55, 131, 176, 253, 109, 109, 109, 109, 13, 4, 7, 108, 7, 68, 87, 109, 12, 3, 11, 9, 14, 9, 0, 9, 54, 110, 125, 6, 6, 10, 89, 28, 187, 109, 109, 109, 7, 6, 109, 109, 196, 0, 111, 156, 241, 8, 0, 161, 186, 32, 15, 13, 3, 11, 1, 15, 14, 8, 85, 2, 3, 11, 253, 3, 77, 31, 43, 8, 191, 68, 65, 159, 247, 26, 109, 109, 13, 113, 218, 7, 1, 10, 109, 7, 85, 55, 131, 176, 253, 109, 14, 5, 13, 100, 217, 2, 107, 2, 8, 9, 4, 109, 74, 126, 109, 176, 4, 0, 9, 129, 5, 8, 6, 2, 13, 62, 108, 56, 7, 85, 55, 131, 109, 83, 70, 60, 15, 131, 109, 83, 6, 14, 31, 120, 47, 226, 174, 13, 109, 109, 96, 12, 12, 6, 6, 4, 0, 9, 129, 5, 8, 6, 0, 184, 
95, 96, 151, 195, 109, 96, 8, 6, 2, 13, 62, 12, 109, 109, 196, 149, 13, 161, 186, 57, 3, 12, 10, 89, 28, 6, 6, 1, 15, 47, 226, 174, 13, 43, 62, 108, 2, 196, 3, 12, 3, 5, 8, 14, 14, 12, 2, 11, 15, 9, 8, 109, 109, 196, 0, 111, 156, 241, 8, 0, 161, 186, 109, 196, 149, 1, 219, 64, 226, 8, 85, 55, 131, 176, 13, 6, 6, 109, 109, 109, 109, 109, 96, 109, 6, 10, 161, 186, 57, 9, 109, 109, 247, 94, 14, 15, 109, 109, 109, 44, 109, 109, 1, 109, 109, 109, 109, 109, 215, 239, 208, 12, 4, 186, 226, 18, 37, 13, 14, 11, 5, 6, 6, 4, 0, 9, 4, 7, 5, 6, 174, 49, 32, 55, 223, 93, 142, 43, 51, 3, 3, 226, 174, 109, 109, 109, 109, 109, 109, 109, 196, 109, 109, 109, 109, 196, 149, 13, 161, 186, 170, 242, 4, 5, 3, 167, 70, 35, 159, 172, 12, 234, 245, 238, 130, 12, 12, 19, 19, 19, 7, 13, 15, 186, 109, 196, 149, 1, 219, 4, 3, 1, 12, 13, 109, 109, 12, 6, 113, 218, 96, 109, 109, 109, 13, 15, 9, 196, 149, 13, 161, 10, 11, 74, 126, 176, 176, 237, 167, 68, 55, 39, 208, 109, 54, 110, 125, 6, 4, 85, 55, 131, 176, 253, 161, 186, 57, 14, 19, 12, 128, 190, 238, 217, 3, 9, 9, 96, 11, 93, 142, 43, 51, 204, 240, 202, 12, 9, 9, 109, 109, 109, 13, 3, 1, 11, 96, 12, 12, 6, 6, 125, 63, 4, 0, 12, 5, 13, 161, 186, 57, 9, 109, 109, 247, 1, 10, 3, 99, 121, 35, 96, 96, 109, 109, 109, 109, 96, 96, 96, 125, 8, 8, 0, 9, 6, 12, 55, 223, 93, 142, 43, 51, 204, 240, 202, 56, 32, 245, 8, 8, 14, 1, 109, 109, 1, 14, 19, 7, 0, 12, 6, 10, 10, 5, 11, 11, 0, 9, 9, 0, 9, 54, 110, 125, 4, 109, 7, 85, 55, 131, 176, 8, 14, 1, 9, 5, 19, 19, 19, 19, 19, 130, 2, 13, 3, 13, 9, 0, 12, 5, 13, 161, 186, 9, 109, 109, 247, 10, 8, 3, 225, 57, 96, 109, 3, 14, 12, 13, 14, 237, 167, 70, 35, 159, 172, 1, 238, 217, 13, 9, 5, 109, 109, 96, 96, 109, 109, 109, 109, 196, 149, 15, 6, 109, 109, 109, 10, 0, 1, 13, 6, 12, 55, 223, 93, 96, 12, 12, 6, 6, 4, 0, 9, 129, 5, 109, 109, 109, 44, 10, 7, 13, 186, 57, 9, 109, 109, 247, 94, 8, 127, 109, 0, 13, 34, 192, 81, 207, 19, 109, 109, 96, 109, 109, 109, 109, 96, 96, 57, 109, 1, 9, 3, 7, 2, 15, 12, 109, 109, 15, 
3, 168, 91, 221, 159, 130, 38, 233, 107, 2, 8, 9, 4, 109, 74, 126, 109, 176, 4, 0, 9, 129, 40, 2, 0, 14, 12, 109, 96, 8, 6, 2, 13, 62, 12, 12, 6, 109, 245, 238, 6, 109, 83, 70, 186, 60, 96, 125, 191, 68, 65, 159, 247, 26, 12, 8, 11, 11, 168, 96, 109, 96, 96, 15, 10, 1, 14, 14, 3, 15, 109, 109, 196, 0, 15, 85, 55, 131, 176, 0, 9, 129, 40, 2, 0, 14, 12, 109, 96, 8, 6, 2, 64, 64, 64, 64, 64, 64, 64, 64, 75, 75, 75, 75, 75, 75, 75, 75, 176, 75, 10, 169, 197, 183, 135, 183, 114, 176, 169, 107, 75, 75, 75, 93, 24, 86, 75, 75, 114, 38, 197, 75, 75, 75, 75, 190, 211, 75, 75, 75, 75, 169, 107, 169, 3, 75, 75, 75, 197, 86, 190, 169, 169, 183, 100, 93, 107, 75, 75, 75, 75, 75, 75, 135, 121, 24, 100, 128, 24, 93, 176, 75, 75, 75, 3, 3, 3, 176, 190, 176, 3, 176, 176, 75, 3, 86, 75, 75, 38, 10, 17, 204, 86, 114, 10, 75, 75, 225, 38, 75, 75, 75, 75, 232, 86, 93, 75, 75, 75, 75, 75, 75, 86, 169, 93, 169, 183, 225, 75, 75, 190, 3, 3, 169, 183, 169, 86, 148, 14, 169, 86, 100, 128, 86, 24, 75, 86, 38, 75, 75, 75, 75, 169, 3, 190, 107, 169, 86, 75, 135, 86, 17, 10, 10, 3, 190, 135, 75, 75, 75, 75, 10, 169, 114, 86, 10, 121, 86, 17, 183, 93, 86, 10, 3, 169, 169, 169, 75, 75, 75, 17, 114, 10, 93, 176, 100, 204, 31, 86, 169, 190, 75, 75, 75, 75, 75, 10, 169, 176, 86, 100, 93, 17, 75, 75, 75, 75, 197, 176, 75, 75, 75, 75, 121, 75, 75, 75, 190, 10, 197, 197, 100, 197, 3, 183, 86, 176, 75, 10, 190, 75, 75, 75, 3, 93, 204, 197, 100, 3, 93, 10, 24, 86, 169, 100, 107, 169, 169, 24, 169, 100, 100, 107, 10, 24, 75, 75, 75, 86, 3, 86, 86, 86, 190, 176, 197, 93, 169, 17, 197, 169, 86, 75, 75, 75, 197, 38, 86, 225, 86, 169, 52, 169, 211, 17, 169, 93, 107, 31, 3, 169, 75, 156, 10, 17, 93, 17, 3, 183, 3, 176, 100, 204, 121, 197, 169, 197, 86, 75, 75, 75, 75, 75, 86, 114, 183, 176, 75, 75, 75, 75, 169, 75, 75, 75, 75, 75, 17, 59, 24, 114, 10, 31, 197, 75, 75, 86, 86, 75, 100, 75, 75, 86, 176, 10, 176, 169, 169, 10, 75, 75, 75, 75, 197, 86, 107, 10, 86, 132, 75, 4, 116, 45, 165, 186, 167, 59, 235, 111, 
112, 20, 133, 75, 165, 47, 203, 38, 112, 107, 5, 194, 138, 29, 152, 30, 229, 92, 115, 59, 249, 59, 102, 90, 75, 165, 16, 75, 209, 190, 17, 9, 126, 250, 150, 55, 110, 94, 180, 147, 238, 132, 68, 243, 120, 73, 86, 84, 68, 208, 31, 199, 140, 52, 56, 185, 100, 74, 67, 125, 19, 172, 25, 195, 74, 149, 48, 143, 64, 86, 159, 24, 232, 236, 68, 162, 46, 140, 108, 144, 39, 6, 187, 202, 112, 72, 229, 22, 89, 7, 18, 222, 191, 207, 62, 116, 0, 214, 87, 78, 244, 165, 245, 43, 136, 69, 216, 97, 245, 166, 213, 163, 176, 156, 109, 248, 53, 94, 29, 64, 9, 241, 232, 195, 26, 235, 170, 43, 49, 234, 253, 147, 32, 82, 123, 202, 78, 50, 240, 199, 191, 205, 158, 75, 62, 101, 238, 53, 198, 48, 175, 77, 113, 103, 37, 120, 110, 74, 84, 95, 206, 206, 0, 210, 30, 155, 74, 197, 79, 52, 16, 23, 250, 39, 102, 133, 124, 7, 182, 234, 205, 147, 160, 231, 155, 56, 250, 230, 25, 64, 138, 78, 129, 182, 135, 45, 30, 55, 74, 146, 205, 90, 160, 141, 160, 35, 139, 83, 206, 90, 32, 115, 206, 224, 0, 180, 147, 37, 233, 180, 161, 137, 235, 62, 151, 174, 9, 176, 69, 237, 191, 129, 37, 137, 219, 172, 192, 209, 47, 138, 240, 145, 85, 29, 231, 240, 243, 97, 19, 53, 217, 219, 29, 66, 42, 140, 89, 148, 82, 57, 91, 97, 146, 143, 15, 13, 72, 6, 133, 175, 39, 37, 130, 234, 103, 69, 200, 165, 0, 6, 101, 13, 99, 97, 115, 171, 197, 145, 40, 59, 76, 242, 144, 173, 73, 82, 69, 19, 159, 75, 254, 160, 70, 107, 98, 142, 195, 18, 52, 24, 93, 10, 99, 250, 195, 49, 220, 141, 98, 219, 186, 110, 254, 200, 192, 118, 207, 52, 156, 225, 90, 165, 144, 121, 9, 43, 184, 68, 126, 110, 63, 143, 206, 62, 94, 203, 242, 0, 59, 26, 174, 73, 74, 144, 240, 80, 20, 85, 217, 47, 167, 132, 235, 100, 111, 189, 244, 64, 68, 93, 159, 13, 217, 236, 194, 41, 234, 8, 4, 185, 222, 7, 34, 209, 55, 129, 125, 95, 19, 68, 249, 163, 50, 116, 29, 37, 200, 9, 64, 211, 124, 179, 246, 210, 124, 168, 54, 132, 177, 41, 173, 132, 150, 210, 9, 164, 53, 32, 221, 42, 218, 182, 34, 4, 166, 174, 11, 7, 2, 190, 55, 244, 181, 95, 109, 75, 91, 58, 242, 186, 126, 17, 22, 111, 
200, 53, 224, 4, 192, 178, 175, 103, 129, 40, 199, 231, 150, 69, 233, 173, 37, 137, 219, 103, 38, 158, 91, 8, 251, 143, 166, 78, 132, 163, 103, 97, 219, 0, 130, 246, 251, 138, 230, 198, 223, 26, 98, 141, 25, 23, 202, 246, 53, 126, 125, 193, 43, 130, 172, 110, 75, 117, 240, 13, 118, 218, 197, 46, 166, 216, 139, 3, 92, 48, 18, 16, 231, 52, 211, 127, 165, 238, 203, 170, 84, 199, 33, 61, 166, 1, 43, 127, 206, 136, 224, 66, 61, 108, 78, 223, 32, 244, 187, 70, 18, 39, 89, 180, 163, 201, 43, 171, 3, 109, 151, 169, 52, 67, 61, 144, 31, 195, 234, 1, 19, 136, 57, 56, 42, 166, 236, 11, 35, 55, 251, 73, 44, 211, 204, 43, 151, 51, 73, 117, 125, 255, 54, 212, 226, 132, 179, 195, 46, 109, 108, 115, 145, 180, 137, 228, 225, 12, 83, 72, 166, 189, 203, 53, 4, 201, 23, 32, 18, 118, 249, 28, 9, 141, 232, 86, 139, 241, 216, 174, 40, 131, 12, 31, 158, 148, 250, 4, 220, 43, 162, 42, 108, 186, 212, 79, 107, 69, 180, 203, 60, 21, 238, 52, 88, 6, 178, 53, 191, 206, 30, 62, 197, 51, 207, 128, 180, 189, 13, 232, 27, 209, 42, 205, 236, 242, 164, 3, 1, 149, 183, 116, 25, 207, 42, 152, 253, 139, 198, 138, 241, 101, 149, 48, 107, 203, 20, 243, 157, 180, 250, 61, 84, 148, 225, 217, 195, 221, 143, 157, 28, 109, 42, 209, 239, 117, 36, 223, 15, 184, 200, 128, 243, 94, 53, 244, 163, 106, 11, 204, 203, 33, 0, 118, 54, 146, 189, 158, 206, 48, 0, 43, 121, 79, 214, 17, 102, 163, 163, 242, 181, 226, 13, 22, 182, 74, 237, 167, 93, 55, 82, 74, 111, 112, 84, 217, 183, 102, 173, 109, 58, 15, 14, 79, 243, 225, 21, 249, 69, 52, 31, 202, 118, 2, 165, 80, 89, 24, 82, 17, 15, 226, 40, 151, 6, 50, 183, 146, 110, 7, 180, 106, 102, 188, 163, 121, 94, 14, 146, 36, 144, 144, 169, 142, 69, 238, 255, 41, 177, 35, 109, 113, 30, 17, 234, 61, 114, 47, 69, 74, 240, 181, 21, 157, 45, 181, 140, 15, 51, 63, 61, 105, 211, 31, 81, 24, 114, 71, 5, 32, 12, 64, 8, 231, 137, 51, 56, 103, 249, 139, 70, 79, 2, 230, 215, 237, 122, 200, 150, 108, 43, 103, 236, 51, 26, 249, 106, 126, 22, 177, 249, 162, 47, 244, 12, 183, 74, 217, 48, 40, 
72, 5, 208, 242, 201, 130, 96, 187, 60, 156, 142, 154, 149, 190, 105, 20, 221, 211, 226, 235, 2, 154, 215, 166, 61, 36, 142, 141, 139, 224, 184, 59, 228, 205, 211, 117, 147, 57, 137, 16, 157, 149, 186, 255, 67, 88, 159, 88, 21, 242, 217, 56, 51, 69, 221, 238, 58, 139, 238, 84, 99, 123, 35, 162, 57, 131, 174, 127, 5, 98, 215, 60, 254, 44, 38, 31, 43, 62, 108, 56, 7, 85, 55, 102, 176, 33, 77, 24, 126, 54, 246, 133, 73, 40, 112, 60, 247, 92, 53, 130, 250, 111, 102, 154, 15, 36, 8, 124, 172, 171, 124, 98, 77, 46, 16, 185, 31, 16, 74, 241, 15, 140, 40, 230, 219, 97, 162, 57, 204, 71, 224, 110, 90, 79, 242, 7, 194, 154, 239, 38, 251, 22, 38, 246, 209, 84, 49, 141, 229, 83, 83, 252, 118, 66, 133, 94, 143, 88, 120, 115, 37, 114, 59, 76, 195, 132, 79, 173, 158, 252, 26, 155, 181, 163, 45, 199, 8, 237, 136, 90, 214, 159, 17, 66, 4, 107, 87, 136, 86, 166, 193, 10, 119, 129, 229, 170, 31, 107, 170, 29, 199, 92, 111, 18, 172, 5, 124, 25, 40, 167, 140, 185, 4, 186, 87, 153, 177, 109, 237, 114, 2, 9, 23, 118, 33, 141, 91, 120, 207, 210, 96, 220, 84, 156, 161, 241, 28, 127, 10, 83, 129, 30, 103, 127, 140, 240, 248, 22, 74, 57, 176, 207, 80, 56, 129, 152, 111, 160, 5, 9, 2, 82, 82, 82, 4, 82, 82, 233, 82, 82, 82, 82, 82, 82, 82, 82, 82, 82, 82, 82, 82, 82, 82, 82, 82, 82, 116, 67, 67, 18, 18, 18, 56, 225, 67, 165, 67, 230, 18, 225, 18, 18, 154, 225, 225, 105, 165, 67, 67, 67, 116, 34, 184, 13, 11, 9, 4, 1, 13, 14, 3, 11, 11, 11, 3, 4, 4, 3, 3, 4, 236, 11, 13, 5, 13, 8, 116, 7, 67, 225, 165, 116, 165, 18, 203, 18, 225, 18, 192, 18, 18, 225, 56, 225, 214, 165, 18, 214, 225, 18, 23, 225, 165, 143, 67, 18, 214, 154, 116, 225, 225, 225, 94, 214, 225, 165, 225, 56, 165, 225, 116, 7, 116, 105, 18, 105, 154, 10, 7, 6, 50, 8, 13, 15, 7, 11, 8, 0, 12, 10, 0, 39, 20, 1, 0, 14, 7, 3, 2, 10, 3, 236, 11, 13, 8, 15, 176, 227, 249, 225, 18, 56, 5, 7, 12, 168, 4, 5, 9, 14, 5, 80, 246, 56, 196, 82, 2, 4, 4, 11, 11, 106, 170, 11, 82, 146, 6, 13, 6, 82, 82, 82, 82, 82, 82, 12, 12, 0, 82, 82, 82, 82, 
82, 82, 3, 9, 11, 15, 82, 82, 4, 13, 3, 12, 1, 2, 4, 4, 10, 7, 12, 12, 0, 82, 82, 82, 13, 13, 18, 7, 18, 94, 225, 18, 165, 214, 105, 225, 214, 4, 76, 13, 15, 9, 141, 240, 251, 176, 4, 77, 5, 4, 14, 15, 10, 18, 67, 18, 214, 241, 116, 18, 116, 225, 18, 225, 56, 165, 7, 45, 67, 165, 143, 165, 225, 225, 18, 225, 116, 225, 225, 18, 225, 225, 214, 18, 225, 67, 225, 225, 67, 67, 225, 116, 18, 18, 225, 116, 225, 56, 116, 18, 225, 225, 225, 67, 214, 225, 165, 225, 116, 67, 225, 7, 225, 225, 94, 165, 116, 67, 45, 116, 45, 225, 154, 67, 11, 87, 127, 106, 9, 9, 10, 3, 135, 155, 4, 22, 11, 13, 5, 14, 4, 4, 11, 1, 9, 10, 3, 4, 4, 3, 3, 4, 5, 9, 10, 1, 13, 13, 10, 11, 13, 13, 7, 8, 12, 0, 8, 8, 3, 4, 2, 64, 1, 2, 4, 4, 55, 10, 3, 18, 67, 165, 165, 165, 165, 6, 12, 5, 1, 0, 5, 7, 184, 150, 15, 124, 10, 4, 4, 1, 10, 0, 4, 14, 158, 226, 1, 165, 67, 13, 8, 3, 13, 236, 7, 18, 18, 67, 67, 116, 83, 67, 252, 67, 67, 56, 67, 225, 67, 225, 18, 225, 18, 67, 18, 18, 67, 225, 116, 154, 214, 225, 165, 18, 23, 116, 203, 225, 67, 18, 45, 236, 7, 18, 18, 67, 67, 116, 83, 67, 252, 67, 67, 56, 67, 225, 67, 225, 18, 225, 18, 67, 18, 18, 67, 225, 116, 154, 214, 225, 165, 18, 23, 116, 203, 225, 67, 18, 45, 236, 7, 18, 18, 67, 67, 116, 83, 67, 252, 67, 67, 56, 67, 225, 67, 225, 18, 225, 18, 67, 18, 18, 67, 225, 116, 154, 214, 225, 165, 18, 23, 116, 203, 67, 165, 116, 203, 18, 67, 165, 18, 225, 165, 214, 18, 225, 56, 165, 67, 7, 225, 18, 45, 116, 18, 203, 225, 165, 165, 203, 225, 225, 116, 56, 18, 165, 214, 18, 214, 154, 18, 225, 7, 225, 18, 225, 225, 105, 18, 154, 116, 67, 18, 18, 18, 56, 225, 67, 165, 67, 230, 18, 225, 18, 18, 154, 225, 225, 105, 165, 67, 67, 67, 116, 34, 184, 13, 11, 9, 4, 1, 13, 14, 3, 11, 11, 11, 3, 4, 18, 116, 192, 67, 214, 225, 225, 67, 214, 165, 225, 18, 116, 18, 18, 105, 154, 225, 67, 7, 18, 214, 225, 7, 225, 214, 116, 7, 67, 143, 225, 18, 192, 18, 18, 225, 56, 225, 214, 165, 18, 214, 225, 18, 23, 225, 165, 143, 67, 18, 214, 154, 116, 225, 225, 225, 94, 214, 225, 165, 225, 56, 
165, 225, 116, 7, 116, 105, 18, 105, 154, 10, 7, 18, 225, 214, 154, 7, 225, 105, 116, 18, 105, 105, 7, 154, 165, 154, 225, 225, 3, 2, 10, 3, 236, 11, 13, 8, 15, 176, 227, 249, 225, 4, 12, 3, 243, 6, 1, 3, 7, 82, 181, 83, 64, 0, 12, 10, 82, 82, 82, 82, 233, 82, 82, 82, 4, 82, 82, 82, 82, 82, 3, 3, 64, 67, 67, 18, 18, 18, 56, 233, 82, 82, 82, 82, 82, 82, 82, 82, 82, 82, 82, 82, 82, 82, 241, 4, 181, 18, 56, 18, 225, 116, 56, 1, 13, 14, 3, 165, 105, 105, 225, 18, 102, 241, 102, 3, 3, 4, 236, 11, 13, 5, 13, 8, 8, 11, 11, 87, 127, 106, 9, 9, 10, 3, 135, 155, 4, 162, 181, 4, 236, 11, 13, 214, 225, 165, 18, 23, 116, 203, 225, 83, 181, 4, 11, 11, 3, 4, 18, 181, 83, 241, 102, 124, 116, 225, 241, 116, 67, 225, 18, 23, 143, 83, 102, 102, 102, 102, 102, 102, 102, 222, 181, 13, 10, 11, 13, 13, 7, 8, 12, 0, 8, 8, 3, 4, 2, 64, 1, 2, 4, 4, 11, 87, 127, 106, 9, 9, 4, 143, 143, 4, 162, 82, 3, 13, 82, 233, 143, 83, 2, 82, 82, 82, 4, 82, 82, 233, 82, 82, 82, 82, 82, 241, 67, 13, 8, 3, 13, 236, 11, 82, 82, 82, 82, 82, 116, 181, 83, 8, 13, 15, 7, 11, 8, 0, 82, 82, 82, 3, 13, 241, 116, 67, 225, 18, 23, 225, 3, 2, 10, 3, 236, 82, 3, 0, 13, 82, 82, 82, 82, 83, 187, 162, 7, 225, 225, 94, 165, 116, 67, 45, 116, 67, 56, 67, 67, 4, 4, 181, 83, 14, 3, 11, 11, 45, 102, 102, 4, 4, 162, 18, 18, 18, 56, 233, 82, 102, 225, 116, 116, 67, 56, 165, 116, 56, 18, 165, 116, 203, 18, 67, 165, 18, 225, 165, 214, 18, 225, 4, 241, 83, 102, 26, 83, 7, 225, 18, 56, 233, 82, 82, 82, 82, 82, 82, 64, 181, 18, 225, 165, 214, 18, 225, 82, 82, 82, 82, 82, 233, 82, 82, 16, 162, 151, 13, 236, 11, 82, 7, 67, 225, 225, 154, 7, 18, 116, 165, 105, 105, 225, 18, 67, 56, 214, 214, 214, 18, 18, 67, 7, 225, 18, 56, 241, 82, 82, 3, 3, 64, 67, 67, 18, 18, 18, 225, 67, 165, 67, 230, 18, 225, 18, 18, 102, 181, 4, 181, 143, 225, 18, 192, 18, 18, 225, 56, 225, 214, 165, 18, 214, 225, 12, 10, 82, 82, 82, 83, 3, 13, 82, 82, 82, 4, 116, 56, 18, 165, 116, 203, 18, 67, 165, 18, 225, 165, 214, 18, 225, 124, 162, 244, 64, 225, 143, 181, 11, 
8, 6, 1, 116, 18, 225, 18, 45, 225, 45, 56, 67, 225, 116, 18, 4, 252, 154, 225, 94, 225, 203, 45, 225, 252, 162, 18, 18, 67, 225, 116, 154, 214, 225, 165, 18, 82, 82, 82, 82, 82, 82, 82, 82, 82, 12, 12, 0, 12, 10, 82, 82, 82, 82, 233, 82, 82, 82, 82, 82, 82, 82, 162, 18, 116, 67, 18, 4, 165, 67, 230, 18, 225, 18, 18, 102, 181, 4, 181, 143, 225, 18, 192, 18, 18, 225, 56, 225, 214, 165, 18, 13, 14, 3, 11, 11, 11, 8, 6, 1, 116, 18, 225, 18, 45, 225, 45, 4, 244, 102, 77, 82, 82, 82, 82, 82, 82, 128, 233, 82, 82, 82, 64, 18, 18, 18, 9, 4, 1, 82, 181, 48, 116, 67, 45, 116, 45, 162, 181, 102, 162, 83, 162, 102, 4, 11, 3, 4, 18, 203, 222, 4, 1, 13, 14, 3, 11, 11, 11, 3, 4, 18, 18, 154, 225, 225, 105, 165, 67, 67, 67, 4, 102, 247, 181, 64, 83, 105, 64, 181, 225, 18, 225, 56, 165, 7, 45, 67, 165, 225, 225, 67, 214, 165, 165, 64, 225, 225, 67, 214, 225, 165, 225, 116, 102, 146, 102, 165, 214, 18, 225, 102, 56, 214, 214, 214, 18, 18, 67, 7, 102, 241, 4, 241, 225, 6, 1, 3, 7, 82, 181, 83, 102, 181, 244, 181, 4, 83, 8, 12, 0, 8, 8, 3, 4, 2, 64, 1, 2, 4, 4, 11, 87, 127, 162, 83, 181, 4, 56, 233, 82, 82, 82, 82, 82, 82, 18, 225, 56, 165, 7, 45, 67, 165, 225, 225, 67, 214, 165, 165, 64, 225, 225, 67, 214, 225, 165, 225, 116, 102, 146, 102, 165, 214, 18, 225, 102, 56, 214, 214, 214, 18, 18, 67, 7, 102, 241, 4, 241, 225, 6, 1, 3, 7, 82, 102, 181, 26, 102, 102, 124, 241, 162, 241, 4, 64, 181, 146, 102, 165, 214, 18, 181, 83, 102, 162, 151, 13, 82, 82, 162, 102, 222, 181, 4, 181, 13, 14, 3, 165, 102, 125, 125, 125, 125, 125, 125, 125, 125, 73, 247, 157, 32, 36, 114, 116, 73, 116, 30, 159, 157, 32, 114, 73, 71, 206, 200, 116, 116, 120, 116, 243, 251, 32, 114, 116, 30, 114, 157, 200, 157, 116, 159, 32, 157, 75, 114, 73, 116, 116, 116, 114, 157, 157, 71, 116, 32, 157, 202, 36, 36, 116, 30, 116, 206, 159, 30, 202, 157, 202, 116, 116, 116, 116, 71, 159, 157, 251, 116, 71, 32, 114, 114, 116, 116, 116, 243, 116, 247, 200, 30, 75, 71, 118, 116, 32, 157, 73, 32, 116, 114, 157, 253, 30, 116, 
116, 71, 116, 32, 157, 73, 116, 157, 71, 32, 71, 116, 116, 157, 245, 116, 116, 71, 116, 159, 114, 202, 116, 116, 251, 32, 116, 114, 206, 204, 71, 116, 71, 114, 114, 36, 116, 116, 116, 116, 202, 71, 159, 32, 116, 114, 157, 251, 32, 114, 157, 159, 116, 243, 116, 73, 73, 71, 116, 114, 157, 161, 157, 75, 122, 73, 71, 71, 116, 116, 71, 116, 71, 116, 157, 251, 32, 116, 245, 71, 116, 116, 116, 71, 71, 116, 116, 114, 114, 116, 243, 116, 202, 116, 73, 73, 116, 114, 251, 32, 157, 159, 202, 116, 116, 116, 116, 30, 116, 116, 200, 114, 157, 116, 243, 116, 73, 73, 71, 116, 71, 71, 157, 200, 71, 116, 32, 36, 73, 159, 116, 157, 200, 32, 36, 36, 36, 36, 116, 36, 114, 114, 116, 116, 71, 114, 157, 36, 116, 71, 159, 114, 114, 116, 17, 114, 116, 71, 30, 116, 30, 30, 202, 116, 73, 73, 116, 114, 114, 71, 247, 71, 30, 32, 245, 116, 159, 243, 122, 116, 17, 32, 200, 114, 200, 243, 116, 116, 114, 157, 36, 116, 200, 116, 202, 77, 114, 71, 118, 30, 116, 32, 202, 159, 114, 114, 114, 116, 116, 157, 71, 32, 32, 71, 116, 30, 157, 200, 157, 204, 32, 36, 157, 116, 71, 116, 243, 116, 116, 30, 73, 71, 116, 36, 36, 36, 114, 71, 116, 204, 157, 167, 159, 71, 157, 71, 32, 116, 71, 116, 116, 116, 71, 159, 114, 71, 71, 32, 118, 36, 116, 71, 159, 159, 116, 116, 116, 202, 71, 73, 32, 157, 159, 202, 73, 200, 116, 116, 71, 73, 120, 73, 116, 114, 71, 116, 116, 206, 163, 116, 116, 116, 159, 73, 116, 75, 157, 118, 30, 245, 116, 71, 116, 71, 116, 32, 116, 71, 116, 159, 114, 204, 116, 114, 204, 116, 114, 116, 116, 116, 77, 71, 34, 73, 73, 73, 116, 30, 73, 251, 32, 116, 116, 157, 251, 116, 116, 116, 32, 75, 157, 161, 114, 71, 71, 114, 157, 245, 200, 200, 200, 114, 118, 32, 73, 116, 114, 73, 116, 116, 114, 206, 159, 30, 116, 118, 114, 71, 71, 206, 71, 116, 116, 116, 202, 116, 116, 30, 116, 114, 157, 114, 114, 116, 116, 30, 36, 116, 116, 116, 73, 73, 36, 71, 36, 116, 200, 116, 116, 71, 200, 159, 114, 122, 159, 202, 116, 71, 71, 243, 204, 243, 17, 71, 157, 159, 116, 157, 243, 116, 71, 116, 159, 116, 116, 36, 200, 114, 
75, 200, 157, 251, 32, 114, 71, 159, 75, 251, 32, 116, 114, 157, 116, 116, 116, 167, 206, 32, 116, 114, 157, 200, 159, 159, 200, 32, 116, 17, 17, 17, 17, 17, 116, 243, 243, 116, 116, 116, 73, 159, 116, 114, 157, 73, 71, 114, 116, 243, 116, 73, 73, 71, 116, 159, 114, 202, 116, 116, 116, 114, 116, 116, 116, 116, 79, 116, 243, 114, 159, 161, 73, 116, 116, 116, 114, 73, 32, 116, 114, 71, 116, 157, 30, 118, 32, 116, 245, 114, 71, 200, 116, 116, 116, 71, 116, 116, 114, 116, 114, 116, 116, 71, 116, 116, 114, 116, 114, 116, 116, 71, 116, 243, 116, 243, 116, 202, 116, 73, 73, 161, 34, 71, 36, 116, 245, 36, 32, 116, 30, 202, 114, 71, 116, 159, 71, 200, 116, 243, 116, 73, 73, 71, 116, 71, 71, 202, 157, 116, 116, 116, 71, 200, 116, 71, 114, 71, 157, 243, 116, 243, 116, 116, 116, 208, 157, 36, 116, 116, 157, 114, 71, 114, 71, 116, 32, 71, 114, 114, 116, 114, 71, 71, 36, 36, 36, 116, 32, 116, 114, 116, 32, 200, 75, 116, 30, 71, 116, 114, 159, 161, 116, 157, 71, 116, 71, 116, 243, 243, 116, 71, 116, 73, 71, 30, 157, 157, 251, 32, 116, 116, 116, 116, 75, 36, 116, 116, 116, 116, 157, 159, 200, 34, 71, 118, 32, 116, 32, 73, 116, 114, 73, 200, 200, 251, 114, 116, 73, 73, 157, 116, 71, 36, 116, 114, 157, 116, 30, 73, 243, 71, 163, 116, 200, 114, 159, 116, 116, 32, 157, 159, 243, 71, 157, 116, 116, 71, 30, 202, 157, 200, 32, 116, 116, 71, 71, 116, 116, 114, 157, 114, 71, 79, 71, 157, 116, 116, 116, 157, 32, 73, 71, 157, 36, 116, 71, 159, 114, 114, 77, 116, 245, 200, 200, 200, 114, 118, 32, 208, 157, 32, 116, 116, 116, 71, 159, 79, 116, 116, 116, 116, 251, 32, 114, 71, 116, 120, 116, 118, 204, 206, 36, 116, 116, 200, 116, 71, 116, 114, 116, 157, 71, 32, 116, 71, 202, 245, 114, 159, 71, 71, 200, 116, 116, 114, 116, 116, 116, 114, 243, 116, 157, 251, 32, 116, 245, 71, 116, 200, 243, 116, 159, 71, 116, 71, 202, 116, 71, 200, 116, 71, 114, 71, 157, 251, 32, 114, 77, 34, 116, 116, 116, 30, 114, 116, 17, 32, 116, 71, 116, 116, 116, 116, 116, 71, 116, 116, 116, 116, 73, 71, 114, 157, 251, 116, 
71, 116, 116, 116, 243, 71, 116, 159, 114, 157, 116, 116, 116, 71, 116, 116, 114, 116, 114, 116, 71, 116, 73, 116, 116, 116, 159, 30, 32, 116, 116, 114, 116, 251, 32, 202, 116, 116, 116, 71, 116, 116, 116, 243, 75, 122, 159, 114, 73, 116, 114, 116, 71, 36, 243, 114, 73, 34, 73, 249, 71, 36, 243, 114, 116, 200, 75, 36, 36, 36, 116, 32, 116, 71, 116, 73, 36, 36, 243, 71, 75, 71, 114, 71, 30, 157, 114, 116, 157, 71, 71, 116, 71, 71, 73, 73, 128, 118, 71, 116, 114, 157, 73, 73, 116, 243, 200, 159, 116, 116, 32, 116, 200, 157, 251, 157, 116, 114, 71, 245, 34, 200, 124, 116, 206, 157, 116, 114, 157, 251, 32, 116, 200, 116, 32, 116, 114, 157, 116, 167, 116, 116, 116, 71, 120, 71, 114, 116, 116, 116, 116, 116, 116, 116, 116, 114, 157, 118, 116, 157, 200, 71, 30, 202, 116, 114, 120, 32, 114, 17, 243, 161, 75, 114, 116, 159, 17, 116, 116, 32, 202, 159, 114, 114, 30, 116, 116, 30, 159, 32, 116, 243, 251, 243, 71, 114, 200, 157, 245, 71, 30, 71, 116, 73, 116, 116, 161, 116, 200, 73, 71, 157, 116, 157, 71, 116, 114, 116, 116, 71, 114, 116, 32, 116, 114, 116, 116, 251, 32, 116, 116, 157, 71, 116, 32, 157, 75, 243, 114, 247, 116, 116, 159, 71, 116, 36, 157, 114, 73, 116, 116, 118, 114, 243, 114, 116, 200, 161, 157, 73, 73, 210, 71, 118, 157, 251, 116, 116, 71, 157, 116, 116, 116, 200, 116, 116, 157, 114, 159, 116, 73, 116, 251, 32, 116, 114, 243, 206, 71, 114, 116, 116, 32, 75, 157, 251, 71, 116, 159, 206, 161, 71, 114, 114, 247, 116, 200, 116, 116, 114, 200, 249, 32, 157, 116, 116, 73, 116, 30, 73, 202, 32, 118, 114, 71, 116, 71, 30, 116, 157, 71, 118, 116, 32, 157, 73, 116, 157, 71, 32, 114, 202, 167, 116, 169, 71, 243, 36, 116, 116, 116, 75, 36, 116, 116, 116, 159, 32, 116, 71, 116, 116, 32, 200, 116, 71, 116, 116, 116, 30, 71, 114, 116, 200, 114, 116, 116, 200, 75, 116, 114, 243, 71, 73, 36, 36, 243, 202, 116, 243, 202, 71, 30, 116, 118, 116, 116, 114, 71, 157, 114, 71, 30, 157, 116, 116, 200, 157, 114, 71, 243, 71, 202, 116, 116, 116, 157, 159, 200, 243, 200, 116, 116, 71, 
116, 243, 118, 200, 200, 114, 159, 81, 159, 114, 30, 157, 245, 200, 73, 157, 251, 32, 114, 32, 116, 114, 71, 116, 157, 30, 17, 32, 116, 36, 116, 73, 116, 116, 116, 77, 71, 116, 116, 116, 116, 200, 200, 73, 71, 114, 251, 32, 114, 157, 36, 116, 200, 157, 124, 114, 71, 157, 251, 36, 243, 114, 157, 32, 243, 116, 116, 200, 202, 120, 159, 159, 206, 75, 116, 73, 116, 116, 116, 116, 71, 116, 243, 116, 251, 32, 116, 206, 159, 251, 32, 73, 116, 116, 157, 116, 114, 116, 116, 116, 116, 116, 116, 71, 36, 114, 116, 71, 116, 71, 202, 200, 71, 32, 116, 116, 73, 161, 30, 114, 157, 251, 114, 116, 71, 114, 116, 71, 73, 32, 116, 245, 71, 200, 75, 200, 116, 159, 116, 157, 200, 32, 36, 36, 36, 36, 116, 36, 114, 114, 116, 116, 71, 114, 157, 36, 116, 71, 159, 114, 114, 116, 157, 114, 116, 71, 30, 116, 30, 30, 202, 116, 116, 157, 71, 116, 116, 157, 116, 114, 114, 71, 243, 206, 71, 157, 243, 116, 157, 157, 116, 73, 243, 159, 157, 124, 116, 114, 157, 157, 251, 243, 157, 114, 157, 251, 32, 116, 116, 159, 71, 157, 116, 247, 116, 71, 116, 36, 116, 71, 157, 116, 157, 116, 114, 206, 71, 116, 116, 202, 157, 251, 32, 116, 116, 71, 116, 200, 71, 116, 202, 157, 116, 114, 83, 157, 116, 116, 30, 243, 159, 71, 116, 251, 157, 73, 71, 157, 116, 116, 116, 114, 157, 116, 200, 116, 71, 116, 243, 159, 79, 116, 116, 116, 114, 157, 157, 116, 157, 71, 32, 30, 116, 71, 32, 36, 116, 116, 71, 200, 159, 116, 116, 116, 116, 116, 243, 116, 116, 116, 245, 116, 71, 116, 157, 34, 243, 71, 116, 116, 157, 159, 200, 32, 116, 116, 114, 159, 71, 157, 71, 32, 157, 161, 114, 30, 30, 34, 159, 163, 247, 159, 114, 116, 157, 116, 116, 71, 30, 243, 206, 200, 114, 73, 114, 73, 116, 251, 32, 116, 114, 243, 206, 71, 114, 73, 71, 114, 114, 157, 114, 114, 114, 114, 157, 71, 116, 116, 114, 116, 116, 116, 32, 157, 159, 30, 202, 32, 116, 116, 157, 71, 30, 157, 71, 202, 116, 71, 159, 116, 157, 34, 116, 71, 71, 116, 116, 116, 116, 114, 73, 157, 159, 206, 71, 114, 73, 71, 206, 116, 118, 114, 71, 114, 157, 251, 114, 157, 157, 118, 71, 243, 114, 
30, 30, 34, 159, 163, 247, 71, 32, 114, 159, 251, 32, 116, 116, 32, 75, 157, 157, 71, 245, 71, 251, 114, 116, 114, 116, 116, 71, 202, 71, 71, 206, 116, 116, 116, 200, 118, 116, 116, 200, 202, 157, 116, 71, 71, 114, 114, 243, 116, 116, 114, 116, 73, 116, 71, 36, 243, 116, 116, 116, 116, 116, 30, 73, 159, 202, 116, 71, 114, 116, 118, 114, 114, 71, 200, 71, 116, 30, 30, 159, 206, 30, 114, 116, 157, 30, 32, 116, 71, 116, 32, 200, 73, 202, 157, 116, 114, 159, 116, 73, 71, 116, 243, 243, 206, 71, 157, 243, 206, 79, 116, 71, 116, 73, 114, 36, 243, 73, 30, 204, 116, 114, 116, 116, 114, 157, 243, 116, 114, 118, 114, 157, 200, 116, 251, 114, 114, 159, 73, 118, 116, 116, 116, 116, 71, 114, 71, 114, 157, 114, 73, 200, 71, 157, 32, 73, 116, 36, 36, 116, 116, 114, 116, 116, 71, 116, 159, 200, 71, 71, 116, 71, 114, 114, 118, 116, 114, 116, 36, 116, 116, 114, 157, 116, 116, 17, 159, 116, 116, 116, 114, 157, 159, 159, 30, 249, 114, 30, 71, 71, 36, 36, 245, 114, 114, 32, 116, 71, 116, 71, 32, 116, 114, 200, 17, 116, 30, 249, 116, 116, 116, 200, 36, 204, 30, 75, 116, 114, 73, 32, 116, 30, 159, 30, 243, 116, 157, 157, 208, 116, 36, 157, 251, 32, 167, 30, 32, 206, 73, 200, 159, 200, 32, 116, 157, 71, 71, 71, 114, 32, 71, 157, 116, 114, 83, 30, 159, 200, 32, 116, 116, 71, 116, 114, 116, 73, 116, 159, 114, 116, 163, 83, 32, 114, 116, 114, 243, 243, 114, 157, 73, 116, 30, 73, 71, 116, 116, 157, 200, 243, 116, 159, 118, 116, 71, 202, 116, 71, 157, 73, 32, 118, 71, 116, 71, 200, 36, 245, 114, 157, 71, 157, 73, 200, 116, 116, 116, 116, 116, 157, 116, 200, 116, 114, 245, 157, 71, 243, 71, 116, 114, 75, 36, 161, 116, 30, 116, 202, 159, 71, 157, 116, 247, 71, 114, 75, 116, 73, 71, 157, 71, 200, 116, 157, 243, 116, 243, 159, 30, 32, 116, 116, 114, 116, 116, 118, 114, 71, 114, 73, 116, 251, 32, 116, 114, 243, 206, 118, 200, 116, 114, 116, 116, 30, 202, 73, 114, 200, 71, 75, 200, 116, 71, 116, 116, 200, 114, 71, 32, 116, 114, 73, 116, 71, 36, 116, 116, 71, 206, 71, 200, 114, 118, 71, 30, 71, 116, 
159, 114, 116, 157, 71, 114, 73, 202, 73, 116, 157, 243, 116, 200, 243, 71, 116, 159, 114, 202, 116, 75, 200, 251, 116, 71, 73, 71, 73, 114, 116, 116, 116, 71, 30, 30, 36, 71, 245, 71, 251, 114, 157, 116, 251, 206, 36, 157, 202, 245, 114, 116, 202, 114, 157, 73, 200, 116, 36, 116, 120, 116, 116, 114, 116, 120, 71, 73, 245, 157, 116, 71, 157, 169, 202, 116, 71, 114, 75, 159, 118, 73, 114, 157, 114, 71, 161, 71, 30, 116, 116, 116, 116, 71, 75, 114, 71, 204, 30, 32, 116, 116, 116, 116, 116, 116, 243, 200, 157, 30, 157, 116, 116, 116, 116, 71, 118, 116, 157, 71, 32, 36, 116, 116, 71, 206, 118, 157, 71, 200, 114, 116, 116, 200, 75, 159, 249, 114, 116, 116, 114, 206, 116, 116, 71, 116, 71, 116, 167, 116, 157, 71, 32, 71, 116, 200, 116, 118, 204, 116, 71, 116, 116, 157, 71, 116, 32, 116, 114, 17, 71, 114, 71, 71, 71, 73, 73, 157, 157, 159, 202, 116, 71, 36, 36, 116, 116, 116, 73, 73, 36, 71, 71, 114, 157, 116, 116, 116, 116, 200, 114, 116, 114, 116, 204, 71, 114, 116, 116, 71, 30, 114, 200, 114, 73, 116, 116, 114, 159, 208, 157, 79, 200, 116, 116, 71, 200, 17, 71, 116, 157, 157, 114, 71, 36, 116, 114, 71, 116, 116, 116, 116, 71, 114, 159, 30, 116, 200, 73, 116, 116, 116, 30, 114, 116, 32, 204, 71, 36, 71, 116, 200, 71, 75, 73, 30, 116, 251, 32, 157, 161, 161, 114, 114, 200, 200, 114, 163, 114, 116, 114, 116, 200, 116, 116, 116, 116, 114, 114, 116, 157, 32, 157, 200, 157, 200, 73, 73, 116, 116, 204, 245, 116, 116, 71, 116, 116, 116, 116, 247, 71, 157, 116, 71, 116, 114, 116, 116, 116, 159, 75, 243, 161, 75, 114, 116, 159, 116, 71, 32, 116, 114, 114, 251, 32, 116, 114, 116, 116, 71, 71, 71, 116, 116, 243, 116, 159, 71, 116, 71, 116, 71, 157, 116, 116, 116, 116, 116, 206, 36, 116, 71, 116, 116, 116, 116, 114, 120, 71, 36, 36, 36, 116, 32, 116, 157, 116, 114, 116, 157, 116, 114, 116, 71, 114, 71, 159, 157, 73, 200, 200, 114, 200, 71, 114, 73, 116, 116, 71, 71, 34, 32, 73, 71, 116, 73, 114, 116, 32, 73, 71, 73, 116, 157, 73, 71, 157, 71, 75, 243, 157, 114, 75, 116, 157, 116, 
114, 157, 243, 116, 116, 116, 200, 245, 116, 73, 114, 73, 30, 157, 73, 30, 157, 245, 116, 73, 200, 157, 200, 157, 116, 116, 157, 116, 114, 200, 30, 116, 157, 247, 157, 251, 71, 159, 200, 202, 157, 116, 116, 116, 200, 17, 157, 73, 116, 116, 116, 167, 116, 116, 157, 243, 157, 116, 116, 116, 163, 30, 36, 32, 116, 36, 116, 71, 157, 116, 114, 116, 157, 200, 200, 200, 114, 118, 32, 73, 116, 114, 73, 243, 30, 73, 71, 116, 159, 116, 71, 116, 32, 157, 159, 30, 202, 32, 116, 116, 159, 71, 30, 243, 114, 157, 114, 114, 116, 32, 157, 165, 159, 206, 32, 114, 116, 159, 159, 202, 116, 71, 116, 114, 118, 71, 114, 116, 114, 114, 243, 32, 116, 116, 71, 116, 116, 116, 116, 116, 116, 116, 71, 157, 116, 157, 71, 32, 118, 116, 116, 71, 116, 73, 114, 116, 116, 71, 116, 114, 116, 71, 116, 159, 165, 159, 206, 32, 116, 71, 32, 116, 71, 116, 116, 114, 114, 114, 157, 114, 116, 116, 116, 116, 114, 116, 200, 75, 116, 116, 77, 71, 114, 116, 200, 71, 114, 71, 114, 34, 114, 157, 159, 116, 243, 116, 73, 73, 71, 116, 202, 157, 159, 73, 73, 36, 71, 36, 36, 36, 200, 200, 159, 114, 122, 159, 202, 116, 71, 116, 116, 116, 114, 116, 116, 73, 116, 243, 159, 206, 71, 71, 36, 36, 243, 30, 73, 116, 114, 73, 116, 116, 114, 206, 114, 200, 114, 118, 32, 73, 116, 114, 157, 71, 159, 114, 159, 73, 36, 30, 114, 157, 251, 165, 159, 159, 116, 34, 202, 114, 71, 116, 116, 202, 200, 71, 116, 245, 200, 116, 71, 200, 116, 114, 116, 245, 161, 116, 30, 116, 157, 114, 114, 116, 157, 243, 116, 30, 159, 200, 167, 157, 249, 118, 204, 200, 116, 116, 116, 71, 116, 71, 116, 116, 30, 75, 30, 114, 116, 163, 73, 32, 116, 116, 116, 116, 114, 120, 71, 251, 32, 116, 114, 157, 116, 245, 71, 243, 200, 202, 116, 243, 71, 116, 73, 71, 206, 116, 118, 251, 32, 114, 114, 116, 114, 116, 114, 116, 116, 208, 116, 157, 116, 73, 114, 116, 116, 127, 127, 127, 127, 127, 127, 127, 127, 127, 157, 118, 204, 71, 71, 116, 243, 116, 73, 159, 159, 71, 116, 71, 116, 116, 116, 114, 75, 116, 202, 71, 159, 71, 32, 73, 243, 200, 202, 116, 71, 159, 71, 32, 73, 114, 
157, 251, 32, 157, 157, 116, 114, 30, 116, 116, 116, 73, 127, 127, 127, 127, 127, 73, 71, 36, 116, 116, 127, 157, 116, 116, 116, 73, 71, 245, 71, 116, 114, 157, 127, 127, 127, 73, 116, 116, 116, 200, 114, 83, 251, 32, 116, 116, 114, 204, 30, 159, 32, 73, 167, 116, 243, 157, 116, 114, 116, 77, 75, 202, 114, 116, 157, 116, 157, 116, 116, 200, 204, 116, 116, 118, 114, 116, 116, 71, 116, 73, 71, 73, 206, 116, 118, 73, 127, 127, 127, 127, 127, 251, 32, 114, 71, 114, 73, 71, 200, 159, 202, 116, 71, 32, 36, 114, 32, 157, 32, 114, 157, 161, 114, 71, 116, 159, 71, 36, 243, 114, 73, 34, 73, 249, 71, 157, 165, 159, 17, 114, 71, 30, 118, 71, 116, 71, 116, 157, 200, 73, 114, 71, 116, 200, 71, 116, 202, 157, 116, 114, 83, 157, 71, 116, 116, 114, 251, 32, 116, 114, 116, 116, 116, 167, 243, 157, 243, 157, 116, 116, 116, 208, 243, 32, 71, 157, 71, 77, 71, 71, 116, 116, 116, 157, 116, 114, 206, 208, 200, 32, 116, 38, 71, 71, 157, 118, 116, 116, 116, 71, 116, 114, 116, 73, 116, 118, 114, 245, 116, 116, 116, 116, 116, 116, 114, 114, 73, 116, 73, 116, 71, 202, 73, 116, 71, 36, 159, 116, 114, 157, 200, 71, 116, 157, 71, 32, 118, 116, 116, 71, 116, 73, 114, 157, 120, 114, 30, 73, 71, 30, 157, 157, 73, 161, 71, 118, 114, 157, 251, 32, 116, 116, 116, 157, 36, 116, 114, 36, 114, 157, 251, 71, 116, 116, 71, 116, 30, 32, 157, 32, 114, 157, 161, 114, 71, 116, 159, 71, 36, 243, 114, 73, 34, 73, 249, 71, 157, 165, 159, 17, 114, 200, 73, 202, 73, 114, 116, 116, 71, 116, 114, 114, 157, 116, 116, 116, 116, 116, 116, 71, 116, 32, 157, 200, 30, 75, 200, 116, 200, 116, 71, 116, 116, 118, 114, 71, 200, 116, 116, 71, 116, 114, 116, 157, 120, 73, 71, 32, 114, 116, 116, 71, 157, 114, 157, 251, 116, 116, 116, 71, 77, 71, 71, 116, 116, 114, 116, 116, 157, 116, 116, 114, 116, 34, 116, 116, 159, 71, 157, 116, 247, 116, 36, 32, 116, 36, 116, 71, 157, 116, 73, 116, 71, 200, 71, 71, 71, 73, 32, 157, 116, 116, 202, 116, 116, 71, 71, 251, 245, 251, 114, 73, 73, 159, 202, 116, 71, 116, 116, 114, 159, 116, 32, 36, 
36, 36, 116, 114, 251, 116, 116, 116, 71, 75, 243, 116, 116, 114, 116, 116, 114, 116, 114, 116, 116, 114, 200, 116, 114, 32, 116, 116, 204, 71, 116, 114, 116, 114, 200, 73, 114, 73, 116, 157, 116, 73, 30, 114, 157, 251, 32, 157, 251, 32, 30, 202, 243, 114, 71, 114, 116, 116, 116, 116, 116, 116, 30, 249, 116, 116, 116, 200, 36, 204, 30, 116, 114, 116, 116, 114, 36, 36, 116, 116, 116, 116, 157, 116, 114, 157, 251, 32, 114, 243, 243, 71, 36, 32, 116, 36, 116, 116, 116, 116, 71, 32, 36, 36, 116, 245, 71, 245, 167, 243, 71, 34, 71, 114, 116, 116, 71, 116, 116, 114, 116, 114, 116, 116, 71, 116, 116, 114, 116, 114, 116, 116, 116, 200, 159, 200, 243, 200, 116, 71, 73, 116, 114, 157, 116, 32, 204, 116, 116, 116, 116, 116, 116, 71, 116, 73, 114, 73, 116, 71, 116, 116, 116, 71, 30, 71, 114, 159, 157, 71, 114, 116, 114, 251, 32, 116, 157, 157, 245, 122, 157, 161, 114, 116, 157, 114, 116, 114, 116, 71, 116, 116, 116, 157, 116, 116, 116, 116, 202, 163, 116, 243, 116, 71, 200, 71, 116, 116, 167, 116, 114, 157, 36, 116, 71, 159, 114, 71, 73, 71, 247, 71, 32, 118, 157, 157, 73, 206, 71, 116, 116, 200, 206, 71, 116, 200, 114, 157, 251, 32, 157, 32, 243, 116, 116, 32, 202, 116, 116, 116, 71, 116, 116, 116, 116, 116, 71, 116, 157, 34, 157, 116, 114, 116, 116, 118, 114, 116, 116, 116, 116, 116, 71, 116, 114, 157, 251, 32, 157, 116, 116, 71, 116, 116, 116, 116, 114, 116, 73, 73, 36, 116, 114, 157, 36, 36, 116, 116, 116, 73, 73, 36, 71, 36, 36, 36, 116, 116, 116, 116, 157, 116, 167, 116, 157, 71, 32, 116, 71, 116, 116, 71, 116, 32, 73, 71, 116, 32, 36, 30, 114, 157, 251, 32, 116, 114, 157, 32, 116, 71, 116, 116, 243, 75, 122, 161, 157, 30, 200, 243, 114, 114, 71, 251, 32, 116, 71, 116, 32, 200, 157, 200, 116, 251, 116, 36, 116, 116, 32, 36, 36, 36, 30, 159, 163, 114, 243, 157, 243, 71, 159, 116, 157, 34, 159, 159, 200, 32, 36, 114, 32, 71, 30, 116, 73, 116, 116, 157, 73, 157, 116, 157, 116, 157, 71, 245, 32, 114, 116, 114, 200, 247, 243, 116, 116, 116, 32, 71, 114, 114, 157, 159, 116, 
243, 32, 36, 36, 36, 243, 204, 71, 34, 159, 251, 157, 71, 32, 200, 114, 157, 73, 71, 202, 116, 114, 249, 73, 32, 157, 73, 116, 116, 71, 116, 116, 157, 159, 114, 245, 157, 251, 116, 157, 116, 157, 71, 71, 204, 247, 116, 116, 204, 73, 71, 116, 32, 157, 159, 243, 71, 71, 157, 114, 116, 114, 71, 114, 114, 157, 118, 157, 30, 116, 116, 116, 116, 71, 71, 204, 116, 157, 116, 116, 114, 73, 118, 71, 157, 163, 36, 116, 116, 71, 116, 114, 200, 114, 116, 75, 200, 75, 116, 116, 116, 161, 81, 114, 116, 116, 124, 71, 116, 157, 34, 157, 116, 36, 71, 251, 243, 116, 200, 116, 114, 202, 204, 243, 17, 116, 243, 114, 71, 200, 71, 116, 32, 71, 36, 116, 114, 157, 251, 30, 157, 159, 116, 114, 30, 116, 71, 118, 114, 157, 114, 71, 30, 114, 116, 73, 245, 157, 71, 32, 114, 202, 116, 116, 208, 116, 202, 116, 116, 116, 200, 157, 73, 251, 116, 116, 116, 116, 157, 36, 116, 114, 157, 71, 116, 159, 114, 202, 116, 114, 77, 157, 116, 116, 200, 114, 157, 157, 73, 116, 116, 116, 116, 202, 116, 71, 200, 71, 114, 116, 116, 114, 116, 116, 71, 116, 71, 32, 71, 32, 116, 71, 210, 114, 159, 116, 116, 32, 161, 71, 116, 243, 243, 114, 243, 116, 114, 116, 159, 116, 116, 71, 159, 116, 157, 157, 251, 116, 116, 116, 116, 71, 71, 116, 116, 157, 124, 32, 114, 116, 114, 114, 71, 30, 32, 202, 30, 157, 116, 114, 159, 30, 202, 116, 157, 200, 116, 116, 116, 116, 116, 159, 202, 116, 71, 116, 116, 71, 116, 32, 71, 118, 116, 116, 71, 116, 116, 200, 243, 116, 116, 114, 116, 71, 116, 73, 36, 36, 243, 71, 73, 157, 114, 71, 30, 157, 114, 116, 157, 71, 71, 116, 116, 243, 118, 116, 32, 116, 114, 157, 116, 116, 17, 116, 200, 114, 75, 116, 114, 36, 116, 114, 116, 30, 200, 75, 243, 159, 251, 116, 116, 116, 157, 243, 36, 114, 116, 116, 75, 116, 30, 73, 71, 116, 159, 116, 36, 116, 36, 114, 116, 116, 116, 36, 157, 116, 114, 116, 114, 200, 73, 114, 73, 116, 116, 116, 114, 75, 114, 161, 116, 116, 116, 114, 116, 116, 116, 167, 116, 116, 116, 116, 114, 157, 116, 116, 116, 116, 73, 114, 73, 73, 116, 114, 116, 116, 159, 71, 71, 32, 116, 245, 
116, 114, 245, 116, 159, 120, 114, 116, 116, 159, 114, 32, 118, 114, 157, 114, 71, 30, 118, 71, 116, 71, 116, 157, 200, 73, 114, 71, 116, 200, 71, 116, 202, 157, 116, 114, 83, 157, 71, 116, 116, 114, 251, 32, 116, 114, 116, 116, 116, 167, 243, 157, 243, 157, 116, 116, 116, 208, 243, 32, 71, 157, 71, 77, 71, 71, 116, 32, 202, 114, 114, 157, 75, 200, 157, 251, 32, 114, 71, 159, 75, 251, 32, 116, 114, 157, 116, 116, 116, 116, 159, 114, 114, 116, 114, 71, 114, 157, 249, 116, 116, 202, 114, 75, 73, 116, 73, 71, 159, 116, 71, 116, 32, 157, 200, 114, 32, 116, 116, 116, 116, 30, 36, 32, 116, 36, 116, 73, 116, 116, 116, 77, 167, 73, 116, 200, 73, 116, 159, 251, 32, 157, 116, 245, 116, 157, 116, 71, 200, 71, 202, 30, 114, 71, 118, 30, 163, 157, 159, 167, 116, 243, 157, 243, 157, 116, 116, 116, 208, 73, 71, 116, 30, 243, 116, 116, 116, 30, 30, 157, 116, 30, 116, 114, 157, 30, 200, 73, 73, 243, 114, 200, 32, 157, 200, 30, 32, 157, 71, 116, 114, 116, 114, 204, 157, 71, 202, 200, 71, 116, 30, 202, 116, 71, 114, 157, 73, 116, 116, 161, 71, 116, 200, 243, 30, 75, 200, 116, 116, 116, 163, 73, 32, 114, 114, 157, 251, 32, 159, 116, 251, 32, 159, 159, 200, 32, 36, 114, 32, 71, 200, 71, 114, 243, 200, 73, 159, 243, 75, 30, 200, 32, 116, 71, 161, 116, 116, 116, 71, 159, 116, 71, 32, 159, 243, 116, 159, 118, 71, 157, 116, 36, 36, 114, 116, 32, 157, 73, 30, 73, 116, 116, 116, 157, 36, 116, 157, 202, 30, 114, 157, 251, 243, 157, 75, 114, 114, 71, 167, 116, 243, 157, 243, 157, 116, 202, 71, 32, 114, 116, 114, 116, 114, 71, 161, 71, 30, 116, 116, 243, 114, 71, 30, 73, 245, 116, 202, 71, 157, 157, 71, 200, 157, 157, 71, 32, 114, 32, 71, 200, 116, 71, 200, 32, 206, 30, 73, 202, 30, 71, 32, 36, 157, 157, 243, 116, 157, 251, 116, 71, 116, 32, 36, 116, 116, 71, 157, 114, 204, 73, 116, 157, 251, 30, 116, 114, 157, 126, 157, 120, 73, 114, 77, 116, 114, 116, 202, 157, 71, 116, 200, 157, 157, 73, 73, 161, 116, 71, 114, 157, 249, 116, 116, 202, 34, 32, 118, 116, 32, 36, 30, 32, 116, 116, 202, 71, 157, 
114, 200, 200, 206, 159, 157, 245, 200, 73, 116, 71, 32, 71, 114, 114, 32, 116, 114, 157, 75, 202, 161, 114, 114, 71, 157, 243, 114, 243, 73, 245, 116, 114, 71, 30, 157, 71, 118, 71, 118, 204, 116, 114, 157, 30, 75, 243, 161, 75, 114, 116, 159, 116, 36, 116, 30, 159, 71, 116, 116, 114, 157, 243, 243, 71, 116, 157, 36, 36, 116, 159, 114, 200, 114, 157, 32, 114, 251, 243, 71, 200, 71, 118, 114, 116, 157, 116, 114, 157, 71, 159, 34, 75, 200, 114, 114, 116, 157, 32, 157, 73, 116, 116, 159, 71, 71, 77, 116, 114, 83, 30, 114, 159, 116, 157, 243, 116, 71, 116, 116, 243, 157, 30, 202, 206, 159, 116, 116, 157, 114, 114, 157, 157, 73, 116, 243, 159, 206, 200, 159, 116, 116, 30, 71, 30, 114, 116, 206, 114, 116, 120, 30, 73, 116, 114, 200, 71, 116, 71, 116, 32, 71, 245, 159, 30, 157, 73, 116, 116, 71, 116, 157, 253, 118, 200, 116, 71, 75, 200, 71, 71, 243, 73, 243, 71, 71, 116, 32, 116, 116, 116, 245, 116, 114, 116, 71, 30, 159, 159, 116, 116, 114, 157, 116, 114, 116, 118, 114, 71, 243, 73, 251, 32, 114, 77, 251, 71, 30, 71, 116, 202, 116, 71, 157, 114, 71, 116, 245, 116, 157, 114, 71, 243, 200, 159, 157, 71, 116, 200, 73, 71, 116, 202, 116, 114, 118, 114, 243, 116, 200, 32, 30, 30, 32, 157, 200, 249, 114, 30, 71, 42, 249, 73, 116, 116, 157, 202, 114, 116, 32, 116, 32, 73, 202, 73, 114, 159, 114, 157, 114, 116, 116, 114, 157, 32, 163, 116, 157, 114, 116, 157, 30, 73, 245, 83, 243, 71, 206, 71, 116, 71, 116, 243, 71, 32, 71, 73, 116, 36, 200, 73, 206, 247, 159, 114, 114, 116, 114, 71, 114, 159, 114, 157, 116, 116, 116, 245, 73, 249, 206, 202, 71, 200, 116, 71, 116, 202, 200, 73, 71, 116, 157, 157, 114, 116, 157, 30, 73, 157, 73, 30, 30, 157, 157, 77, 243, 71, 116, 73, 30, 34, 161, 73, 116, 73, 200, 157, 114, 114, 118, 71, 30, 73, 204, 243, 32, 116, 114, 157, 253, 30, 116, 71, 116, 71, 118, 200, 116, 114, 116, 116, 116, 116, 114, 73, 34, 157, 251, 32, 116, 200, 73, 114, 116, 114, 200, 243, 71, 116, 116, 32, 245, 116, 73, 116, 157, 116, 157, 247, 116, 243, 202, 71, 73, 32, 116, 
247, 251, 32, 32, 116, 202, 116, 32, 157, 159, 202, 116, 116, 116, 159, 71, 71, 157, 116, 114, 200, 159, 116, 116, 36, 200, 114, 32, 116, 116, 71, 157, 118, 30, 116, 71, 71, 71, 36, 32, 116, 116, 116, 200, 200, 73, 71, 30, 116, 251, 32, 202, 114, 116, 71, 116, 114, 245, 114, 73, 30, 206, 71, 114, 116, 243, 116, 114, 157, 200, 32, 36, 157, 116, 157, 116, 71, 30, 116, 114, 245, 34, 163, 73, 157, 114, 116, 114, 118, 200, 71, 243, 71, 157, 202, 114, 157, 116, 116, 114, 208, 167, 200, 38, 202, 200, 30, 30, 114, 36, 204, 71, 114, 116, 116, 71, 30, 114, 200, 114, 73, 116, 116, 114, 159, 208, 157, 79, 200, 116, 116, 71, 200, 159, 159, 116, 159, 116, 116, 32, 71, 116, 159, 30, 161, 71, 73, 116, 202, 71, 116, 202, 116, 243, 71, 116, 159, 71, 243, 159, 210, 73, 200, 114, 116, 163, 73, 32, 116, 116, 116, 116, 157, 157, 200, 30, 73, 247, 200, 114, 157, 73, 71, 114, 157, 159, 202, 116, 114, 71, 116, 120, 157, 200, 116, 202, 157, 71, 114, 200, 77, 114, 116, 116, 71, 73, 243, 32, 73, 243, 200, 249, 116, 114, 116, 157, 71, 71, 157, 251, 36, 116, 157, 157, 243, 157, 116, 116, 71, 157, 32, 30, 30, 71, 202, 245, 114, 157, 245, 163, 71, 116, 116, 200, 73, 116, 71, 245, 116, 71, 30, 157, 251, 32, 245, 32, 116, 116, 159, 30, 116, 116, 116, 73, 32, 30, 30, 157, 161, 116, 157, 116, 243, 71, 71, 116, 157, 30, 204, 114, 243, 116, 116, 157, 32, 116, 243, 71, 200, 157, 71, 114, 247, 116, 157, 116, 114, 30, 36, 116, 71, 114, 32, 251, 243, 157, 114, 157, 251, 32, 157, 157, 251, 32, 157, 71, 114, 30, 200, 157, 114, 243, 159, 71, 116, 251, 157, 73, 251, 32, 116, 116, 122, 75, 116, 71, 159, 116, 157, 200, 116, 71, 32, 73, 243, 116, 247, 159, 116, 159, 71, 71, 71, 247, 116, 116, 202, 157, 159, 251, 32, 116, 206, 36, 116, 157, 157, 114, 71, 243, 159, 202, 73, 116, 116, 116, 114, 157, 73, 71, 116, 71, 32, 200, 71, 73, 202, 204, 116, 114, 157, 245, 202, 118, 157, 157, 251, 32, 114, 71, 157, 116, 73, 114, 114, 114, 116, 71, 157, 161, 73, 157, 73, 71, 243, 116, 71, 116, 116, 71, 116, 73, 157, 71, 159, 
30, 32, 116, 114, 71, 243, 157, 75, 116, 114, 157, 71, 73, 157, 245, 159, 114, 116, 114, 73, 200, 30, 116, 116, 71, 32, 157, 73, 73, 206, 159, 73, 245, 200, 200, 116, 159, 159, 71, 200, 157, 245, 71, 30, 157, 30, 157, 71, 114, 157, 71, 116, 73, 251, 32, 243, 30, 71, 157, 157, 32, 157, 159, 243, 157, 116, 71, 243, 71, 114, 157, 251, 73, 157, 157, 114, 159, 114, 30, 157, 71, 243, 157, 71, 114, 200, 77, 114, 116, 116, 116, 116, 71, 71, 71, 71, 71, 116, 71, 116, 159, 36, 36, 161, 200, 118, 200, 116, 114, 157, 71, 206, 200, 161, 71, 71, 116, 116, 116, 73, 114, 116, 159, 73, 157, 243, 200, 116, 116, 116, 200, 32, 200, 116, 73, 159, 243, 159, 116, 71, 114, 73, 71, 73, 71, 116, 71, 32, 118, 114, 161, 116, 114, 30, 114, 114, 114, 116, 116, 118, 116, 116, 243, 116, 116, 116, 245, 157, 200, 157, 116, 114, 116, 116, 157, 251, 114, 116, 114, 200, 200, 114, 159, 159, 159, 206, 243, 32, 116, 116, 116, 116, 30, 36, 165, 159, 206, 32, 114, 159, 75, 200, 116, 73, 245, 71, 251, 114, 116, 114, 251, 200, 159, 114, 71, 114, 200, 202, 163, 116, 245, 114, 71, 208, 157, 77, 71, 114, 200, 114, 32, 114, 114, 114, 116, 73, 116, 116, 200, 114, 75, 200, 114, 157, 116, 114, 116, 73, 116, 157, 159, 116, 32, 36, 36, 36, 116, 114, 253, 249, 249, 202, 116, 243, 243, 114, 73, 114, 243, 71, 157, 30, 71, 75, 114, 34, 114, 157, 251, 71, 71, 157, 200, 116, 122, 71, 71, 157, 71, 114, 157, 157, 161, 71, 114, 114, 118, 200, 71, 243, 32, 116, 71, 202, 204, 116, 114, 114, 73, 73, 200, 116, 116, 161, 159, 118, 71, 157, 251, 32, 114, 116, 116, 114, 116, 32, 157, 116, 157, 251, 116, 114, 73, 116, 71, 71, 206, 163, 157, 159, 71, 77, 71, 157, 114, 73, 116, 116, 71, 116, 116, 116, 116, 116, 116, 116, 167, 204, 114, 157, 114, 200, 202, 114, 200, 71, 114, 159, 200, 157, 30, 251, 114, 116, 71, 30, 30, 243, 30, 157, 245, 116, 36, 116, 30, 159, 114, 200, 114, 73, 159, 204, 243, 200, 30, 245, 114, 157, 73, 114, 116, 71, 200, 32, 36, 73, 251, 32, 114, 30, 71, 200, 71, 71, 71, 73, 32, 32, 71, 116, 157, 243, 114, 200, 71, 
206, 159, 251, 30, 157, 114, 116, 157, 114, 116, 116, 116, 71, 116, 32, 157, 71, 157, 32, 73, 204, 200, 159, 116, 116, 36, 200, 73, 116, 116, 71, 116, 114, 30, 32, 116, 71, 114, 116, 116, 116, 30, 71, 116, 30, 157, 116, 243, 116, 32, 71, 116, 73, 116, 73, 243, 243, 36, 36, 245, 73, 114, 114, 116, 116, 116, 114, 116, 116, 116, 116, 79, 116, 116, 114, 157, 36, 116, 71, 200, 116, 114, 114, 157, 251, 118, 114, 116, 116, 32, 157, 116, 247, 116, 161, 116, 71, 157, 124, 116, 114, 251, 32, 116, 116, 122, 75, 71, 30, 116, 71, 73, 32, 116, 157, 116, 32, 157, 202, 116, 114, 73, 71, 116, 202, 243, 245, 116, 251, 116, 163, 30, 114, 34, 114, 73, 71, 116, 157, 71, 243, 202, 116, 114, 116, 114, 159, 30, 116, 200, 73, 114, 202, 163, 157, 243, 159, 159, 159, 116, 32, 36, 71, 71, 30, 202, 116, 157, 200, 73, 71, 114, 116, 157, 116, 114, 157, 251, 116, 71, 71, 206, 116, 200, 200, 157, 116, 159, 116, 116, 36, 200, 200, 116, 116, 200, 71, 249, 157, 251, 32, 116, 114, 157, 116, 200, 32, 116, 71, 116, 116, 116, 118, 114, 202, 243, 116, 116, 116, 73, 71, 114, 163, 157, 243, 159, 159, 159, 159, 30, 202, 32, 114, 116, 116, 71, 114, 157, 36, 116, 200, 161, 120, 157, 32, 30, 114, 157, 116, 71, 114, 157, 200, 116, 114, 114, 116, 73, 116, 116, 159, 114, 114, 116, 73, 114, 116, 206, 36, 116, 73, 200, 116, 73, 114, 114, 157, 251, 30, 114, 32, 71, 32, 116, 71, 157, 36, 36, 116, 116, 116, 73, 30, 157, 116, 116, 116, 116, 71, 245, 116, 30, 73, 71, 159, 116, 71, 116, 202, 73, 71, 116, 71, 206, 116, 73, 114, 36, 71, 116, 71, 116, 114, 118, 116, 157, 120, 71, 116, 32, 116, 73, 73, 247, 114, 204, 116, 114, 204, 116, 114, 116, 116, 116, 77, 71, 34, 243, 118, 161, 116, 116, 116, 73, 116, 116, 71, 114, 116, 118, 73, 116, 116, 71, 34, 157, 116, 200, 116, 116, 116, 114, 157, 243, 79, 157, 116, 114, 83, 30, 159, 30, 71, 116, 202, 73, 71, 116, 71, 159, 243, 200, 30, 157, 116, 116, 116, 159, 243, 114, 157, 114, 157, 116, 116, 157, 202, 116, 73, 200, 159, 243, 159, 116, 159, 71, 157, 116, 247, 116, 71, 34, 159, 
245, 114, 116, 36, 116, 116, 73, 159, 116, 116, 243, 116, 116, 167, 116, 71, 116, 116, 157, 114, 116, 200, 73, 116, 71, 116, 116, 71, 116, 116, 116, 71, 71, 32, 116, 73, 116, 157, 71, 32, 157, 116, 71, 73, 81, 159, 114, 30, 157, 30, 114, 32, 71, 200, 200, 75, 157, 116, 157, 204, 157, 30, 157, 114, 116, 200, 114, 116, 116, 200, 200, 116, 116, 71, 200, 116, 157, 71, 116, 114, 157, 116, 243, 116, 116, 116, 159, 114, 32, 118, 77, 71, 116, 157, 71, 157, 71, 114, 71, 116, 36, 116, 114, 157, 251, 32, 71, 71, 161, 245, 157, 71, 157, 161, 206, 36, 36, 116, 114, 116, 116, 114, 157, 71, 116, 163, 114, 71, 30, 245, 114, 116, 71, 116, 116, 71, 116, 114, 116, 73, 116, 116, 71, 116, 116, 114, 116, 71, 114, 243, 114, 71, 116, 71, 30, 116, 116, 73, 71, 116, 114, 157, 251, 32, 157, 116, 116, 116, 202, 206, 71, 114, 159, 116, 116, 116, 114, 116, 251, 32, 116, 243, 118, 204, 71, 71, 200, 202, 157, 245, 71, 251, 163, 30, 114, 42, 30, 71, 157, 243, 118, 243, 116, 71, 116, 116, 116, 116, 71, 116, 159, 116, 116, 36, 36, 116, 116, 157, 71, 114, 157, 116, 200, 32, 116, 206, 114, 30, 116, 114, 116, 116, 116, 116, 200, 114, 79, 73, 71, 206, 114, 114, 159, 30, 159, 116, 116, 71, 71, 202, 157, 30, 116, 251, 116, 247, 157, 200, 202, 116, 116, 116, 159, 206, 32, 116, 71, 32, 157, 34, 116, 116, 116, 161, 202, 116, 243, 116, 116, 116, 116, 114, 157, 116, 200, 116, 116, 116, 200, 73, 71, 202, 32, 116, 116, 114, 116, 159, 206, 161, 73, 157, 73, 116, 116, 116, 116, 116, 116, 30, 71, 118, 73, 157, 114, 116, 116, 116, 73, 71, 116, 30, 116, 116, 71, 71, 116, 116, 36, 114, 32, 116, 114, 157, 116, 114, 32, 200, 159, 202, 71, 114, 71, 157, 161, 120, 157, 245, 243, 116, 116, 116, 116, 116, 71, 75, 116, 116, 116, 71, 116, 32, 71, 157, 71, 32, 157, 161, 114, 157, 116, 200, 114, 116, 116, 73, 77, 71, 200, 114, 157, 116, 30, 249, 114, 30, 71, 71, 32, 157, 157, 251, 157, 116, 71, 200, 73, 116, 116, 116, 114, 75, 114, 161, 116, 114, 157, 159, 116, 116, 116, 32, 243, 71, 200, 73, 71, 245, 30, 116, 157, 243, 157, 
157, 30, 71, 116, 161, 202, 114, 200, 114, 114, 157, 114, 116, 116, 75, 71, 116, 116, 161, 214, 73, 159, 116, 36, 157, 251, 116, 161, 159, 200, 73, 157, 251, 36, 114, 116, 116, 71, 73, 36, 36, 114, 202, 71, 71, 206, 71, 116, 116, 116, 114, 73, 157, 159, 206, 116, 200, 200, 73, 202, 116, 114, 116, 118, 204, 116, 116, 116, 116, 114, 71, 73, 71, 159, 116, 116, 249, 206, 30, 116, 114, 157, 71, 116, 71, 159, 116, 116, 208, 206, 159, 251, 200, 116, 157, 116, 114, 116, 200, 73, 71, 116, 71, 114, 157, 116, 157, 157, 124, 32, 157, 116, 71, 116, 116, 116, 71, 159, 116, 30, 243, 116, 34, 116, 116, 159, 71, 157, 71, 116, 114, 116, 116, 71, 202, 30, 157, 114, 116, 114, 157, 116, 243, 116, 30, 116, 116, 200, 114, 157, 116, 243, 116, 73, 73, 71, 116, 71, 71, 157, 157, 116, 116, 114, 116, 114, 116, 71, 116, 71, 159, 245, 116, 116, 200, 116, 116, 200, 71, 114, 200, 114, 202, 116, 200, 243, 116, 116, 32, 75, 157, 251, 32, 116, 116, 159, 71, 157, 116, 247, 71, 157, 116, 114, 116, 118, 71, 114, 157, 251, 114, 73, 71, 32, 200, 243, 71, 200, 200, 116, 71, 159, 200, 114, 116, 157, 116, 200, 30, 200, 243, 71, 116, 71, 71, 116, 73, 114, 116, 116, 116, 116, 206, 71, 116, 206, 114, 157, 116, 116, 71, 116, 114, 73, 116, 157, 157, 116, 167, 116, 116, 114, 157, 116, 200, 157, 200, 116, 71, 157, 118, 204, 71, 32, 116, 116, 71, 157, 118, 30, 116, 71, 71, 116, 116, 157, 124, 32, 114, 116, 157, 71, 243, 114, 159, 30, 116, 116, 71, 114, 73, 71, 116, 71, 159, 114, 204, 114, 71, 157, 32, 114, 73, 116, 114, 36, 116, 114, 157, 75, 71, 73, 243, 116, 114, 116, 243, 116, 30, 243, 159, 114, 116, 251, 116, 36, 116, 114, 159, 200, 116, 116, 114, 71, 116, 116, 116, 114, 73, 116, 243, 73, 116, 116, 73, 114, 116, 116, 116, 118, 73, 73, 116, 114, 116, 34, 157, 71, 116, 116, 73, 159, 159, 116, 32, 116, 114, 116, 116, 71, 116, 116, 71, 118, 116, 120, 243, 114, 157, 251, 32, 116, 114, 116, 116, 159, 202, 116, 116, 116, 71, 116, 114, 157, 116, 116, 116, 116, 200, 114, 73, 157, 159, 206, 71, 116, 71, 161, 71, 200, 
157, 71, 116, 243, 118, 200, 200, 114, 159, 81, 114, 116, 116, 116, 114, 116, 116, 114, 114, 71, 71, 71, 245, 116, 114, 157, 251, 116, 157, 243, 30, 71, 245, 71, 251, 114, 116, 114, 251, 32, 116, 114, 71, 114, 200, 202, 116, 116, 245, 114, 71, 208, 157, 77, 71, 114, 200, 114, 32, 204, 30, 71, 116, 71, 116, 200, 71, 116, 116, 71, 116, 157, 116, 200, 116, 116, 116, 30, 116, 157, 116, 71, 157, 75, 30, 200, 73, 159, 202, 73, 159, 36, 36, 243, 157, 116, 202, 157, 202, 73, 116, 116, 114, 157, 116, 114, 249, 114, 114, 116, 116, 116, 73, 200, 200, 114, 159, 159, 159, 206, 30, 32, 116, 159, 202, 116, 116, 116, 157, 116, 114, 34, 32, 116, 116, 159, 71, 157, 161, 120, 157, 245, 118, 159, 32, 73, 114, 73, 114, 73, 114, 157, 30, 114, 114, 157, 116, 114, 116, 116, 114, 116, 30, 116, 159, 116, 32, 116, 157, 71, 247, 159, 116, 159, 114, 116, 116, 30, 202, 116, 73, 30, 116, 157, 116, 114, 157, 251, 157, 116, 114, 71, 245, 34, 200, 124, 116, 206, 157, 159, 202, 116, 36, 114, 73, 71, 71, 114, 157, 200, 251, 32, 202, 116, 114, 249, 243, 73, 200, 157, 200, 116, 114, 157, 116, 116, 116, 167, 116, 243, 73, 202, 204, 116, 114, 157, 245, 243, 243, 243, 157, 243, 157, 204, 116, 116, 114, 73, 116, 116, 118, 114, 245, 245, 114, 71, 30, 114, 116, 200, 243, 114, 157, 159, 116, 116, 32, 71, 116, 116, 71, 116, 116, 116, 116, 71, 159, 75, 251, 116, 71, 32, 114, 157, 251, 116, 116, 245, 30, 120, 114, 36, 114, 73, 202, 116, 116, 116, 71, 116, 159, 120, 36, 116, 116, 118, 30, 245, 116, 73, 114, 73, 200, 159, 116, 116, 202, 114, 114, 157, 251, 32, 116, 114, 71, 116, 71, 157, 71, 32, 116, 114, 206, 204, 116, 116, 116, 116, 116, 116, 116, 167, 116, 116, 167, 116, 157, 71, 245, 116, 157, 116, 114, 116, 118, 116, 116, 200, 114, 157, 167, 116, 157, 243, 116, 71, 116, 116, 30, 32, 116, 116, 118, 157, 71, 114, 116, 116, 116, 116, 116, 71, 71, 116, 200, 243, 202, 116, 116, 114, 116, 116, 114, 116, 71, 116, 116, 116, 243, 114, 200, 71, 200, 157, 200, 157, 200, 71, 75, 36, 32, 116, 114, 116, 116, 71, 116, 116, 
116, 251, 32, 116, 116, 157, 114, 36, 157, 157, 73, 116, 116, 116, 167, 116, 243, 157, 243, 157, 116, 116, 116, 208, 243, 32, 71, 157, 71, 77, 71, 71, 116, 116, 114, 116, 116, 114, 71, 208, 206, 159, 251, 32, 116, 36, 116, 114, 116, 120, 34, 36, 36, 116, 114, 157, 251, 32, 116, 247, 116, 116, 116, 71, 71, 157, 116, 114, 202, 75, 116, 32, 36, 36, 116, 114, 114, 116, 116, 73, 71, 116, 71, 73, 157, 118, 200, 116, 157, 114, 157, 71, 32, 116, 114, 157, 116, 114, 116, 116, 116, 36, 116, 71, 157, 116, 116, 114, 73, 159, 202, 116, 116, 114, 116, 116, 116, 116, 114, 116, 36, 116, 30, 114, 116, 116, 32, 36, 73, 116, 116, 116, 116, 116, 71, 116, 116, 116, 157, 116, 114, 116, 116, 116, 116, 116, 116, 71, 116, 116, 114, 116, 116, 71, 116, 71, 159, 245, 116, 116, 206, 163, 116, 116, 116, 159, 159, 73, 157, 157, 116, 116, 116, 71, 116, 116, 114, 251, 32, 116, 114, 159, 116, 32, 157, 157, 114, 157, 73, 116, 157, 159, 200, 32, 116, 157, 114, 36, 157, 157, 73, 116, 116, 116, 167, 116, 243, 157, 243, 157, 116, 116, 116, 208, 243, 32, 71, 71, 32, 116, 71, 116, 116, 116, 71, 114, 114, 157, 114, 204, 116, 114, 116, 200, 161, 114, 114, 73, 73, 116, 116, 71, 114, 114, 116, 114, 116, 116, 114, 73, 116, 73, 116, 71, 202, 73, 116, 71, 36, 159, 116, 114, 157, 200, 71, 157, 251, 32, 114, 116, 116, 75, 114, 116, 32, 157, 116, 157, 251, 116, 114, 157, 251, 32, 157, 116, 71, 116, 32, 157, 116, 118, 114, 118, 116, 161, 159, 118, 245, 251, 32, 116, 116, 32, 36, 36, 114, 116, 114, 114, 73, 114, 73, 116, 116, 118, 114, 71, 116, 208, 206, 114, 206, 30, 32, 116, 71, 116, 114, 202, 116, 71, 36, 32, 116, 116, 116, 114, 71, 73, 71, 116, 114, 157, 116, 114, 114, 157, 36, 36, 116, 116, 116, 73, 116, 116, 116, 202, 71, 32, 114, 157, 157, 118, 114, 71, 30, 120, 163, 116, 157, 32, 116, 71, 116, 202, 243, 157, 157, 116, 159, 32, 116, 116, 71, 159, 71, 116, 32, 30, 73, 71, 116, 159, 165, 159, 206, 32, 114, 159, 159, 202, 116, 71, 116, 114, 118, 71, 116, 71, 116, 157, 200, 73, 114, 73, 116, 71, 116, 114, 75, 200, 
116, 116, 71, 116, 32, 157, 73, 116, 30, 73, 71, 116, 159, 116, 71, 116, 71, 116, 202, 157, 116, 116, 116, 116, 34, 157, 114, 71, 200, 202, 116, 243, 71, 116, 243, 157, 251, 32, 114, 243, 243, 36, 36, 116, 114, 157, 251, 32, 116, 36, 36, 116, 114, 157, 200, 116, 71, 116, 114, 32, 159, 116, 116, 114, 116, 71, 116, 116, 116, 116, 116, 116, 116, 116, 71, 116, 114, 116, 114, 157, 251, 32, 75, 36, 36, 116, 114, 157, 251, 32, 159, 243, 116, 161, 159, 118, 159, 157, 116, 116, 114, 116, 116, 114, 157, 73, 157, 159, 200, 73, 71, 116, 30, 118, 71, 116, 116, 251, 32, 157, 116, 116, 71, 71, 32, 118, 200, 157, 251, 32, 114, 71, 159, 75, 116, 165, 71, 116, 114, 114, 159, 75, 251, 32, 71, 159, 114, 114, 114, 116, 116, 116, 116, 116, 116, 71, 159, 161, 114, 114, 30, 114, 206, 71, 116, 116, 116, 116, 116, 114, 157, 200, 159, 30, 30, 157, 116, 73, 200, 157, 200, 73, 73, 118, 114, 159, 116, 116, 116, 114, 157, 159, 159, 200, 32, 116, 200, 116, 157, 32, 157, 200, 200, 114, 118, 116, 114, 116, 157, 34, 116, 116, 116, 71, 36, 116, 116, 116, 116, 71, 116, 116, 116, 116, 116, 71, 116, 32, 36, 157, 157, 243, 116, 157, 71, 116, 163, 114, 116, 157, 71, 32, 116, 114, 157, 200, 116, 116, 71, 200, 159, 114, 122, 159, 202, 116, 71, 71, 243, 204, 243, 243, 159, 116, 114, 116, 116, 116, 116, 116, 243, 73, 30, 73, 71, 116, 200, 243, 71, 116, 116, 116, 114, 116, 116, 114, 157, 116, 30, 71, 71, 116, 116, 116, 116, 116, 116, 71, 73, 114, 116, 116, 116, 71, 116, 32, 157, 73, 30, 157, 116, 114, 157, 251, 32, 157, 116, 157, 116, 167, 116, 157, 71, 32, 71, 116, 116, 159, 159, 116, 116, 116, 157, 116, 114, 71, 200, 73, 73, 116, 71, 36, 73, 251, 116, 71, 159, 116, 116, 208, 206, 159, 251, 200, 116, 157, 116, 114, 116, 200, 73, 71, 116, 71, 73, 157, 118, 200, 116, 114, 116, 157, 116, 157, 116, 167, 116, 157, 71, 32, 116, 116, 159, 159, 116, 116, 116, 71, 73, 32, 116, 157, 30, 71, 116, 200, 114, 71, 114, 157, 71, 116, 114, 116, 114, 116, 200, 116, 71, 116, 116, 116, 116, 116, 116, 71, 116, 116, 116, 116, 243, 
114, 116, 116, 71, 200, 206, 36, 159, 157, 71, 157, 71, 32, 116, 71, 116, 116, 202, 73, 71, 116, 157, 157, 251, 32, 116, 116, 116, 116, 116, 71, 157, 116, 116, 71, 116, 71, 116, 116, 251, 116, 32, 202, 116, 116, 116, 71, 116, 116, 116, 73, 206, 116, 73, 114, 36, 114, 116, 114, 32, 245, 114, 73, 114, 159, 30, 32, 157, 157, 157, 116, 71, 71, 206, 245, 71, 116, 32, 116, 71, 116, 159, 114, 204, 116, 114, 204, 116, 114, 116, 116, 116, 77, 71, 34, 114, 32, 116, 157, 71, 116, 116, 200, 71, 114, 200, 73, 202, 73, 114, 116, 116, 71, 116, 114, 114, 157, 116, 116, 116, 116, 116, 116, 71, 116, 32, 157, 200, 30, 75, 200, 116, 200, 116, 71, 116, 116, 118, 114, 71, 200, 114, 71, 36, 32, 116, 116, 116, 71, 116, 114, 116, 73, 116, 116, 116, 30, 116, 159, 71, 157, 116, 247, 116, 36, 157, 157, 73, 116, 116, 116, 167, 116, 116, 32, 36, 36, 36, 243, 157, 32, 157, 200, 200, 114, 159, 30, 116, 251, 32, 159, 159, 200, 32, 36, 114, 32, 71, 200, 71, 114, 118, 73, 159, 202, 116, 159, 73, 36, 30, 114, 157, 251, 32, 243, 202, 200, 114, 157, 251, 116, 71, 116, 116, 71, 71, 116, 116, 116, 36, 114, 71, 32, 116, 114, 157, 116, 114, 116, 116, 116, 36, 116, 71, 34, 73, 73, 124, 30, 116, 114, 116, 71, 116, 159, 71, 200, 71, 75, 36, 73, 73, 116, 116, 71, 116, 120, 116, 200, 159, 202, 71, 116, 116, 77, 200, 30, 163, 157, 161, 114, 114, 73, 73, 71, 116, 30, 114, 116, 116, 116, 116, 71, 200, 32, 243, 202, 200, 200, 71, 116, 116, 71, 200, 157, 114, 116, 73, 251, 32, 114, 157, 118, 32, 73, 116, 114, 73, 116, 116, 114, 206, 159, 30, 116, 118, 116, 116, 114, 157, 73, 71, 73, 202, 120, 159, 159, 206, 30, 32, 116, 159, 159, 159, 116, 34, 116, 116, 159, 71, 157, 116, 247, 116, 36, 32, 116, 36, 116, 71, 157, 116, 114, 116, 73, 243, 114, 116, 206, 71, 114, 159, 116, 116, 116, 116, 116, 71, 71, 251, 32, 245, 114, 73, 71, 116, 120, 34, 116, 202, 243, 71, 71, 116, 32, 116, 116, 116, 116, 245, 30, 71, 73, 71, 116, 71, 73, 116, 157, 71, 32, 116, 114, 157, 116, 167, 116, 32, 116, 116, 73, 116, 71, 36, 116, 32, 243, 30, 
116, 200, 243, 114, 157, 159, 30, 157, 116, 118, 114, 118, 200, 206, 30, 71, 157, 32, 118, 71, 116, 71, 36, 116, 114, 157, 30, 114, 114, 116, 200, 161, 114, 114, 73, 32, 116, 116, 116, 73, 116, 116, 71, 30, 116, 116, 116, 116, 116, 116, 157, 116, 200, 116, 116, 116, 116, 71, 71, 206, 116, 73, 114, 116, 116, 8, 228, 191, 252, 252, 218, 235, 218, 184, 167, 133, 133, 201, 167, 235, 167, 201, 184, 167, 48, 218, 201, 236, 252, 252, 45, 228, 8, 45, 191, 227, 188, 7, 184, 235, 218, 167, 235, 235, 235, 14, 15, 167, 116, 201, 184, 184, 235, 99, 235, 252, 225, 252, 252, 252, 225, 252, 133, 184, 201, 184, 235, 82, 201, 167, 235, 225, 252, 252, 48, 9, 0, 11, 252, 252, 252, 252, 7, 184, 201, 218, 235, 201, 4, 252, 167, 201, 235, 14, 235, 201, 184, 218, 201, 201, 219, 201, 99, 116, 31, 201, 99, 133, 235, 1, 2, 11, 9, 9, 12, 116, 218, 235, 14, 3, 5, 218, 184, 236, 5, 184, 184, 184, 1, 252, 252, 252, 252, 252, 133, 167, 235, 235, 184, 235, 184, 167, 184, 150, 48, 218, 133, 219, 31, 201, 218, 235, 236, 235, 218, 167, 167, 133, 133, 201, 167, 235, 167, 201, 184, 167, 48, 218, 201, 252, 252, 252, 252, 252, 252, 252, 225, 225, 252, 15, 14, 5, 150, 235, 99, 13, 4, 133, 31, 184, 218, 10, 201, 218, 184, 5, 252, 252, 252, 252, 184, 167, 0, 235, 252, 252, 252, 252, 252, 252, 8, 8, 191, 8, 118, 228, 45, 81, 45, 45, 8, 228, 118, 8, 45, 191, 191, 8, 45, 118, 252, 252, 252, 252, 45, 228, 118, 8, 228, 81, 154, 44, 191, 45, 198, 252, 4, 3, 12, 65, 218, 218, 252, 252, 252, 252, 235, 184, 184, 235, 201, 252, 252, 252, 225, 225, 252, 15, 235, 82, 201, 167, 235, 225, 252, 252, 48, 9, 0, 11, 252, 252, 252, 252, 7, 184, 201, 218, 235, 201, 4, 252, 167, 201, 235, 14, 235, 201, 184, 218, 201, 201, 219, 201, 99, 201, 15, 3, 14, 11, 0, 0, 15, 235, 218, 12, 3, 7, 201, 167, 201, 218, 7, 218, 235, 31, 235, 235, 184, 235, 167, 201, 2, 9, 201, 99, 184, 184, 150, 99, 48, 9, 0, 184, 167, 235, 184, 0, 31, 184, 218, 0, 218, 13, 184, 1, 7, 4, 6, 2, 14, 12, 13, 235, 201, 235, 8, 65, 184, 235, 12, 14, 12, 13, 3, 8, 
6, 14, 2, 11, 12, 8, 0, 184, 253, 201, 116, 235, 13, 12, 15, 10, 3, 201, 99, 218, 201, 14, 184, 201, 167, 201, 11, 184, 31, 133, 7, 236, 253, 31, 184, 201, 167, 133, 235, 13, 167, 201, 184, 201, 235, 6, 4, 12, 14, 8, 1, 1, 4, 0, 150, 218, 218, 1, 184, 235, 201, 13, 201, 31, 184, 133, 133, 201, 15, 14, 5, 150, 235, 99, 13, 4, 133, 31, 184, 5, 7, 9, 235, 9, 15, 235, 218, 2, 1, 0, 8, 1, 5, 11, 9, 235, 133, 235, 10, 9, 11, 14, 31, 184, 65, 13, 6, 167, 218, 235, 235, 201, 235, 219, 116, 235, 201, 219, 201, 99, 8, 3, 7, 0, 7, 11, 15, 9, 4, 13, 15, 6, 7, 15, 15, 5, 4, 5, 0, 14, 7, 1, 6, 12, 15, 7, 9, 11, 4, 13, 3, 11, 0, 4, 15, 3, 5, 0, 11, 12, 5, 12, 3, 12, 14, 3, 31, 184, 1, 10, 133, 201, 167, 12, 8, 10, 12, 0, 1, 150, 219, 235, 82, 12, 10, 235, 184, 201, 9, 2, 14, 15, 14, 10, 7, 10, 4, 15, 118, 191, 228, 154, 8, 117, 228, 45, 118, 118, 118, 228, 118, 45, 228, 228, 45, 228, 45, 81, 44, 81, 154, 44, 45, 191, 8, 45, 118, 228, 228, 118, 8, 191, 191, 45, 153, 118, 45, 79, 81, 45, 228, 45, 228, 45, 80, 81, 118, 7, 45, 227, 81, 153, 154, 154, 118, 78, 116, 228, 117, 118, 228, 227, 153, 154, 118, 45, 228, 118, 228, 7, 44, 227, 118, 228, 118, 228, 8, 228, 154, 118, 154, 228, 7, 44, 45, 81, 7, 228, 81, 44, 227, 8, 189, 81, 228, 45, 45, 154, 45, 154, 44, 80, 228, 8, 45, 154, 228, 45, 118, 8, 12, 3, 228, 118, 191, 8, 4, 10, 15, 10, 235, 235, 184, 6, 1, 2, 11, 9, 9, 12, 116, 218, 235, 14, 3, 5, 218, 184, 236, 5, 184, 14, 191, 45, 81, 118, 228, 118, 191, 8, 44, 0, 4, 7, 13, 8, 6, 14, 7, 1, 1, 11, 13, 9, 184, 219, 150, 167, 12, 218, 201, 218, 184, 8, 45, 191, 8, 45, 227, 7, 45, 118, 5, 45, 81, 81, 43, 45, 191, 191, 228, 118, 8, 8, 191, 45, 44, 45, 228, 45, 118, 228, 45, 228, 228, 191, 226, 8, 154, 45, 153, 8, 81, 191, 79, 117, 44, 81, 190, 12, 150, 167, 7, 184, 235, 218, 167, 235, 235, 1, 7, 8, 3, 12, 218, 150, 99, 8, 252, 252, 235, 201, 218, 13, 235, 184, 8, 6, 13, 0, 7, 167, 201, 235, 14, 235, 201, 184, 218, 201, 235, 218, 82, 10, 235, 235, 184, 3, 8, 2, 11, 14, 201, 201, 14, 2, 
14, 235, 201, 184, 218, 201, 235, 218, 82, 10, 235, 235, 184, 3, 8, 2, 11, 14, 201, 201, 14, 2, 14, 235, 201, 184, 218, 201, 235, 218, 82, 10, 235, 11, 14, 12, 118, 45, 79, 81, 45, 228, 45, 228, 45, 80, 81, 118, 7, 45, 227, 81, 153, 154, 154, 118, 78, 116, 228, 117, 118, 228, 227, 153, 154, 118, 45, 228, 118, 228, 7, 44, 227, 118, 228, 118, 228, 8, 228, 154, 118, 154, 228, 7, 44, 45, 81, 7, 228, 81, 44, 227, 8, 189, 81, 228, 45, 45, 154, 45, 154, 44, 80, 228, 8, 45, 218, 235, 167, 167, 235, 31, 65, 235, 184, 133, 150, 2, 1, 1, 7, 201, 133, 184, 7, 1, 15, 99, 235, 201, 218, 133, 218, 99, 8, 218, 235, 133, 235, 4, 4, 184, 150, 185, 2, 4, 218, 0, 218, 13, 184, 201, 5, 116, 167, 218, 10, 2, 235, 218, 218, 235, 31, 184, 235, 9, 15, 235, 218, 2, 1, 0, 8, 1, 5, 11, 9, 235, 133, 235, 10, 9, 11, 14, 31, 184, 65, 13, 6, 167, 218, 235, 235, 201, 235, 219, 116, 235, 201, 219, 201, 12, 11, 10, 252, 252, 225, 225, 228, 7, 44, 45, 252, 252, 0, 9, 2, 6, 5, 8, 3, 15, 11, 13, 218, 82, 235, 235, 167, 6, 1, 1, 8, 133, 235, 116, 201, 218, 99, 218, 65, 99, 235, 167, 201, 218, 252, 252, 252, 15, 14, 14, 235, 150, 167, 8, 252, 225, 252, 184, 201, 218, 235, 201, 235, 235, 235, 9, 184, 219, 150, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 235, 9, 1, 201, 235, 201, 235, 11, 252, 235, 184, 7, 5, 118, 45, 228, 118, 228, 45, 43, 228, 228, 152, 191, 13, 8, 118, 228, 201, 218, 99, 218, 65, 99, 235, 167, 201, 218, 252, 252, 252, 15, 14, 14, 235, 150, 167, 8, 252, 225, 252, 184, 201, 218, 235, 201, 235, 235, 235, 9, 184, 219, 150, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 235, 9, 1, 201, 235, 201, 235, 11, 252, 235, 184, 184, 235, 235, 218, 235, 225, 252, 252, 198, 116, 2, 4, 11, 5, 2, 252, 236, 202, 218, 82, 235, 235, 167, 116, 225, 252, 225, 252, 252, 117, 8, 228, 191, 252, 252, 218, 235, 252, 252, 225, 252, 252, 252, 252, 252, 167, 201, 218, 252, 235, 218, 235, 167, 167, 235, 31, 65, 235, 184, 133, 150, 2, 1, 1, 7, 201, 133, 184, 7, 1, 15, 99, 235, 154, 45, 154, 44, 80, 228, 8, 45, 
154, 228, 45, 118, 8, 12, 3, 228, 118, 191, 8, 4, 10, 15, 10, 235, 235, 184, 6, 1, 2, 11, 9, 9, 7, 12, 252, 252, 225, 225, 228, 7, 44, 45, 252, 252, 0, 9, 2, 6, 13, 14, 8, 117, 8, 227, 228, 81, 154, 118, 189, 118, 6})
fuzzDicts = append(fuzzDicts, []byte{55, 164, 48, 236, 208, 91, 175, 119, 55, 16, 152, 186, 1, 148, 12, 232, 134, 85, 244, 107, 254, 49, 240, 6, 150, 163, 196, 125, 194, 152, 37, 90, 253, 105, 18, 91, 161, 152, 1, 8, 2, 30, 83, 40, 209, 34, 102, 168, 232, 104, 99, 128, 81, 32, 95, 82, 82, 82, 230, 39, 121, 17, 41, 3, 131, 1, 40, 16, 16, 138, 199, 132, 179, 26, 93, 12, 115, 108, 4, 192, 193, 167, 169, 21, 155, 209, 69, 98, 148, 196, 40, 10, 33, 99, 140, 49, 6, 16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 48, 0, 0, 180, 42, 72, 4, 196, 67, 161, 68, 30, 11, 37, 65, 14, 195, 72, 142, 99, 78, 57, 196, 142, 0, 0, 0, 1, 0, 0, 0, 4, 0, 0, 0, 8, 0, 0, 0, 3, 3, 1, 7, 15, 5, 1, 14, 1, 1, 7, 8, 4, 1, 1, 14, 5, 8, 7, 0, 1, 14, 1, 1, 7, 13, 4, 1, 1, 14, 5, 8, 6, 14, 5, 13, 13, 14, 5, 12, 10, 1, 0, 2, 11, 5, 1, 6, 1, 12, 9, 6, 15, 6, 12, 1, 0, 3, 10, 14, 12, 1, 14, 12, 1, 1, 1, 7, 13, 1, 1, 9, 11, 6, 14, 14, 14, 4, 2, 11, 3, 0, 8, 4, 13, 1, 4, 10, 10, 12, 11, 6, 14, 14, 1, 1, 15, 4, 7, 1, 6, 15, 3, 3, 2, 1, 9, 1, 1, 1, 1, 9, 11, 11, 15, 6, 1, 7, 15, 5, 1, 14, 1, 14, 12, 0, 15, 1, 14, 13, 3, 3, 1, 0, 1, 14, 1, 15, 7, 2, 1, 7, 10, 1, 3, 11, 15, 5, 11, 7, 15, 13, 7, 1, 4, 7, 1, 14, 12, 1, 14, 12, 0, 1, 3, 6, 0, 2, 11, 1, 1, 1, 1, 14, 1, 1, 1, 1, 4, 11, 10, 13, 4, 8, 7, 14, 1, 11, 13, 9, 1, 3, 13, 13, 7, 14, 4, 1, 0, 1, 1, 10, 3, 7, 1, 4, 14, 6, 14, 11, 3, 2, 1, 15, 11, 2, 1, 1, 1, 1, 1, 3, 2, 12, 1, 9, 3, 12, 4, 8, 2, 2, 0, 5, 1, 12, 4, 5, 15, 12, 6, 15, 12, 8, 2, 13, 4, 12, 1, 7, 13, 10, 8, 13, 1, 1, 14, 1, 1, 7, 8, 4, 1, 9, 1, 5, 9, 9, 1, 10, 15, 15, 13, 6, 13, 3, 0, 1, 12, 15, 8, 9, 8, 14, 12, 1, 7, 2, 3, 2, 1, 15, 11, 9, 8, 0, 7, 13, 3, 14, 2, 12, 1, 9, 3, 12, 11, 11, 0, 14, 15, 9, 11, 12, 0, 2, 1, 4, 1, 1, 1, 8, 2, 10, 0, 1, 7, 15, 5, 11, 1, 1, 7, 13, 4, 5, 1, 1, 7, 8, 14, 12, 8, 9, 9, 9, 13, 4, 0, 12, 2, 7, 9, 5, 13, 3, 13, 10, 13, 14, 6, 11, 0, 0, 11, 6, 6, 4, 10, 1, 4, 2, 14, 8, 14, 14, 15, 5, 10, 1, 9, 8, 9, 8, 0, 1, 1, 7, 14, 1, 9, 10, 12, 15, 1, 0, 1, 7, 9, 7, 13, 2, 14, 0, 10, 6, 2, 
8, 2, 10, 9, 1, 8, 5, 15, 10, 10, 1, 4, 2, 14, 1, 1, 1, 7, 14, 1, 8, 3, 12, 6, 4, 1, 1, 10, 1, 12, 3, 12, 12, 1, 9, 3, 12, 3, 10, 7, 5, 7, 10, 6, 9, 14, 1, 7, 7, 1, 8, 8, 7, 0, 1, 7, 6, 14, 11, 5, 7, 10, 5, 11, 7, 12, 8, 6, 9, 14, 7, 1, 13, 3, 14, 7, 15, 3, 5, 3, 1, 0, 1, 15, 5, 12, 10, 10, 9, 11, 6, 14, 14, 1, 3, 2, 2, 7, 5, 14, 1, 8, 5, 15, 10, 10, 1, 4, 2, 14, 1, 12, 1, 14, 11, 1, 1, 7, 4, 13, 12, 1, 7, 10, 1, 1, 1, 1, 1, 13, 12, 15, 0, 1, 7, 14, 13, 199, 66, 227, 112, 123, 5, 197, 59, 33, 174, 191, 13, 97, 72, 254, 162, 125, 107, 206, 221, 129, 194, 109, 242, 217, 217, 127, 113, 172, 217, 172, 217, 217, 172, 172, 217, 172, 217, 127, 127, 172, 127, 217, 127, 225, 85, 147, 154, 221, 186, 127, 217, 172, 172, 172, 217, 172, 217, 248, 248, 127, 217, 172, 217, 37, 172, 172, 82, 217, 217, 217, 217, 217, 68, 217, 37, 172, 172, 127, 217, 172, 217, 217, 217, 172, 82, 217, 217, 172, 172, 217, 172, 172, 203, 82, 172, 172, 248, 127, 172, 217, 217, 127, 82, 172, 217, 4, 161, 100, 235, 69, 150, 72, 105, 221, 146, 175, 233, 177, 91, 13, 205, 168, 88, 203, 61, 96, 4, 106, 194, 20, 222, 136, 104, 120, 99, 220, 217, 236, 20, 100, 179, 29, 212, 10, 242, 198, 91, 143, 69, 149, 50, 73, 118, 182, 226, 237, 241, 151, 142, 41, 193, 59, 175, 14, 68, 235, 69, 100, 83, 217, 217, 172, 172, 217, 48, 215, 61, 187, 54, 151, 125, 185, 100, 178, 163, 11, 44, 18, 44, 45, 43, 154, 217, 217, 172, 127, 217, 127, 217, 37, 23, 172, 248, 44, 121, 192, 185, 79, 66, 115, 21, 69, 111, 198, 94, 175, 67, 53, 151, 93, 100, 110, 176, 84, 133, 255, 239, 230, 60, 123, 126, 182, 51, 160, 164, 11, 176, 216, 36, 162, 172, 87, 75, 20, 25, 17, 13, 249, 188, 134, 174, 39, 69, 174, 126, 188, 111, 238, 225, 103, 122, 88, 54, 231, 236, 20, 100, 179, 29, 212, 10, 242, 198, 91, 143, 69, 149, 50, 73, 118, 182, 226, 237, 241, 151, 142, 41, 193, 59, 175, 14, 68, 235, 69, 100, 83, 178, 13, 173, 154, 53, 44, 61, 12, 126, 40, 170, 206, 172, 127, 217, 217, 217, 127, 82, 217, 217, 127, 217, 203, 217, 217, 217, 172, 172, 203, 
217, 170, 249, 156, 208, 63, 247, 110, 4, 50, 132, 29, 146, 187, 167, 222, 65, 161, 247, 43, 91, 93, 78, 242, 157, 146, 139, 147, 128, 33, 106, 159, 212, 12, 217, 217, 217, 217, 68, 217, 37, 172, 172, 127, 217, 172, 217, 217, 217, 172, 82, 217, 217, 172, 172, 217, 172, 172, 203, 82, 172, 172, 248, 127, 172, 217, 217, 127, 82, 172, 217, 127, 172, 82, 217, 217, 172, 172, 217, 172, 172, 172, 217, 172, 217, 82, 217, 172, 213, 169, 22, 127, 193, 118, 20, 175, 216, 28, 84, 67, 157, 255, 194, 232, 190, 36, 165, 18, 46, 153, 105, 33, 8, 251, 138, 62, 136, 78, 47, 59, 186, 78, 159, 226, 30, 19, 127, 248, 217, 82, 172, 172, 172, 203, 217, 217, 172, 37, 127, 37, 172, 172, 172, 127, 82, 248, 82, 82, 248, 217, 172, 172, 172, 127, 217, 217, 172, 45, 40, 49, 172, 226, 173, 180, 28, 162, 223, 96, 162, 46, 35, 18, 59, 61, 126, 143, 22, 26, 23, 207, 185, 185, 224, 170, 179, 253, 166, 125, 139, 157, 147, 60, 137, 38, 163, 239, 203, 27, 9, 98, 8, 44, 194, 137, 79, 235, 226, 11, 172, 217, 172, 172, 82, 82, 217, 217, 217, 172, 172, 217, 217, 68, 203, 217, 113, 243, 240, 20, 157, 57, 113, 2, 237, 23, 28, 205, 46, 96, 168, 40, 149, 172, 228, 72, 203, 80, 18, 247, 63, 149, 248, 69, 182, 223, 137, 6, 11, 55, 37, 66, 123, 95, 203, 47, 194, 76, 12, 102, 255, 3, 3, 94, 170, 242, 78, 151, 63, 41, 138, 150, 215, 201, 202, 166, 214, 182, 185, 78, 30, 146, 3, 105, 55, 8, 102, 155, 61, 122, 103, 175, 142, 156, 235, 35, 43, 251, 54, 213, 150, 209, 157, 141, 180, 248, 181, 148, 7, 105, 103, 19, 118, 185, 88, 248, 218, 166, 153, 3, 195, 183, 161, 216, 199, 130, 186, 204, 234, 160, 255, 5, 104, 183, 67, 71, 172, 217, 217, 172, 217, 127, 127, 172, 210, 86, 61, 236, 82, 255, 116, 230, 26, 19, 80, 6, 37, 217, 37, 172, 217, 217, 217, 127, 113, 172, 217, 172, 217, 217, 172, 172, 217, 217, 82, 217, 217, 127, 217, 172, 172, 172, 217, 172, 217, 248, 248, 127, 217, 172, 217, 37, 172, 172, 82, 217, 217, 172, 172, 217, 172, 172, 203, 217, 217, 172, 172, 217, 217, 217, 183, 169, 59, 180, 238, 208, 211, 54, 241, 
244, 17, 161, 153, 72, 68, 68, 31, 194, 113, 32, 165, 178, 76, 67, 153, 83, 218, 152, 176, 117, 232, 180, 228, 25, 17, 13, 249, 188, 134, 174, 39, 69, 239, 230, 60, 123, 126, 182, 51, 160, 189, 165, 100, 203, 218, 71, 212, 127, 28, 68, 77, 127, 74, 26, 199, 42, 95, 159, 28, 122, 37, 190, 17, 222, 228, 124, 38, 246, 67, 126, 27, 150, 180, 16, 245, 57, 51, 204, 231, 96, 153, 238, 225, 103, 122, 88, 54, 231, 236, 20, 100, 179, 178, 237, 141, 22, 246, 99, 213, 200, 112, 108, 133, 0, 215, 98, 209, 66, 191, 127, 223, 86, 97, 53, 3, 125, 138, 231, 3, 51, 92, 120, 59, 54, 238, 65, 106, 111, 148, 202, 141, 233, 24, 215, 221, 199, 66, 55, 95, 209, 243, 35, 21, 142, 247, 43, 91, 93, 78, 242, 157, 146, 139, 147, 128, 33, 106, 159, 212, 12, 217, 217, 217, 217, 68, 217, 6, 29, 215, 208, 208, 1, 17, 40, 28, 255, 170, 141, 25, 82, 106, 97, 106, 21, 180, 142, 229, 253, 115, 10, 107, 27, 20, 214, 181, 168, 13, 117, 229, 227, 211, 62, 88, 253, 114, 99, 199, 74, 175, 240, 1, 107, 183, 79, 17, 187, 57, 61, 231, 45, 51, 195, 102, 138, 99, 98, 186, 116, 158, 70, 9, 216, 168, 138, 175, 99, 252, 112, 19, 56, 57, 44, 36, 147, 59, 168, 170, 162, 92, 58, 78, 243, 240, 209, 242, 152, 246, 49, 145, 234, 82, 246, 246, 246, 246, 246, 246, 246, 246, 109, 213, 41, 180, 12, 202, 48, 7, 29, 51, 52, 13, 125, 191, 68, 65, 159, 96, 109, 109, 109, 96, 109, 96, 109, 45, 57, 46, 112, 109, 109, 109, 78, 28, 94, 170, 124, 183, 27, 109, 109, 96, 109, 109, 109, 109, 109, 96, 109, 213, 172, 188, 121, 18, 51, 121, 63, 66, 187, 95, 109, 109, 96, 96, 96, 109, 109, 96, 96, 109, 96, 109, 109, 109, 245, 238, 130, 109, 83, 109, 109, 109, 109, 109, 96, 96, 140, 208, 184, 95, 96, 151, 195, 120, 127, 109, 38, 233, 107, 100, 109, 5, 239, 77, 31, 43, 62, 108, 56, 7, 85, 55, 131, 176, 253, 109, 109, 109, 109, 109, 70, 200, 47, 226, 174, 94, 252, 238, 138, 30, 248, 62, 120, 243, 150, 67, 99, 121, 35, 159, 172, 89, 28, 187, 109, 109, 96, 109, 185, 130, 14, 224, 96, 109, 109, 109, 109, 109, 109, 109, 109, 109, 96, 109, 109, 
109, 96, 109, 109, 109, 83, 96, 109, 109, 109, 109, 109, 109, 109, 109, 109, 83, 109, 109, 109, 109, 109, 109, 109, 109, 109, 96, 83, 109, 109, 109, 109, 109, 109, 14, 194, 7, 119, 183, 63, 226, 222, 179, 19, 19, 19, 19, 34, 192, 81, 207, 19, 19, 19, 19, 19, 19, 130, 168, 91, 221, 159, 130, 139, 28, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 198, 44, 218, 76, 232, 180, 23, 45, 150, 1, 137, 237, 214, 117, 1, 249, 89, 196, 30, 109, 109, 109, 109, 96, 96, 109, 109, 96, 109, 109, 109, 109, 109, 96, 109, 109, 96, 146, 36, 144, 144, 165, 128, 43, 17, 9, 129, 40, 180, 245, 93, 85, 213, 239, 229, 34, 129, 219, 153, 69, 69, 34, 34, 34, 83, 19, 109, 182, 119, 223, 2, 168, 225, 47, 211, 140, 171, 224, 196, 51, 96, 96, 109, 96, 96, 96, 125, 191, 68, 65, 159, 247, 26, 238, 109, 109, 109, 96, 109, 83, 109, 109, 96, 109, 109, 109, 109, 109, 109, 109, 109, 109, 96, 96, 83, 109, 109, 109, 109, 70, 109, 96, 109, 96, 109, 96, 96, 109, 96, 96, 109, 109, 96, 96, 109, 109, 109, 247, 94, 14, 146, 36, 109, 96, 109, 109, 109, 109, 109, 109, 109, 109, 96, 109, 109, 109, 109, 109, 109, 109, 109, 83, 109, 109, 109, 96, 109, 83, 109, 109, 109, 109, 109, 83, 109, 109, 109, 96, 109, 109, 96, 96, 109, 109, 109, 44, 109, 109, 96, 109, 109, 109, 109, 109, 109, 109, 109, 109, 96, 109, 109, 109, 96, 109, 96, 96, 109, 109, 109, 109, 109, 96, 109, 109, 109, 109, 109, 109, 83, 70, 109, 109, 109, 109, 109, 109, 83, 109, 109, 109, 83, 109, 109, 109, 109, 109, 109, 109, 109, 109, 96, 109, 109, 96, 109, 109, 146, 7, 73, 70, 54, 110, 125, 63, 242, 234, 245, 238, 130, 192, 199, 126, 151, 80, 233, 208, 13, 161, 186, 57, 96, 109, 109, 109, 96, 199, 126, 151, 80, 233, 208, 13, 161, 186, 60, 91, 40, 96, 109, 109, 109, 109, 109, 109, 109, 96, 109, 109, 109, 109, 96, 109, 83, 96, 109, 96, 83, 109, 109, 109, 184, 212, 192, 113, 62, 108, 56, 7, 133, 131, 178, 135, 29, 80, 77, 161, 13, 149, 76, 137, 192, 111, 25, 244, 41, 184, 212, 126, 130, 250, 109, 192, 113, 62, 108, 56, 7, 109, 96, 109, 109, 109, 80, 77, 161, 13, 
149, 76, 137, 192, 111, 25, 244, 41, 70, 109, 109, 109, 109, 109, 96, 109, 109, 109, 109, 109, 109, 83, 70, 109, 109, 109, 109, 109, 109, 83, 109, 109, 109, 83, 109, 109, 109, 96, 109, 72, 222, 151, 130, 38, 233, 107, 100, 192, 137, 86, 152, 254, 186, 141, 30, 232, 38, 101, 219, 64, 226, 187, 115, 212, 170, 89, 92, 175, 5, 70, 151, 27, 105, 226, 52, 165, 114, 133, 101, 186, 192, 105, 175, 241, 0, 152, 151, 221, 40, 141, 236, 45, 150, 1, 137, 168, 24, 176, 170, 141, 120, 57, 214, 97, 50, 63, 189, 113, 218, 233, 2, 146, 185, 169, 126, 60, 19, 198, 44, 218, 76, 232, 180, 23, 45, 150, 1, 137, 237, 214, 117, 96, 5, 239, 77, 31, 43, 62, 108, 56, 7, 85, 55, 131, 176, 253, 109, 96, 151, 195, 120, 127, 109, 38, 233, 107, 100, 109, 5, 239, 77, 31, 43, 62, 108, 56, 7, 85, 55, 131, 176, 253, 109, 109, 109, 109, 109, 70, 200, 47, 226, 174, 94, 252, 238, 138, 30, 248, 62, 120, 243, 150, 67, 99, 121, 35, 159, 172, 89, 28, 187, 109, 109, 109, 109, 109, 109, 109, 196, 0, 111, 156, 241, 228, 53, 237, 99, 31, 67, 83, 102, 239, 163, 108, 7, 68, 87, 109, 109, 109, 109, 109, 109, 109, 109, 56, 7, 109, 96, 109, 109, 109, 80, 77, 161, 13, 149, 76, 137, 109, 109, 109, 96, 109, 96, 109, 109, 109, 96, 109, 96, 96, 96, 125, 191, 68, 65, 159, 247, 26, 109, 109, 109, 83, 96, 109, 25, 244, 41, 70, 109, 109, 109, 109, 109, 96, 109, 109, 109, 109, 211, 12, 149, 109, 182, 119, 223, 2, 168, 225, 47, 211, 140, 171, 224, 196, 51, 54, 32, 83, 109, 109, 55, 223, 93, 142, 43, 51, 204, 240, 202, 56, 32, 245, 31, 120, 138, 14, 74, 126, 176, 176, 237, 167, 68, 55, 39, 208, 109, 96, 109, 109, 109, 83, 245, 73, 204, 36, 57, 219, 109, 96, 154, 225, 255, 83, 146, 84, 201, 65, 213, 41, 180, 12, 70, 109, 109, 109, 109, 109, 96, 109, 109, 109, 109, 109, 109, 83, 70, 109, 109, 109, 109, 109, 109, 109, 109, 100, 210, 174, 49, 32, 55, 223, 93, 142, 43, 51, 204, 240, 202, 56, 32, 245, 31, 120, 138, 14, 74, 126, 176, 176, 237, 167, 68, 55, 39, 208, 27, 128, 22, 111, 19, 19, 19, 19, 96, 109, 82, 82, 82, 3, 3, 13, 82, 
225, 116, 18, 67, 7, 181, 154, 203, 165, 214, 225, 67, 18, 116, 18, 165, 67, 225, 225, 225, 225, 116, 67, 116, 18, 18, 225, 7, 214, 67, 7, 7, 116, 165, 214, 18, 214, 154, 18, 116, 67, 7, 7, 116, 116, 225, 241, 116, 67, 225, 18, 23, 225, 165, 143, 67, 18, 214, 154, 116, 225, 225, 225, 94, 214, 225, 165, 225, 56, 165, 225, 116, 7, 116, 105, 18, 105, 154, 10, 7, 6, 50, 8, 13, 15, 7, 11, 8, 0, 12, 10, 0, 39, 20, 1, 0, 14, 7, 3, 2, 10, 3, 236, 11, 13, 8, 15, 176, 227, 249, 164, 1, 0, 5, 7, 12, 168, 4, 5, 9, 14, 5, 80, 246, 56, 196, 82, 2, 4, 4, 11, 11, 106, 170, 11, 82, 146, 6, 13, 6, 82, 82, 82, 82, 82, 82, 12, 12, 0, 82, 82, 82, 82, 82, 82, 3, 9, 11, 15, 82, 82, 4, 13, 3, 12, 1, 2, 4, 4, 10, 7, 12, 12, 0, 82, 82, 82, 13, 13, 18, 7, 18, 94, 225, 18, 165, 214, 105, 225, 214, 4, 76, 13, 15, 9, 141, 240, 251, 176, 4, 77, 5, 4, 14, 15, 10, 18, 67, 18, 214, 241, 116, 18, 116, 225, 18, 225, 56, 165, 7, 45, 67, 165, 143, 165, 225, 225, 18, 225, 116, 225, 225, 18, 225, 225, 214, 18, 225, 67, 225, 225, 67, 67, 225, 116, 18, 18, 225, 116, 225, 56, 116, 165, 94, 121, 67, 186, 175, 6, 0, 9, 2, 14, 11, 85, 28, 187, 124, 214, 165, 5, 7, 82, 82, 225, 214, 165, 225, 7, 154, 67, 225, 116, 67, 225, 214, 225, 18, 67, 225, 45, 225, 214, 67, 82, 82, 4, 82, 82, 82, 233, 82, 82, 8, 154, 154, 12, 12, 8, 13, 10, 82, 82, 82, 82, 82, 82, 82, 82, 82, 8, 171, 5, 82, 82, 82, 82, 190, 7, 15, 82, 82, 82, 4, 10, 5, 82, 82, 82, 82, 82, 82, 82, 82, 82, 82, 82, 13, 0, 11, 82, 82, 82, 82, 82, 82, 82, 233, 82, 82, 82, 82, 82, 82, 13, 14, 3, 11, 82, 82, 82, 82, 82, 82, 67, 225, 56, 214, 18, 56, 116, 225, 105, 116, 18, 225, 67, 23, 18, 165, 105, 67, 214, 45, 225, 67, 154, 214, 116, 225, 67, 56, 225, 116, 165, 225, 165, 67, 225, 225, 154, 67, 18, 67, 214, 225, 56, 56, 225, 225, 225, 18, 45, 116, 18, 203, 225, 165, 165, 203, 225, 225, 116, 56, 192, 18, 18, 18, 225, 18, 154, 154, 12, 12, 8, 13, 10, 82, 82, 82, 82, 82, 82, 8, 8, 3, 2, 5, 12, 5, 9, 12, 34, 184, 13, 11, 9, 4, 1, 13, 14, 3, 11, 11, 11, 3, 4, 4, 3, 
3, 4, 236, 11, 13, 5, 13, 8, 8, 11, 11, 87, 127, 106, 9, 9, 10, 3, 135, 155, 4, 22, 11, 13, 5, 14, 4, 4, 11, 1, 9, 10, 3, 4, 4, 3, 3, 4, 5, 9, 10, 1, 13, 13, 10, 11, 13, 13, 7, 8, 12, 0, 8, 8, 3, 4, 2, 64, 1, 2, 4, 4, 55, 10, 3, 18, 67, 165, 165, 165, 165, 6, 12, 5, 1, 0, 5, 7, 184, 150, 15, 124, 10, 4, 4, 1, 10, 0, 4, 14, 158, 226, 1, 165, 67, 13, 8, 3, 13, 236, 11, 13, 203, 213, 10, 12, 4, 4, 8, 7, 15, 5, 5, 4, 7, 67, 225, 225, 7, 67, 116, 214, 56, 82, 82, 82, 82, 6, 3, 6, 233, 82, 82, 82, 233, 82, 82, 82, 1, 3, 6, 8, 154, 82, 82, 82, 82, 225, 18, 154, 116, 67, 165, 7, 214, 18, 67, 225, 67, 67, 82, 82, 13, 11, 9, 4, 1, 82, 82, 82, 82, 82, 82, 82, 82, 82, 82, 82, 82, 82, 82, 82, 82, 82, 82, 82, 82, 82, 82, 82, 12, 12, 0, 12, 10, 82, 82, 82, 82, 233, 82, 82, 82, 82, 82, 82, 82, 82, 82, 3, 3, 13, 82, 82, 82, 8, 14, 15, 124, 82, 82, 82, 233, 233, 82, 3, 0, 13, 82, 82, 82, 82, 13, 11, 9, 4, 82, 82, 82, 82, 82, 82, 249, 13, 5, 13, 128, 82, 82, 9, 8, 1, 82, 82, 82, 82, 82, 82, 1, 0, 5, 7, 82, 105, 116, 214, 116, 214, 214, 225, 225, 116, 225, 105, 225, 165, 165, 225, 225, 67, 67, 105, 225, 18, 225, 56, 67, 18, 18, 7, 225, 214, 18, 18, 203, 116, 225, 192, 165, 18, 225, 56, 225, 116, 116, 67, 56, 225, 18, 18, 67, 203, 18, 225, 225, 225, 165, 165, 67, 225, 165, 214, 18, 225, 105, 7, 165, 165, 225, 18, 116, 203, 225, 18, 18, 116, 225, 3, 243, 8, 6, 1, 3, 7, 7, 4, 4, 1, 116, 18, 225, 18, 105, 7, 214, 225, 225, 225, 18, 116, 18, 18, 203, 225, 67, 165, 225, 225, 225, 56, 56, 214, 116, 225, 18, 214, 116, 225, 225, 203, 225, 56, 165, 18, 225, 214, 7, 11, 8, 12, 3, 1, 10, 11, 1, 13, 11, 9, 4, 1, 13, 14, 3, 11, 11, 11, 8, 6, 1, 116, 18, 225, 18, 45, 225, 45, 56, 67, 225, 116, 18, 214, 225, 214, 18, 67, 225, 34, 45, 67, 67, 67, 105, 225, 18, 225, 203, 165, 116, 67, 165, 82, 82, 4, 10, 5, 82, 82, 82, 82, 82, 82, 82, 82, 82, 82, 82, 13, 0, 11, 82, 82, 82, 82, 82, 82, 82, 233, 82, 82, 18, 18, 67, 7, 105, 67, 225, 67, 56, 67, 67, 18, 67, 116, 225, 165, 225, 154, 203, 225, 116, 56, 192, 
225, 225, 252, 225, 45, 18, 18, 67, 154, 225, 18, 94, 67, 116, 82, 82, 82, 82, 82, 82, 82, 82, 82, 82, 82, 1, 10, 5, 6, 4, 14, 82, 82, 82, 82, 82, 82, 82, 13, 10, 67, 67, 154, 225, 214, 12, 3, 1, 10, 11, 1, 13, 5, 2, 11, 9, 1, 15, 14, 8, 8, 12, 15, 15, 12, 14, 10, 15, 12, 8, 12, 2, 11, 14, 12, 1, 4, 4, 7, 5, 6, 6, 5, 11, 9, 8, 1, 3, 6, 8, 3, 58, 86, 175, 8, 5, 8, 11, 5, 8, 8, 15, 2, 12, 3, 8, 6, 1, 3, 7, 7, 4, 6, 7, 11, 4, 8, 2, 2, 0, 2, 1, 1, 6, 11, 0, 4, 11, 2, 5, 12, 5, 9, 11, 4, 3, 12, 1, 2, 12, 2, 7, 9, 12, 4, 0, 11, 14, 13, 2, 7, 12, 14, 14, 5, 14, 12, 13, 0, 3, 7, 3, 1, 9, 13, 14, 5, 8, 12, 4, 13, 15, 2, 2, 14, 9, 4, 6, 8, 12, 2, 11, 11, 7, 4, 5, 10, 10, 9, 1, 4, 11, 9, 11, 0, 3, 14, 9, 5, 10, 9, 2, 12, 14, 15, 1, 9, 11, 3, 13, 4, 177, 73, 77, 249, 1, 2, 10, 12, 8, 9, 8, 11, 6, 5, 5, 7, 6, 1, 2, 0, 3, 8, 15, 8, 9, 13, 15, 13, 8, 9, 8, 11, 8, 7, 15, 1, 8, 14, 3, 2, 8, 11, 1, 13, 5, 2, 11, 9, 1, 7, 8, 11, 8, 12, 3, 1, 5, 1, 8, 1, 227, 15, 1, 5, 5, 4, 1, 3, 0, 13, 3, 12, 4, 13, 1, 4, 5, 1, 8, 1, 11, 5, 9, 9, 2, 1, 7, 15, 9, 14, 15, 1, 7, 1, 4, 2, 14, 13, 0, 13, 10, 15, 12, 9, 8, 14, 13, 3, 9, 11, 15, 1, 6, 3, 6, 0, 11, 6, 15, 14, 3, 4, 5, 9, 10, 12, 4, 4, 11, 7, 7, 7, 8, 8, 12, 1, 4, 14, 13, 0, 11, 9, 11, 0, 3, 14, 4, 8, 2, 8, 11, 1, 13, 10, 10, 13, 5, 14, 4, 5, 1, 8, 1, 11, 12, 15, 1, 9, 6, 9, 13, 14, 4, 5, 4, 13, 9, 8, 2, 14, 13, 0, 13, 15, 9, 8, 10, 4, 14, 0, 8, 7, 15, 14, 5, 9, 7, 5, 14, 3, 3, 13, 4, 177, 77, 249, 10, 0, 15, 12, 11, 14, 14, 4, 13, 9, 8, 2, 14, 8, 13, 12, 10, 9, 14, 5, 13, 7, 9, 10, 3, 3, 4, 10, 4, 4, 1, 10, 0, 4, 14, 3, 14, 12, 1, 4, 4, 7, 11, 2, 14, 10, 10, 13, 7, 0, 5, 10, 5, 14, 3, 12, 1, 4, 4, 11, 2, 5, 12, 5, 9, 10, 4, 2, 5, 12, 7, 14, 4, 1, 10, 4, 12, 11, 6, 9, 11, 2, 4, 8, 10, 8, 2, 15, 9, 2, 2, 9, 10, 12, 1, 1, 15, 11, 7, 0, 13, 1, 6, 13, 6, 4, 14, 8, 10, 14, 0, 14, 8, 6, 13, 11, 12, 122, 174, 8, 5, 11, 6, 3, 4, 4, 5, 9, 3, 14, 5, 13, 236, 11, 13, 5, 13, 8, 8, 11, 11, 3, 15, 3, 9, 11, 0, 3, 5, 13, 10, 13, 2, 6, 8, 3, 15, 12, 11, 6, 
11, 8, 3, 7, 11, 8, 12, 3, 1, 10, 11, 1, 13, 11, 9, 4, 1, 8, 11, 10, 3, 7, 7, 13, 2, 4, 1, 13, 15, 6, 1, 3, 7, 5, 1, 5, 0, 10, 13, 6, 9, 7, 12, 12, 10, 5, 11, 10, 14, 0, 5, 14, 12, 1, 4, 4, 11, 2, 5, 12, 9, 10, 5, 14, 4, 10, 5, 6, 4, 14, 1, 6, 9, 3, 10, 15, 11, 6, 13, 0, 2, 12, 15, 11, 15, 8, 3, 15, 5, 10, 8, 5, 0, 7, 14, 12, 1, 12, 4, 4, 6, 11, 10, 6, 14, 2, 6, 5, 1, 6, 9, 9, 15, 5, 1, 4, 4, 10, 7, 5, 13, 9, 0, 14, 2, 7, 0, 12, 0, 10, 4, 6, 3, 1, 2, 8, 8, 3, 3, 13, 14, 3, 11, 8, 0, 10, 12, 14, 5, 8, 1, 7, 11, 5, 12, 5, 9, 11, 4, 4, 2, 14, 13, 0, 13, 10, 0, 11, 9, 4, 0, 8, 4, 11, 2, 5, 12, 5, 7, 12, 3, 1, 5, 1, 8, 1, 12, 10, 1, 7, 11, 15, 4, 8, 11, 6, 14, 8, 11, 15, 3, 0, 4, 1, 3, 0, 13, 3, 14, 14, 12, 1, 12, 0, 10, 10, 5, 4, 0, 5, 0, 11, 8, 6, 13, 12, 8, 3, 14, 5, 13, 4, 10, 4, 5, 9, 9, 10, 0, 3, 4, 4, 10, 0, 8, 12, 11, 6, 11, 6, 4, 9, 12, 12, 12, 1, 7, 9, 2, 11, 13, 9, 15, 2, 0, 5, 3, 8, 11, 4, 9, 5, 9, 3, 14, 5, 13, 236, 11, 13, 5, 15, 0, 6, 12, 5, 1, 0, 5, 7, 15, 12, 11, 0, 13, 3, 14, 14, 4, 9, 8, 13, 15, 9, 10, 4, 2, 5, 12, 7, 14, 4, 1, 10, 14, 2, 5, 1, 10, 7, 12, 12, 10, 5, 14, 12, 13, 0, 3, 1, 6, 1, 6, 1, 6, 15, 6, 166, 37, 56, 5, 13, 8, 11, 4, 9, 5, 9, 3, 12, 8, 11, 5, 5, 4, 8, 1, 11, 5, 9, 9, 2, 1, 15, 9, 8, 7, 5, 3, 8, 13, 10, 7, 6, 5, 1, 13, 13, 10, 11, 13, 7, 12, 10, 5, 14, 8, 4, 4, 7, 10, 12, 4, 4, 3, 3, 4, 5, 9, 10, 12, 4, 4, 11, 2, 5, 0, 12, 10, 9, 7, 6, 6, 13, 6, 4, 9, 12, 11, 11, 8, 6, 1, 3, 7, 7, 4, 6, 7, 11, 11, 13, 8, 3, 13, 1, 5, 1, 6, 1, 9, 1, 0, 14, 7, 3, 2, 10, 3, 5, 9, 4, 5, 2, 10, 11, 6, 3, 4, 4, 5, 9, 14, 5, 13, 236, 11, 13, 12, 4, 15, 9, 8, 1, 9, 6, 3, 3, 7, 11, 6, 6, 4, 6, 9, 4, 7, 1, 14, 8, 4, 11, 167, 201, 235, 14, 235, 201, 184, 218, 201, 235, 218, 82, 10, 235, 235, 184, 3, 8, 2, 11, 14, 201, 201, 14, 2, 14, 235, 201, 184, 218, 5, 4, 3, 7, 11, 11, 45, 191, 8, 5, 14, 5, 8, 0, 4, 6, 15, 13, 7, 154, 228, 45, 118, 1, 3, 11, 12, 2, 11, 10, 4, 7, 2, 3, 2, 11, 4, 7, 3, 15, 12, 12, 116, 218, 235, 14, 3, 5, 218, 184, 167, 201, 235, 14, 235, 
201, 184, 218, 201, 235, 218, 82, 10, 235, 235, 8, 15, 1, 252, 252, 252, 15, 14, 0, 2, 2, 8, 8, 9, 2, 13, 11, 12, 8, 0, 184, 253, 201, 116, 235, 13, 12, 15, 10, 3, 201, 99, 218, 201, 14, 184, 201, 167, 201, 11, 184, 31, 133, 7, 236, 253, 31, 184, 201, 167, 133, 235, 13, 167, 201, 184, 201, 235, 6, 4, 12, 14, 8, 1, 1, 4, 0, 150, 218, 218, 1, 184, 235, 11, 7, 45, 227, 81, 153, 13, 191, 8, 45, 227, 7, 45, 118, 5, 45, 81, 81, 43, 45, 191, 191, 228, 118, 45, 228, 8, 1, 0, 6, 116, 218, 235, 14, 3, 5, 218, 184, 167, 201, 235, 14, 235, 201, 184, 218, 201, 235, 2, 2, 3, 5, 5, 0, 3, 15, 7, 8, 4, 15, 13, 2, 12, 10, 0, 0, 1, 4, 4, 0, 10, 133, 184, 7, 1, 9, 9, 0, 13, 2, 4, 10, 4, 4, 184, 150, 185, 2, 4, 218, 0, 8, 1, 1, 7, 7, 3, 14, 0, 4, 15, 3, 5, 0, 11, 12, 5, 12, 3, 12, 14, 3, 31, 184, 1, 10, 133, 201, 167, 12, 8, 10, 12, 0, 1, 13, 7, 8, 1, 12, 13, 13, 14, 11, 191, 191, 8, 45, 118, 252, 252, 252, 252, 45, 228, 118, 8, 228, 81, 154, 44, 191, 45, 198, 252, 4, 3, 12, 65, 218, 218, 252, 14, 235, 150, 167, 8, 252, 225, 252, 184, 201, 218, 235, 201, 2, 0, 0, 7, 5, 45, 79, 81, 45, 228, 3, 3, 0, 3, 12, 150, 167, 2, 6, 15, 14, 6, 10, 15, 13, 1, 3, 12, 12, 1, 15, 15, 3, 2, 9, 8, 13, 15, 6, 5, 0, 13, 1, 13, 14, 12, 4, 1, 6, 0, 12, 8, 201, 235, 14, 235, 201, 184, 218, 201, 235, 218, 82, 10, 235, 235, 184, 3, 8, 2, 11, 14, 201, 201, 14, 2, 14, 235, 201, 184, 218, 201, 235, 218, 82, 10, 8, 15, 10, 235, 235, 184, 6, 1, 2, 11, 9, 9, 12, 116, 218, 235, 14, 14, 5, 13, 2, 12, 11, 252, 252, 252, 235, 9, 1, 201, 235, 1, 8, 133, 235, 116, 11, 9, 9, 7, 12, 252, 252, 225, 225, 228, 7, 44, 45, 252, 252, 0, 9, 2, 6, 13, 14, 8, 117, 8, 227, 228, 81, 154, 118, 189, 118, 6, 191, 191, 45, 226, 228, 118, 118, 227, 191, 191, 191, 8, 8, 191, 8, 118, 228, 45, 81, 45, 45, 8, 228, 118, 8, 45, 191, 191, 8, 45, 118, 154, 228, 118, 81, 45, 45, 8, 45, 45, 45, 228, 191, 81, 153, 8, 191, 8, 191, 227, 190, 227, 228, 118, 228, 11, 5, 8, 3, 15, 14, 0, 0, 11, 10, 4, 1, 2, 0, 2, 13, 11, 14, 0, 251, 37, 246, 118, 143, 
133, 35, 17, 175, 185, 153, 120, 255, 182, 11, 239, 222, 100, 61, 86, 78, 33, 251, 75, 250, 112, 175, 27, 173, 202, 18, 187, 27, 254, 140, 132, 213, 231, 218, 209, 45, 17, 15, 173, 38, 156, 105, 253, 206, 166, 15, 65, 245, 191, 229, 29, 71, 42, 228, 130, 73, 103, 168, 28, 94, 170, 134, 143, 34, 248, 254, 33, 250, 123, 225, 189, 35, 158, 244, 163, 174, 171, 114, 221, 239, 226, 201, 195, 33, 239, 250, 90, 101, 50, 223, 235, 45, 36, 46, 7, 187, 23, 206, 84, 144, 193, 244, 111, 67, 116, 138, 153, 6, 250, 124, 107, 181, 181, 181, 181, 181, 181, 181, 181, 181, 181, 181, 181, 181, 181, 181, 181, 181, 181, 181, 181, 181, 181, 181, 181, 181, 181, 181, 181, 181, 181, 181, 181, 181, 181, 181, 181, 181, 181, 181, 181, 181, 181, 181, 198, 198, 198, 198, 198, 209, 192, 153, 247, 12, 12, 12, 12, 12, 12, 1, 10, 13, 15, 3, 12, 6, 3, 15, 3, 12, 6, 11, 9, 3, 11, 0, 6, 3, 5, 11, 1, 4, 11, 5, 11, 1, 4, 11, 5, 11, 2, 7, 0, 14, 4, 14, 7, 15, 14, 15, 8, 12, 12, 6, 15, 5, 15, 8, 12, 12, 6, 15, 5, 13, 1, 9, 4, 8, 10, 10, 14, 14, 10, 10, 10, 8, 5, 1, 5, 13, 7, 2, 8, 11, 3, 6, 3, 4, 13, 12, 6, 15, 5, 13, 1, 9, 4, 8, 15, 5, 13, 1, 9, 4, 138, 138, 32, 66, 171, 63, 5, 200, 200, 32, 180, 17, 66, 217, 6, 129, 220, 199, 121, 176, 156, 191, 250, 211, 34, 226, 27, 83, 126, 241, 22, 20, 111, 55, 81, 63, 5, 200, 32, 180, 17, 66, 138, 138, 32, 66, 109, 109, 109, 109, 109, 109, 109, 109, 109, 109, 109, 109, 109, 58, 201, 195, 191, 250, 211, 34, 226, 27, 83, 126, 241, 235, 235, 250, 126, 253, 215, 247, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 193, 120, 120, 120, 
120, 120, 120, 120, 36, 36, 36, 209, 36, 36, 36, 209, 36, 36, 36, 36, 36, 209, 36, 36, 36, 5, 2, 11, 14, 4, 14, 15, 13, 4, 8, 1, 3, 6, 6, 11, 9, 3, 9, 14, 2, 3, 14, 8, 7, 0, 14, 6, 36, 36, 36, 36, 36, 36, 36, 209, 36, 36, 2, 10, 13, 1, 5, 14, 15, 6, 4, 4, 6, 36, 36, 36, 15, 2, 36, 36, 36, 36, 2, 13, 7, 9, 9, 11, 4, 14, 36, 209, 36, 36, 36, 209, 36, 36, 36, 36, 36, 209, 7, 5, 11, 10, 72, 164, 36, 36, 36, 126, 36, 177, 72, 164, 36, 36, 36, 36, 36, 36, 36, 209, 3, 14, 7, 12, 216, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 5, 15, 8, 3, 209, 36, 36, 36, 36, 0, 13, 13, 36, 36, 126, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 209, 36, 36, 36, 130, 157, 51, 6, 14, 5, 13, 7, 8, 11, 4, 6, 3, 2, 36, 36, 36, 36, 6, 7, 13, 5, 7, 10, 6, 10, 4, 10, 4, 0, 14, 0, 8, 11, 7, 5, 11, 36, 36, 36, 4, 14, 4, 3, 6, 14, 11, 6, 13, 15, 14, 3, 6, 209, 36, 36, 209, 36, 36, 209, 36, 36, 36, 36, 36, 4, 2, 36, 36, 36, 5, 2, 7, 36, 36, 36, 36, 36, 36, 181, 96, 205, 36, 6, 36, 36, 36, 15, 36, 36, 36, 36, 36, 36, 36, 36, 209, 36, 36, 70, 86, 201, 26, 36, 36, 36, 36, 209, 36, 36, 36, 36, 36, 11, 6, 4, 8, 14, 1, 11, 13, 0, 13, 0, 8, 2, 15, 4, 0, 9, 1, 12, 5, 7, 13, 13, 2, 0, 13, 11, 15, 12, 6, 1, 0, 1, 5, 36, 36, 36, 5, 15, 8, 3, 209, 36, 15, 13, 12, 6, 36, 36, 249, 97, 36, 36, 209, 209, 209, 36, 209, 9, 36, 209, 36, 36, 36, 36, 36, 209, 36, 36, 13, 4, 13, 1, 36, 36, 36, 209, 209, 36, 36, 36, 36, 36, 126, 36, 36, 14, 12, 36, 36, 36, 13, 14, 0, 6, 3, 0, 1, 10, 11, 14, 8, 7, 0, 14, 6, 2, 36, 36, 36, 36, 36, 45, 36, 5, 1, 36, 36, 6, 7, 13, 5, 7, 10, 6, 10, 4, 10, 4, 1, 0, 6, 12, 6, 2, 5, 8, 15, 36, 36, 36, 14, 4, 3, 6, 14, 11, 6, 13, 15, 14, 3, 6, 209, 36, 36, 1, 13, 4, 13, 11, 4, 7, 13, 36, 209, 209, 36, 36, 209, 209, 36, 36, 209, 36, 209, 36, 36, 89, 190, 167, 181, 101, 34, 36, 36, 36, 36, 6, 11, 3, 13, 15, 3, 9, 15, 3, 12, 8, 209, 36, 126, 36, 9, 10, 9, 15, 15, 3, 0, 9, 14, 3, 0, 36, 36, 1, 13, 4, 13, 11, 4, 7, 36, 36, 89, 204, 36, 36, 36, 181, 96, 205, 36, 36, 36, 209, 36, 36, 8, 13, 3, 4, 14, 126, 1, 
36, 36, 36, 36, 36, 6, 7, 1, 15, 8, 36, 36, 36, 36, 36, 209, 36, 36, 10, 9, 15, 15, 3, 0, 9, 7, 13, 11, 36, 181, 96, 7, 6, 13, 147, 158, 184, 26, 94, 101, 112, 145, 101, 177, 191, 22, 126, 1, 36, 36, 36, 36, 36, 6, 7, 1, 15, 8, 36, 36, 36, 36, 36, 8, 0, 36, 181, 96, 205, 36, 36, 36, 209, 36, 36, 13, 3, 4, 14, 126, 1, 36, 36, 36, 36, 36, 6, 7, 1, 15, 7, 11, 2, 10, 15, 164, 36, 36, 36, 126, 36, 177, 15, 15, 1, 3, 0, 3, 7, 6, 9, 0, 36, 36, 36, 36, 36, 36, 36, 36, 209, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 181, 96, 205, 36, 36, 36, 209, 36, 36, 13, 3, 8, 0, 10, 9, 15, 4, 9, 36, 36, 36, 36, 36, 209, 36, 36, 12, 3, 8, 9, 13, 2, 12, 12, 13, 5, 6, 6, 0, 5, 3, 8, 12, 1, 5, 12, 12, 0, 15, 6, 11, 4, 2, 8, 36, 36, 36, 209, 209, 36, 36, 209, 10, 36, 36, 209, 36, 6, 6, 13, 15, 14, 11, 5, 7, 5, 4, 15, 8, 36, 36, 36, 36, 36, 36, 209, 36, 36, 1, 4, 11, 7, 36, 36, 36, 3, 14, 7, 12, 216, 36, 36, 36, 6, 36, 36, 36, 36, 36, 209, 7, 2, 10, 15, 6, 3, 0, 1, 10, 11, 14, 8, 7, 0, 14, 6, 13, 5, 13, 12, 36, 36, 36, 2, 2, 12, 126, 209, 13, 11, 3, 4, 1, 5, 12, 12, 0, 15, 0, 15, 164, 36, 36, 36, 126, 36, 177, 15, 2, 197, 96, 149, 36, 36, 36, 36, 36, 36, 36, 36, 209, 209, 36, 36, 36, 6, 9, 15, 2, 36, 36, 209, 209, 36, 36, 0, 14, 0, 8, 11, 7, 5, 11, 36, 36, 36, 4, 36, 209, 36, 10, 6, 209, 36, 36, 1, 5, 5, 8, 209, 36, 126, 36, 9, 10, 9, 15, 15, 14, 9, 14, 36, 6, 36, 36, 8, 1, 13, 3, 12, 3, 101, 34, 36, 10, 7, 7, 3, 0, 3, 5, 8, 3, 7, 12, 13, 5, 8, 11, 4, 6, 3, 2, 8, 2, 1, 5, 15, 7, 0, 2, 0, 6, 12, 13, 10, 126, 36, 36, 209, 36, 209, 36, 36, 36, 209, 36, 36, 209, 36, 36, 36, 36, 36, 36, 209, 36, 209, 36, 36, 36, 36, 36, 4, 15, 15, 14, 209, 36, 36, 9, 6, 15, 1, 7, 2, 209, 36, 36, 36, 4, 9, 36, 36, 209, 209, 36, 36, 209, 36, 4, 7, 36, 36, 36, 36, 36, 209, 36, 36, 36, 36, 36, 13, 6, 15, 1, 5, 12, 6, 36, 36, 209, 209, 36, 36, 36, 36, 36, 89, 190, 167, 181, 101, 34, 36, 36, 36, 36, 6, 11, 3, 13, 6, 10, 11, 15, 13, 36, 36, 36, 125, 125, 125, 125, 125, 125, 125, 125, 125, 125, 125, 125, 125, 125, 
125, 125, 125, 125, 125, 123, 125, 125, 125, 125, 125, 125, 125, 123, 125, 125, 125, 123, 108, 108, 108, 108, 108, 138, 80, 163, 163, 163, 163, 163, 59, 51, 108, 138, 138, 123, 108, 108, 108, 108, 123, 123, 108, 108, 43, 51, 180, 180, 108, 123, 153, 108, 108, 123, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 163, 163, 163, 163, 163, 3, 11, 13, 180, 10, 7, 5, 108, 108, 80, 163, 163, 163, 163, 163, 163, 80, 163, 163, 163, 163, 59, 43, 108, 178, 45, 43, 108, 108, 123, 123, 108, 108, 108, 108, 216, 45, 43, 168, 41, 168, 47, 168, 180, 163, 163, 163, 180, 149, 123, 108, 108, 153, 108, 108, 108, 123, 108, 138, 108, 108, 108, 108, 108, 108, 108, 125, 125, 125, 125, 125, 125, 125, 125, 125, 125, 125, 125, 108, 123, 108, 125, 125, 125, 125, 125, 125, 125, 108, 138, 123, 125, 125, 125, 125, 125, 125, 123, 123, 108, 14, 180, 6, 180, 49, 123, 123, 108, 168, 108, 153, 123, 138, 123, 140, 163, 163, 163, 163, 163, 180, 180, 5, 7, 6, 9, 108, 153, 108, 163, 163, 8, 9, 9, 108, 153, 153, 108, 138, 108, 108, 138, 123, 123, 108, 123, 108, 172, 163, 180, 178, 59, 41, 180, 7, 14, 180, 108, 47, 49, 180, 180, 180, 9, 14, 4, 163, 163, 163, 163, 163, 180, 6, 6, 8, 10, 180, 143, 170, 216, 45, 43, 168, 41, 168, 47, 168, 170, 6, 2, 202, 80, 35, 77, 163, 163, 123, 108, 123, 108, 108, 108, 108, 108, 125, 125, 125, 125, 125, 108, 108, 180, 41, 170, 170, 180, 253, 163, 163, 163, 163, 163, 125, 108, 108, 108, 123, 123, 108, 108, 138, 108, 108, 108, 123, 123, 108, 108, 108, 163, 180, 180, 180, 180, 180, 180, 163, 6, 6, 8, 180, 4, 163, 163, 163, 180, 180, 180, 163, 163, 163, 163, 62, 137, 108, 123, 108, 108, 108, 108, 108, 108, 125, 125, 125, 125, 125, 163, 180, 13, 165, 214, 123, 108, 108, 108, 108, 123, 123, 125, 125, 125, 125, 125, 125, 125, 125, 125, 125, 125, 125, 125, 108, 108, 108, 108, 108, 123, 108, 123, 108, 108, 125, 125, 125, 108, 123, 108, 125, 125, 125, 125, 125, 125, 125, 125, 125, 125, 125, 138, 180, 180, 14, 180, 6, 180, 49, 180, 180, 180, 125, 125, 125, 125, 125, 125, 125, 
125, 125, 125, 125, 125, 125, 125, 125, 125, 125, 108, 123, 123, 174, 247, 57, 72, 108, 108, 168, 41, 168, 47, 168, 180, 163, 163, 163, 180, 149, 149, 123, 108, 108, 153, 108, 108, 108, 123, 108, 138, 108, 108, 123, 108, 108, 180, 41, 8, 23, 108, 108, 108, 138, 108, 123, 108, 108, 108, 108, 108, 123, 108, 108, 108, 108, 108, 138, 108, 138, 108, 108, 108, 108, 108, 108, 123, 108, 108, 123, 108, 123, 108, 108, 125, 125, 125, 108, 123, 108, 125, 108, 123, 108, 108, 125, 125, 125, 108, 123, 108, 125, 108, 123, 108, 108, 125, 125, 125, 108, 123, 108, 125, 108, 123, 108, 108, 125, 125, 125, 108, 123, 108, 125, 108, 123, 108, 108, 125, 125, 125, 108, 123, 108, 125, 108, 123, 108, 108, 125, 125, 125, 108, 123, 108, 125, 108, 123, 108, 108, 108, 163, 180, 41, 41, 108, 108, 123, 108, 108, 108, 108, 108, 108, 153, 108, 108, 108, 138, 123, 108, 168, 170, 170, 180, 108, 6, 8, 10, 108, 108, 108, 123, 108, 108, 108, 123, 108, 108, 108, 108, 108, 108, 108, 125, 125, 125, 125, 125, 125, 125, 125, 125, 125, 125, 125, 125, 125, 125, 125, 125, 108, 108, 123, 108, 138, 108, 108, 108, 108, 108, 108, 108, 125, 125, 125, 125, 125, 125, 125, 125, 125, 125, 125, 125, 125, 125, 125, 125, 125, 125, 125, 125, 125, 125, 125, 163, 80, 163, 108, 138, 108, 180, 253, 163, 163, 163, 163, 163, 125, 125, 125, 125, 125, 123, 108, 108, 123, 108, 108, 123, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 163, 163, 163, 163, 163, 108, 125, 125, 125, 125, 125, 125, 125, 125, 125, 125, 125, 125, 125, 125, 125, 123, 108, 125, 125, 125, 125, 108, 123, 108, 163, 180, 180, 5, 108, 123, 108, 153, 108, 123, 123, 108, 108, 108, 108, 108, 168, 180, 163, 163, 163, 180, 149, 123, 108, 108, 153, 125, 125, 125, 125, 108, 123, 123, 108, 138, 123, 180, 13, 180, 123, 108, 123, 108, 108, 108, 180, 180, 163, 6, 108, 138, 108, 123, 108, 108, 123, 108, 168, 47, 168, 180, 163, 163, 163, 180, 149, 149, 123, 108, 125, 125, 125, 125, 108, 123, 108, 153, 108, 108, 123, 123, 108, 125, 125, 125, 108, 108, 123, 108, 108, 123, 
108, 123, 108, 108, 125, 125, 108, 108, 108, 123, 108, 108, 123, 138, 123, 108, 138, 108, 108, 108, 108, 138, 108, 123, 108, 123, 108, 108, 108, 123, 123, 108, 108, 108, 108, 108, 108, 108, 108, 108, 153, 108, 108, 108, 123, 108, 108, 123, 138, 123, 108, 108, 123, 108, 123, 108, 123, 108, 108, 108, 108, 108, 123, 153, 108, 108, 108, 108, 108, 108, 108, 123, 108, 138, 123, 108, 108, 123, 123, 108, 138, 108, 108, 108, 108, 108, 108, 108, 108, 108, 123, 108, 108, 108, 138, 108, 108, 123, 138, 108, 108, 108, 108, 108, 108, 153, 153, 108, 138, 108, 108, 138, 123, 123, 108, 108, 108, 123, 108, 138, 108, 108, 108, 108, 108, 108, 108, 108, 123, 108, 153, 153, 108, 138, 108, 108, 108, 108, 108, 123, 138, 108, 108, 123, 163, 163, 163, 163, 163, 123, 138, 138, 108, 123, 108, 123, 123, 108, 108, 108, 123, 108, 123, 108, 108, 108, 108, 123, 108, 108, 180, 41, 8, 23, 108, 108, 108, 108, 138, 108, 108, 114, 114, 114, 114, 114, 114, 114, 114, 41, 41, 41, 238, 238, 107, 238, 107, 238, 238, 238, 24, 114, 41, 41, 41, 41, 238, 238, 238, 238, 84, 107, 238, 238, 238, 114, 41, 41, 41, 197, 41, 167, 130, 238, 238, 238, 41, 238, 238, 238, 84, 238, 238, 238, 197, 114, 197, 227, 41, 41, 41, 227, 77, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 238, 114, 227, 41, 41, 41, 167, 41, 41, 41, 41, 137, 238, 238, 238, 238, 238, 238, 238, 238, 238, 238, 238, 238, 238, 238, 238, 238, 238, 238, 238, 238, 238, 238, 238, 238, 238, 24, 107, 41, 227, 213, 137, 238, 238, 238, 41, 41, 41, 238, 238, 238, 238, 238, 238, 238, 41, 41, 41, 114, 227, 41, 238, 238, 238, 41, 41, 41, 84, 54, 238, 238, 238, 227, 238, 238, 238, 250, 250, 238, 238, 238, 227, 238, 238, 238, 41, 41, 41, 41, 77, 41, 41, 41, 41, 24, 84, 41, 41, 41, 41, 41, 41, 17, 238, 238, 238, 41, 24, 41, 41, 41, 41, 197, 243, 238, 238, 238, 238, 238, 24, 227, 41, 41, 107, 41, 41, 41, 114, 238, 238, 238, 238, 238, 167, 167, 41, 41, 41, 227, 238, 238, 238, 41, 41, 41, 107, 41, 41, 41, 238, 238, 238, 238, 238, 238, 238, 227, 41, 41, 41, 227, 238, 238, 238, 41, 
41, 41, 167, 41, 41, 227, 41, 41, 41, 41, 167, 114, 227, 41, 167, 84, 197, 190, 114, 238, 238, 238, 250, 238, 238, 238, 238, 238, 238, 84, 41, 41, 41, 197, 190, 114, 238, 238, 238, 114, 227, 137, 41, 41, 41, 238, 238, 238, 250, 114, 41, 41, 197, 84, 238, 238, 167, 227, 213, 137, 250, 41, 41, 41, 238, 238, 238, 238, 227, 250, 250, 238, 238, 107, 41, 227, 213, 41, 41, 41, 238, 238, 197, 114, 197, 227, 238, 41, 41, 24, 227, 238, 238, 238, 167, 41, 41, 41, 41, 114, 227, 238, 238, 238, 238, 238, 238, 238, 238, 54, 197, 41, 41, 41, 107, 238, 238, 238, 227, 114, 238, 227, 41, 54, 41, 227, 238, 84, 220, 41, 41, 41, 167, 84, 197, 190, 114, 84, 197, 238, 238, 238, 238, 238, 238, 238, 238, 238, 238, 238, 238, 114, 41, 41, 41, 197, 227, 24, 114, 77, 107, 250, 41, 227, 238, 114, 41, 41, 114, 238, 238, 84, 238, 238, 238, 238, 84, 238, 238, 238, 238, 227, 238, 250, 24, 227, 41, 197, 197, 190, 114, 41, 41, 41, 54, 41, 238, 114, 227, 41, 41, 137, 137, 197, 167, 84, 41, 227, 238, 41, 238, 238, 238, 227, 250, 238, 238, 238, 41, 41, 114, 227, 41, 41, 41, 41, 137, 238, 238, 238, 238, 197, 41, 84, 238, 238, 238, 238, 238, 238, 238, 227, 238, 238, 238, 238, 238, 238, 238, 238, 114, 238, 238, 238, 238, 238, 238, 238, 227, 41, 41, 238, 114, 41, 114, 238, 167, 41, 41, 107, 41, 238, 238, 238, 238, 238, 238, 238, 54, 197, 41, 114, 238, 238, 238, 227, 227, 114, 238, 250, 84, 114, 167, 130, 238, 250, 160, 41, 41, 41, 167, 84, 176, 41, 41, 41, 238, 238, 197, 41, 238, 238, 238, 220, 114, 197, 227, 190, 114, 238, 238, 77, 197, 41, 84, 41, 41, 238, 197, 114, 197, 243, 238, 238, 197, 250, 238, 238, 130, 238, 250, 160, 160, 238, 238, 238, 238, 84, 114, 197, 227, 238, 47, 197, 84, 238, 238, 238, 238, 84, 41, 41, 227, 238, 238, 238, 167, 84, 197, 238, 238, 220, 213, 137, 250, 197, 41, 238, 238, 238, 238, 238, 238, 238, 41, 41, 41, 114, 114, 167, 41, 41, 41, 84, 197, 227, 227, 227, 197, 24, 41, 41, 41, 238, 238, 197, 238, 238, 238, 238, 238, 238, 238, 197, 114, 114, 167, 238, 238, 238, 238, 227, 238, 
238, 238, 238, 238, 238, 227, 114, 238, 238, 238, 238, 41, 41, 41, 41, 227, 197, 41, 238, 114, 227, 190, 238, 238, 41, 84, 107, 238, 238, 238, 238, 238, 238, 238, 238, 238, 238, 84, 227, 238, 238, 238, 238, 238, 114, 114, 238, 84, 107, 238, 238, 238, 238, 238, 227, 238, 238, 238, 238, 114, 107, 24, 41, 41, 41, 54, 41, 238, 114, 227, 84, 84, 167, 41, 84, 197, 84, 84, 227, 238, 114, 238, 238, 238, 238, 238, 238, 238, 238, 84, 114, 227, 41, 114, 84, 114, 41, 41, 238, 84, 238, 114, 238, 238, 238, 238, 107, 243, 238, 41, 41, 197, 227, 137, 197, 137, 41, 84, 238, 107, 130, 227, 41, 236, 41, 197, 243, 238, 227, 238, 114, 238, 107, 238, 238, 238, 227, 41, 238, 238, 238, 54, 41, 227, 24, 197, 54, 197, 54, 84, 227, 227, 227, 238, 238, 197, 47, 197, 84, 238, 238, 114, 238, 238, 227, 250, 238, 41, 41, 197, 190, 114, 84, 197, 197, 238, 238, 238, 238, 227, 238, 84, 220, 41, 238, 238, 238, 238, 238, 41, 41, 227, 238, 238, 54, 24, 238, 220, 114, 114, 41, 41, 114, 227, 84, 114, 24, 54, 114, 238, 238, 84, 227, 84, 84, 238, 197, 114, 197, 41, 107, 41, 41, 41, 114, 41, 41, 167, 130, 227, 41, 167, 84, 167, 197, 227, 54, 238, 238, 250, 238, 114, 24, 238, 238, 250, 227, 114, 238, 238, 250, 190, 137, 183, 137, 114, 227, 227, 220, 54, 41, 41, 41, 114, 227, 238, 238, 41, 41, 107, 41, 107, 84, 54, 238, 238, 238, 137, 197, 167, 197, 238, 238, 238, 238, 238, 238, 238, 238, 54, 195, 105, 105, 105, 22, 188, 238, 238, 238, 15, 15, 188, 15, 188, 41, 41, 41, 41, 17, 238, 238, 238, 41, 24, 41, 41, 22, 105, 105, 15, 105, 15, 105, 105, 188, 15, 41, 41, 227, 41, 114, 227, 238, 238, 188, 15, 29, 188, 188, 22, 15, 15, 15, 188, 22, 238, 238, 238, 41, 41, 41, 167, 188, 188, 202, 188, 195, 15, 15, 15, 15, 15, 188, 15, 15, 15, 22, 105, 238, 227, 238, 27, 114, 178, 179, 196, 172, 178, 179, 178, 180, 178, 178, 178, 109, 109, 109, 109, 225, 109, 109, 109, 109, 109, 109, 201, 109, 109, 109, 109, 109, 109, 109, 251, 178, 180, 179, 244, 193, 58, 109, 109, 109, 27, 180, 178, 178, 179, 179, 180, 178, 178, 178, 178, 
178, 181, 182, 178, 56, 109, 109, 109, 254, 109, 109, 109, 109, 178, 179, 178, 196, 109, 109, 109, 109, 109, 109, 109, 225, 225, 178, 148, 196, 179, 178, 178, 114, 225, 85, 109, 109, 109, 109, 225, 178, 56, 109, 109, 109, 254, 109, 109, 109, 109, 27, 193, 58, 109, 109, 109, 254, 114, 109, 109, 109, 109, 109, 109, 109, 109, 109, 109, 109, 109, 109, 109, 109, 109, 109, 109, 178, 178, 109, 109, 85, 196, 254, 254, 225, 172, 196, 172, 71, 85, 56, 254, 172, 172, 196, 225, 114, 56, 143, 196, 196, 177, 27, 254, 27, 32, 230, 225, 3, 27, 254, 90, 114, 85, 196, 254, 196, 225, 172, 148, 56, 143, 114, 27, 56, 85, 225, 8, 90, 196, 114, 230, 27, 196, 230, 56, 56, 172, 225, 254, 172, 196, 201, 178, 180, 178, 178, 178, 109, 109, 254, 225, 196, 27, 27, 225, 201, 196, 254, 225, 225, 56, 225, 27, 254, 56, 13, 27, 196, 254, 196, 225, 90, 225, 206, 254, 254, 225, 56, 225, 225, 254, 3, 254, 230, 27, 196, 172, 196, 56, 254, 32, 56, 85, 196, 143, 230, 27, 196, 201, 254, 27, 56, 114, 114, 225, 196, 225, 143, 254, 254, 196, 254, 196, 172, 143, 201, 230, 196, 196, 225, 196, 32, 254, 201, 143, 196, 235, 196, 196, 56, 114, 56, 153, 85, 109, 109, 109, 27, 85, 56, 196, 196, 196, 225, 225, 196, 8, 254, 90, 225, 143, 225, 254, 27, 8, 196, 27, 196, 114, 196, 230, 196, 254, 61, 85, 114, 3, 27, 32, 3, 254, 196, 56, 114, 143, 114, 85, 172, 90, 254, 172, 206, 196, 196, 56, 37, 61, 196, 32, 225, 27, 85, 172, 196, 114, 61, 196, 85, 148, 114, 225, 196, 225, 56, 56, 196, 225, 114, 27, 225, 196, 230, 201, 27, 27, 196, 254, 56, 254, 114, 56, 196, 85, 254, 56, 56, 143, 56, 196, 254, 143, 172, 85, 27, 225, 225, 196, 225, 114, 90, 225, 196, 27, 196, 172, 114, 85, 196, 27, 201, 56, 172, 225, 61, 27, 254, 90, 85, 225, 196, 225, 56, 196, 114, 201, 56, 172, 254, 225, 27, 196, 56, 254, 201, 172, 85, 148, 254, 27, 196, 56, 61, 143, 56, 56, 61, 201, 196, 148, 196, 114, 27, 114, 143, 196, 114, 225, 32, 225, 85, 254, 225, 196, 230, 196, 143, 56, 109, 109, 109, 148, 254, 27, 196, 254, 225, 225, 254, 3, 27, 225, 235, 254, 
254, 114, 27, 61, 196, 61, 230, 201, 172, 196, 27, 196, 196, 196, 254, 172, 225, 254, 196, 114, 254, 114, 114, 254, 85, 196, 225, 230, 196, 85, 85, 196, 85, 119, 56, 114, 27, 196, 56, 254, 196, 27, 225, 85, 196, 56, 230, 225, 225, 196, 27, 71, 254, 196, 27, 201, 114, 225, 119, 254, 225, 230, 56, 225, 225, 172, 27, 27, 172, 196, 56, 225, 196, 196, 177, 119, 119, 225, 95, 27, 225, 27, 114, 85, 206, 196, 119, 196, 196, 254, 230, 172, 201, 196, 196, 27, 114, 225, 56, 196, 225, 196, 27, 196, 143, 254, 85, 56, 143, 119, 225, 114, 56, 225, 114, 196, 225, 85, 230, 225, 225, 254, 254, 254, 56, 225, 143, 254, 196, 172, 225, 201, 254, 196, 196, 3, 85, 235, 254, 3, 254, 27, 172, 196, 172, 85, 56, 254, 225, 225, 143, 32, 172, 143, 114, 27, 196, 143, 27, 85, 85, 254, 254, 254, 85, 148, 143, 225, 196, 196, 56, 148, 3, 254, 254, 56, 56, 172, 114, 85, 56, 85, 90, 85, 109, 109, 109, 109, 95, 37, 254, 225, 225, 27, 172, 114, 196, 230, 201, 196, 85, 27, 85, 85, 254, 254, 230, 177, 254, 225, 3, 114, 172, 196, 254, 114, 196, 27, 225, 61, 201, 114, 27, 177, 114, 172, 85, 196, 27, 27, 225, 56, 153, 225, 27, 61, 143, 196, 27, 85, 27, 56, 143, 172, 3, 196, 225, 196, 196, 85, 143, 27, 196, 56, 254, 56, 225, 196, 225, 56, 56, 196, 27, 56, 148, 196, 114, 114, 225, 27, 225, 85, 27, 172, 109, 109, 109, 14, 230, 238, 182, 252, 109, 245, 37, 24, 187, 152, 238, 184, 132, 6, 43, 149, 236, 145, 152, 59, 58, 211, 73, 221, 16, 203, 163, 221, 235, 5, 133, 107, 39, 127, 52, 44, 253, 35, 83, 20, 225, 120, 17, 10, 89, 24, 157, 230, 218, 15, 51, 54, 93, 133, 26, 156, 74, 66, 81, 39, 225, 198, 192, 196, 239, 145, 226, 94, 83, 106, 202, 60, 128, 178, 180, 180, 236, 248, 207, 102, 54, 68, 141, 223, 27, 101, 55, 99, 46, 204, 114, 160, 173, 105, 90, 180, 18, 183, 239, 237, 198, 23, 173, 185, 229, 137, 223, 216, 146, 255, 140, 198, 172, 238, 94, 4, 237, 98, 20, 131, 222, 211, 218, 241, 192, 176, 80, 237, 118, 88, 228, 18, 143, 234, 236, 4, 8, 239, 36, 4, 249, 146, 65, 105, 117, 110, 171, 148, 172, 196, 114, 214, 
76, 1, 16, 185, 178, 54, 205, 117, 33, 96, 251, 187, 54, 168, 157, 80, 94, 61, 18, 42, 123, 69, 118, 17, 58, 146, 238, 116, 252, 141, 38, 37, 226, 250, 115, 169, 102, 38, 3, 186, 144, 78, 169, 193, 54, 80, 93, 66, 246, 214, 53, 151, 22, 65, 153, 156, 102, 38, 227, 92, 27, 216, 74, 160, 146, 185, 233, 67, 77, 91, 208, 78, 96, 129, 35, 188, 71, 10, 31, 62, 24, 57, 202, 52, 56, 227, 110, 107, 232, 64, 228, 81, 243, 88, 53, 21, 142, 32, 56, 26, 170, 113, 252, 137, 118, 44, 69, 214, 242, 57, 181, 250, 223, 93, 66, 77, 141, 81, 81, 81, 81, 81, 141, 81, 81, 81, 81, 81, 141, 141, 141, 206, 206, 81, 81, 81, 206, 206, 206, 14, 141, 206, 206, 141, 141, 141, 141, 81, 81, 81, 81, 206, 81, 81, 81, 81, 81, 81, 81, 141, 81, 81, 141, 206, 141, 141, 141, 141, 141, 141, 141, 206, 141, 188, 102, 121, 141, 141, 141, 141, 81, 81, 81, 81, 141, 141, 206, 141, 141, 141, 141, 154, 141, 141, 141, 253, 45, 179, 118, 108, 81, 81, 81, 141, 141, 141, 81, 81, 81, 81, 206, 81, 81, 81, 81, 81, 81, 81, 141, 34, 215, 151, 182, 29, 237, 141, 141, 81, 81, 81, 81, 81, 81, 141, 206, 206, 141, 141, 141, 206, 141, 81, 141, 206, 141, 141, 206, 141, 141, 15, 206, 195, 134, 207, 9, 141, 141, 141, 141, 141, 141, 15, 206, 141, 141, 81, 81, 81, 81, 81, 81, 141, 206, 14, 141, 206, 206, 141, 141, 141, 206, 141, 141, 141, 141, 81, 81, 81, 141, 15, 141, 141, 81, 81, 141, 141, 141, 141, 141, 141, 141, 206, 141, 141, 81, 206, 206, 141, 141, 141, 141, 141, 141, 141, 141, 81, 81, 81, 81, 81, 81, 81, 81, 141, 206, 141, 141, 141, 81, 206, 15, 206, 206, 141, 81, 141, 81, 81, 81, 141, 71, 52, 224, 141, 206, 206, 141, 141, 141, 141, 206, 141, 141, 141, 141, 81, 81, 81, 81, 206, 81, 81, 81, 81, 141, 141, 81, 81, 81, 81, 81, 81, 141, 141, 141, 141, 141, 81, 81, 81, 81, 141, 34, 215, 151, 182, 29, 141, 141, 141, 141, 141, 141, 141, 141, 206, 206, 206, 141, 141, 81, 81, 81, 81, 206, 81, 81, 81, 81, 141, 206, 145, 48, 188, 102, 179, 125, 86, 50, 141, 206, 206, 141, 80, 141, 141, 141, 141, 81, 81, 206, 81, 206, 206, 206, 141, 141, 
141, 206, 141, 81, 141, 206, 15, 141, 206, 141, 141, 141, 141, 141, 206, 81, 81, 81, 206, 141, 81, 81, 81, 81, 141, 141, 141, 206, 141, 206, 141, 141, 141, 81, 141, 141, 141, 206, 206, 81, 81, 81, 206, 141, 141, 141, 87, 141, 141, 141, 141, 141, 81, 81, 81, 81, 81, 81, 141, 141, 141, 141, 206, 206, 141, 141, 22, 108, 141, 81, 81, 206, 219, 206, 141, 141, 141, 81, 81, 81, 81, 141, 206, 141, 141, 141, 141, 141, 206, 141, 141, 141, 141, 141, 81, 81, 81, 81, 81, 81, 206, 80, 141, 141, 141, 154, 141, 141, 141, 141, 141, 206, 108, 141, 81, 81, 81, 81, 81, 141, 141, 141, 81, 81, 81, 81, 141, 141, 15, 141, 141, 206, 141, 141, 15, 141, 141, 141, 210, 141, 146, 205, 141, 141, 141, 81, 81, 81, 81, 81, 81, 141, 141, 141, 141, 206, 141, 141, 141, 141, 81, 81, 81, 81, 141, 141, 81, 26, 2, 74, 22, 141, 141, 141, 141, 206, 141, 141, 141, 206, 141, 81, 81, 81, 206, 81, 141, 141, 81, 81, 81, 81, 2, 74, 22, 141, 141, 141, 141, 141, 141, 141, 81, 81, 81, 81, 206, 141, 141, 206, 141, 206, 206, 141, 141, 206, 141, 141, 141, 141, 15, 206, 141, 141, 206, 195, 134, 207, 81, 81, 81, 81, 81, 141, 141, 141, 80, 141, 141, 141, 141, 141, 141, 15, 206, 141, 81, 81, 81, 141, 141, 141, 141, 141, 15, 141, 206, 141, 141, 206, 141, 141, 81, 141, 141, 81, 15, 141, 141, 81, 81, 141, 141, 141, 206, 81, 141, 141, 206, 141, 141, 141, 81, 81, 81, 81, 81, 141, 141, 141, 141, 80, 141, 81, 81, 141, 141, 81, 26, 81, 81, 81, 81, 81, 81, 206, 206, 15, 141, 206, 206, 206, 141, 141, 141, 141, 81, 141, 141, 81, 81, 81, 206, 141, 141, 141, 141, 141, 15, 141, 141, 141, 206, 141, 141, 141, 141, 141, 141, 141, 141, 206, 141, 141, 141, 141, 141, 15, 206, 141, 141, 141, 141, 141, 141, 141, 141, 141, 206, 206, 206, 141, 206, 206, 141, 206, 206, 141, 141, 15, 206, 141, 141, 141, 141, 141, 141, 141, 141, 141, 141, 141, 141, 206, 81, 141, 141, 206, 81, 141, 206, 15, 206, 141, 141, 141, 141, 141, 206, 141, 141, 141, 81, 81, 81, 206, 141, 141, 141, 141, 141, 141, 141, 206, 206, 141, 145, 141, 206, 141, 141, 141, 141, 141, 141, 
141, 15, 141, 141, 141, 15, 141, 141, 141, 206, 15, 141, 15, 141, 141, 81, 206, 206, 141, 141, 206, 206, 141, 141, 141, 141, 141, 141, 141, 81, 81, 81, 81, 81, 141, 71, 52, 206, 141, 141, 141, 141, 141, 141, 141, 141, 141, 141, 141, 141, 15, 141, 206, 141, 15, 141, 206, 15, 141, 145, 141, 206, 141, 15, 206, 206, 141, 141, 206, 206, 15, 141, 90, 85, 85, 172, 177, 85, 85, 3, 85, 85, 172, 85, 172, 85, 172, 95, 3, 85, 90, 85, 85, 85, 85, 85, 90, 3, 3, 85, 172, 85, 90, 85, 172, 85, 3, 90, 177, 172, 172, 85, 172, 172, 3, 85, 172, 3, 3, 85, 85, 85, 3, 172, 85, 85, 85, 85, 85, 3, 172, 85, 85, 85, 90, 8, 85, 177, 172, 8, 3, 85, 85, 85, 177, 85, 90, 90, 3, 177, 13, 90, 172, 85, 85, 177, 85, 85, 85, 85, 172, 85, 85, 85, 85, 85, 3, 85, 85, 85, 85, 172, 172, 3, 85, 177, 90, 85, 3, 172, 3, 85, 90, 90, 85, 172, 85, 172, 3, 85, 172, 85, 85, 85, 85, 85, 8, 172, 177, 85, 172, 90, 172, 172, 177, 172, 172, 85, 85, 172, 90, 85, 85, 90, 172, 90, 90, 172, 172, 85, 3, 85, 172, 172, 3, 172, 3, 172, 3, 172, 182, 3, 177, 85, 177, 85, 90, 182, 182, 206, 81, 206, 206, 206, 141, 141, 81, 81, 141, 141, 141, 206, 81, 141, 141, 206, 141, 141, 141, 81, 81, 81, 81, 81, 141, 141, 141, 141, 80, 141, 81, 149, 149, 149, 149, 149, 149, 149, 149, 66, 20, 164, 121, 93, 72, 65, 48, 222, 82, 64, 129, 73, 185, 146, 94, 97, 27, 140, 202, 213, 254, 2, 102, 96, 202, 98, 0, 217, 197, 3, 79, 235, 209, 141, 209, 168, 111, 208, 20, 232, 173, 128, 252, 63, 15, 70, 40, 212, 229, 12, 147, 230, 90, 226, 146, 238, 88, 176, 82, 13, 26, 195, 138, 173, 253, 93, 70, 1, 167, 166, 235, 181, 20, 28, 153, 54, 199, 95, 143, 62, 98, 190, 163, 116, 70, 179, 191, 35, 40, 150, 76, 227, 5, 184, 55, 1, 135, 224, 135, 44, 19, 166, 217, 249, 180, 7, 109, 120, 206, 98, 160, 199, 236, 174, 196, 75, 123, 98, 150, 53, 147, 139, 41, 209, 187, 160, 190, 60, 149, 212, 233, 236, 255, 234, 102, 121, 127, 102, 22, 37, 132, 141, 71, 182, 249, 51, 134, 54, 39, 33, 155, 57, 134, 177, 167, 191, 30, 26, 170, 131, 37, 95, 123, 112, 102, 32, 163, 244, 
75, 57, 176, 53, 188, 50, 126, 198, 0, 250, 183, 28, 8, 84, 104, 255, 238, 144, 4, 86, 143, 185, 56, 209, 193, 193, 47, 80, 244, 95, 85, 187, 179, 71, 141, 245, 205, 83, 160, 79, 30, 213, 234, 195, 212, 126, 17, 111, 111, 178, 52, 188, 241, 217, 117, 47, 62, 125, 110, 159, 209, 65, 17, 84, 125, 147, 246, 201, 0, 30, 215, 158, 106, 108, 52, 48, 250, 152, 102, 220, 140, 120, 56, 194, 179, 22, 173, 158, 232, 154, 68, 218, 22, 207, 25, 205, 225, 131, 179, 253, 82, 128, 193, 157, 247, 241, 53, 61, 117, 127, 69, 6, 251, 125, 198, 0, 166, 200, 235, 121, 235, 104, 132, 60, 230, 108, 211, 150, 147, 211, 105, 142, 25, 154, 234, 243, 174, 244, 60, 149, 56, 6, 206, 52, 191, 194, 30, 190, 144, 155, 41, 29, 49, 63, 253, 137, 89, 198, 242, 246, 201, 26, 128, 201, 68, 70, 63, 23, 35, 127, 251, 93, 100, 84, 163, 184, 226, 144, 83, 60, 241, 85, 53, 223, 6, 255, 23, 99, 109, 195, 84, 194, 64, 129, 95, 82, 224, 45, 243, 63, 221, 48, 194, 131, 18, 72, 217, 35, 153, 130, 154, 43, 146, 233, 41, 21, 59, 9, 110, 129, 115, 154, 69, 25, 191, 11, 158, 232, 132, 38, 231, 120, 18, 194, 153, 166, 179, 80, 237, 99, 16, 80, 217, 71, 151, 120, 233, 98, 84, 228, 97, 10, 187, 10, 222, 13, 182, 212, 237, 28, 152, 98, 26, 69, 190, 134, 158, 177, 153, 148, 90, 231, 199, 45, 143, 189, 81, 96, 1, 239, 254, 127, 230, 242, 109, 244, 84, 146, 73, 45, 48, 40, 225, 59, 115, 157, 47, 253, 76, 4, 19, 153, 121, 187, 84, 179, 129, 4, 3, 97, 215, 194, 244, 219, 85, 111, 196, 238, 111, 32, 155, 18, 90, 4, 230, 130, 19, 40, 86, 175, 161, 103, 148, 91, 201, 120, 66, 159, 95, 245, 46, 125, 204, 168, 73, 124, 80, 255, 205, 159, 64, 228, 254, 255, 198, 36, 62, 98, 46, 200, 19, 142, 25, 54, 167, 225, 11, 114, 250, 40, 6, 121, 117, 18, 37, 167, 185, 187, 158, 39, 108, 65, 212, 65, 193, 212, 47, 113, 206, 70, 64, 95, 127, 243, 150, 184, 252, 20, 70, 167, 106, 17, 244, 242, 25, 224, 170, 79, 75, 20, 176, 173, 255, 34, 102, 193, 41, 70, 44, 27, 119, 69, 229, 190, 10, 41, 200, 122, 199, 196, 47, 222, 247, 26, 181, 93, 65, 150, 
63, 183, 215, 150, 49, 189, 233, 225, 68, 106, 90, 94, 50, 23, 45, 222, 172, 74, 73, 22, 112, 129, 237, 24, 68, 58, 145, 57, 233, 235, 223, 200, 66, 205, 171, 78, 252, 78, 239, 6, 7, 14, 7, 14, 7, 14, 7, 14, 9, 14, 7, 14, 9, 14, 7, 14, 7, 14, 7, 14, 4, 7, 14, 7, 14, 7, 1, 9, 14, 7, 14, 7, 11, 14, 4, 7, 14, 7, 7, 14, 9, 14, 7, 7, 7, 14, 9, 7, 7, 7, 14, 9, 6, 11, 14, 7, 14, 4, 10, 11, 14, 4, 7, 14, 7, 7, 14, 9, 14, 7, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 7, 14, 9, 7, 7, 7, 14, 7, 6, 4, 7, 14, 7, 7, 14, 9, 14, 4, 7, 14, 7, 7, 14, 9, 14, 7, 7, 7, 7, 7, 14, 7, 7, 14, 13, 14, 6, 4, 7, 14, 7, 7, 14, 9, 9, 9, 9, 9, 0, 10, 11, 14, 9, 9, 14, 7, 3, 7, 11, 14, 4, 7, 14, 7, 7, 7, 14, 9, 7, 7, 7, 1, 13, 7, 7, 7, 6, 5, 2, 7, 14, 7, 14, 9, 14, 7, 7, 14, 9, 3, 14, 14, 14, 14, 7, 3, 12, 15, 11, 14, 7, 14, 13, 5, 4, 10, 11, 14, 4, 5, 2, 9, 9, 0, 10, 11, 11, 1, 9, 14, 7, 7, 7, 14, 9, 7, 7, 7, 14, 9, 7, 7, 7, 14, 7, 6, 4, 7, 14, 7, 7, 14, 9, 14, 4, 7, 7, 2, 1, 9, 14, 14, 7, 7, 14, 9, 7, 3, 7, 14, 7, 150, 150, 150, 9, 14, 7, 14, 9, 150, 150, 150, 150, 150, 150, 14, 9, 14, 7, 7, 14, 21, 9, 7, 7, 4, 5, 2, 9, 150, 21, 150, 150, 150, 150, 7, 14, 9, 6, 11, 150, 150, 150, 150, 150, 150, 14, 4, 7, 14, 7, 7, 7, 14, 9, 7, 7, 14, 14, 14, 150, 150, 150, 21, 150, 150, 150, 150, 150, 7, 7, 7, 14, 7, 150, 150, 150, 21, 21, 150, 7, 14, 9, 150, 21, 150, 150, 150, 150, 150, 150, 150, 150, 150, 21, 14, 9, 6, 11, 150, 150, 150, 150, 1, 9, 14, 7, 150, 150, 150, 150, 150, 21, 150, 7, 14, 9, 150, 150, 150, 150, 9, 7, 3, 7, 14, 7, 150, 150, 150, 150, 150, 150, 150, 150, 150, 14, 223, 161, 173, 139, 177, 35, 28, 219, 164, 244, 162, 181, 88, 99, 240, 3, 95, 42, 64, 236, 72, 198, 149, 15, 59, 210, 30, 17, 135, 84, 241, 33, 3, 151, 69, 9, 109, 94, 37, 178, 187, 78, 115, 80, 36, 116, 103, 133, 194, 211, 77, 81, 141, 43, 82, 140, 146, 180, 7, 237, 21, 41, 19, 204, 69, 71, 81, 95, 28, 42, 127, 12, 43, 71, 86, 223, 241, 60, 130, 14, 45, 18, 160, 25, 250, 105, 59, 116, 236, 199, 94, 111, 44, 71, 105, 191, 87, 117, 
67, 69, 133, 83, 188, 193, 85, 232, 218, 4, 115, 78, 74, 240, 168, 203, 162, 0, 82, 58, 83, 142, 94, 16, 209, 57, 10, 175, 42, 28, 22, 17, 86, 25, 31, 69, 214, 191, 230, 44, 131, 172, 73, 26, 148, 234, 105, 120, 168, 86, 118, 16, 158, 217, 2, 69, 16, 191, 31, 108, 132, 116, 58, 242, 139, 181, 117, 138, 41, 152, 226, 97, 189, 227, 41, 56, 128, 238, 115, 41, 233, 236, 250, 77, 38, 208, 212, 188, 242, 30, 194, 227, 78, 19, 4, 182, 21, 109, 29, 23, 239, 121, 206, 165, 217, 43, 244, 233, 247, 164, 212, 144, 167, 201, 17, 204, 169, 160, 27, 2, 81, 151, 133, 10, 141, 72, 68, 96, 41, 99, 78, 255, 106, 6, 146, 161, 218, 139, 117, 210, 222, 177, 157, 69, 49, 191, 167, 36, 90, 91, 237, 212, 43, 177, 141, 132, 135, 231, 156, 157, 161, 122, 61, 134, 43, 167, 175, 63, 47, 174, 21, 205, 38, 160, 92, 171, 93, 188, 206, 78, 84, 80, 59, 69, 152, 127, 90, 111, 63, 116, 251, 222, 96, 2, 114, 138, 217, 56, 107, 251, 162, 6, 104, 159, 159, 218, 210, 174, 233, 220, 163, 98, 73, 227, 65, 33, 180, 91, 128, 30, 25, 10, 166, 1, 132, 151, 83, 37, 215, 187, 121, 110, 179, 84, 205, 76, 132, 16, 106, 132, 217, 7, 78, 93, 155, 50, 198, 77, 190, 16, 23, 7, 219, 55, 246, 183, 148, 13, 109, 180, 210, 45, 42, 91, 208, 82, 11, 76, 42, 176, 75, 241, 36, 117, 238, 64, 22, 127, 212, 199, 210, 207, 185, 7, 32, 183, 16, 10, 163, 40, 232, 186, 20, 190, 47, 172, 5, 124, 192, 52, 20, 61, 51, 28, 51, 10, 18, 136, 207, 152, 231, 159, 222, 122, 150, 121, 37, 150, 27, 40, 60, 140, 189, 170, 39, 124, 245, 194, 148, 239, 241, 103, 26, 67, 169, 120, 85, 22, 203, 193, 2, 175, 235, 146, 169, 41, 165, 16, 72, 249, 152, 144, 103, 233, 177, 190, 171, 62, 160, 137, 204, 152, 198, 127, 22, 86, 13, 174, 116, 188, 44, 214, 117, 241, 44, 50, 230, 61, 129, 0, 134, 55, 11, 220, 142, 226, 212, 252, 135, 166, 204, 155, 202, 122, 29, 200, 19, 0, 162, 130, 173, 231, 105, 100, 237, 99, 242, 35, 54, 219, 44, 109, 21, 52, 195, 255, 177, 134, 119, 161, 236, 107, 120, 218, 52, 162, 229, 234, 229, 4, 210, 171, 247, 156, 176, 140, 115, 
24, 152, 99, 62, 35, 207, 13, 106, 232, 191, 88, 220, 82, 110, 71, 46, 127, 91, 80, 92, 33, 37, 76, 188, 48, 165, 112, 1, 165, 49, 102, 1, 227, 84, 119, 179, 69, 8, 98, 116, 35, 247, 94, 35, 146, 235, 248, 253, 70, 45, 150, 236, 137, 150, 167, 225, 203, 81, 126, 51, 246, 221, 117, 6, 133, 250, 62, 101, 14, 3, 106, 2, 20, 19, 115, 207, 101, 94, 109, 74, 84, 92, 68, 106, 63, 189, 15, 55, 104, 141, 33, 217, 27, 82, 203, 139, 224, 237, 16, 49, 57, 33, 57, 74, 110, 154, 102, 138, 3, 228, 44, 92, 44, 184, 22, 4, 89, 177, 159, 213, 26, 203, 183, 186, 77, 136, 151, 195, 41, 76, 231, 157, 51, 116, 94, 65, 178, 150, 74, 226, 208, 15, 254, 149, 38, 79, 171, 223, 156, 6, 156, 24, 122, 120, 37, 164, 59, 4, 2, 49, 247, 214, 35, 126, 91, 237, 139, 117, 248, 226, 52, 43, 61, 97, 59, 37, 174, 213, 163, 162, 235, 222, 61, 227, 142, 190, 63, 251, 174, 220, 82, 26, 193, 92, 234, 177, 196, 50, 206, 252, 224, 152, 107, 52, 244, 63, 146, 159, 105, 215, 223, 54, 2, 94, 107, 163, 52, 163, 94, 201, 114, 251, 251, 215, 6, 70, 239, 205, 100, 200, 31, 72, 42, 15, 69, 117, 255, 16, 176, 178, 77, 220, 237, 219, 196, 213, 79, 82, 16, 223, 146, 183, 41, 175, 179, 3, 141, 16, 59, 177, 174, 96, 13, 50, 72, 112, 81, 164, 161, 29, 8, 40, 168, 16, 12, 23, 40, 185, 8, 129, 8, 33, 54, 173, 189, 234, 147, 137, 137, 140, 175, 115, 28, 221, 243, 177, 149, 221, 108, 142, 148, 4, 66, 52, 191, 29, 34, 66, 187, 143, 98, 114, 3, 211, 154, 132, 218, 76, 146, 62, 105, 129, 193, 12, 207, 110, 97, 18, 67, 232, 63, 94, 142, 76, 108, 220, 52, 152, 24, 16, 10, 59, 35, 159, 59, 83, 58, 85, 46, 28, 220, 170, 194, 88, 108, 11, 180, 41, 155, 13, 250, 165, 127, 66, 118, 235, 204, 181, 0, 97, 85, 125, 1, 208, 87, 160, 1, 25, 26, 46, 138, 43, 106, 152, 52, 242, 69, 246, 255, 58, 238, 230, 68, 118, 60, 145, 14, 63, 123, 63, 246, 83, 127, 10, 182, 253, 108, 98, 180, 213, 47, 223, 44, 99, 66, 13, 223, 61, 181, 30, 77, 223, 155, 13, 90, 202, 57, 225, 238, 29, 221, 51, 66, 242, 245, 40, 149, 21, 72, 214, 176, 38, 96, 186, 131, 
189, 114, 85, 101, 37, 20, 130, 121, 86, 194, 223, 79, 207, 237, 242, 100, 153, 40, 117, 44, 213, 102, 193, 99, 187, 198, 32, 47, 97, 197, 70, 15, 215, 21, 64, 153, 106, 207, 245, 192, 84, 189, 234, 254, 1, 14, 25, 85, 28, 230, 179, 211, 28, 14, 6, 13, 77, 13, 7, 9, 12, 139, 199, 239, 5, 11, 14, 2, 7, 93, 8, 239, 5, 15, 6, 7, 7, 0, 8, 3, 239, 10, 11, 247, 2, 7, 93, 8, 239, 13, 5, 15, 6, 7, 1, 224, 97, 166, 185, 250, 15, 230, 13, 14, 121, 8, 248, 34, 247, 69, 14, 73, 9, 5, 11, 13, 11, 2, 10, 7, 11, 138, 1, 1, 2, 139, 243, 235, 239, 5, 20, 11, 9, 6, 11, 12, 2, 11, 8, 239, 109, 0, 220, 118, 34, 6, 1, 8, 4, 10, 12, 93, 7, 139, 2, 61, 156, 208, 142, 2, 126, 239, 9, 3, 1, 13, 99, 233, 99, 166, 255, 139, 215, 189, 49, 166, 139, 14, 85, 1, 163, 9, 14, 209, 39, 102, 42, 255, 42, 0, 12, 2, 253, 93, 1, 12, 139, 112, 1, 15, 10, 8, 6, 3, 42, 177, 243, 145, 44, 4, 166, 68, 74, 76, 10, 0, 13, 14, 14, 3, 11, 139, 20, 183, 55, 200, 3, 9, 2, 139, 76, 178, 139, 57, 205, 139, 14, 73, 4, 37, 117, 10, 3, 3, 15, 12, 112, 215, 199, 113, 0, 30, 164, 170, 88, 127, 11, 239, 3, 12, 3, 5, 8, 10, 15, 0, 1, 3, 9, 7, 1, 10, 0, 15, 106, 2, 66, 213, 9, 180, 123, 134, 118, 8, 11, 1, 5, 1, 3, 166, 13, 72, 127, 119, 40, 14, 9, 3, 106, 106, 168, 245, 211, 95, 11, 3, 11, 12, 13, 255, 1, 5, 3, 4, 15, 2, 14, 5, 10, 3, 8, 15, 159, 29, 118, 0, 0, 10, 12, 3, 14, 2, 250, 139, 4, 13, 5, 9, 177, 13, 8, 13, 139, 10, 6, 2, 166, 3, 179, 0, 136, 139, 250, 239, 4, 13, 3, 5, 161, 148, 162, 113, 189, 245, 222, 141, 98, 120, 173, 21, 2, 10, 13, 0, 15, 9, 101, 238, 3, 1, 2, 13, 6, 139, 14, 190, 196, 15, 3, 14, 1, 114, 15, 15, 11, 13, 146, 54, 84, 176, 7, 0, 92, 187, 4, 9, 0, 218, 231, 166, 6, 9, 9, 86, 3, 11, 6, 4, 1, 7, 4, 2, 5, 13, 11, 8, 9, 0, 239, 10, 5, 14, 8, 16, 1, 250, 250, 163, 215, 14, 12, 1, 0, 13, 1, 166, 15, 239, 20, 121, 46, 250, 250, 239, 2, 12, 139, 8, 0, 13, 197, 243, 0, 222, 183, 170, 46, 98, 0, 20, 138, 2, 4, 57, 1, 10, 0, 1, 15, 8, 0, 0, 10, 239, 145, 90, 250, 239, 2, 12, 6, 166, 179, 23, 110, 248, 
1, 233, 20, 15, 106, 93, 166, 13, 5, 15, 6, 7, 1, 249, 1, 1, 139, 1, 13, 137, 14, 9, 203, 208, 13, 139, 7, 215, 9, 7, 14, 13, 7, 12, 6, 156, 23, 247, 124, 59, 103, 127, 7, 14, 13, 7, 4, 1, 0, 7, 6, 8, 3, 166, 166, 24, 180, 113, 179, 24, 4, 0, 11, 2, 250, 250, 4, 93, 6, 93, 7, 139, 2, 61, 156, 208, 139, 6, 7, 166, 239, 3, 15, 37, 69, 198, 159, 158, 194, 166, 224, 143, 0, 13, 5, 24, 239, 8, 13, 6, 1, 7, 8, 200, 4, 5, 1, 13, 9, 4, 11, 169, 200, 156, 232, 27, 93, 2, 5, 5, 14, 187, 132, 253, 4, 10, 12, 3, 1, 3, 141, 207, 130, 169, 96, 12, 198, 5, 13, 93, 121, 46, 214, 6, 13, 0, 13, 93, 4, 239, 89, 9, 13, 10, 3, 239, 3, 12, 3, 185, 232, 254, 177, 243, 152, 11, 156, 14, 2, 5, 178, 16, 156, 3, 239, 11, 7, 8, 7, 11, 12, 9, 14, 121, 128, 125, 135, 123, 83, 10, 15, 12, 24, 6, 7, 139, 2, 1, 239, 101, 208, 12, 12, 6, 139, 7, 11, 230, 59, 176, 11, 228, 213, 149, 6, 246, 166, 8, 201, 5, 8, 4, 5, 7, 1, 7, 13, 26, 139, 106, 11, 12, 178, 1, 106, 106, 139, 10, 8, 157, 81, 208, 106, 3, 81, 139, 2, 0, 12, 9, 15, 13, 10, 7, 84, 102, 230, 139, 11, 9, 6, 11, 12, 2, 11, 8, 1, 3, 13, 9, 12, 1, 4, 3, 1, 4, 26, 14, 8, 10, 28, 74, 1, 242, 45, 245, 72, 6, 3, 8, 5, 14, 8, 16, 6, 3, 14, 1, 139, 73, 1, 224, 97, 77, 224, 69, 106, 139, 171, 239, 8, 4, 14, 14, 0, 15, 15, 72, 127, 3, 9, 54, 187, 197, 203, 9, 4, 106, 4, 4, 14, 93, 166, 9, 12, 157, 0, 2, 11, 0, 15, 0, 0, 11, 213, 9, 7, 14, 249, 71, 102, 251, 229, 245, 166, 5, 12, 0, 89, 175, 141, 13, 12, 239, 95, 139, 18, 15, 8, 8, 31, 2, 4, 15, 207, 93, 139, 6, 11, 3, 166, 106, 97, 210, 162, 113, 189, 20, 31, 251, 141, 180, 239, 139, 187, 163, 0, 12, 13, 2, 139, 110, 247, 106, 199, 13, 139, 4, 5, 15, 11, 3, 4, 1, 239, 93, 146, 139, 132, 253, 42, 177, 243, 152, 104, 13, 148, 225, 139, 148, 150, 9, 6, 211, 12, 3, 6, 177, 163, 139, 12, 4, 0, 8, 219, 144, 4, 10, 15, 109, 80, 3, 139, 4, 74, 15, 7, 1, 8, 7, 8, 1, 106, 57, 178, 99, 87, 77, 232, 10, 9, 15, 10, 139, 1, 12, 26, 106, 139, 7, 0, 7, 118, 107, 213, 149, 241, 23, 19, 124, 84, 106, 8, 10, 201, 3, 9, 
15, 7, 1, 10, 139, 10, 13, 5, 12, 14, 69, 0, 5, 44, 76, 179, 25, 1, 11, 12, 15, 73, 3, 7, 11, 5, 3, 4, 10, 24, 10, 10, 7, 15, 13, 9, 0, 9, 6, 3, 77, 25, 179, 2, 239, 3, 9, 6, 114, 126, 197, 49, 11, 2, 239, 6, 7, 9, 4, 13, 10, 12, 2, 20, 9, 6, 12, 3, 1, 166, 5, 5, 11, 7, 4, 225, 239, 166, 6, 3, 14, 1, 100, 239, 11, 7, 7, 203, 208, 13, 8, 4, 11, 7, 13, 14, 5, 6, 15, 179, 112, 60, 122, 1, 15, 8, 93, 136, 90, 38, 153, 156, 62, 239, 226, 196, 83, 11, 0, 7, 12, 201, 227, 95, 54, 250, 250, 250, 180, 93, 8, 15, 13, 2, 10, 9, 8, 9, 10, 2, 2, 5, 6, 7, 6, 9, 8, 13, 13, 10, 9, 8, 6, 10, 58, 203, 253, 12, 15, 5, 15, 239, 239, 233, 83, 13, 6, 2, 48, 143, 16, 41, 11, 66, 4, 5, 6, 8, 129, 123, 2, 240, 183, 15, 14, 3, 239, 26, 200, 42, 93, 136, 90, 56, 104, 13, 148, 225, 34, 148, 150, 1, 8, 2, 4, 166, 223, 1, 170, 3, 12, 5, 14, 73, 4, 61, 112, 249, 1, 1, 37, 190, 39, 183, 55, 200, 191, 118, 12, 57, 0, 110, 81, 164, 12, 178, 30, 201, 42, 29, 49, 102, 171, 74, 106, 0, 3, 13, 5, 13, 240, 136, 184, 119, 218, 5, 8, 28, 251, 101, 6, 168, 208, 126, 13, 12, 247, 235, 8, 5, 6, 214, 231, 255, 169, 69, 25, 12, 234, 6, 13, 237, 4, 3, 0, 14, 6, 95, 139, 18, 15, 8, 8, 31, 15, 14, 197, 107, 200, 15, 5, 14, 2, 192, 54, 0, 25, 86, 249, 231, 8, 7, 5, 5, 183, 11, 0, 15, 0, 0, 11, 218, 213, 177, 42, 13, 0, 1, 14, 11, 12, 12, 0, 15, 15, 8, 13, 239, 239, 12, 1, 6, 12, 45, 106, 196, 149, 6, 11, 13, 13, 1, 0, 2, 14, 14, 2, 3, 13, 1, 178, 58, 168, 57, 209, 2, 5, 154, 91, 171, 39, 112, 106, 5, 12, 11, 6, 166, 5, 1, 8, 4, 93, 12, 12, 138, 196, 61, 127, 239, 207, 94, 2, 120, 151, 207, 200, 96, 121, 8, 0, 15, 2, 4, 1, 9, 4, 9, 0, 10, 2, 1, 3, 8, 9, 12, 0, 15, 12, 11, 14, 52, 44, 11, 11, 6, 1, 156, 245, 171, 1, 178, 1, 13, 166, 1, 4, 112, 27, 151, 93, 169, 2, 4, 2, 7, 14, 15, 174, 6, 89, 55, 3, 4, 168, 245, 211, 11, 4, 9, 128, 115, 239, 3, 239, 0, 4, 8, 15, 9, 239, 3, 9, 6, 114, 126, 197, 49, 139, 239, 6, 4, 14, 13, 5, 1, 0, 9, 106, 239, 166, 20, 65, 89, 177, 101, 104, 38, 174, 219, 21, 83, 250, 209, 210, 241, 
166, 239, 239, 13, 0, 8, 5, 12, 8, 0, 10, 2, 1, 3, 8, 9, 12, 0, 15, 12, 11, 14, 52, 44, 11, 211, 12, 3, 6, 5, 4, 210, 95, 128, 20, 204, 4, 13, 15, 14, 3, 4, 10, 7, 76, 239, 116, 58, 116, 7, 166, 8, 12, 0, 250, 250, 15, 15, 254, 99, 209, 10, 14, 110, 155, 253, 93, 239, 2, 33, 1, 224, 155, 23, 224, 104, 0, 9, 171, 200, 159, 67, 191, 159, 143, 1, 15, 15, 0, 225, 157, 182, 6, 84, 192, 106, 12, 1, 9, 11, 13, 6, 2, 5, 4, 106, 11, 12, 6, 1, 4, 3, 1, 15, 0, 15, 8, 195, 54, 1, 11, 0, 249, 197, 166, 215, 199, 113, 237, 51, 130, 221, 188, 250, 117, 15, 70, 72, 163, 107, 188, 60, 14, 101, 108, 90, 145, 44, 8, 2, 1, 14, 7, 3, 9, 2, 139, 216, 152, 0, 104, 13, 148, 225, 34, 148, 150, 1, 8, 20, 145, 44, 69, 140, 0, 8, 8, 11, 20, 239, 239, 9, 54, 187, 197, 4, 6, 102, 15, 179, 112, 250, 250, 1, 5, 14, 11, 13, 13, 166, 109, 153, 106, 178, 109, 80, 203, 97, 10, 1, 0, 13, 9, 75, 150, 144, 98, 59, 0, 7, 12, 12, 138, 196, 239, 11, 216, 33, 28, 251, 101, 196, 129, 143, 126, 8, 7, 4, 238, 92, 239, 14, 2, 2, 239, 14, 2, 4, 145, 90, 239, 63, 99, 233, 99, 166, 255, 20, 100, 249, 77, 233, 67, 93, 152, 14, 12, 0, 218, 163, 176, 17, 229, 198, 74, 12, 6, 7, 6, 12, 6, 156, 23, 247, 124, 59, 9, 1, 7, 14, 12, 14, 6, 239, 166, 3, 3, 6, 5, 6, 13, 5, 4, 10, 2, 0, 12, 1, 5, 11, 15, 9, 166, 166, 57, 0, 110, 81, 164, 15, 243, 128, 11, 7, 239, 0, 13, 6, 38, 8, 6, 10, 13, 14, 0, 13, 6, 4, 7, 11, 6, 5, 57, 93, 114, 195, 101, 7, 9, 1, 10, 14, 0, 197, 92, 30, 10, 11, 11, 11, 239, 15, 0, 11, 13, 8, 233, 67, 252, 72, 0, 12, 90, 81, 141, 129, 88, 4, 10, 4, 7, 13, 0, 220, 118, 34, 149, 224, 187, 247, 220, 1, 6, 10, 13, 5, 4, 14, 15, 4, 7, 223, 1, 3, 1, 4, 15, 4, 4, 10, 9, 13, 3, 6, 84, 87, 140, 6, 2, 12, 7, 13, 12, 9, 14, 93, 166, 6, 9, 9, 11, 13, 12, 7, 4, 239, 239, 11, 11, 2, 7, 118, 1, 5, 10, 104, 197, 239, 9, 10, 14, 13, 5, 2, 10, 14, 13, 13, 239, 20, 8, 0, 15, 14, 197, 92, 30, 10, 11, 11, 11, 2, 8, 239, 1, 239, 13, 13, 10, 12, 2, 5, 250, 250, 250, 250, 66, 72, 127, 3, 123, 84, 85, 130, 10, 3, 106, 106, 9, 60, 
139, 216, 152, 0, 90, 145, 44, 8, 45, 15, 8, 7, 7, 93, 106, 106, 174, 1, 2, 0, 9, 14, 6, 4, 14, 3, 4, 239, 90, 38, 153, 156, 217, 14, 9, 8, 12, 14, 15, 9, 14, 57, 0, 0, 61, 1, 42, 255, 3, 7, 6, 6, 3, 3, 5, 15, 11, 13, 16, 156, 20, 93, 30, 201, 1, 12, 7, 8, 8, 13, 240, 2, 75, 143, 225, 54, 59, 233, 110, 7, 235, 239, 202, 106, 80, 81, 216, 159, 194, 229, 72, 86, 144, 209, 9, 239, 24, 176, 181, 117, 45, 0, 2, 166, 7, 11, 13, 8, 20, 11, 7, 7, 239, 15, 166, 11, 62, 208, 174, 224, 73, 10, 166, 147, 124, 13, 13, 3, 13, 14, 1, 5, 62, 190, 151, 32, 214, 56, 32, 150, 2, 6, 11, 1, 7, 8, 14, 4, 15, 21, 13, 13, 8, 13, 14, 2, 134, 19, 93, 11, 93, 4, 5, 1, 6, 13, 0, 4, 1, 12, 1, 6, 2, 13, 3, 240, 94, 14, 3, 80, 61, 183, 10, 13, 5, 2, 61, 48, 166, 4, 6, 14, 51, 88, 253, 157, 81, 208, 106, 3, 6, 11, 3, 93, 160, 117, 4, 3, 13, 6, 38, 166, 239, 31, 251, 141, 247, 11, 156, 208, 142, 36, 26, 187, 172, 72, 100, 183, 3, 9, 93, 203, 253, 175, 102, 13, 6, 12, 8, 5, 14, 105, 200, 243, 172, 239, 2, 150, 128, 211, 239, 66, 108, 68, 74, 174, 88, 57, 128, 138, 153, 168, 234, 164, 164, 171, 2, 15, 14, 1, 4, 3, 2, 10, 152, 250, 250, 250, 250, 4, 10, 2, 5, 3, 2, 10, 135, 229, 238, 35, 3, 7, 11, 0, 7, 12, 12, 138, 196, 61, 127, 250, 4, 11, 5, 26, 209, 221, 109, 120, 106, 203, 203, 203, 85, 123, 169, 8, 239, 13, 5, 15, 6, 7, 1, 14, 6, 12, 1, 9, 11, 13, 239, 0, 15, 12, 13, 8, 13, 14, 5, 10, 3, 8, 15, 159, 29, 118, 12, 8, 1, 12, 8, 9, 11, 239, 15, 106, 106, 168, 208, 166, 199, 13, 121, 250, 250, 250, 66, 72, 4, 7, 3, 166, 7, 3, 7, 142, 23, 7, 195, 105, 156, 127, 250, 6, 4, 14, 13, 5, 239, 239, 2, 10, 1, 215, 59, 241, 157, 6, 7, 8, 9, 4, 13, 9, 12, 7, 32, 241, 82, 166, 122, 168, 81, 7, 0, 158, 110, 50, 14, 17, 239, 190, 39, 183, 55, 200, 191, 7, 75, 119, 179, 2, 7, 251, 1, 237, 7, 8, 2, 0, 1, 9, 11, 239, 6, 12, 7, 14, 10, 7, 15, 2, 2, 3, 0, 6, 10, 166, 8, 5, 12, 2, 7, 9, 4, 106, 178, 109, 80, 6, 5, 12, 3, 12, 12, 9, 5, 10, 1, 166, 217, 86, 174, 1, 1, 1, 151, 7, 2, 14, 239, 70, 239, 106, 106, 254, 151, 
200, 2, 224, 16, 206, 81, 245, 4, 13, 2, 239, 159, 69, 87, 5, 134, 114, 0, 5, 5, 231, 227, 195, 6, 11, 3, 3, 15, 13, 7, 177, 8, 85, 246, 202, 89, 210, 9, 0, 166, 7, 11, 5, 10, 1, 109, 106, 3, 3, 2, 10, 1, 239, 255, 146, 45, 231, 140, 15, 9, 15, 14, 12, 7, 3, 166, 1, 5, 8, 116, 58, 116, 10, 5, 14, 8, 16, 210, 82, 248, 1, 2, 15, 13, 0, 8, 5, 12, 8, 8, 14, 3, 10, 5, 15, 7, 121, 8, 117, 122, 239, 200, 254, 204, 188, 167, 239, 14, 15, 212, 1, 217, 166, 61, 127, 250, 4, 11, 5, 26, 209, 221, 109, 120, 58, 168, 57, 209, 2, 5, 154, 91, 171, 39, 112, 106, 239, 15, 1, 1, 2, 224, 69, 0, 15, 106, 118, 60, 2, 12, 4, 1, 11, 1, 13, 13, 0, 8, 13, 182, 74, 193, 144, 89, 106, 6, 248, 1, 123, 5, 1, 15, 9, 2, 166, 126, 13, 8, 175, 159, 220, 4, 64, 110, 37, 117, 130, 214, 8, 201, 51, 9, 3, 14, 250, 250, 250, 250, 15, 3, 9, 13, 3, 5, 106, 109, 203, 8, 9, 4, 15, 5, 7, 9, 12, 186, 51, 9, 3, 14, 6, 95, 139, 18, 86, 3, 11, 60, 6, 4, 5, 13, 7, 239, 2, 1, 8, 7, 8, 1, 106, 106, 75, 3, 8, 7, 12, 4, 10, 4, 7, 13, 1, 9, 12, 4, 3, 9, 12, 4, 7, 8, 10, 0, 232, 23, 132, 233, 9, 1, 15, 1, 76, 229, 144, 2, 9, 13, 113, 106, 93, 244, 130, 39, 103, 250, 15, 6, 7, 7, 166, 13, 1, 15, 15, 1, 12, 117, 121, 189, 250, 250, 250, 2, 5, 172, 12, 1, 248, 104, 46, 15, 14, 0, 11, 12, 9, 3, 13, 3, 4, 0, 11, 140, 86, 88, 148, 114, 171, 5, 71, 1, 1, 14, 207, 7, 7, 13, 4, 7, 14, 10, 9, 5, 15, 2, 0, 2, 8, 3, 63, 33, 69, 1, 12, 12, 6, 250, 250, 130, 19, 62, 33, 15, 15, 66, 156, 146, 107, 188, 166, 239, 31, 251, 141, 250, 250, 250, 109, 92, 187, 132, 178, 25, 203, 106, 240, 7, 6, 4, 0, 8, 0, 15, 14, 197, 107, 200, 254, 15, 246, 254, 99, 209, 10, 1, 0, 13, 11, 9, 0, 92, 95, 68, 49, 137, 230, 13, 7, 177, 8, 15, 3, 3, 6, 84, 87, 140, 6, 2, 12, 7, 13, 12, 9, 239, 2, 4, 13, 14, 5, 6, 15, 20, 9, 12, 4, 0, 8, 106, 21, 180, 223, 91, 117, 113, 179, 123, 173, 49, 6, 12, 0, 10, 9, 11, 6, 15, 13, 120, 62, 9, 14, 2, 4, 166, 62, 80, 250, 70, 12, 3, 166, 166, 185, 249, 1, 1, 5, 166, 0, 11, 8, 10, 0, 30, 164, 170, 88, 127, 145, 14, 2, 9, 5, 
12, 11, 6, 246, 166, 8, 201, 5, 8, 4, 5, 15, 9, 12, 4, 0, 8, 106, 21, 180, 239, 15, 4, 5, 1, 9, 12, 11, 3, 185, 232, 254, 12, 15, 4, 10, 15, 4, 8, 9, 0, 9, 6, 5, 252, 143, 134, 5, 69, 2, 250, 21, 83, 250, 209, 9, 10, 8, 157, 81, 208, 106, 3, 81, 141, 129, 93, 7, 106, 130, 36, 11, 5, 5, 5, 231, 20, 88, 157, 120, 192, 15, 3, 6, 0, 7, 10, 239, 15, 13, 11, 2, 10, 6, 9, 9, 11, 13, 12, 7, 4, 10, 2, 6, 5, 11, 18, 165, 249, 187, 6, 12, 1, 13, 7, 2, 13, 13, 13, 242, 130, 39, 103, 209, 155, 97, 122, 241, 97, 101, 227, 64, 20, 99, 156, 3, 189, 8, 190, 199, 254, 157, 67, 58, 234, 101, 54, 65, 14, 4, 8, 12, 0, 12, 6, 21, 48, 83, 157, 7, 5, 14, 13, 12, 68, 74, 106, 0, 3, 8, 0, 0, 0, 9, 14, 7, 250, 149, 106, 27, 246, 13, 11, 9, 6, 51, 9, 3, 14, 166, 78, 247, 12, 13, 5, 4, 14, 121, 46, 214, 247, 181, 54, 106, 238, 64, 118, 232, 23, 132, 233, 34, 182, 71, 45, 131, 117, 184, 5, 13, 3, 7, 11, 14, 0, 1, 13, 14, 15, 0, 106, 254, 151, 200, 3, 9, 215, 106, 0, 4, 15, 15, 42, 198, 108, 193, 1, 14, 8, 0, 9, 15, 0, 7, 13, 0, 1, 14, 11, 14, 11, 7, 0, 109, 117, 52, 179, 24, 2, 14, 7, 8, 11, 3, 8, 115, 27, 197, 107, 100, 212, 77, 254, 132, 7, 12, 14, 0, 14, 250, 250, 250, 250, 202, 106, 106, 106, 106, 106, 3, 6, 7, 2, 12, 72, 13, 15, 11, 196, 61, 99, 152, 3, 44, 137, 96, 56, 98, 172, 6, 10, 11, 13, 11, 9, 0, 4, 1, 0, 13, 8, 0, 12, 7, 3, 80, 61, 183, 1, 10, 10, 8, 2, 5, 0, 0, 13, 9, 14, 8, 5, 14, 2, 11, 106, 106, 106, 75, 3, 10, 2, 1, 12, 7, 12, 14, 106, 109, 106, 253, 130, 228, 159, 211, 84, 112, 167, 13, 0, 11, 8, 4, 5, 15, 5, 0, 8, 4, 7, 1, 9, 2, 0, 4, 3, 1, 14, 15, 5, 11, 254, 162, 122, 28, 212, 81, 121, 225, 54, 59, 233, 110, 155, 253, 152, 140, 133, 250, 250, 250, 250, 250, 250, 9, 6, 9, 13, 5, 6, 3, 14, 128, 218, 231, 128, 13, 4, 12, 3, 8, 179, 114, 125, 76, 147, 124, 13, 13, 3, 13, 14, 6, 132, 184, 135, 46, 11, 23, 233, 172, 254, 7, 8, 75, 178, 67, 3, 8, 10, 236, 135, 252, 7, 78, 74, 15, 3, 4, 14, 10, 12, 9, 14, 6, 11, 7, 7, 1, 11, 0, 249, 197, 220, 95, 51, 120, 151, 207, 200, 96, 121, 
8, 117, 122, 208, 109, 1, 7, 5, 0, 1, 1, 6, 4, 15, 14, 13, 15, 4, 1, 5, 2, 92, 70, 126, 18, 106, 0, 10, 15, 106, 11, 0, 4, 15, 1, 6, 190, 54, 214, 159, 113, 106, 109, 21, 13, 13, 8, 13, 8, 188, 26, 30, 38, 0, 2, 2, 4, 6, 241, 144, 46, 43, 86, 52, 65, 190, 34, 148, 150, 206, 11, 14, 11, 7, 0, 109, 117, 52, 118, 60, 220, 234, 109, 106, 196, 118, 119, 106, 233, 122, 130, 214, 8, 201, 5, 224, 7, 14, 3, 9, 12, 4, 0, 8, 106, 21, 180, 223, 91, 117, 159, 5, 14, 12, 134, 14, 2, 8, 145, 241, 12, 86, 232, 51, 79, 91, 112, 92, 187, 132, 253, 42, 209, 101, 164, 149, 237, 183, 36, 6, 11, 14, 9, 11, 9, 13, 0, 11, 7, 2, 3, 10, 9, 15, 14, 1, 1, 7, 8, 200, 208, 33, 14, 15, 14, 14, 1, 9, 0, 12, 12, 15, 13, 0, 11, 56, 21, 194, 4, 47, 45, 108, 153, 244, 18, 216, 105, 106, 106, 106, 53, 250, 250, 250, 250, 250, 250, 250, 250, 12, 4, 1, 251, 229, 198, 74, 90, 47, 178, 94, 250, 250, 250, 250, 4, 120, 59, 238, 77, 233, 67, 252, 127, 214, 255, 15, 6, 11, 9, 3, 240, 94, 175, 68, 54, 232, 3, 13, 225, 239, 241, 17, 250, 250, 250, 250, 5, 3, 9, 6, 9, 7, 6, 1, 7, 194, 124, 21, 214, 12, 4, 4, 8, 5, 10, 9, 4, 7, 2, 7, 0, 3, 14, 5, 1, 11, 2, 15, 106, 6, 11, 15, 8, 13, 10, 7, 0, 12, 14, 11, 106, 109, 106, 106, 85, 13, 73, 135, 1, 13, 0, 1, 204, 27, 106, 106, 3, 83, 112, 14, 15, 3, 3, 6, 5, 6, 1, 1, 15, 14, 5, 4, 3, 9, 7, 14, 15, 4, 5, 1, 9, 12, 11, 3, 185, 232, 254, 12, 15, 106, 11, 0, 3, 12, 13, 10, 7, 14, 15, 5, 2, 7, 14, 7, 6, 48, 143, 85, 64, 250, 250, 250, 250, 0, 4, 15, 106, 106, 168, 208, 126, 13, 120, 62, 60, 45, 231, 140, 203, 175, 60, 139, 216, 152, 183, 11, 22, 40, 239, 2, 75, 143, 16, 41, 9, 14, 5, 67, 11, 5, 177, 167, 134, 114, 61, 195, 206, 14, 6, 3, 223, 106, 148, 234, 47, 33, 204, 250, 250, 250, 35, 128, 187, 95, 69, 97, 204, 146, 160, 117, 4, 3, 2, 10, 152, 46, 106, 1, 0, 5, 5, 0, 5, 14, 15, 241, 82, 178, 196, 173, 157, 140, 118, 128, 14, 250, 250, 250, 15, 1, 5, 3, 4, 15, 2, 1, 12, 14, 15, 9, 7, 44, 10, 1, 8, 7, 29, 155, 106, 117, 6, 129, 215, 6, 11, 8, 4, 3, 11, 60, 6, 12, 0, 15, 
12, 238, 202, 169, 13, 11, 12, 14, 6, 9, 2, 12, 4, 3, 6, 9, 7, 37, 212, 250, 125, 6, 248, 14, 72, 8, 4, 7, 4, 4, 1, 11, 82, 33, 1, 224, 155, 254, 122, 161, 39, 67, 59, 169, 250, 250, 250, 250, 250, 75, 199, 31, 69, 50, 244, 168, 7, 7, 9, 2, 4, 13, 14, 5, 6, 15, 11, 9, 12, 12, 201, 227, 95, 54, 250, 250, 250, 180, 113, 179, 123, 173, 49, 109, 199, 212, 103, 240, 80, 41, 12, 12, 13, 9, 13, 0, 23, 115, 2, 2, 0, 12, 14, 10, 8, 15, 6, 106, 106, 229, 202, 78, 1, 151, 98, 181, 198, 109, 1, 2, 9, 250, 250, 250, 15, 15, 9, 2, 12, 15, 10, 11, 5, 10, 14, 14, 2, 242, 147, 171, 2, 121, 250, 250, 250, 250, 250, 250, 250, 250, 164, 236, 144, 209, 32, 241, 82, 178, 196, 173, 35, 203, 182, 64, 189, 111, 159, 52, 115, 245, 38, 21, 43, 93, 48, 210, 173, 160, 253, 75, 178, 33, 112, 219, 142, 77, 103, 176, 31, 148, 51, 201, 31, 39, 223, 155, 153, 208, 16, 81, 172, 211, 116, 113, 168, 33, 95, 187, 87, 18, 246, 185, 51, 137, 247, 248, 1, 246, 179, 21, 70, 200, 126, 32, 37, 202, 220, 128, 80, 119, 135, 165, 245, 150, 72, 201, 131, 122, 130, 214, 68, 82, 81, 161, 199, 78, 75, 208, 81, 121, 175, 54, 199, 90, 24, 242, 196, 30, 88, 138, 138, 169, 35, 252, 160, 32, 126, 155, 151, 197, 167, 207, 54, 129, 38, 103, 148, 243, 129, 245, 245, 52, 181, 189, 138, 47, 236, 140, 15, 88, 180, 146, 106, 77, 108, 48, 96, 47, 144, 108, 0, 18, 211, 217, 175, 190, 56, 190, 117, 210, 162, 113, 189, 173, 144, 60, 38, 93, 242, 18, 247, 75, 142, 240, 123, 174, 221, 188, 203, 70, 139, 159, 209, 72, 220, 16, 85, 127, 140, 86, 111, 192, 249, 143, 140, 255, 70, 6, 12, 11, 14, 232, 208, 80, 88, 33, 129, 56, 87, 78, 217, 196, 58, 239, 87, 8, 238, 104, 103, 189, 68, 30, 11, 41, 240, 108, 75, 67, 30, 255, 213, 31, 224, 74, 111, 36, 45, 59, 77, 158, 101, 58, 182, 47, 166, 140, 197, 100, 231, 131, 23, 220, 125, 2, 215, 49, 209, 61, 82, 82, 179, 129, 91, 135, 42, 231, 93, 76, 188, 240, 210, 148, 230, 105, 24, 124, 54, 150, 172, 73, 179, 112, 60, 139, 216, 152, 183, 11, 22, 40, 239, 251, 15, 166, 45, 135, 146, 103, 81, 32, 
210, 123, 111, 29, 151, 202, 253, 173, 181, 220, 242, 230, 247, 115, 222, 225, 73, 179, 178, 218, 21, 66, 111, 20, 183, 121, 110, 206, 64, 160, 125, 171, 139, 252, 77, 186, 193, 190, 156, 177, 190, 205, 205, 179, 10, 109, 11, 50, 109, 221, 97, 252, 230, 134, 50, 87, 23, 33, 236, 149, 224, 201, 85, 202, 20, 192, 40, 197, 99, 143, 109, 103, 105, 35, 77, 227, 200, 243, 172, 32, 215, 59, 90, 140, 195, 24, 180, 58, 29, 174, 88, 57, 180, 238, 255, 82, 34, 225, 89, 122, 134, 22, 60, 163, 161, 11, 97, 158, 233, 186, 60, 238, 190, 164, 22, 192, 230, 207, 222, 21, 121, 153, 62, 201, 51, 224, 97, 128, 220, 231, 45, 135, 156, 7, 201, 108, 82, 244, 211, 28, 53, 202, 13, 40, 76, 140, 159, 220, 52, 162, 49, 67, 252, 127, 214, 158, 194, 26, 250, 28, 200, 202, 123, 152, 113, 154, 91, 1, 153, 151, 238, 113, 156, 69, 171, 152, 30, 26, 110, 197, 216, 207, 202, 102, 172, 195, 9, 95, 248, 146, 194, 96, 25, 195, 45, 96, 23, 227, 39, 188, 120, 62, 246, 67, 9, 255, 182, 164, 34, 107, 19, 84, 159, 161, 27, 219, 74, 2, 94, 194, 218, 79, 24, 113, 153, 101, 105, 92, 74, 50, 254, 191, 138, 158, 158, 95, 122, 82, 242, 104, 38, 180, 3, 83, 112, 33, 20, 7, 127, 119, 152, 13, 201, 8, 254, 247, 79, 85, 140, 174, 170, 220, 25, 195, 177, 79, 164, 156, 205, 186, 30, 162, 148, 235, 227, 226, 171, 83, 196, 87, 116, 193, 18, 236, 60, 4, 58, 167, 98, 203, 123, 106, 167, 180, 16, 155, 254, 122, 161, 36, 234, 251, 164, 12, 177, 104, 29, 92, 173, 50, 113, 79, 2, 80, 229, 56, 146, 142, 138, 216, 135, 98, 62, 120, 97, 182, 182, 201, 5, 224, 238, 52, 63, 61, 179, 0, 83, 87, 188, 15, 142, 136, 176, 150, 9, 169, 94, 67, 160, 31, 159, 139, 82, 56, 147, 138, 104, 120, 53, 113, 116, 37, 111, 33, 77, 187, 215, 224, 219, 115, 193, 89, 228, 114, 178, 97, 101, 67, 243, 235, 74, 36, 78, 31, 227, 202, 92, 20, 220, 57, 252, 172, 128, 124, 105, 152, 204, 154, 98, 252, 84, 102, 232, 17, 133, 89, 64, 105, 186, 20, 55, 213, 18, 177, 249, 117, 211, 101, 96, 82, 151, 193, 199, 154, 69, 122, 223, 208, 91, 17, 253, 178, 117, 13, 
157, 240, 83, 213, 105, 31, 251, 234, 99, 73, 216, 16, 99, 66, 115, 255, 66, 63, 82, 174, 78, 15, 209, 104, 90, 76, 52, 208, 65, 103, 83, 67, 125, 54, 131, 253, 144, 145, 108, 73, 171, 184, 7, 222, 174, 161, 173, 43, 135, 123, 83, 118, 223, 202, 33, 32, 249, 183, 47, 205, 57, 106, 240, 84, 136, 31, 255, 138, 117, 151, 93, 201, 130, 230, 38, 115, 227, 162, 29, 11, 181, 115, 162, 197, 243, 0, 222, 183, 170, 82, 104, 33, 91, 120, 175, 214, 8, 180, 68, 141, 255, 116, 47, 165, 114, 46, 43, 204, 186, 14, 64, 136, 219, 178, 124, 173, 199, 158, 26, 26, 170, 91, 35, 82, 196, 88, 148, 114, 171, 216, 229, 162, 42, 139, 157, 216, 137, 22, 79, 121, 239, 217, 82, 2, 116, 12, 193, 104, 218, 174, 128, 28, 148, 13, 176, 40, 212, 41, 180, 236, 96, 107, 160, 244, 134, 212, 193, 113, 122, 102, 13, 227, 108, 1, 178, 23, 234, 143, 49, 120, 97, 11, 59, 72, 160, 107, 227, 212, 113, 23, 5, 57, 60, 106, 163, 20, 39, 246, 105, 149, 3, 214, 204, 145, 249, 139, 17, 184, 32, 2, 174, 151, 98, 181, 198, 109, 233, 115, 135, 229, 238, 35, 148, 27, 220, 25, 65, 10, 226, 18, 165, 249, 187, 116, 26, 197, 116, 39, 174, 123, 208, 59, 89, 14, 72, 8, 155, 72, 213, 66, 122, 95, 93, 86, 176, 161, 54, 204, 25, 192, 2, 150, 168, 9, 162, 6, 124, 119, 111, 14, 234, 168, 91, 10, 115, 104, 189, 175, 225, 92, 22, 139, 57, 243, 118, 208, 37, 241, 130, 210, 135, 255, 177, 126, 237, 133, 85, 209, 33, 207, 142, 202, 88, 47, 192, 126, 79, 232, 90, 90, 210, 136, 190, 104, 158, 0, 103, 69, 195, 59, 27, 14, 42, 46, 166, 123, 11, 130, 247, 72, 149, 110, 238, 161, 132, 108, 188, 134, 126, 67, 193, 131, 207, 71, 89, 174, 114, 151, 90, 59, 210, 14, 223, 31, 130, 89, 13, 37, 8, 18, 183, 7, 77, 18, 52, 237, 125, 191, 74, 176, 80, 205, 161, 223, 75, 130, 64, 83, 223, 29, 35, 96, 155, 29, 30, 177, 138, 227, 171, 93, 28, 114, 106, 87, 20, 34, 207, 85, 160, 145, 28, 23, 42, 48, 243, 180, 21, 42, 203, 186, 64, 91, 57, 142, 142, 113, 37, 139, 251, 118, 159, 164, 250, 149, 189, 233, 222, 165, 161, 101, 107, 79, 137, 83, 216, 153, 122, 
185, 54, 120, 133, 160, 167, 66, 172, 86, 252, 121, 137, 10, 248, 141, 94, 99, 82, 34, 181, 124, 13, 110, 200, 188, 64, 132, 180, 108, 45, 255, 99, 51, 67, 244, 211, 202, 209, 78, 224, 149, 19, 239, 173, 50, 156, 166, 151, 62, 41, 107, 70, 84, 191, 211, 161, 94, 203, 238, 128, 118, 71, 165, 228, 64, 147, 236, 226, 17, 211, 1, 95, 199, 50, 253, 236, 204, 100, 230, 89, 10, 146, 166, 209, 2, 177, 131, 132, 142, 199, 241, 13, 4, 232, 41, 6, 213, 247, 162, 213, 202, 17, 15, 15, 11, 104, 25, 92, 213, 114, 88, 178, 4, 77, 136, 210, 167, 241, 158, 146, 69, 201, 199, 146, 104, 137, 15, 171, 225, 252, 226, 125, 132, 246, 253, 241, 31, 181, 181, 102, 45, 163, 172, 97, 181, 99, 98, 136, 232, 216, 131, 234, 131, 132, 160, 8, 28, 136, 118, 116, 118, 165, 93, 159, 173, 90, 101, 207, 188, 96, 128, 165, 94, 203, 11, 57, 8, 14, 94, 145, 242, 233, 201, 1, 235, 183, 20, 112, 4, 133, 1, 188, 213, 222, 49, 227, 134, 115, 226, 20, 161, 153, 114, 33, 76, 91, 219, 211, 92, 241, 151, 47, 25, 108, 190, 204, 253, 224, 117, 72, 188, 80, 143, 67, 236, 127, 87, 29, 117, 10, 253, 209, 120, 17, 35, 159, 173, 154, 81, 35, 213, 219, 132, 32, 15, 219, 25, 159, 184, 122, 94, 123, 6, 214, 18, 166, 17, 11, 81, 91, 79, 50, 253, 163, 220, 43, 64, 69, 195, 127, 123, 81, 9, 193, 165, 134, 98, 167, 108, 58, 34, 142, 182, 209, 78, 177, 22, 25, 40, 74, 213, 160, 37, 75, 1, 231, 191, 115, 76, 139, 90, 24, 161, 228, 187, 19, 33, 73, 65, 16, 251, 191, 36, 126, 119, 102, 143, 67, 219, 172, 8, 220, 97, 162, 110, 75, 125, 94, 15, 210, 166, 146, 224, 201, 196, 252, 129, 230, 2, 242, 229, 85, 19, 215, 220, 17, 49, 226, 72, 83, 189, 71, 207, 255, 48, 27, 8, 106, 219, 232, 3, 132, 132, 119, 162, 133, 148, 42, 82, 44, 11, 0, 175, 183, 160, 56, 170, 46, 254, 166, 108, 139, 253, 148, 114, 105, 142, 192, 20, 79, 109, 152, 172, 26, 237, 13, 211, 168, 225, 250, 115, 70, 35, 114, 153, 133, 130, 150, 14, 9, 82, 40, 61, 172, 64, 61, 32, 238, 131, 205, 141, 192, 48, 239, 114, 36, 99, 141, 208, 120, 121, 29, 89, 138, 49, 109, 167, 
148, 9, 52, 179, 125, 115, 175, 151, 92, 62, 137, 145, 24, 76, 231, 224, 243, 193, 13, 179, 40, 0, 243, 234, 149, 229, 35, 218, 123, 202, 145, 209, 41, 208, 199, 238, 47, 131, 21, 242, 153, 239, 14, 113, 104, 142, 125, 110, 246, 207, 127, 199, 150, 237, 215, 96, 81, 249, 215, 102, 113, 146, 130, 97, 71, 22, 215, 214, 145, 136, 244, 27, 245, 107, 11, 13, 136, 153, 138, 207, 216, 158, 161, 27, 43, 16, 25, 11, 234, 76, 228, 153, 51, 173, 61, 153, 110, 215, 207, 252, 224, 197, 111, 56, 244, 63, 50, 81, 157, 251, 2, 125, 151, 138, 45, 165, 140, 120, 251, 119, 134, 233, 106, 193, 84, 225, 99, 90, 227, 204, 191, 203, 158, 32, 127, 210, 54, 151, 67, 106, 63, 190, 44, 134, 44, 116, 31, 5, 74, 12, 150, 247, 137, 167, 163, 133, 43, 165, 229, 222, 115, 152, 251, 42, 166, 214, 100, 111, 3, 32, 157, 183, 122, 170, 100, 150, 163, 111, 144, 253, 169, 111, 82, 241, 1, 41, 26, 130, 170, 75, 26, 91, 202, 243, 92, 170, 114, 130, 107, 11, 180, 41, 241, 153, 149, 224, 225, 111, 243, 18, 117, 237, 5, 74, 77, 50, 80, 205, 162, 21, 139, 222, 131, 237, 39, 54, 38, 198, 219, 172, 61, 87, 14, 91, 65, 133, 223, 188, 72, 143, 80, 16, 204, 187, 160, 232, 223, 245, 130, 41, 86, 251, 1, 152, 161, 33, 171, 125, 1, 129, 151, 36, 137, 27, 88, 5, 146, 42, 73, 14, 72, 229, 240, 18, 77, 186, 80, 124, 216, 183, 98, 243, 252, 86, 198, 119, 187, 30, 30, 168, 0, 99, 55, 226, 75, 145, 16, 224, 65, 148, 31, 64, 135, 203, 7, 144, 60, 21, 174, 233, 154, 175, 58, 3, 147, 215, 201, 152, 251, 22, 198, 27, 9, 21, 232, 170, 238, 62, 36, 4, 33, 200, 251, 76, 213, 62, 113, 207, 1, 46, 55, 6, 159, 140, 90, 108, 102, 105, 67, 88, 222, 196, 66, 51, 13, 135, 168, 244, 156, 62, 122, 93, 52, 146, 93, 0, 147, 96, 41, 215, 232, 208, 132, 255, 115, 113, 37, 236, 24, 168, 170, 187, 123, 27, 214, 176, 120, 243, 54, 97, 165, 78, 171, 108, 220, 241, 226, 125, 79, 216, 208, 69, 121, 183, 214, 150, 155, 135, 122, 67, 8, 54, 8, 15, 116, 72, 229, 220, 250, 96, 230, 95, 213, 209, 113, 50, 195, 77, 47, 131, 142, 58, 11, 8, 4, 15, 13, 
15, 3, 0, 1, 12, 11, 8, 4, 15, 13, 15, 11, 9, 8, 9, 8, 0, 1, 1, 7, 14, 1, 1, 1, 1, 1, 14, 11, 1, 1, 7, 4, 1, 14, 11, 1, 7, 15, 5, 15, 7, 4, 1, 14, 11, 1, 7, 15, 5, 15, 7, 4, 1, 14, 11, 1, 7, 15, 5, 10, 13, 14, 11, 1, 7, 15, 5, 15, 7, 9, 1, 14, 11, 1, 1, 4, 1, 1, 7, 12, 1, 1, 7, 7, 10, 2, 0, 1, 6, 4, 4, 1, 14, 1, 0, 1, 1, 14, 14, 13, 7, 13, 3, 0, 1, 1, 13, 4, 1, 14, 13, 1, 2, 1, 10, 1, 1, 1, 8, 5, 5, 12, 5, 2, 8, 1, 14, 11, 12, 11, 8, 4, 13, 9, 1, 5, 9, 13, 1, 1, 7, 2, 7, 14, 1, 1, 1, 7, 14, 1, 1, 1, 4, 14, 13, 4, 1, 1, 12, 11, 8, 7, 4, 1, 14, 1, 9, 13, 3, 14, 13, 1, 6, 15, 13, 7, 1, 4, 1, 1, 10, 11, 13, 14, 12, 8, 0, 1, 1, 7, 3, 8, 12, 8, 7, 0, 10, 13, 4, 1, 1, 14, 11, 1, 1, 7, 14, 1, 1, 1, 11, 12, 12, 8, 2, 3, 13, 3, 3, 13, 3, 14, 7, 15, 3, 11, 2, 12, 0, 6, 12, 6, 1, 0, 1, 1, 7, 1, 1, 1, 1, 11, 6, 14, 14, 14, 5, 4, 14, 10, 1, 5, 9, 13, 15, 14, 1, 7, 9, 4, 1, 12, 7, 5, 13, 13, 14, 4, 1, 1, 7, 11, 1, 1, 1, 1, 1, 7, 11, 12, 1, 1, 1, 7, 13, 0, 0, 12, 1, 1, 11, 8, 1, 1, 1, 7, 14, 1, 1, 1, 1, 7, 14, 1, 8, 14, 12, 8, 0, 1, 1, 7, 11, 1, 1, 10, 11, 13, 14, 12, 8, 0, 1, 1, 7, 3, 8, 4, 5, 1, 1, 1, 11, 12, 15, 3, 11, 15, 5, 11, 7, 4, 1, 4, 1, 10, 13, 14, 11, 1, 14, 11, 1, 1, 7, 10, 7, 1, 9, 13, 3, 3, 3, 2, 11, 1, 2, 7, 1, 1, 7, 1, 1, 1, 1, 4, 0, 11, 10, 13, 1, 6, 15, 13, 7, 1, 4, 1, 1, 10, 11, 13, 14, 2, 1, 1, 7, 11, 12, 7, 13, 4, 0, 6, 2, 8, 2, 10, 0, 11, 3, 12, 9, 8, 7, 0, 1, 7, 14, 13, 1, 3, 0, 1, 12, 11, 3, 11, 13, 7, 8, 1, 7, 2, 7, 14, 1, 1, 1, 7, 14, 15, 13, 15, 3, 0, 1, 12, 11, 8, 4, 1, 7, 14, 13, 1, 6, 15, 13, 7, 1, 10, 13, 4, 1, 8, 6, 15, 13, 7, 1, 4, 1, 1, 6, 7, 13, 3, 0, 1, 1, 13, 4, 1, 14, 13, 1, 2, 1, 10, 1, 1, 1, 5, 15, 7, 4, 1, 14, 7, 1, 1, 7, 11, 12, 1, 3, 1, 14, 11, 7, 11, 1, 1, 10, 11, 6, 4, 6, 7, 6, 2, 11, 1, 1, 7, 13, 13, 4, 1, 8, 1, 1, 7, 2, 1, 15, 11, 9, 8, 9, 8, 0, 1, 1, 7, 14, 1, 7, 11, 12, 1, 14, 4, 7, 1, 14, 11, 1, 1, 7, 6, 2, 11, 1, 1, 6, 1, 1, 1, 1, 4, 0, 11, 10, 13, 1, 6, 0, 9, 4, 1, 1, 1, 15, 15, 15, 5, 10, 1, 5, 9, 9, 4, 13, 9, 1, 3, 13, 3, 4, 
5, 4, 14, 14, 1, 4, 0, 1, 12, 11, 8, 4, 15, 13, 15, 3, 3, 2, 1, 9, 1, 1, 1, 1, 1, 10, 7, 1, 12, 11, 3, 11, 13, 7, 8, 15, 15, 5, 11, 6, 5, 11, 6, 5, 9, 14, 5, 14, 11, 13, 4, 7, 13, 5, 15, 8, 7, 1, 1, 7, 11, 12, 1, 6, 10, 5, 2, 1, 14, 4, 11, 7, 13, 3, 13, 10, 13, 9, 1, 3, 13, 13, 7, 13, 14, 12, 15, 7, 15, 3, 11, 2, 0, 7, 13, 14, 2, 10, 0, 1, 1, 2, 1, 1, 1, 1, 11, 6, 12, 5, 1, 1, 7, 13, 14, 10, 11, 7, 15, 1, 3, 13, 3, 3, 13, 3, 14, 1, 14, 11, 1, 1, 12, 2, 11, 8, 4, 0, 12, 9, 9, 1, 1, 1, 1, 13, 4, 13, 8, 9, 13, 5, 7, 13, 11, 14, 8, 7, 1, 7, 15, 5, 12, 7, 12, 1, 7, 1, 13, 13, 1, 1, 4, 0, 11, 10, 13, 1, 6, 1, 7, 3, 8, 4, 5, 9, 11, 8, 1, 7, 7, 1, 8, 8, 10, 11, 6, 4, 6, 14, 11, 5, 7, 10, 5, 11, 7, 12, 8, 6, 9, 14, 7, 1, 9, 14, 0, 1, 12, 11, 8, 4, 8, 11, 11, 15, 14, 15, 10, 13, 1, 6, 15, 13, 7, 1, 4, 1, 1, 10, 11, 13, 5, 15, 0, 0, 11, 10, 13, 1, 6, 1, 9, 3, 3, 5, 5, 7, 6, 9, 0, 4, 10, 5, 1, 2, 0, 1, 15, 5, 11, 6})
fuzzDicts = append(fuzzDicts, []byte{55, 164, 48, 236, 138, 0, 161, 125, 86, 16, 40, 236, 140, 3, 47, 193, 140, 70, 28, 182, 7, 161, 23, 22, 85, 86, 191, 89, 56, 91, 146, 91, 161, 14, 129, 104, 70, 13, 6, 2, 45, 70, 140, 216, 244, 227, 121, 51, 72, 89, 196, 209, 112, 91, 156, 111, 124, 247, 9, 184, 37, 34, 169, 208, 189, 40, 42, 142, 68, 37, 223, 251, 58, 70, 175, 34, 3, 96, 84, 91, 65, 210, 8, 206, 72, 68, 203, 10, 217, 238, 138, 136, 8, 33, 1, 243, 0, 32, 12, 10, 135, 5, 131, 194, 185, 72, 44, 33, 150, 15, 4, 64, 1, 175, 38, 20, 218, 232, 18, 49, 200, 97, 8, 25, 99, 136, 33, 4, 0, 0, 0, 0, 0, 0, 0, 2, 0, 2, 8, 0, 0, 0, 164, 203, 233, 101, 228, 185, 76, 28, 134, 73, 16, 83, 10, 41, 132, 12, 13, 201, 12, 0, 0, 0, 1, 0, 0, 0, 4, 0, 0, 0, 8, 0, 0, 0, 85, 85, 3, 172, 3, 172, 85, 3, 3, 149, 149, 149, 105, 161, 89, 141, 224, 175, 149, 149, 149, 149, 149, 149, 149, 149, 149, 149, 149, 149, 149, 149, 153, 149, 149, 149, 149, 149, 149, 149, 87, 149, 149, 62, 149, 149, 149, 149, 149, 149, 76, 149, 186, 172, 172, 149, 149, 149, 149, 149, 149, 149, 149, 149, 149, 149, 149, 37, 81, 161, 33, 149, 149, 149, 149, 231, 149, 79, 84, 149, 149, 248, 119, 6, 177, 149, 149, 149, 149, 149, 149, 238, 149, 149, 149, 178, 100, 124, 149, 149, 178, 100, 149, 149, 149, 203, 149, 149, 149, 149, 149, 149, 84, 149, 149, 149, 149, 0, 149, 149, 149, 149, 149, 37, 81, 161, 207, 149, 149, 149, 149, 149, 149, 149, 78, 224, 149, 149, 149, 149, 106, 85, 85, 85, 85, 85, 85, 85, 90, 177, 85, 177, 85, 8, 177, 85, 85, 3, 85, 85, 85, 85, 172, 85, 95, 172, 85, 85, 3, 172, 85, 8, 172, 85, 172, 85, 3, 177, 172, 3, 90, 172, 85, 172, 177, 85, 172, 85, 85, 85, 172, 172, 172, 172, 177, 85, 85, 85, 172, 3, 3, 182, 172, 172, 85, 90, 85, 3, 172, 3, 172, 85, 90, 3, 8, 85, 172, 172, 85, 90, 182, 90, 85, 85, 85, 95, 90, 85, 85, 182, 85, 85, 172, 85, 90, 177, 90, 172, 3, 172, 3, 172, 172, 90, 85, 85, 85, 177, 85, 85, 172, 85, 85, 85, 172, 85, 90, 3, 3, 85, 3, 172, 85, 85, 85, 85, 172, 172, 85, 90, 172, 172, 3, 172, 85, 
13, 85, 3, 3, 172, 3, 13, 3, 172, 172, 3, 85, 85, 85, 85, 95, 90, 90, 177, 85, 95, 172, 85, 85, 3, 85, 85, 172, 85, 8, 85, 3, 85, 85, 172, 85, 85, 85, 85, 3, 85, 177, 85, 85, 3, 3, 172, 95, 172, 172, 8, 85, 172, 177, 172, 172, 172, 85, 8, 85, 8, 85, 85, 85, 3, 85, 85, 3, 172, 85, 3, 172, 85, 85, 3, 172, 172, 85, 85, 172, 177, 85, 254, 254, 254, 254, 72, 71, 72, 72, 254, 254, 254, 254, 254, 254, 254, 254, 254, 254, 254, 254, 254, 254, 254, 254, 254, 254, 254, 254, 254, 254, 254, 8, 254, 254, 254, 254, 254, 254, 254, 254, 254, 254, 9, 5, 254, 254, 254, 254, 254, 254, 72, 72, 254, 254, 254, 254, 254, 254, 254, 254, 254, 254, 71, 72, 72, 254, 254, 254, 254, 254, 72, 71, 71, 72, 254, 0, 254, 254, 254, 254, 254, 254, 254, 254, 254, 254, 69, 72, 70, 72, 68, 72, 72, 72, 254, 254, 254, 9, 254, 14, 11, 7, 72, 71, 70, 72, 72, 72, 70, 254, 254, 254, 254, 254, 254, 254, 254, 254, 254, 72, 71, 69, 72, 72, 72, 71, 71, 72, 70, 72, 72, 71, 72, 72, 72, 69, 71, 72, 71, 72, 71, 254, 254, 254, 254, 254, 254, 254, 254, 72, 72, 71, 71, 6, 6, 7, 14, 6, 254, 71, 72, 5, 254, 254, 9, 11, 7, 8, 2, 254, 254, 254, 254, 254, 254, 72, 71, 254, 6, 254, 254, 11, 9, 7, 2, 254, 254, 254, 72, 72, 69, 72, 72, 72, 72, 72, 71, 72, 72, 71, 67, 72, 72, 72, 71, 72, 71, 71, 11, 13, 254, 72, 72, 72, 72, 72, 72, 70, 70, 72, 71, 72, 69, 71, 72, 68, 71, 72, 72, 72, 72, 72, 70, 71, 72, 72, 72, 72, 69, 72, 70, 71, 72, 72, 72, 71, 72, 254, 12, 2, 254, 254, 12, 2, 13, 254, 11, 13, 254, 6, 254, 71, 72, 5, 254, 254, 9, 8, 14, 5, 254, 254, 254, 3, 14, 12, 8, 10, 254, 254, 254, 254, 254, 11, 13, 254, 254, 254, 254, 254, 5, 254, 254, 209, 209, 209, 209, 209, 209, 209, 209, 209, 209, 209, 209, 209, 209, 209, 209, 209, 209, 209, 209, 209, 209, 209, 209, 209, 209, 209, 209, 209, 209, 209, 209, 209, 209, 209, 209, 209, 209, 209, 209, 209, 209, 209, 209, 209, 209, 209, 209, 209, 209, 209, 209, 209, 209, 209, 209, 209, 209, 209, 209, 209, 209, 146, 2, 53, 55, 10, 25, 104, 80, 22, 7, 249, 6, 180, 255, 141, 218, 159, 36, 6, 7, 
14, 7, 14, 7, 14, 7, 14, 9, 14, 7, 14, 9, 14, 7, 14, 7, 14, 7, 14, 4, 7, 14, 7, 14, 7, 1, 9, 14, 7, 14, 7, 11, 14, 4, 7, 14, 7, 7, 14, 9, 14, 7, 7, 7, 14, 9, 7, 7, 7, 14, 9, 6, 11, 14, 7, 14, 4, 10, 11, 14, 4, 7, 14, 7, 7, 14, 9, 14, 7, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 7, 14, 9, 7, 7, 7, 14, 7, 6, 4, 7, 14, 7, 7, 14, 9, 14, 4, 7, 14, 7, 7, 14, 9, 14, 7, 7, 7, 7, 7, 14, 7, 7, 14, 13, 14, 6, 4, 7, 14, 7, 7, 14, 9, 9, 9, 9, 9, 0, 10, 11, 14, 9, 9, 14, 7, 3, 7, 11, 14, 4, 7, 14, 7, 7, 7, 14, 9, 7, 7, 7, 1, 13, 7, 7, 7, 6, 5, 2, 7, 14, 7, 14, 9, 14, 7, 7, 14, 9, 3, 14, 14, 14, 14, 7, 3, 12, 15, 11, 14, 7, 14, 13, 5, 4, 10, 11, 14, 4, 5, 2, 9, 9, 0, 10, 11, 11, 1, 9, 14, 7, 7, 7, 14, 9, 7, 7, 7, 14, 9, 7, 7, 7, 14, 7, 6, 4, 7, 14, 7, 7, 14, 9, 14, 4, 7, 7, 2, 1, 9, 14, 14, 7, 7, 14, 9, 7, 3, 7, 14, 7, 150, 150, 150, 9, 14, 7, 14, 9, 150, 150, 150, 150, 150, 150, 14, 9, 14, 7, 7, 14, 21, 9, 7, 7, 4, 5, 2, 9, 150, 21, 150, 150, 150, 150, 7, 14, 9, 6, 11, 150, 150, 150, 150, 150, 150, 14, 4, 7, 14, 7, 7, 7, 14, 9, 7, 7, 14, 14, 14, 150, 150, 150, 21, 150, 150, 150, 150, 150, 7, 7, 7, 14, 7, 150, 150, 150, 21, 21, 150, 7, 14, 9, 150, 21, 150, 150, 150, 150, 150, 150, 150, 150, 150, 21, 14, 9, 6, 11, 150, 150, 150, 150, 1, 9, 14, 7, 150, 150, 150, 150, 150, 21, 150, 7, 14, 9, 150, 150, 150, 150, 9, 7, 3, 7, 14, 7, 150, 150, 150, 150, 150, 150, 150, 150, 150, 14, 7, 7, 7, 14, 9, 150, 150, 150, 150, 150, 150, 4, 7, 14, 7, 150, 150, 150, 150, 150, 150, 150, 150, 150, 21, 150, 7, 14, 9, 6, 150, 150, 150, 150, 150, 9, 150, 3, 7, 14, 21, 150, 150, 21, 150, 21, 150, 14, 4, 7, 14, 150, 21, 150, 150, 150, 9, 150, 3, 150, 150, 150, 150, 150, 150, 150, 150, 14, 9, 14, 6, 5, 2, 7, 14, 7, 14, 150, 150, 150, 150, 150, 150, 150, 150, 3, 7, 11, 14, 4, 7, 150, 150, 150, 150, 150, 150, 150, 150, 150, 150, 150, 150, 150, 150, 21, 150, 150, 150, 150, 150, 150, 150, 150, 150, 14, 9, 150, 150, 148, 150, 150, 21, 150, 150, 150, 150, 4, 10, 11, 14, 150, 150, 150, 150, 150, 150, 14, 7, 14, 
150, 150, 150, 150, 150, 150, 150, 150, 150, 150, 150, 150, 150, 150, 150, 150, 150, 21, 21, 150, 150, 150, 21, 150, 150, 150, 150, 150, 150, 150, 148, 150, 21, 150, 148, 150, 150, 150, 150, 150, 150, 21, 150, 150, 150, 150, 150, 150, 14, 14, 14, 14, 14, 14, 14, 150, 150, 150, 21, 150, 150, 150, 21, 150, 150, 21, 150, 150, 6, 4, 7, 14, 7, 150, 150, 150, 150, 21, 150, 150, 150, 150, 150, 150, 150, 150, 150, 150, 150, 150, 150, 21, 150, 150, 150, 150, 148, 150, 150, 150, 150, 150, 150, 150, 150, 150, 150, 150, 150, 150, 150, 150, 21, 14, 14, 14, 7, 148, 150, 150, 148, 150, 150, 150, 150, 150, 150, 150, 150, 150, 150, 150, 150, 150, 150, 150, 9, 7, 7, 150, 150, 14, 9, 7, 7, 150, 21, 21, 9, 150, 150, 150, 150, 150, 150, 150, 150, 150, 150, 150, 148, 150, 150, 150, 150, 150, 150, 9, 7, 7, 7, 14, 7, 150, 150, 150, 150, 150, 150, 150, 7, 14, 9, 150, 150, 21, 14, 14, 14, 7, 150, 150, 150, 150, 150, 150, 7, 2, 1, 9, 14, 21, 150, 150, 150, 150, 14, 9, 14, 4, 7, 21, 21, 9, 14, 7, 150, 150, 150, 150, 150, 150, 7, 7, 14, 9, 150, 21, 21, 150, 21, 150, 150, 150, 21, 150, 150, 150, 150, 150, 21, 150, 150, 150, 150, 150, 150, 150, 150, 150, 150, 150, 21, 150, 150, 21, 150, 150, 150, 150, 150, 150, 150, 150, 150, 21, 150, 150, 150, 150, 150, 150, 150, 150, 150, 7, 14, 7, 7, 150, 150, 150, 150, 150, 150, 21, 150, 150, 205, 29, 91, 131, 109, 39, 93, 115, 91, 197, 93, 150, 150, 150, 150, 173, 149, 92, 163, 150, 3, 7, 14, 21, 150, 150, 150, 150, 14, 9, 14, 4, 7, 199, 114, 175, 51, 45, 33, 44, 37, 231, 72, 243, 147, 192, 115, 15, 204, 38, 65, 155, 225, 234, 156, 164, 177, 214, 60, 241, 161, 31, 28, 10, 7, 183, 147, 45, 107, 215, 94, 209, 218, 205, 7, 128, 172, 156, 45, 122, 77, 36, 190, 219, 116, 65, 102, 191, 35, 255, 177, 166, 247, 180, 128, 205, 107, 13, 105, 40, 244, 117, 92, 69, 165, 6, 32, 247, 221, 25, 229, 138, 219, 81, 8, 1, 54, 159, 181, 154, 163, 121, 239, 118, 112, 60, 3, 46, 102, 182, 191, 13, 162, 247, 194, 21, 15, 229, 0, 19, 190, 148, 18, 233, 150, 150, 150, 150, 4, 184, 
150, 1, 41, 71, 231, 205, 243, 111, 119, 226, 197, 41, 195, 26, 79, 164, 177, 214, 155, 19, 237, 25, 29, 150, 150, 14, 9, 14, 4, 186, 166, 254, 244, 69, 4, 191, 112, 248, 241, 102, 63, 194, 131, 47, 231, 193, 9, 111, 65, 102, 191, 35, 255, 177, 166, 197, 87, 241, 129, 82, 151, 173, 9, 109, 138, 150, 3, 7, 14, 21, 150, 43, 216, 146, 201, 55, 32, 196, 111, 131, 84, 153, 101, 150, 150, 150, 150, 14, 7, 7, 14, 21, 150, 150, 21, 150, 21, 150, 51, 35, 1, 9, 14, 21, 150, 150, 177, 208, 150, 150, 150, 150, 150, 14, 7, 7, 7, 14, 9, 150, 150, 116, 65, 102, 191, 35, 255, 89, 209, 78, 188, 47, 126, 170, 1, 217, 87, 122, 186, 180, 126, 122, 81, 60, 225, 89, 44, 204, 108, 250, 195, 7, 143, 212, 211, 217, 81, 129, 212, 174, 5, 2, 9, 9, 0, 10, 11, 11, 1, 9, 14, 7, 7, 7, 208, 208, 208, 208, 208, 208, 208, 208, 3, 99, 235, 235, 235, 15, 0, 0, 99, 14, 10, 14, 7, 0, 9, 2, 6, 5, 8, 3, 12, 218, 150, 99, 8, 8, 14, 6, 14, 14, 150, 8, 10, 184, 236, 201, 235, 8, 235, 167, 235, 133, 150, 201, 82, 184, 5, 82, 184, 235, 218, 167, 13, 14, 13, 235, 167, 150, 218, 218, 201, 10, 167, 201, 201, 133, 5, 7, 133, 184, 82, 3, 14, 4, 10, 13, 0, 11, 14, 218, 201, 235, 10, 5, 15, 3, 11, 9, 235, 235, 235, 9, 184, 219, 150, 167, 12, 218, 201, 218, 184, 8, 201, 99, 184, 2, 2, 6, 5, 235, 133, 1, 2, 235, 184, 31, 184, 12, 150, 167, 201, 201, 184, 201, 218, 10, 10, 6, 2, 2, 11, 3, 11, 1, 133, 116, 201, 5, 116, 167, 218, 10, 2, 235, 218, 218, 235, 31, 184, 235, 9, 15, 235, 218, 2, 1, 0, 8, 1, 5, 11, 9, 235, 133, 235, 10, 9, 11, 14, 31, 184, 65, 13, 6, 167, 218, 235, 235, 201, 235, 219, 116, 235, 201, 219, 201, 99, 8, 3, 7, 5, 6, 13, 133, 150, 218, 8, 4, 150, 218, 15, 12, 0, 11, 15, 7, 0, 201, 235, 184, 31, 235, 9, 1, 201, 235, 201, 235, 11, 6, 9, 4, 235, 235, 167, 201, 12, 218, 116, 99, 167, 116, 2, 4, 11, 5, 2, 5, 11, 235, 235, 9, 12, 13, 14, 8, 10, 184, 6, 2, 14, 7, 8, 15, 218, 184, 184, 6, 5, 99, 99, 235, 150, 5, 0, 184, 48, 9, 5, 15, 5, 201, 218, 235, 150, 150, 235, 31, 167, 150, 219, 235, 82, 12, 10, 235, 
184, 201, 9, 2, 14, 15, 14, 10, 7, 10, 4, 15, 118, 191, 228, 154, 8, 117, 228, 45, 118, 118, 118, 228, 118, 45, 228, 228, 45, 228, 45, 81, 44, 81, 154, 44, 45, 191, 8, 45, 118, 228, 228, 118, 8, 191, 191, 45, 153, 118, 45, 79, 81, 45, 228, 45, 228, 45, 80, 81, 118, 7, 45, 227, 81, 153, 154, 154, 118, 78, 116, 228, 117, 118, 228, 227, 153, 154, 118, 45, 228, 118, 228, 7, 44, 227, 118, 228, 118, 228, 8, 228, 154, 118, 154, 228, 7, 44, 45, 81, 7, 228, 81, 44, 227, 8, 189, 81, 228, 45, 45, 154, 45, 154, 44, 80, 228, 8, 45, 154, 228, 45, 118, 8, 8, 118, 228, 118, 191, 191, 117, 154, 191, 191, 228, 118, 191, 45, 228, 8, 45, 191, 227, 188, 228, 118, 228, 191, 115, 45, 118, 118, 228, 8, 191, 188, 191, 45, 81, 118, 228, 118, 191, 8, 44, 118, 44, 45, 81, 228, 45, 118, 8, 45, 7, 8, 44, 8, 228, 8, 45, 45, 191, 8, 45, 227, 7, 45, 118, 5, 45, 81, 81, 43, 45, 191, 191, 228, 118, 8, 8, 191, 45, 44, 45, 228, 45, 118, 228, 45, 228, 228, 191, 226, 8, 154, 45, 153, 8, 81, 191, 79, 117, 44, 81, 190, 12, 150, 167, 201, 201, 184, 81, 45, 227, 154, 45, 228, 228, 81, 45, 228, 118, 8, 228, 81, 154, 44, 191, 45, 8, 45, 80, 45, 228, 117, 118, 227, 43, 154, 8, 227, 45, 45, 228, 228, 118, 45, 228, 118, 228, 45, 43, 228, 228, 152, 191, 227, 154, 81, 81, 45, 228, 45, 45, 117, 45, 227, 79, 45, 228, 118, 228, 226, 154, 228, 118, 81, 228, 227, 225, 154, 8, 228, 118, 228, 8, 45, 115, 118, 7, 45, 44, 118, 118, 190, 45, 45, 117, 118, 118, 118, 191, 45, 191, 8, 228, 45, 117, 228, 228, 45, 45, 45, 191, 191, 191, 45, 227, 8, 81, 154, 45, 228, 191, 227, 227, 81, 227, 44, 117, 117, 228, 228, 118, 154, 45, 45, 45, 118, 45, 45, 117, 227, 45, 191, 118, 118, 228, 228, 228, 81, 118, 228, 154, 7, 154, 80, 228, 191, 81, 228, 8, 228, 8, 118, 227, 45, 117, 153, 5, 3, 8, 2, 201, 117, 118, 191, 45, 118, 45, 228, 228, 191, 45, 45, 228, 228, 191, 115, 227, 117, 45, 118, 154, 81, 45, 118, 154, 45, 191, 8, 8, 44, 117, 8, 228, 191, 191, 228, 45, 190, 228, 228, 228, 45, 45, 118, 118, 226, 228, 118, 81, 8, 118, 191, 45, 228, 
227, 45, 228, 154, 190, 191, 190, 228, 118, 228, 8, 117, 8, 227, 228, 81, 154, 118, 189, 118, 6, 191, 191, 45, 226, 228, 118, 118, 227, 191, 191, 191, 8, 8, 191, 8, 118, 228, 45, 81, 45, 45, 8, 228, 118, 8, 45, 191, 191, 8, 45, 118, 154, 228, 118, 81, 45, 45, 8, 45, 45, 45, 228, 191, 81, 153, 8, 191, 8, 191, 227, 190, 227, 228, 118, 228, 227, 228, 45, 191, 117, 118, 228, 45, 191, 8, 44, 45, 191, 191, 8, 8, 228, 118, 154, 118, 228, 45, 118, 228, 118, 117, 45, 228, 227, 118, 228, 228, 45, 228, 8, 118, 228, 228, 191, 81, 201, 218, 235, 235, 45, 154, 44, 153, 45, 228, 118, 118, 190, 8, 5, 235, 133, 1, 2, 191, 228, 228, 227, 227, 228, 228, 252, 252, 252, 252, 252, 225, 225, 228, 7, 44, 45, 252, 252, 0, 9, 2, 6, 5, 8, 3, 12, 218, 150, 99, 8, 252, 252, 235, 201, 218, 13, 235, 184, 167, 133, 218, 201, 4, 235, 218, 116, 48, 201, 184, 82, 184, 253, 235, 184, 201, 167, 133, 235, 116, 201, 218, 99, 218, 65, 99, 235, 167, 201, 218, 252, 252, 252, 15, 14, 14, 235, 150, 167, 8, 252, 225, 252, 184, 201, 218, 235, 201, 235, 235, 235, 9, 184, 219, 150, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 235, 9, 1, 201, 235, 201, 235, 11, 252, 235, 184, 184, 235, 235, 218, 235, 225, 252, 252, 198, 116, 2, 4, 11, 5, 2, 252, 236, 202, 218, 82, 235, 235, 167, 116, 225, 252, 225, 252, 252, 117, 8, 228, 191, 252, 252, 218, 235, 218, 184, 167, 133, 133, 201, 167, 235, 167, 201, 184, 167, 48, 218, 201, 236, 252, 252, 45, 228, 8, 45, 191, 227, 188, 7, 184, 235, 218, 167, 235, 235, 235, 14, 15, 167, 116, 201, 184, 184, 235, 99, 235, 252, 225, 252, 252, 252, 225, 252, 133, 184, 201, 184, 235, 82, 201, 167, 235, 225, 252, 252, 48, 9, 0, 11, 252, 252, 252, 252, 7, 184, 201, 218, 235, 201, 4, 252, 167, 201, 235, 14, 235, 201, 184, 218, 201, 201, 219, 201, 99, 116, 31, 201, 99, 133, 235, 1, 2, 11, 9, 9, 12, 116, 218, 235, 14, 3, 5, 218, 184, 236, 5, 184, 184, 184, 1, 252, 252, 252, 252, 252, 133, 167, 235, 235, 184, 235, 184, 167, 184, 150, 48, 218, 133, 219, 31, 201, 218, 235, 236, 235, 218, 167, 
167, 133, 133, 201, 167, 235, 167, 201, 184, 167, 48, 218, 201, 252, 252, 252, 252, 252, 252, 252, 225, 225, 252, 15, 14, 5, 150, 235, 99, 13, 4, 133, 31, 184, 218, 10, 201, 218, 184, 5, 252, 252, 252, 252, 184, 167, 0, 235, 252, 252, 252, 252, 252, 252, 8, 8, 191, 8, 118, 228, 45, 81, 45, 45, 8, 228, 118, 8, 45, 191, 191, 8, 45, 118, 252, 252, 252, 252, 45, 228, 118, 8, 228, 81, 154, 44, 191, 45, 198, 252, 4, 3, 12, 65, 218, 218, 252, 252, 252, 252, 235, 184, 184, 235, 201, 235, 167, 133, 235, 252, 252, 252, 198, 252, 225, 198, 252, 225, 9, 10, 184, 252, 252, 252, 252, 4, 48, 235, 99, 0, 184, 235, 184, 15, 12, 218, 167, 167, 184, 1, 10, 5, 0, 11, 3, 15, 10, 235, 235, 184, 6, 1, 2, 252, 252, 252, 252, 252, 225, 252, 252, 252, 228, 227, 45, 228, 154, 190, 191, 190, 228, 118, 228, 8, 117, 8, 235, 6, 4, 0, 252, 252, 252, 252, 225, 252, 252, 252, 225, 252, 252, 201, 184, 11, 0, 218, 133, 219, 218, 252, 252, 99, 48, 235, 235, 184, 201, 65, 235, 116, 201, 235, 201, 133, 31, 82, 235, 218, 235, 65, 48, 150, 167, 150, 116, 82, 235, 201, 201, 167, 201, 218, 150, 218, 65, 252, 184, 219, 150, 167, 12, 252, 252, 225, 252, 252, 228, 118, 191, 8, 44, 118, 44, 45, 81, 228, 45, 118, 8, 45, 7, 8, 44, 8, 228, 8, 45, 45, 191, 133, 82, 184, 235, 184, 5, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 225, 252, 252, 252, 252, 252, 167, 201, 218, 252, 235, 218, 235, 167, 167, 235, 31, 65, 235, 184, 133, 150, 2, 1, 1, 7, 201, 133, 184, 7, 1, 15, 99, 235, 201, 218, 133, 218, 99, 8, 218, 235, 133, 235, 4, 4, 184, 150, 185, 2, 4, 117, 45, 227, 79, 45, 116, 32, 157, 159, 206, 71, 36, 114, 116, 73, 159, 116, 116, 114, 71, 243, 157, 71, 73, 157, 116, 157, 73, 32, 71, 114, 114, 116, 114, 71, 114, 159, 202, 116, 200, 114, 73, 116, 200, 75, 249, 243, 116, 30, 73, 200, 71, 114, 114, 116, 114, 71, 116, 114, 159, 161, 71, 243, 71, 116, 71, 116, 243, 243, 116, 71, 71, 71, 36, 32, 116, 116, 116, 116, 73, 251, 32, 114, 77, 34, 116, 116, 116, 71, 159, 32, 243, 71, 116, 114, 116, 116, 
202, 159, 157, 116, 71, 116, 38, 243, 116, 114, 157, 116, 159, 200, 30, 114, 157, 159, 157, 243, 30, 36, 116, 32, 116, 114, 157, 114, 116, 161, 114, 114, 71, 32, 157, 73, 71, 116, 114, 157, 17, 116, 116, 116, 116, 157, 251, 32, 200, 114, 200, 243, 116, 116, 114, 157, 36, 116, 200, 116, 202, 77, 114, 71, 118, 30, 116, 32, 202, 159, 114, 114, 114, 116, 116, 157, 71, 32, 32, 71, 116, 30, 157, 200, 157, 204, 32, 36, 157, 116, 200, 118, 75, 159, 114, 71, 157, 202, 114, 114, 114, 157, 157, 116, 71, 73, 157, 116, 200, 159, 157, 116, 116, 114, 116, 243, 116, 71, 75, 116, 114, 116, 116, 71, 71, 116, 32, 116, 114, 116, 116, 208, 118, 116, 157, 116, 114, 116, 118, 77, 114, 114, 116, 157, 124, 32, 71, 71, 157, 116, 116, 120, 30, 114, 116, 114, 157, 247, 243, 116, 73, 116, 243, 159, 206, 71, 71, 36, 36, 243, 30, 73, 116, 116, 243, 118, 200, 200, 114, 159, 81, 159, 114, 30, 114, 206, 30, 32, 247, 116, 116, 71, 116, 116, 71, 32, 202, 114, 114, 157, 116, 118, 243, 71, 116, 71, 116, 116, 36, 17, 77, 157, 36, 36, 116, 159, 157, 116, 116, 34, 161, 36, 245, 116, 116, 116, 116, 32, 157, 165, 159, 71, 32, 32, 116, 116, 206, 30, 120, 114, 36, 36, 36, 159, 114, 114, 114, 200, 116, 116, 159, 157, 71, 116, 32, 157, 202, 36, 116, 157, 116, 116, 116, 114, 71, 114, 200, 202, 116, 206, 71, 116, 116, 71, 71, 116, 116, 116, 36, 114, 71, 32, 243, 32, 71, 157, 71, 200, 245, 116, 32, 71, 157, 71, 36, 32, 116, 116, 114, 157, 200, 77, 114, 114, 71, 30, 118, 32, 157, 75, 114, 116, 116, 251, 32, 116, 114, 36, 116, 36, 114, 114, 30, 71, 30, 116, 30, 114, 116, 116, 73, 116, 30, 73, 71, 30, 157, 157, 114, 116, 200, 73, 116, 157, 71, 118, 116, 32, 157, 73, 116, 116, 32, 157, 159, 243, 71, 71, 71, 210, 114, 73, 116, 159, 243, 36, 116, 114, 71, 71, 157, 71, 34, 200, 157, 200, 157, 30, 159, 202, 116, 71, 34, 114, 73, 71, 116, 71, 71, 243, 202, 116, 114, 116, 114, 159, 30, 116, 73, 157, 245, 116, 200, 114, 114, 200, 116, 114, 116, 116, 116, 200, 200, 73, 71, 114, 251, 32, 114, 157, 36, 116, 200, 157, 124, 114, 
71, 157, 251, 36, 243, 114, 157, 32, 243, 116, 116, 200, 202, 120, 159, 159, 206, 75, 200, 204, 116, 71, 71, 114, 73, 71, 200, 118, 71, 32, 116, 159, 73, 116, 116, 73, 71, 30, 157, 157, 73, 200, 200, 157, 200, 247, 243, 73, 71, 75, 36, 71, 32, 36, 157, 116, 245, 30, 73, 71, 116, 159, 116, 71, 116, 32, 157, 159, 30, 202, 32, 116, 116, 159, 32, 159, 159, 200, 32, 36, 114, 32, 71, 200, 71, 114, 243, 200, 73, 159, 243, 75, 30, 200, 32, 116, 71, 161, 116, 116, 116, 71, 159, 116, 71, 32, 159, 243, 116, 159, 118, 71, 157, 116, 36, 36, 114, 116, 32, 157, 73, 30, 73, 116, 116, 116, 157, 36, 116, 157, 202, 30, 114, 157, 251, 243, 157, 75, 114, 114, 71, 167, 116, 243, 157, 243, 157, 116, 202, 71, 32, 114, 73, 157, 116, 157, 116, 157, 71, 245, 32, 114, 116, 114, 200, 247, 243, 116, 116, 116, 32, 71, 114, 114, 157, 159, 116, 243, 32, 36, 36, 36, 243, 204, 71, 34, 159, 251, 157, 71, 32, 200, 114, 157, 73, 71, 202, 116, 114, 249, 73, 32, 157, 73, 116, 116, 157, 116, 71, 200, 73, 157, 243, 118, 114, 157, 251, 30, 116, 71, 116, 71, 116, 245, 114, 157, 243, 116, 71, 116, 114, 116, 200, 71, 114, 116, 116, 71, 202, 116, 73, 71, 116, 71, 159, 114, 71, 73, 71, 75, 116, 116, 116, 116, 116, 157, 71, 247, 245, 34, 157, 116, 114, 157, 71, 116, 200, 73, 157, 71, 157, 157, 249, 200, 116, 116, 116, 116, 71, 116, 116, 157, 157, 116, 114, 157, 116, 71, 157, 116, 71, 116, 116, 157, 116, 114, 116, 116, 30, 202, 116, 73, 30, 116, 157, 116, 114, 157, 251, 114, 159, 114, 122, 30, 32, 116, 73, 116, 157, 71, 32, 116, 116, 71, 114, 157, 116, 167, 116, 116, 30, 71, 114, 116, 243, 116, 71, 116, 73, 200, 161, 116, 30, 116, 114, 157, 114, 114, 116, 157, 243, 17, 73, 71, 116, 120, 34, 116, 202, 243, 71, 71, 116, 32, 116, 116, 116, 116, 245, 30, 71, 73, 71, 116, 71, 73, 116, 157, 71, 32, 116, 114, 157, 116, 167, 116, 32, 116, 116, 73, 116, 71, 36, 116, 32, 243, 30, 116, 200, 243, 114, 157, 159, 30, 157, 116, 118, 114, 118, 200, 206, 30, 71, 157, 32, 118, 71, 116, 71, 36, 116, 114, 157, 30, 114, 114, 116, 200, 
161, 114, 114, 73, 32, 116, 116, 116, 73, 116, 116, 71, 30, 116, 116, 116, 116, 116, 116, 157, 116, 200, 116, 116, 116, 116, 71, 71, 206, 116, 73, 114, 116, 116, 116, 116, 114, 73, 157, 114, 116, 116, 116, 116, 71, 116, 116, 71, 71, 34, 114, 32, 116, 157, 71, 116, 73, 114, 116, 116, 116, 71, 116, 243, 116, 114, 206, 71, 116, 71, 116, 157, 30, 202, 243, 32, 159, 116, 116, 71, 116, 157, 114, 71, 114, 157, 249, 116, 116, 202, 114, 75, 73, 116, 73, 71, 159, 116, 71, 116, 32, 157, 159, 30, 71, 157, 71, 32, 157, 161, 114, 30, 30, 116, 116, 71, 71, 36, 116, 245, 36, 32, 116, 30, 202, 206, 159, 30, 202, 157, 71, 159, 116, 116, 116, 157, 200, 75, 30, 30, 114, 159, 30, 71, 71, 114, 200, 200, 159, 245, 116, 116, 116, 30, 71, 30, 114, 116, 200, 71, 73, 71, 73, 116, 73, 75, 159, 206, 32, 200, 245, 34, 116, 73, 157, 114, 157, 30, 116, 114, 159, 116, 159, 243, 71, 200, 159, 202, 116, 71, 243, 243, 206, 71, 157, 200, 30, 114, 157, 159, 116, 116, 200, 245, 83, 243, 36, 114, 116, 71, 114, 116, 30, 71, 30, 71, 116, 157, 114, 118, 116, 114, 116, 116, 243, 157, 116, 114, 157, 251, 116, 116, 157, 251, 32, 32, 245, 114, 73, 114, 159, 30, 32, 116, 116, 114, 157, 116, 116, 30, 73, 71, 206, 71, 116, 116, 71, 32, 114, 159, 79, 116, 116, 116, 116, 157, 116, 167, 116, 116, 116, 116, 116, 200, 73, 114, 73, 116, 116, 243, 202, 71, 73, 32, 116, 247, 251, 32, 116, 251, 118, 30, 116, 77, 208, 157, 32, 116, 116, 116, 73, 200, 251, 116, 71, 157, 30, 157, 251, 32, 202, 202, 71, 159, 243, 159, 30, 71, 200, 71, 116, 157, 34, 116, 116, 116, 71, 36, 71, 71, 116, 159, 114, 202, 116, 116, 251, 116, 114, 206, 204, 71, 157, 159, 30, 202, 32, 116, 116, 159, 71, 30, 243, 114, 157, 114, 114, 116, 32, 157, 165, 159, 206, 32, 114, 116, 159, 159, 202, 116, 71, 116, 114, 118, 71, 114, 116, 114, 114, 243, 32, 116, 116, 71, 116, 116, 116, 116, 116, 116, 116, 71, 157, 116, 157, 71, 32, 118, 116, 116, 71, 116, 73, 114, 116, 116, 71, 116, 114, 116, 71, 116, 71, 116, 114, 157, 159, 202, 116, 200, 116, 71, 157, 118, 30, 
245, 114, 71, 30, 157, 116, 71, 71, 83, 32, 114, 116, 114, 71, 243, 245, 32, 114, 114, 157, 73, 167, 116, 114, 73, 17, 116, 202, 71, 116, 202, 116, 243, 71, 116, 159, 71, 243, 17, 210, 73, 200, 114, 116, 163, 73, 32, 116, 116, 116, 116, 157, 157, 200, 30, 73, 247, 200, 114, 157, 73, 71, 114, 157, 159, 202, 116, 114, 71, 116, 120, 157, 200, 116, 202, 157, 71, 114, 200, 77, 114, 116, 116, 71, 73, 243, 32, 73, 243, 200, 249, 116, 114, 116, 157, 71, 71, 157, 251, 36, 116, 157, 157, 243, 157, 116, 71, 73, 157, 118, 200, 116, 157, 114, 157, 71, 32, 116, 114, 157, 116, 114, 116, 116, 116, 36, 116, 71, 157, 116, 116, 114, 73, 159, 202, 116, 116, 114, 116, 116, 247, 116, 116, 204, 73, 71, 116, 32, 157, 159, 243, 71, 71, 157, 114, 116, 114, 71, 114, 114, 157, 118, 157, 30, 116, 116, 116, 116, 71, 71, 204, 116, 157, 116, 116, 114, 73, 118, 71, 157, 163, 36, 116, 116, 71, 116, 114, 200, 114, 116, 75, 200, 75, 116, 116, 116, 161, 81, 114, 116, 116, 124, 71, 116, 157, 34, 157, 116, 36, 71, 251, 243, 200, 116, 114, 202, 204, 243, 120, 157, 116, 167, 116, 32, 116, 114, 71, 157, 159, 206, 71, 116, 71, 161, 71, 200, 202, 116, 116, 116, 120, 157, 32, 116, 71, 165, 206, 116, 116, 73, 157, 114, 251, 32, 157, 32, 243, 71, 30, 114, 116, 71, 30, 157, 71, 116, 71, 116, 118, 243, 157, 116, 202, 157, 202, 73, 116, 116, 116, 114, 116, 71, 251, 243, 116, 200, 116, 71, 71, 116, 73, 71, 116, 71, 114, 157, 128, 118, 71, 116, 114, 114, 71, 73, 116, 116, 157, 71, 17, 71, 114, 157, 116, 167, 116, 32, 116, 243, 202, 157, 157, 157, 30, 116, 73, 157, 116, 71, 116, 157, 116, 114, 116, 116, 116, 116, 71, 71, 32, 116, 73, 116, 157, 200, 157, 116, 114, 116, 114, 30, 30, 116, 71, 163, 73, 116, 116, 116, 116, 114, 116, 116, 114, 73, 116, 73, 159, 116, 243, 118, 251, 247, 247, 71, 75, 30, 73, 116, 116, 116, 32, 114, 114, 71, 32, 251, 200, 202, 114, 116, 114, 116, 202, 114, 159, 17, 116, 30, 114, 114, 116, 73, 116, 71, 32, 114, 157, 251, 116, 116, 118, 114, 71, 30, 114, 34, 116, 36, 116, 114, 157, 251, 32, 
116, 116, 208, 206, 159, 251, 32, 204, 161, 71, 73, 71, 206, 114, 157, 116, 116, 243, 243, 116, 71, 116, 243, 247, 30, 30, 114, 116, 71, 116, 126, 202, 114, 116, 200, 243, 114, 157, 116, 116, 114, 116, 114, 71, 114, 116, 116, 114, 206, 114, 157, 73, 167, 116, 243, 116, 251, 32, 71, 71, 157, 71, 116, 116, 71, 200, 202, 114, 114, 116, 73, 116, 116, 116, 116, 71, 116, 243, 116, 251, 32, 116, 206, 159, 251, 32, 73, 116, 116, 157, 116, 114, 116, 116, 116, 116, 116, 116, 71, 36, 114, 116, 71, 116, 71, 202, 200, 71, 32, 116, 116, 73, 161, 30, 114, 157, 251, 114, 116, 71, 114, 116, 71, 73, 32, 116, 245, 71, 200, 75, 200, 116, 116, 116, 116, 71, 114, 116, 116, 116, 116, 116, 81, 116, 71, 114, 116, 245, 200, 116, 116, 71, 116, 157, 200, 159, 245, 116, 116, 116, 30, 71, 30, 114, 116, 157, 114, 161, 245, 167, 159, 200, 32, 73, 71, 206, 71, 114, 200, 71, 71, 200, 71, 114, 71, 73, 116, 114, 157, 251, 32, 114, 77, 200, 32, 245, 200, 202, 116, 243, 71, 116, 73, 245, 71, 245, 200, 30, 251, 32, 157, 157, 157, 32, 157, 200, 114, 71, 159, 32, 127, 200, 157, 71, 210, 245, 120, 157, 253, 73, 200, 71, 114, 157, 116, 200, 77, 114, 116, 116, 71, 73, 157, 116, 114, 116, 30, 159, 118, 116, 32, 157, 200, 157, 200, 71, 75, 73, 116, 116, 243, 157, 116, 114, 114, 114, 116, 200, 71, 245, 118, 157, 159, 71, 157, 116, 247, 30, 71, 116, 71, 30, 30, 204, 245, 157, 71, 157, 200, 71, 116, 157, 36, 116, 71, 30, 116, 116, 116, 114, 73, 116, 243, 243, 116, 73, 157, 116, 75, 114, 73, 116, 71, 71, 206, 159, 163, 157, 159, 71, 36, 159, 30, 202, 157, 116, 114, 157, 159, 206, 71, 116, 71, 116, 243, 71, 32, 71, 73, 157, 116, 114, 116, 157, 200, 30, 32, 71, 118, 114, 116, 157, 157, 116, 30, 114, 116, 116, 32, 114, 114, 71, 159, 159, 157, 36, 159, 200, 116, 202, 114, 114, 116, 71, 116, 243, 243, 116, 71, 116, 73, 114, 32, 114, 32, 245, 114, 79, 157, 200, 114, 116, 114, 243, 157, 200, 71, 157, 116, 36, 71, 116, 71, 30, 200, 157, 71, 32, 116, 114, 157, 251, 114, 116, 73, 73, 243, 200, 116, 116, 116, 114, 30, 116, 
157, 243, 157, 202, 71, 200, 159, 114, 73, 116, 116, 71, 114, 243, 71, 114, 114, 116, 114, 159, 30, 116, 30, 157, 116, 243, 200, 114, 71, 243, 157, 116, 116, 200, 71, 116, 159, 114, 204, 116, 114, 243, 161, 157, 32, 114, 73, 159, 32, 114, 114, 116, 116, 116, 200, 34, 116, 30, 116, 71, 30, 75, 116, 116, 200, 36, 116, 116, 116, 71, 157, 116, 159, 157, 32, 32, 116, 116, 71, 116, 71, 116, 116, 114, 245, 157, 251, 116, 157, 116, 32, 157, 202, 116, 114, 73, 71, 116, 202, 243, 245, 116, 251, 116, 163, 30, 114, 34, 114, 73, 71, 116, 157, 71, 243, 202, 116, 114, 116, 114, 159, 30, 116, 200, 73, 114, 202, 163, 157, 243, 159, 159, 159, 116, 32, 36, 71, 71, 30, 116, 114, 116, 116, 118, 114, 71, 114, 73, 116, 251, 32, 116, 114, 243, 206, 118, 17, 116, 159, 116, 71, 116, 71, 116, 202, 157, 116, 116, 116, 116, 34, 157, 114, 71, 200, 202, 116, 243, 71, 116, 243, 157, 251, 32, 114, 243, 243, 36, 36, 116, 114, 157, 251, 32, 116, 36, 36, 116, 114, 157, 200, 116, 71, 116, 114, 32, 159, 116, 116, 114, 116, 71, 116, 116, 116, 116, 116, 116, 116, 116, 71, 116, 114, 116, 114, 157, 251, 32, 75, 36, 36, 116, 114, 116, 200, 159, 202, 71, 116, 116, 77, 200, 30, 163, 157, 161, 114, 114, 73, 73, 71, 116, 30, 114, 116, 116, 116, 116, 71, 200, 159, 165, 159, 206, 32, 114, 159, 75, 200, 116, 243, 71, 116, 73, 114, 116, 245, 200, 116, 71, 116, 116, 200, 118, 200, 71, 71, 36, 116, 73, 73, 116, 116, 71, 114, 75, 30, 73, 116, 116, 116, 116, 30, 243, 159, 71, 116, 251, 116, 36, 116, 114, 159, 71, 36, 71, 243, 159, 71, 116, 114, 116, 206, 71, 114, 157, 73, 116, 116, 71, 116, 71, 114, 243, 32, 73, 73, 30, 71, 116, 71, 116, 167, 116, 157, 157, 32, 30, 114, 157, 116, 71, 114, 157, 200, 116, 114, 114, 116, 73, 116, 116, 159, 114, 114, 116, 73, 114, 116, 206, 36, 116, 73, 200, 116, 73, 114, 114, 36, 114, 32, 114, 114, 157, 114, 114, 71, 71, 206, 114, 17, 116, 116, 73, 30, 157, 116, 116, 116, 116, 71, 245, 116, 30, 73, 71, 159, 116, 71, 116, 202, 73, 71, 116, 71, 206, 116, 73, 114, 36, 71, 116, 71, 116, 114, 
118, 116, 157, 120, 71, 116, 32, 116, 73, 73, 247, 114, 204, 116, 114, 204, 116, 114, 116, 116, 116, 77, 116, 114, 114, 200, 114, 116, 116, 243, 159, 114, 36, 36, 73, 116, 116, 71, 116, 73, 71, 71, 71, 71, 36, 32, 116, 116, 116, 159, 116, 30, 116, 157, 116, 116, 71, 30, 243, 206, 200, 114, 73, 114, 73, 116, 251, 32, 116, 114, 243, 206, 71, 114, 73, 71, 114, 114, 157, 114, 114, 114, 114, 157, 116, 114, 243, 114, 116, 200, 161, 157, 116, 114, 71, 245, 30, 159, 32, 200, 116, 118, 243, 116, 73, 71, 157, 71, 116, 116, 114, 200, 114, 157, 71, 32, 116, 71, 116, 157, 114, 116, 114, 116, 71, 116, 116, 157, 116, 116, 116, 73, 32, 157, 116, 116, 71, 116, 116, 116, 116, 114, 116, 73, 73, 157, 114, 157, 71, 32, 114, 32, 157, 116, 200, 75, 116, 36, 36, 116, 116, 245, 116, 71, 30, 73, 30, 116, 243, 114, 71, 114, 116, 116, 116, 116, 116, 116, 30, 249, 116, 116, 73, 71, 116, 71, 116, 114, 73, 71, 116, 36, 36, 36, 157, 157, 73, 116, 116, 116, 167, 116, 116, 157, 243, 157, 116, 116, 116, 163, 30, 36, 32, 116, 36, 116, 71, 157, 116, 114, 116, 157, 200, 200, 200, 114, 118, 32, 73, 116, 114, 73, 243, 30, 73, 71, 116, 159, 116, 71, 116, 32, 157, 159, 30, 202, 32, 116, 116, 159, 71, 30, 243, 114, 157, 114, 114, 116, 32, 157, 165, 159, 206, 32, 114, 116, 159, 159, 202, 116, 71, 116, 114, 118, 71, 114, 116, 114, 114, 243, 32, 116, 116, 71, 116, 116, 116, 116, 116, 116, 116, 71, 157, 116, 157, 71, 32, 118, 71, 114, 114, 32, 116, 114, 157, 75, 202, 161, 114, 114, 71, 157, 243, 114, 243, 73, 245, 116, 114, 71, 30, 157, 71, 118, 71, 118, 204, 116, 114, 157, 30, 75, 243, 161, 75, 114, 116, 159, 116, 36, 116, 30, 159, 71, 116, 116, 114, 157, 243, 243, 71, 116, 157, 36, 36, 116, 159, 114, 200, 114, 157, 32, 114, 251, 243, 71, 200, 71, 118, 114, 116, 157, 116, 114, 157, 71, 159, 34, 75, 206, 71, 116, 116, 71, 71, 116, 116, 116, 36, 114, 71, 32, 243, 32, 71, 157, 71, 200, 245, 116, 32, 71, 157, 71, 36, 32, 116, 116, 114, 157, 200, 77, 114, 114, 71, 30, 118, 32, 157, 75, 114, 116, 116, 116, 73, 159, 
116, 116, 243, 116, 116, 167, 116, 71, 116, 116, 157, 114, 116, 200, 73, 116, 71, 116, 116, 71, 116, 116, 116, 71, 71, 32, 116, 73, 116, 157, 71, 32, 157, 116, 71, 73, 81, 159, 114, 30, 157, 30, 114, 32, 71, 71, 116, 116, 118, 116, 116, 114, 159, 30, 116, 73, 30, 116, 116, 114, 116, 116, 71, 71, 116, 32, 116, 114, 116, 122, 243, 77, 34, 247, 243, 163, 159, 157, 71, 116, 30, 200, 200, 200, 157, 251, 118, 30, 116, 77, 208, 157, 200, 114, 118, 116, 32, 243, 247, 157, 157, 73, 114, 36, 36, 36, 114, 251, 32, 202, 116, 71, 36, 200, 116, 116, 116, 116, 157, 114, 73, 71, 245, 157, 71, 243, 202, 157, 71, 116, 243, 116, 30, 161, 200, 73, 202, 200, 116, 120, 157, 116, 73, 200, 116, 71, 75, 200, 71, 71, 116, 32, 200, 30, 116, 202, 116, 116, 157, 114, 116, 116, 114, 157, 116, 114, 157, 161, 32, 116, 116, 71, 157, 118, 204, 71, 71, 116, 243, 116, 73, 159, 159, 71, 116, 71, 116, 116, 116, 114, 75, 116, 202, 71, 159, 71, 32, 73, 243, 200, 202, 116, 71, 159, 71, 32, 73, 114, 157, 251, 32, 157, 157, 116, 114, 30, 116, 116, 116, 73, 79, 157, 243, 71, 116, 167, 30, 202, 32, 118, 71, 114, 32, 116, 114, 157, 251, 114, 114, 32, 116, 114, 73, 71, 71, 114, 116, 114, 116, 114, 116, 30, 32, 116, 202, 71, 157, 157, 249, 116, 116, 116, 116, 157, 114, 116, 243, 116, 71, 200, 71, 116, 116, 167, 71, 114, 116, 116, 118, 114, 71, 114, 116, 36, 71, 36, 36, 32, 157, 116, 200, 32, 200, 116, 73, 159, 243, 159, 116, 71, 114, 73, 71, 73, 71, 116, 71, 32, 118, 114, 161, 116, 114, 30, 114, 114, 114, 116, 116, 118, 116, 116, 243, 116, 116, 116, 245, 157, 200, 157, 116, 114, 116, 116, 157, 251, 114, 116, 114, 200, 200, 114, 159, 159, 159, 206, 243, 32, 116, 116, 116, 116, 30, 36, 165, 159, 206, 32, 114, 159, 75, 200, 116, 73, 245, 71, 251, 114, 116, 114, 251, 200, 159, 114, 71, 114, 200, 202, 163, 116, 245, 114, 71, 208, 157, 77, 71, 114, 200, 114, 32, 114, 114, 114, 116, 73, 116, 116, 200, 114, 75, 116, 114, 36, 116, 114, 116, 30, 200, 75, 243, 159, 251, 116, 116, 116, 157, 243, 36, 114, 116, 116, 36, 36, 
116, 114, 157, 251, 30, 116, 71, 116, 116, 116, 71, 116, 36, 116, 114, 243, 118, 75, 116, 116, 116, 71, 71, 208, 159, 251, 32, 116, 157, 251, 32, 157, 116, 32, 157, 249, 157, 251, 32, 116, 32, 247, 157, 32, 36, 114, 116, 73, 116, 71, 116, 32, 36, 36, 36, 36, 116, 114, 116, 71, 116, 116, 116, 116, 71, 116, 159, 116, 116, 36, 36, 116, 116, 116, 116, 116, 71, 116, 200, 71, 114, 71, 116, 30, 120, 114, 36, 36, 36, 36, 116, 32, 116, 114, 157, 114, 116, 114, 71, 208, 206, 159, 251, 32, 116, 114, 116, 116, 116, 116, 116, 116, 116, 116, 120, 157, 32, 116, 71, 116, 157, 165, 159, 206, 32, 116, 116, 159, 71, 116, 71, 116, 36, 116, 114, 243, 159, 71, 116, 114, 116, 116, 114, 200, 116, 157, 116, 36, 71, 116, 251, 116, 116, 114, 116, 71, 116, 116, 116, 116, 116, 116, 116, 116, 71, 116, 159, 206, 71, 116, 30, 116, 116, 206, 163, 116, 116, 116, 159, 73, 116, 30, 73, 71, 116, 116, 116, 32, 36, 36, 116, 114, 116, 116, 157, 116, 114, 114, 116, 202, 71, 159, 157, 32, 116, 71, 71, 243, 114, 114, 116, 73, 116, 200, 157, 157, 73, 73, 161, 116, 71, 116, 114, 116, 116, 73, 204, 116, 114, 157, 71, 116, 116, 114, 116, 30, 73, 71, 116, 159, 116, 116, 116, 159, 71, 34, 157, 116, 200, 116, 71, 116, 157, 116, 116, 71, 71, 30, 114, 200, 118, 116, 32, 157, 73, 116, 71, 116, 116, 157, 116, 114, 116, 116, 118, 116, 116, 116, 116, 71, 116, 116, 30, 114, 122, 159, 202, 116, 243, 116, 116, 114, 114, 161, 114, 206, 71, 116, 116, 202, 157, 243, 247, 36, 71, 245, 71, 251, 114, 159, 159, 34, 38, 243, 116, 200, 200, 251, 75, 157, 157, 73, 157, 243, 202, 243, 157, 116, 116, 243, 116, 114, 116, 114, 157, 200, 30, 114, 116, 116, 159, 157, 75, 200, 73, 79, 200, 157, 200, 243, 116, 114, 116, 159, 157, 116, 114, 30, 114, 116, 116, 116, 245, 200, 71, 157, 157, 114, 157, 251, 32, 161, 157, 116, 159, 159, 116, 116, 200, 71, 32, 73, 116, 116, 71, 114, 114, 73, 30, 116, 116, 116, 118, 116, 157, 71, 116, 157, 206, 77, 71, 30, 249, 32, 114, 73, 114, 251, 32, 251, 200, 202, 114, 116, 114, 116, 202, 114, 159, 114, 116, 
116, 120, 116, 30, 71, 206, 116, 118, 73, 71, 116, 116, 114, 73, 73, 159, 202, 116, 71, 116, 116, 116, 120, 30, 73, 71, 116, 200, 243, 71, 243, 202, 243, 73, 116, 159, 71, 202, 159, 116, 116, 36, 36, 116, 157, 73, 206, 251, 116, 200, 36, 157, 116, 245, 116, 116, 114, 120, 157, 32, 116, 114, 73, 202, 114, 157, 251, 32, 116, 245, 71, 116, 159, 30, 71, 114, 159, 79, 30, 71, 73, 114, 71, 71, 202, 116, 116, 116, 114, 157, 159, 30, 30, 116, 34, 116, 116, 36, 157, 116, 167, 116, 71, 116, 200, 71, 116, 159, 114, 204, 116, 114, 243, 161, 157, 32, 114, 73, 159, 32, 114, 114, 116, 116, 116, 200, 34, 116, 30, 116, 71, 30, 75, 116, 116, 71, 114, 73, 159, 73, 157, 118, 169, 116, 30, 116, 75, 159, 116, 73, 116, 116, 73, 202, 114, 73, 73, 114, 157, 251, 200, 73, 159, 202, 73, 159, 36, 36, 243, 157, 34, 36, 114, 116, 116, 71, 200, 159, 71, 200, 157, 157, 116, 245, 116, 243, 243, 71, 116, 116, 157, 116, 114, 116, 200, 200, 114, 118, 116, 114, 116, 157, 159, 116, 118, 114, 118, 71, 116, 114, 114, 71, 114, 75, 114, 32, 71, 200, 75, 71, 73, 116, 32, 247, 114, 116, 30, 71, 32, 200, 116, 114, 30, 75, 243, 71, 116, 202, 200, 200, 71, 206, 159, 251, 30, 157, 114, 116, 157, 114, 116, 116, 116, 71, 116, 32, 157, 71, 116, 200, 157, 200, 116, 71, 157, 118, 204, 71, 243, 116, 159, 159, 206, 116, 116, 71, 71, 206, 71, 114, 157, 73, 116, 116, 71, 116, 71, 159, 161, 114, 114, 71, 71, 206, 116, 200, 116, 114, 157, 251, 32, 116, 116, 157, 251, 32, 32, 114, 159, 32, 157, 116, 157, 71, 32, 116, 71, 116, 116, 116, 71, 159, 114, 71, 116, 200, 71, 116, 116, 157, 71, 32, 118, 71, 71, 116, 32, 73, 71, 116, 200, 116, 71, 116, 116, 116, 116, 116, 116, 71, 116, 73, 157, 32, 157, 200, 30, 75, 200, 116, 200, 116, 200, 116, 30, 73, 71, 116, 159, 116, 71, 116, 202, 73, 71, 116, 71, 159, 114, 114, 116, 73, 116, 116, 116, 30, 202, 157, 116, 71, 118, 116, 32, 157, 73, 32, 116, 114, 157, 253, 30, 116, 116, 71, 116, 32, 157, 73, 116, 157, 71, 32, 71, 116, 116, 157, 245, 116, 116, 71, 116, 159, 114, 202, 116, 116, 251, 
32, 116, 114, 206, 204, 71, 116, 71, 114, 114, 36, 116, 116, 116, 116, 202, 71, 159, 32, 116, 114, 157, 251, 32, 114, 157, 159, 116, 243, 116, 73, 73, 71, 116, 114, 157, 161, 157, 75, 122, 73, 71, 71, 116, 116, 71, 116, 71, 116, 157, 251, 32, 116, 245, 71, 116, 116, 116, 71, 71, 116, 116, 114, 114, 116, 243, 116, 202, 116, 73, 73, 116, 114, 251, 32, 157, 159, 202, 116, 116, 116, 116, 30, 116, 116, 200, 114, 157, 116, 243, 116, 73, 73, 71, 116, 71, 71, 157, 200, 71, 116, 32, 36, 73, 159, 116, 157, 200, 32, 36, 36, 36, 36, 116, 36, 114, 114, 116, 116, 71, 114, 157, 36, 116, 71, 159, 114, 114, 116, 157, 114, 116, 71, 30, 116, 30, 30, 202, 116, 116, 157, 71, 116, 116, 157, 116, 114, 114, 71, 243, 206, 71, 157, 243, 116, 157, 157, 116, 73, 243, 159, 157, 124, 116, 114, 157, 157, 251, 243, 157, 114, 157, 251, 32, 116, 116, 159, 71, 157, 116, 247, 116, 71, 116, 36, 116, 71, 157, 116, 157, 116, 114, 157, 73, 116, 116, 200, 159, 116, 38, 157, 116, 116, 30, 73, 71, 206, 71, 116, 159, 157, 159, 202, 73, 202, 159, 120, 36, 116, 116, 118, 157, 116, 114, 71, 159, 114, 116, 118, 114, 116, 116, 73, 116, 116, 157, 116, 116, 116, 30, 114, 71, 157, 200, 116, 116, 116, 116, 73, 30, 34, 36, 36, 116, 200, 30, 32, 116, 114, 206, 243, 71, 114, 157, 251, 116, 116, 114, 200, 202, 251, 32, 116, 73, 114, 114, 30, 243, 116, 159, 71, 116, 120, 157, 32, 116, 243, 159, 30, 32, 206, 116, 116, 116, 30, 73, 200, 116, 200, 157, 243, 247, 159, 206, 204, 116, 116, 116, 71, 200, 159, 116, 116, 116, 116, 116, 243, 116, 116, 116, 245, 116, 71, 116, 157, 34, 243, 71, 116, 116, 157, 159, 200, 32, 116, 116, 114, 159, 71, 157, 71, 32, 157, 161, 114, 30, 30, 34, 159, 163, 247, 159, 114, 245, 114, 32, 200, 157, 159, 200, 245, 114, 204, 116, 114, 157, 200, 200, 116, 116, 116, 202, 116, 116, 30, 116, 114, 157, 116, 243, 114, 200, 243, 75, 165, 200, 245, 116, 34, 73, 157, 157, 116, 71, 114, 157, 116, 118, 243, 71, 116, 71, 71, 116, 116, 71, 114, 32, 116, 81, 116, 71, 159, 30, 245, 157, 167, 36, 114, 116, 114, 30, 
116, 116, 114, 243, 116, 161, 200, 116, 73, 32, 116, 157, 114, 36, 157, 157, 73, 116, 116, 116, 159, 157, 116, 247, 122, 114, 159, 116, 159, 36, 243, 114, 73, 34, 73, 249, 71, 36, 243, 114, 116, 73, 116, 157, 159, 30, 116, 116, 114, 157, 114, 114, 206, 71, 243, 73, 114, 157, 114, 116, 71, 116, 114, 116, 116, 114, 114, 114, 159, 202, 116, 32, 157, 116, 73, 116, 71, 200, 202, 157, 71, 32, 30, 116, 71, 32, 243, 157, 73, 116, 251, 32, 116, 114, 243, 114, 114, 116, 157, 36, 34, 118, 120, 157, 114, 73, 114, 71, 251, 30, 200, 114, 73, 114, 73, 114, 44, 116, 116, 114, 157, 200, 116, 36, 157, 157, 200, 34, 157, 116, 200, 116, 71, 116, 157, 116, 114, 116, 116, 114, 157, 32, 116, 114, 114, 34, 71, 32, 116, 71, 116, 71, 116, 116, 71, 116, 116, 116, 116, 116, 114, 116, 200, 200, 247, 36, 116, 114, 114, 116, 114, 200, 116, 116, 116, 114, 157, 116, 71, 243, 243, 114, 30, 116, 32, 116, 116, 243, 114, 243, 116, 157, 161, 114, 114, 73, 73, 71, 116, 30, 71, 116, 116, 159, 116, 32, 34, 243, 157, 114, 71, 116, 157, 247, 30, 71, 245, 243, 116, 126, 245, 116, 71, 200, 75, 249, 243, 116, 30, 73, 200, 200, 116, 116, 167, 116, 116, 167, 116, 157, 71, 114, 71, 116, 200, 243, 114, 157, 32, 243, 116, 116, 200, 202, 120, 159, 159, 206, 75, 200, 204, 116, 71, 71, 114, 73, 71, 200, 118, 71, 32, 116, 159, 73, 116, 116, 73, 71, 30, 157, 157, 73, 200, 200, 157, 200, 247, 243, 73, 71, 75, 36, 71, 32, 36, 157, 116, 245, 71, 116, 114, 114, 157, 114, 116, 71, 32, 116, 30, 161, 71, 114, 157, 73, 200, 116, 116, 32, 157, 73, 116, 122, 116, 114, 71, 116, 34, 71, 116, 116, 200, 202, 157, 116, 116, 116, 200, 159, 159, 116, 157, 116, 114, 73, 245, 73, 116, 157, 32, 36, 116, 114, 206, 204, 71, 116, 116, 71, 32, 157, 114, 247, 30, 200, 247, 75, 200, 71, 159, 157, 116, 32, 114, 73, 30, 71, 114, 32, 73, 116, 30, 32, 202, 116, 116, 116, 116, 71, 116, 116, 159, 32, 71, 200, 116, 251, 116, 161, 159, 200, 73, 157, 114, 116, 114, 116, 116, 116, 116, 243, 77, 114, 202, 71, 71, 206, 71, 116, 116, 116, 32, 116, 71, 116, 
116, 157, 116, 116, 116, 116, 116, 116, 30, 243, 116, 200, 120, 116, 122, 71, 75, 200, 157, 116, 159, 32, 71, 71, 116, 157, 114, 116, 114, 116, 71, 116, 116, 157, 116, 116, 116, 157, 71, 73, 116, 116, 202, 116, 116, 200, 243, 36, 157, 157, 251, 114, 71, 118, 116, 116, 116, 71, 116, 114, 116, 200, 114, 83, 251, 32, 116, 116, 114, 204, 30, 159, 32, 73, 167, 116, 243, 157, 116, 114, 116, 77, 75, 202, 114, 116, 157, 116, 157, 116, 116, 116, 159, 71, 200, 114, 118, 114, 116, 71, 116, 71, 159, 245, 116, 116, 200, 116, 116, 114, 251, 32, 114, 157, 36, 116, 200, 157, 124, 114, 71, 157, 251, 36, 116, 157, 118, 243, 30, 251, 32, 116, 114, 71, 116, 71, 116, 200, 75, 36, 36, 36, 116, 200, 163, 206, 116, 116, 116, 114, 36, 71, 73, 32, 118, 71, 116, 71, 200, 36, 245, 114, 157, 71, 157, 73, 200, 116, 116, 116, 116, 116, 157, 116, 200, 116, 114, 245, 157, 71, 243, 71, 116, 114, 75, 36, 161, 116, 30, 116, 202, 159, 71, 157, 116, 247, 71, 114, 75, 116, 73, 71, 157, 71, 200, 116, 157, 243, 116, 243, 159, 30, 32, 116, 116, 114, 116, 116, 118, 114, 71, 114, 73, 116, 251, 32, 116, 114, 243, 206, 118, 200, 116, 114, 116, 116, 30, 202, 73, 114, 200, 71, 75, 200, 116, 71, 116, 116, 200, 114, 71, 32, 116, 114, 73, 116, 71, 36, 116, 116, 71, 206, 71, 200, 114, 118, 71, 30, 71, 116, 159, 114, 116, 157, 71, 114, 73, 202, 73, 116, 157, 243, 116, 200, 243, 71, 116, 159, 114, 202, 116, 75, 200, 251, 116, 71, 73, 71, 73, 114, 116, 116, 116, 71, 30, 30, 36, 71, 245, 71, 251, 114, 157, 116, 251, 206, 36, 157, 202, 245, 114, 116, 202, 114, 157, 73, 200, 116, 36, 116, 120, 116, 116, 114, 116, 120, 71, 73, 245, 157, 116, 71, 157, 169, 202, 116, 71, 114, 75, 159, 118, 73, 114, 157, 114, 71, 161, 71, 30, 116, 116, 116, 116, 71, 75, 114, 71, 204, 30, 32, 116, 116, 116, 116, 116, 116, 243, 200, 157, 30, 157, 116, 116, 116, 116, 71, 118, 116, 157, 71, 32, 36, 116, 116, 71, 206, 118, 157, 71, 200, 114, 116, 116, 200, 75, 159, 249, 114, 116, 116, 114, 206, 116, 116, 71, 116, 71, 116, 167, 116, 157, 71, 32, 
71, 116, 200, 116, 118, 204, 116, 71, 116, 116, 157, 71, 116, 32, 116, 114, 247, 204, 114, 157, 114, 200, 202, 114, 116, 157, 71, 157, 200, 157, 200, 30, 116, 200, 71, 200, 202, 157, 116, 245, 73, 114, 202, 116, 114, 77, 30, 114, 157, 251, 32, 116, 202, 32, 116, 116, 116, 116, 30, 36, 32, 116, 36, 116, 73, 116, 116, 116, 116, 167, 71, 71, 243, 202, 114, 200, 116, 71, 157, 73, 30, 114, 114, 163, 71, 206, 71, 116, 114, 200, 114, 114, 36, 36, 71, 73, 71, 114, 157, 116, 116, 157, 126, 157, 116, 32, 204, 71, 118, 71, 159, 71, 116, 71, 157, 116, 116, 114, 73, 116, 251, 32, 116, 157, 157, 118, 71, 243, 114, 30, 30, 34, 159, 163, 247, 71, 32, 114, 159, 251, 32, 116, 116, 32, 75, 157, 157, 71, 245, 71, 251, 114, 116, 114, 116, 116, 71, 202, 71, 71, 206, 116, 116, 116, 200, 118, 116, 116, 200, 202, 157, 202, 73, 116, 116, 114, 157, 71, 116, 116, 30, 157, 114, 116, 114, 157, 118, 116, 116, 116, 116, 251, 32, 116, 116, 116, 114, 157, 200, 30, 243, 75, 116, 116, 116, 114, 73, 71, 116, 116, 202, 71, 200, 71, 159, 114, 200, 245, 116, 75, 200, 243, 157, 116, 73, 71, 116, 118, 247, 200, 30, 75, 71, 116, 71, 157, 116, 114, 116, 118, 73, 157, 30, 243, 71, 247, 73, 200, 114, 157, 251, 32, 159, 243, 116, 161, 159, 118, 116, 116, 71, 30, 116, 71, 159, 75, 251, 157, 157, 116, 114, 116, 116, 243, 30, 30, 157, 243, 114, 157, 157, 71, 245, 159, 32, 245, 116, 114, 71, 116, 114, 157, 71, 116, 116, 116, 116, 116, 71, 118, 116, 120, 157, 32, 32, 71, 116, 116, 73, 114, 73, 114, 73, 30, 116, 116, 71, 116, 116, 71, 32, 202, 114, 30, 249, 114, 73, 200, 200, 159, 202, 114, 73, 71, 114, 75, 157, 157, 251, 32, 114, 30, 73, 114, 163, 114, 114, 116, 71, 116, 116, 243, 202, 75, 200, 116, 200, 116, 200, 116, 30, 116, 114, 157, 200, 114, 116, 116, 114, 114, 71, 200, 71, 75, 36, 32, 116, 75, 200, 71, 159, 157, 116, 114, 73, 30, 71, 114, 32, 159, 202, 73, 200, 32, 200, 116, 159, 71, 202, 159, 116, 73, 71, 116, 208, 206, 159, 159, 206, 32, 157, 116, 200, 200, 116, 251, 32, 71, 71, 77, 114, 71, 118, 116, 116, 
116, 159, 116, 116, 116, 114, 116, 116, 118, 73, 73, 208, 116, 116, 116, 114, 159, 159, 159, 206, 30, 32, 116, 200, 161, 114, 114, 73, 73, 116, 157, 116, 116, 114, 116, 114, 247, 157, 32, 157, 116, 73, 116, 71, 200, 202, 157, 71, 32, 30, 116, 71, 32, 36, 36, 116, 245, 71, 245, 167, 243, 71, 34, 71, 114, 73, 159, 30, 157, 30, 118, 71, 71, 118, 116, 114, 157, 251, 32, 116, 116, 157, 159, 116, 157, 161, 114, 114, 73, 114, 157, 204, 157, 114, 157, 251, 116, 116, 116, 71, 77, 71, 71, 116, 116, 114, 116, 116, 157, 116, 116, 73, 116, 71, 243, 71, 116, 114, 75, 36, 114, 157, 36, 116, 32, 243, 30, 116, 71, 114, 116, 200, 245, 71, 116, 32, 36, 73, 159, 202, 116, 116, 116, 159, 206, 32, 116, 71, 32, 116, 116, 167, 200, 114, 157, 73, 116, 245, 114, 157, 245, 157, 114, 116, 114, 116, 114, 71, 116, 116, 116, 200, 30, 36, 36, 116, 157, 116, 202, 116, 116, 116, 116, 116, 116, 116, 159, 206, 200, 116, 73, 71, 30, 200, 116, 114, 157, 251, 157, 114, 71, 243, 202, 157, 116, 116, 116, 116, 251, 32, 116, 114, 116, 116, 114, 157, 251, 32, 114, 116, 243, 251, 32, 71, 157, 71, 32, 200, 32, 245, 114, 73, 116, 73, 200, 157, 114, 114, 118, 71, 30, 36, 116, 116, 202, 73, 167, 116, 73, 114, 71, 157, 159, 159, 157, 116, 116, 114, 159, 71, 206, 114, 116, 71, 116, 202, 159, 30, 71, 114, 243, 251, 32, 116, 114, 157, 116, 114, 157, 116, 116, 71, 159, 200, 114, 159, 75, 32, 116, 114, 243, 71, 116, 116, 116, 159, 71, 157, 161, 120, 243, 206, 114, 206, 30, 32, 116, 71, 116, 38, 243, 71, 114, 159, 157, 116, 200, 243, 114, 243, 200, 114, 116, 71, 73, 114, 116, 116, 116, 73, 116, 116, 71, 116, 32, 243, 116, 30, 116, 71, 200, 159, 202, 116, 71, 32, 116, 157, 157, 243, 116, 73, 163, 71, 206, 114, 116, 157, 247, 116, 116, 116, 71, 157, 157, 249, 71, 36, 116, 36, 71, 245, 71, 251, 114, 157, 116, 251, 32, 116, 200, 202, 114, 116, 200, 116, 157, 30, 73, 200, 73, 30, 159, 200, 32, 116, 157, 116, 116, 116, 71, 30, 200, 73, 116, 157, 120, 73, 114, 159, 114, 30, 157, 243, 114, 157, 71, 200, 116, 116, 75, 202, 116, 
116, 116, 71, 116, 163, 116, 116, 202, 243, 71, 71, 116, 116, 116, 116, 114, 118, 71, 114, 116, 114, 114, 243, 116, 116, 116, 116, 116, 114, 200, 36, 36, 36, 116, 71, 206, 71, 116, 36, 116, 32, 116, 114, 116, 32, 200, 75, 116, 30, 116, 30, 114, 116, 163, 83, 32, 114, 116, 114, 243, 243, 114, 77, 243, 157, 71, 116, 116, 247, 116, 116, 114, 83, 157, 245, 116, 116, 116, 116, 114, 159, 30, 116, 118, 116, 116, 116, 249, 114, 116, 114, 30, 116, 157, 200, 114, 243, 157, 114, 159, 30, 116, 251, 116, 249, 243, 157, 71, 116, 71, 30, 116, 251, 32, 116, 30, 114, 157, 200, 157, 73, 71, 120, 116, 200, 124, 71, 157, 71, 157, 114, 157, 73, 116, 116, 114, 116, 116, 30, 116, 116, 116, 116, 116, 157, 116, 200, 116, 114, 245, 157, 71, 32, 116, 71, 71, 73, 245, 116, 116, 116, 114, 73, 116, 71, 116, 114, 116, 71, 36, 243, 114, 73, 116, 114, 116, 71, 36, 243, 114, 73, 34, 73, 249, 71, 36, 243, 114, 73, 34, 73, 249, 71, 202, 75, 71, 243, 71, 116, 200, 114, 71, 75, 77, 71, 200, 71, 247, 116, 71, 157, 118, 30, 245, 161, 71, 30, 243, 71, 159, 116, 36, 73, 34, 32, 114, 157, 71, 71, 116, 157, 30, 118, 32, 73, 116, 114, 114, 200, 159, 114, 116, 71, 157, 243, 116, 204, 71, 157, 251, 71, 243, 73, 116, 157, 71, 32, 251, 243, 157, 114, 157, 251, 32, 157, 114, 116, 245, 159, 73, 116, 116, 157, 71, 73, 157, 243, 71, 116, 251, 32, 243, 243, 36, 157, 116, 200, 71, 32, 159, 243, 116, 161, 204, 116, 114, 116, 200, 114, 116, 116, 116, 116, 32, 202, 159, 114, 114, 30, 116, 116, 30, 159, 32, 116, 243, 251, 243, 116, 71, 36, 116, 114, 30, 116, 114, 73, 114, 73, 157, 30, 73, 245, 83, 157, 116, 114, 116, 73, 243, 30, 116, 114, 116, 157, 116, 114, 116, 73, 116, 116, 116, 30, 114, 116, 71, 116, 116, 32, 116, 116, 159, 71, 157, 73, 204, 71, 36, 36, 116, 116, 73, 73, 36, 71, 71, 32, 116, 73, 114, 73, 200, 159, 200, 114, 116, 114, 116, 200, 71, 75, 200, 114, 157, 157, 157, 30, 116, 73, 157, 114, 73, 114, 116, 116, 157, 71, 157, 200, 114, 116, 114, 71, 200, 73, 202, 30, 30, 157, 116, 243, 114, 114, 157, 251, 32, 
116, 116, 202, 71, 159, 157, 71, 157, 32, 73, 38, 71, 116, 245, 116, 157, 114, 71, 30, 30, 200, 71, 157, 73, 161, 116, 157, 71, 157, 114, 116, 116, 116, 114, 73, 116, 116, 118, 30, 32, 157, 200, 249, 114, 30, 71, 157, 32, 73, 116, 116, 157, 202, 114, 116, 116, 114, 114, 32, 118, 71, 243, 116, 71, 116, 116, 114, 116, 75, 116, 30, 73, 71, 116, 159, 116, 36, 116, 36, 114, 116, 116, 116, 36, 157, 116, 114, 116, 114, 200, 73, 114, 73, 116, 116, 116, 114, 75, 114, 161, 116, 116, 116, 114, 116, 116, 116, 167, 116, 116, 116, 116, 114, 157, 116, 116, 116, 116, 73, 114, 73, 73, 116, 114, 116, 116, 159, 71, 71, 32, 116, 245, 116, 114, 245, 116, 159, 120, 114, 116, 116, 159, 114, 32, 118, 114, 157, 114, 71, 30, 118, 71, 116, 71, 116, 157, 200, 73, 114, 71, 116, 200, 71, 116, 202, 157, 116, 114, 83, 157, 71, 116, 116, 114, 251, 32, 116, 114, 116, 116, 116, 167, 243, 157, 243, 157, 116, 116, 116, 208, 243, 32, 71, 157, 71, 77, 71, 71, 116, 116, 116, 157, 116, 114, 206, 208, 200, 32, 116, 38, 71, 71, 157, 118, 116, 116, 116, 71, 116, 114, 116, 73, 116, 118, 114, 245, 116, 116, 116, 116, 116, 116, 114, 114, 73, 116, 73, 116, 71, 202, 73, 116, 71, 36, 159, 116, 114, 157, 200, 71, 200, 200, 71, 200, 159, 114, 122, 159, 71, 159, 206, 71, 36, 114, 116, 114, 116, 71, 116, 114, 116, 114, 71, 73, 116, 71, 114, 116, 73, 71, 159, 118, 116, 116, 114, 73, 245, 73, 73, 36, 73, 73, 116, 116, 71, 116, 120, 116, 200, 159, 202, 71, 167, 116, 116, 71, 116, 116, 157, 161, 114, 114, 73, 73, 71, 116, 30, 114, 116, 116, 206, 245, 114, 116, 114, 157, 200, 114, 116, 204, 165, 116, 116, 116, 114, 157, 157, 114, 116, 116, 157, 118, 245, 71, 116, 116, 116, 71, 200, 116, 116, 116, 114, 71, 245, 71, 251, 114, 73, 157, 116, 116, 71, 116, 75, 116, 116, 116, 71, 159, 73, 116, 73, 114, 116, 114, 157, 116, 247, 114, 75, 116, 116, 114, 157, 200, 30, 161, 202, 32, 157, 116, 116, 118, 114, 114, 71, 247, 71, 116, 71, 202, 116, 73, 71, 30, 116, 73, 159, 159, 202, 116, 71, 71, 200, 116, 116, 71, 116, 116, 116, 165, 
206, 202, 116, 71, 71, 243, 159, 116, 116, 116, 79, 118, 116, 116, 116, 30, 114, 243, 71, 200, 116, 200, 73, 71, 116, 116, 167, 116, 116, 71, 116, 116, 116, 116, 118, 116, 116, 116, 116, 116, 157, 116, 116, 157, 114, 157, 116, 157, 71, 32, 114, 202, 116, 116, 208, 116, 116, 118, 114, 159, 116, 116, 116, 114, 157, 34, 73, 116, 200, 161, 114, 114, 73, 73, 116, 116, 116, 71, 116, 32, 36, 114, 32, 114, 71, 32, 77, 71, 71, 116, 114, 73, 114, 157, 157, 73, 157, 245, 32, 116, 116, 116, 251, 32, 116, 157, 157, 245, 116, 71, 200, 159, 71, 116, 71, 71, 83, 32, 245, 157, 114, 116, 114, 116, 114, 71, 116, 120, 157, 157, 116, 114, 157, 116, 157, 116, 202, 116, 120, 73, 71, 73, 202, 116, 159, 206, 200, 116, 159, 116, 34, 116, 116, 159, 71, 157, 71, 71, 116, 32, 200, 71, 116, 116, 32, 114, 157, 251, 71, 208, 36, 30, 114, 157, 251, 32, 116, 71, 243, 243, 71, 36, 157, 32, 200, 159, 114, 71, 157, 202, 114, 114, 114, 157, 157, 251, 116, 116, 116, 114, 116, 116, 116, 114, 116, 36, 116, 114, 157, 71, 116, 157, 251, 200, 114, 71, 71, 206, 159, 251, 30, 157, 114, 116, 157, 30, 73, 245, 83, 243, 36, 243, 204, 243, 73, 116, 159, 71, 202, 159, 116, 116, 36, 200, 73, 116, 116, 71, 116, 116, 243, 114, 71, 200, 71, 116, 32, 71, 36, 116, 114, 157, 251, 30, 157, 159, 116, 114, 30, 116, 71, 118, 114, 157, 114, 71, 30, 114, 116, 73, 245, 157, 71, 32, 114, 202, 116, 116, 208, 116, 202, 116, 116, 116, 200, 157, 73, 251, 116, 116, 116, 116, 157, 36, 116, 114, 157, 71, 116, 159, 114, 202, 116, 114, 77, 157, 116, 116, 200, 114, 157, 157, 73, 116, 116, 116, 116, 202, 116, 71, 200, 71, 114, 116, 116, 114, 116, 116, 71, 116, 71, 32, 71, 32, 116, 71, 210, 114, 159, 116, 116, 32, 161, 71, 116, 243, 243, 114, 243, 116, 114, 116, 159, 116, 116, 71, 159, 116, 157, 157, 251, 116, 116, 116, 116, 71, 71, 116, 116, 157, 124, 32, 114, 116, 114, 114, 71, 30, 32, 202, 30, 157, 116, 114, 159, 30, 202, 116, 157, 200, 116, 116, 116, 116, 116, 159, 202, 116, 71, 116, 116, 71, 116, 32, 71, 118, 116, 116, 71, 116, 116, 
200, 243, 116, 116, 114, 116, 71, 116, 73, 36, 36, 243, 71, 73, 157, 114, 71, 30, 157, 114, 116, 157, 71, 71, 116, 116, 243, 118, 116, 32, 116, 114, 157, 116, 116, 114, 116, 116, 116, 116, 114, 157, 157, 116, 157, 71, 32, 30, 157, 200, 116, 71, 116, 114, 159, 116, 114, 243, 157, 116, 71, 116, 116, 116, 73, 114, 73, 73, 116, 71, 118, 114, 116, 114, 157, 251, 32, 75, 36, 36, 116, 114, 157, 251, 30, 71, 32, 116, 71, 116, 157, 114, 116, 114, 116, 114, 116, 116, 114, 157, 116, 116, 200, 118, 71, 157, 116, 30, 71, 73, 32, 118, 71, 116, 71, 36, 116, 157, 71, 32, 118, 114, 73, 114, 73, 116, 71, 116, 116, 157, 114, 116, 116, 116, 116, 116, 116, 71, 71, 159, 157, 116, 157, 75, 36, 116, 116, 116, 116, 114, 157, 116, 167, 116, 243, 116, 251, 32, 71, 71, 114, 30, 73, 71, 30, 157, 157, 73, 36, 114, 116, 116, 200, 75, 243, 157, 159, 114, 71, 30, 157, 157, 206, 114, 73, 32, 116, 32, 157, 116, 243, 116, 73, 116, 159, 32, 116, 202, 243, 118, 32, 116, 116, 116, 116, 200, 114, 159, 116, 116, 114, 157, 159, 159, 157, 116, 116, 116, 71, 30, 202, 157, 200, 32, 116, 116, 71, 71, 34, 243, 200, 116, 116, 71, 116, 243, 118, 200, 200, 114, 159, 116, 116, 114, 157, 200, 114, 75, 114, 159, 30, 32, 157, 157, 157, 243, 116, 114, 116, 71, 116, 116, 116, 116, 71, 114, 116, 116, 116, 116, 202, 114, 73, 116, 71, 245, 116, 116, 116, 202, 200, 71, 30, 71, 249, 114, 73, 202, 116, 116, 116, 32, 73, 71, 114, 243, 243, 116, 73, 116, 71, 32, 114, 116, 32, 116, 157, 71, 32, 118, 116, 32, 200, 36, 116, 114, 116, 116, 157, 30, 75, 116, 71, 71, 157, 200, 114, 116, 71, 116, 71, 116, 114, 32, 116, 36, 36, 116, 114, 157, 200, 116, 71, 116, 114, 32, 159, 116, 116, 114, 116, 71, 157, 157, 157, 251, 32, 157, 116, 157, 116, 167, 116, 157, 114, 157, 167, 116, 157, 243, 116, 71, 114, 36, 36, 36, 116, 71, 116, 116, 114, 157, 116, 32, 165, 116, 118, 114, 71, 157, 32, 118, 116, 73, 116, 71, 36, 159, 30, 202, 157, 202, 30, 114, 116, 157, 32, 204, 71, 206, 71, 116, 114, 116, 114, 157, 251, 114, 116, 71, 202, 30, 36, 36, 116, 
114, 114, 71, 75, 200, 30, 116, 116, 30, 159, 32, 116, 116, 73, 30, 116, 251, 116, 247, 157, 71, 32, 32, 71, 116, 30, 157, 200, 157, 204, 32, 36, 157, 116, 200, 114, 116, 116, 73, 71, 157, 30, 71, 116, 157, 114, 116, 114, 116, 116, 116, 71, 114, 73, 114, 243, 114, 200, 71, 118, 114, 159, 116, 116, 116, 114, 157, 34, 73, 116, 116, 71, 116, 116, 114, 116, 116, 200, 157, 157, 116, 71, 116, 116, 116, 32, 36, 36, 75, 114, 116, 114, 116, 116, 116, 116, 116, 159, 71, 157, 161, 116, 116, 251, 32, 71, 245, 116, 116, 116, 30, 116, 116, 71, 118, 73, 116, 157, 114, 116, 159, 71, 157, 251, 157, 114, 71, 30, 157, 116, 71, 116, 32, 114, 116, 114, 116, 116, 157, 116, 116, 116, 159, 159, 157, 200, 114, 116, 116, 114, 116, 116, 245, 116, 114, 157, 251, 36, 116, 157, 157, 73, 200, 36, 116, 32, 116, 114, 157, 36, 116, 30, 114, 157, 200, 157, 114, 116, 30, 120, 116, 116, 116, 202, 71, 118, 71, 71, 116, 114, 116, 116, 202, 200, 71, 30, 71, 249, 36, 114, 116, 73, 200, 71, 116, 114, 73, 71, 206, 116, 118, 114, 114, 159, 116, 202, 157, 116, 116, 30, 71, 114, 116, 116, 114, 116, 114, 116, 36, 157, 114, 116, 116, 116, 116, 200, 36, 116, 114, 71, 116, 32, 157, 73, 116, 116, 245, 116, 116, 116, 71, 116, 116, 243, 116, 116, 116, 116, 114, 116, 116, 114, 116, 71, 116, 114, 116, 73, 116, 116, 116, 30, 114, 116, 71, 116, 116, 116, 73, 116, 71, 243, 71, 116, 114, 75, 36, 114, 157, 116, 116, 116, 116, 126, 202, 114, 116, 200, 243, 114, 243, 34, 73, 157, 118, 200, 200, 157, 200, 159, 30, 30, 157, 116, 114, 116, 167, 116, 157, 71, 30, 114, 116, 71, 116, 126, 202, 114, 116, 200, 243, 114, 157, 116, 116, 114, 116, 114, 71, 114, 116, 116, 114, 206, 114, 157, 73, 167, 116, 243, 116, 251, 32, 71, 71, 157, 71, 116, 116, 71, 200, 202, 114, 114, 116, 73, 116, 116, 116, 116, 71, 116, 243, 116, 251, 32, 116, 206, 159, 251, 32, 73, 116, 116, 157, 116, 114, 116, 116, 116, 116, 116, 116, 71, 116, 73, 116, 116, 30, 71, 243, 243, 200, 71, 116, 32, 157, 36, 36, 116, 114, 157, 251, 32, 116, 71, 116, 116, 71, 159, 206, 
32, 116, 71, 32, 71, 116, 116, 114, 114, 157, 73, 71, 200, 71, 75, 36, 71, 116, 71, 116, 32, 157, 116, 200, 73, 202, 243, 71, 71, 116, 32, 116, 116, 116, 71, 114, 157, 116, 71, 243, 71, 116, 116, 116, 161, 30, 75, 116, 71, 159, 116, 157, 157, 251, 243, 157, 114, 157, 251, 32, 75, 114, 161, 116, 116, 116, 114, 116, 116, 116, 167, 116, 116, 32, 243, 71, 71, 116, 157, 251, 32, 116, 157, 116, 159, 71, 157, 116, 249, 251, 32, 116, 114, 157, 116, 200, 32, 116, 114, 251, 32, 116, 157, 128, 118, 71, 116, 114, 114, 71, 73, 116, 116, 157, 71, 243, 157, 116, 32, 36, 73, 251, 32, 114, 157, 245, 116, 245, 116, 71, 71, 157, 251, 71, 247, 245, 114, 73, 200, 200, 157, 73, 116, 116, 116, 73, 116, 30, 73, 251, 32, 116, 38, 243, 116, 114, 73, 114, 71, 204, 116, 114, 114, 73, 73, 159, 202, 116, 71, 116, 116, 116, 118, 116, 116, 116, 71, 114, 243, 114, 200, 114, 73, 116, 71, 116, 32, 167, 206, 32, 116, 114, 157, 71, 157, 116, 116, 114, 116, 114, 71, 114, 116, 116, 114, 116, 30, 116, 116, 116, 71, 30, 116, 251, 32, 71, 71, 157, 71, 116, 116, 71, 202, 159, 114, 114, 114, 116, 116, 116, 114, 30, 116, 251, 32, 71, 71, 77, 116, 71, 71, 251, 73, 71, 71, 32, 36, 157, 157, 243, 116, 157, 73, 200, 71, 116, 116, 116, 157, 73, 116, 116, 200, 159, 116, 116, 116, 116, 167, 116, 116, 71, 73, 116, 71, 200, 116, 200, 73, 157, 114, 73, 157, 32, 157, 200, 30, 75, 200, 116, 200, 116, 200, 116, 30, 73, 71, 116, 159, 116, 71, 116, 202, 73, 71, 116, 71, 159, 114, 114, 116, 73, 116, 116, 116, 30, 202, 157, 116, 30, 75, 71, 36, 116, 114, 157, 71, 116, 159, 114, 71, 157, 251, 114, 200, 114, 116, 116, 157, 120, 247, 204, 114, 116, 73, 71, 73, 202, 120, 159, 159, 206, 75, 71, 200, 159, 159, 159, 116, 34, 116, 116, 159, 71, 157, 71, 71, 116, 32, 200, 71, 116, 116, 157, 206, 71, 116, 116, 116, 71, 73, 157, 116, 200, 77, 114, 116, 116, 71, 73, 36, 243, 32, 73, 116, 30, 202, 71, 71, 71, 114, 116, 116, 32, 71, 200, 202, 116, 243, 71, 116, 159, 157, 116, 118, 204, 116, 114, 157, 30, 157, 71, 32, 73, 30, 114, 116, 32, 
245, 157, 71, 157, 200, 71, 116, 157, 36, 36, 116, 157, 116, 116, 116, 116, 71, 116, 79, 243, 32, 30, 73, 73, 116, 116, 71, 114, 75, 30, 73, 116, 116, 116, 116, 30, 243, 159, 71, 116, 251, 116, 36, 116, 114, 159, 71, 36, 71, 243, 159, 71, 116, 114, 116, 206, 71, 114, 157, 73, 116, 116, 71, 116, 71, 201, 201, 201, 201, 201, 201, 201, 201, 36, 36, 36, 36, 36, 116, 116, 116, 116, 116, 71, 116, 116, 116, 116, 116, 71, 116, 116, 116, 116, 116, 71, 116, 116, 116, 116, 116, 116, 116, 116, 167, 116, 116, 167, 116, 157, 71, 32, 116, 71, 116, 116, 116, 71, 116, 32, 36, 36, 36, 36, 116, 114, 157, 251, 32, 116, 71, 116, 116, 116, 71, 116, 36, 116, 114, 157, 251, 32, 116, 116, 71, 116, 116, 116, 71, 36, 116, 114, 157, 159, 116, 32, 36, 36, 36, 116, 116, 116, 116, 71, 116, 116, 116, 116, 116, 114, 116, 114, 157, 251, 32, 116, 116, 71, 116, 114, 157, 251, 32, 116, 116, 116, 116, 114, 116, 116, 116, 114, 116, 116, 114, 157, 251, 32, 116, 116, 71, 116, 114, 157, 116, 116, 114, 116, 114, 116, 116, 116, 116, 116, 116, 116, 116, 116, 71, 116, 116, 116, 116, 116, 71, 116, 32, 36, 114, 116, 73, 73, 36, 116, 114, 157, 251, 32, 116, 71, 116, 159, 36, 36, 116, 116, 116, 116, 116, 114, 116, 116, 114, 206, 71, 116, 116, 116, 116, 114, 116, 116, 114, 116, 116, 116, 116, 116, 71, 116, 114, 157, 251, 32, 157, 116, 116, 116, 71, 116, 116, 116, 116, 116, 71, 116, 32, 36, 114, 116, 73, 73, 71, 116, 114, 157, 116, 116, 116, 116, 116, 114, 116, 114, 157, 251, 32, 202, 116, 116, 116, 71, 116, 116, 116, 116, 116, 71, 116, 157, 34, 157, 116, 114, 116, 116, 118, 114, 116, 116, 116, 116, 116, 71, 116, 114, 157, 251, 32, 157, 116, 116, 71, 116, 116, 116, 116, 114, 116, 73, 73, 36, 116, 114, 157, 36, 36, 116, 116, 116, 73, 73, 36, 71, 36, 36, 36, 116, 116, 116, 116, 157, 116, 167, 116, 157, 71, 32, 116, 71, 116, 116, 71, 116, 32, 73, 71, 116, 32, 36, 30, 114, 157, 251, 32, 116, 114, 157, 116, 200, 32, 116, 71, 116, 116, 71, 116, 116, 71, 116, 114, 157, 251, 32, 116, 116, 116, 116, 114, 116, 116, 116, 32, 
73, 71, 116, 32, 116, 116, 116, 116, 159, 71, 157, 116, 114, 116, 116, 118, 114, 71, 71, 206, 71, 116, 116, 116, 202, 116, 116, 116, 116, 116, 114, 116, 116, 116, 71, 116, 114, 157, 73, 200, 157, 200, 157, 200, 157, 200, 71, 75, 36, 32, 116, 116, 116, 116, 114, 73, 114, 73, 114, 73, 114, 73, 116, 116, 116, 157, 116, 167, 116, 157, 71, 32, 116, 71, 116, 116, 71, 116, 251, 32, 116, 114, 116, 116, 114, 206, 71, 116, 116, 202, 157, 251, 32, 116, 116, 71, 116, 200, 124, 116, 114, 157, 159, 116, 116, 116, 202, 116, 116, 116, 116, 116, 114, 159, 118, 114, 71, 71, 206, 71, 116, 116, 116, 116, 116, 116, 71, 73, 114, 116, 116, 116, 71, 116, 32, 157, 73, 30, 157, 116, 114, 157, 251, 32, 157, 159, 202, 116, 73, 73, 116, 114, 157, 116, 116, 116, 116, 116, 114, 116, 114, 157, 251, 116, 71, 116, 116, 116, 116, 116, 114, 116, 114, 157, 251, 32, 200, 116, 116, 116, 116, 116, 159, 118, 114, 71, 71, 206, 116, 116, 116, 159, 71, 157, 157, 249, 116, 116, 116, 116, 116, 114, 159, 32, 157, 116, 116, 116, 71, 116, 116, 116, 116, 116, 71, 36, 116, 116, 116, 116, 116, 71, 116, 32, 157, 73, 116, 116, 116, 116, 114, 32, 73, 71, 116, 202, 157, 116, 116, 116, 116, 73, 114, 116, 116, 118, 73, 116, 116, 116, 157, 116, 167, 36, 71, 36, 36, 32, 157, 159, 202, 116, 32, 71, 116, 116, 116, 71, 36, 116, 114, 157, 116, 71, 73, 116, 157, 71, 32, 116, 114, 157, 251, 32, 114, 157, 159, 116, 243, 116, 73, 73, 71, 116, 114, 157, 161, 157, 251, 116, 71, 116, 116, 200, 114, 159, 30, 116, 116, 116, 71, 116, 157, 34, 157, 116, 200, 116, 116, 116, 116, 71, 71, 206, 116, 73, 114, 116, 116, 118, 204, 116, 116, 116, 120, 30, 114, 116, 73, 114, 116, 116, 118, 116, 36, 116, 114, 157, 116, 114, 157, 251, 32, 114, 157, 159, 206, 71, 116, 116, 200, 71, 116, 157, 34, 116, 116, 116, 71, 36, 116, 116, 116, 116, 206, 116, 73, 114, 116, 71, 116, 116, 116, 116, 71, 116, 159, 116, 116, 116, 114, 116, 116, 116, 116, 116, 116, 71, 116, 114, 75, 36, 32, 116, 116, 116, 116, 114, 116, 114, 116, 114, 116, 200, 116, 116, 116, 114, 
157, 200, 157, 116, 167, 116, 157, 71, 32, 116, 116, 116, 71, 36, 116, 114, 157, 116, 71, 73, 116, 157, 71, 32, 116, 114, 157, 251, 114, 116, 73, 73, 159, 202, 116, 71, 116, 116, 71, 116, 32, 71, 118, 116, 116, 71, 116, 116, 157, 114, 116, 116, 116, 116, 167, 116, 116, 71, 73, 116, 157, 71, 32, 116, 114, 30, 159, 116, 116, 116, 157, 114, 116, 114, 116, 116, 116, 116, 116, 116, 71, 116, 114, 116, 32, 157, 73, 71, 116, 36, 116, 114, 157, 251, 32, 116, 116, 157, 251, 32, 32, 157, 71, 116, 114, 75, 36, 32, 116, 116, 116, 116, 114, 116, 116, 116, 116, 71, 116, 157, 34, 204, 116, 114, 157, 251, 243, 116, 200, 116, 116, 116, 114, 157, 200, 157, 116, 251, 32, 157, 116, 116, 71, 116, 116, 114, 157, 116, 114, 116, 116, 118, 116, 116, 116, 116, 116, 71, 116, 116, 157, 114, 157, 71, 32, 116, 114, 157, 116, 114, 116, 116, 118, 73, 159, 202, 116, 71, 116, 116, 71, 116, 32, 71, 118, 116, 116, 71, 116, 116, 157, 114, 116, 116, 116, 116, 167, 116, 116, 71, 116, 116, 118, 114, 71, 71, 206, 30, 32, 116, 116, 114, 116, 200, 71, 116, 157, 34, 116, 116, 116, 116, 116, 116, 159, 116, 116, 116, 114, 116, 116, 116, 116, 247, 116, 116, 116, 116, 116, 116, 116, 116, 114, 83, 251, 32, 116, 116, 116, 116, 114, 36, 116, 116, 116, 116, 71, 116, 116, 116, 200, 36, 116, 114, 32, 116, 116, 71, 116, 116, 116, 116, 71, 71, 157, 116, 116, 120, 30, 114, 116, 114, 157, 116, 114, 116, 116, 71, 116, 114, 116, 71, 116, 157, 34, 157, 116, 114, 116, 116, 118, 114, 116, 116, 161, 159, 118, 114, 71, 73, 32, 116, 116, 116, 114, 71, 243, 116, 116, 116, 116, 200, 73, 157, 245, 32, 116, 116, 116, 71, 36, 116, 114, 114, 73, 114, 73, 116, 116, 118, 114, 71, 114, 157, 251, 114, 73, 73, 159, 202, 116, 71, 116, 116, 116, 116, 114, 32, 73, 71, 116, 202, 157, 116, 116, 116, 116, 73, 114, 116, 120, 30, 114, 116, 36, 116, 114, 157, 251, 32, 116, 116, 157, 251, 32, 32, 245, 114, 73, 114, 159, 30, 32, 116, 116, 114, 116, 200, 71, 116, 157, 34, 116, 71, 71, 71, 116, 116, 116, 116, 114, 73, 157, 159, 206, 71, 114, 73, 71, 206, 
71, 116, 116, 71, 32, 116, 114, 157, 116, 114, 116, 116, 116, 157, 116, 167, 116, 116, 116, 116, 116, 200, 73, 157, 245, 32, 116, 116, 32, 114, 73, 114, 116, 116, 116, 116, 120, 157, 32, 116, 71, 116, 116, 159, 202, 116, 73, 73, 116, 157, 159, 251, 32, 116, 114, 116, 116, 114, 206, 71, 116, 200, 71, 116, 116, 71, 116, 251, 157, 159, 202, 116, 71, 116, 116, 116, 71, 36, 116, 114, 157, 116, 116, 116, 157, 114, 116, 114, 116, 116, 114, 36, 36, 116, 116, 116, 116, 157, 116, 114, 157, 251, 32, 114, 243, 243, 71, 36, 32, 116, 36, 116, 116, 116, 116, 116, 114, 157, 251, 32, 202, 116, 71, 36, 116, 114, 157, 71, 71, 116, 116, 116, 116, 114, 73, 157, 159, 206, 71, 71, 116, 71, 116, 159, 36, 36, 116, 116, 116, 157, 114, 71, 73, 116, 157, 71, 32, 116, 114, 157, 251, 32, 116, 116, 159, 71, 157, 116, 30, 71, 73, 71, 116, 116, 114, 157, 116, 114, 116, 73, 116, 114, 83, 251, 32, 71, 157, 71, 32, 116, 71, 116, 116, 71, 116, 32, 204, 116, 71, 116, 116, 71, 200, 159, 202, 116, 71, 159, 116, 157, 34, 116, 71, 71, 116, 116, 116, 116, 114, 73, 157, 159, 206, 71, 114, 73, 71, 206, 116, 118, 114, 71, 114, 157, 251, 114, 73, 73, 159, 202, 116, 71, 116, 116, 114, 116, 120, 30, 73, 71, 116, 200, 243, 71, 116, 159, 159, 116, 116, 159, 71, 157, 116, 30, 71, 73, 71, 116, 116, 114, 157, 116, 114, 116, 116, 200, 36, 36, 36, 32, 116, 116, 251, 32, 202, 116, 71, 36, 32, 116, 116, 116, 200, 200, 114, 159, 30, 116, 251, 32, 71, 157, 71, 34, 157, 116, 200, 116, 71, 116, 116, 157, 116, 114, 116, 108, 123, 108, 123, 123, 108, 108, 108, 123, 108, 123, 108, 108, 108, 108, 123, 108, 108, 180, 41, 8, 23, 108, 108, 108, 108, 138, 108, 108, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 
114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 
114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 
22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 114, 114, 114, 114, 22, 22, 22, 22, 22, 114, 114, 114, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 174, 174, 174, 209, 36, 36, 36, 36, 36, 114, 114, 114, 114, 114, 114, 114, 36, 36, 36, 209, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 209, 36, 126, 36, 36, 114, 114, 114, 114, 36, 36, 36, 209, 209, 36, 36, 36, 36, 22, 22, 22, 22, 22, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 209, 36, 36, 36, 36, 209, 36, 36, 36, 36, 
36, 209, 36, 36, 36, 36, 36, 36, 36, 36, 36, 22, 22, 22, 36, 36, 36, 36, 36, 209, 36, 36, 114, 114, 114, 209, 36, 209, 36, 209, 36, 36, 36, 36, 36, 36, 36, 36, 36, 209, 36, 36, 36, 36, 209, 36, 126, 209, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 209, 36, 36, 36, 126, 36, 36, 36, 36, 36, 36, 126, 36, 36, 126, 36, 36, 36, 209, 209, 36, 36, 36, 36, 36, 36, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 78, 195, 245, 59, 217, 14, 252, 44, 26, 12, 204, 59, 197, 142, 29, 197, 226, 175, 153, 217, 212, 98, 72, 19, 172, 89, 208, 43, 201, 108, 168, 138, 233, 88, 222, 65, 209, 31, 219, 186, 171, 126, 201, 32, 28, 155, 152, 86, 47, 225, 214, 5, 41, 87, 218, 40, 219, 12, 182, 245, 144, 149, 218, 88, 138, 18, 23, 157, 207, 218, 63, 171, 193, 220, 201, 220, 193, 66, 238, 192, 83, 47, 65, 43, 37, 76, 151, 159, 213, 58, 37, 142, 81, 216, 39, 134, 47, 167, 133, 158, 180, 122, 162, 227, 84, 83, 104, 58, 235, 206, 64, 122, 174, 54, 91, 80, 157, 33, 21, 120, 18, 118, 61, 122, 48, 22, 122, 144, 76, 233, 62, 
147, 232, 105, 138, 217, 233, 34, 205, 1, 210, 151, 77, 116, 61, 101, 184, 187, 203, 254, 62, 66, 234, 95, 67, 203, 31, 0, 6, 116, 8, 5, 0, 172, 228, 252, 22, 1, 242, 247, 126, 255, 13, 38, 145, 189, 161, 254, 56, 54, 70, 189, 110, 93, 52, 210, 113, 108, 149, 244, 54, 218, 169, 194, 160, 86, 227, 129, 134, 5, 107, 215, 232, 14, 253, 136, 195, 147, 239, 137, 214, 91, 174, 242, 172, 88, 7, 210, 14, 143, 136, 24, 154, 75, 33, 176, 210, 70, 183, 31, 146, 243, 123, 21, 158, 57, 149, 174, 191, 143, 169, 216, 31, 127, 98, 246, 166, 161, 32, 139, 196, 150, 150, 233, 124, 156, 67, 90, 169, 246, 181, 149, 119, 46, 103, 140, 15, 131, 209, 103, 172, 27, 198, 170, 71, 36, 56, 251, 205, 158, 106, 154, 9, 15, 48, 104, 237, 222, 140, 34, 198, 60, 198, 206, 247, 49, 238, 93, 65, 170, 87, 188, 183, 166, 154, 43, 49, 44, 83, 44, 26, 31, 92, 234, 67, 174, 144, 124, 121, 73, 143, 176, 3, 197, 50, 239, 110, 84, 164, 161, 253, 22, 226, 89, 138, 67, 205, 63, 128, 230, 207, 115, 81, 221, 70, 2, 196, 162, 214, 11, 89, 150, 202, 169, 247, 60, 117, 142, 243, 203, 85, 96, 41, 16, 47, 121, 182, 51, 172, 125, 180, 120, 79, 86, 233, 209, 186, 81, 154, 174, 9, 60, 57, 166, 190, 107, 179, 121, 100, 150, 119, 181, 198, 45, 36, 184, 66, 30, 54, 239, 14, 196, 71, 248, 184, 166, 252, 8, 132, 40, 235, 170, 99, 124, 174, 165, 24, 171, 186, 186, 191, 94, 44, 188, 204, 62, 219, 192, 30, 143, 20, 58, 67, 14, 86, 52, 15, 253, 142, 137, 61, 24, 171, 220, 177, 194, 133, 56, 202, 110, 202, 120, 66, 22, 97, 110, 169, 180, 172, 47, 96, 12, 102, 122, 108, 214, 183, 41, 82, 108, 192, 10, 159, 104, 26, 152, 127, 122, 112, 217, 180, 2, 186, 60, 107, 206, 108, 78, 78, 16, 113, 232, 230, 68, 17, 153, 65, 90, 243, 116, 169, 69, 112, 24, 220, 170, 146, 19, 208, 89, 129, 30, 50, 138, 81, 223, 3, 98, 114, 203, 91, 32, 143, 28, 186, 234, 116, 62, 81, 142, 177, 54, 9, 189, 132, 14, 102, 72, 203, 1, 27, 55, 91, 212, 180, 209, 187, 201, 206, 64, 219, 24, 143, 74, 197, 247, 220, 21, 137, 80, 64, 225, 174, 57, 189, 78, 198, 143, 
133, 216, 148, 115, 7, 197, 144, 102, 167, 163, 153, 182, 77, 141, 238, 82, 220, 1, 115, 153, 153, 26, 118, 50, 158, 189, 61, 111, 182, 52, 0, 246, 213, 245, 7, 123, 25, 118, 148, 224, 206, 243, 34, 204, 206, 128, 154, 116, 235, 229, 255, 193, 254, 111, 130, 0, 27, 55, 94, 251, 78, 70, 28, 63, 170, 14, 92, 159, 199, 218, 131, 89, 124, 242, 87, 184, 158, 222, 48, 208, 246, 49, 78, 33, 143, 251, 211, 195, 1, 239, 206, 47, 37, 12, 65, 56, 142, 238, 112, 172, 107, 188, 139, 122, 64, 54, 11, 71, 220, 255, 223, 63, 27, 206, 129, 204, 56, 32, 34, 63, 84, 78, 245, 199, 48, 143, 181, 186, 45, 94, 191, 157, 19, 58, 22, 214, 244, 209, 106, 82, 68, 225, 65, 134, 253, 7, 230, 159, 233, 58, 48, 173, 136, 28, 73, 116, 245, 37, 179, 20, 16, 121, 42, 124, 171, 214, 109, 244, 117, 149, 244, 19, 222, 121, 26, 175, 115, 188, 213, 190, 142, 190, 137, 188, 22, 30, 127, 227, 46, 147, 128, 195, 230, 207, 76, 47, 182, 172, 171, 47, 202, 172, 37, 207, 93, 0, 226, 20, 79, 121, 103, 129, 196, 103, 57, 170, 91, 124, 22, 133, 69, 231, 34, 99, 249, 99, 245, 21, 47, 249, 194, 148, 91, 201, 104, 162, 167, 171, 169, 200, 67, 160, 91, 170, 144, 92, 240, 71, 243, 255, 137, 169, 179, 92, 6, 147, 91, 73, 177, 72, 111, 16, 111, 118, 153, 5, 234, 238, 60, 164, 140, 122, 131, 86, 27, 105, 140, 212, 106, 180, 235, 45, 89, 148, 161, 73, 168, 12, 124, 72, 110, 3, 202, 247, 1, 195, 183, 79, 200, 56, 41, 46, 175, 35, 54, 91, 28, 179, 104, 109, 10, 227, 252, 211, 182, 118, 100, 165, 25, 202, 101, 64, 113, 202, 103, 96, 7, 95, 107, 242, 203, 184, 239, 190, 152, 109, 149, 84, 64, 60, 40, 238, 30, 109, 0, 116, 45, 47, 101, 236, 170, 39, 210, 219, 139, 79, 44, 240, 215, 198, 3, 143, 128, 218, 121, 130, 34, 207, 138, 224, 129, 35, 49, 123, 222, 56, 203, 110, 218, 250, 142, 70, 50, 213, 139, 49, 236, 74, 8, 16, 149, 157, 143, 154, 131, 133, 193, 103, 225, 241, 251, 85, 187, 170, 195, 92, 119, 91, 90, 114, 191, 215, 79, 177, 109, 161, 214, 43, 32, 31, 126, 74, 214, 160, 48, 11, 48, 99, 10, 195, 4, 179, 82, 237, 188, 
194, 136, 79, 41, 90, 190, 18, 166, 54, 139, 93, 16, 81, 32, 193, 142, 162, 93, 75, 48, 182, 94, 77, 73, 114, 219, 229, 241, 49, 121, 253, 145, 102, 200, 227, 179, 51, 251, 157, 77, 121, 243, 54, 157, 135, 197, 231, 133, 126, 153, 118, 114, 183, 64, 205, 88, 218, 90, 34, 200, 234, 176, 196, 218, 125, 108, 214, 161, 81, 194, 225, 32, 96, 180, 3, 163, 94, 54, 146, 115, 130, 93, 71, 16, 23, 161, 121, 71, 68, 39, 31, 194, 141, 241, 24, 254, 217, 189, 66, 148, 202, 14, 96, 226, 101, 238, 131, 116, 38, 160, 251, 98, 96, 136, 195, 178, 88, 217, 18, 38, 221, 81, 66, 193, 24, 204, 170, 151, 86, 193, 225, 236, 134, 71, 167, 83, 205, 94, 160, 143, 93, 187, 210, 228, 215, 55, 209, 239, 48, 203, 125, 101, 173, 8, 92, 81, 54, 74, 157, 213, 69, 85, 63, 221, 158, 169, 58, 171, 104, 60, 148, 243, 74, 161, 48, 65, 154, 56, 186, 220, 192, 48, 15, 74, 70, 45, 255, 157, 237, 130, 36, 247, 202, 27, 0, 169, 76, 76, 16, 199, 57, 14, 206, 47, 182, 45, 109, 245, 50, 25, 61, 200, 240, 198, 85, 246, 173, 109, 188, 66, 53, 47, 61, 48, 18, 174, 185, 77, 43, 89, 172, 20, 85, 112, 129, 220, 29, 23, 103, 202, 202, 202, 202, 202, 202, 202, 202, 178, 179, 178, 196, 109, 109, 109, 109, 109, 109, 109, 225, 225, 178, 148, 196, 179, 178, 178, 114, 225, 85, 109, 109, 109, 109, 225, 178, 56, 109, 109, 109, 254, 109, 109, 109, 109, 27, 193, 58, 109, 109, 109, 254, 114, 109, 109, 109, 109, 109, 109, 109, 109, 109, 109, 109, 109, 109, 109, 109, 109, 109, 109, 178, 178, 109, 109, 85, 196, 254, 254, 225, 172, 196, 172, 71, 85, 56, 254, 172, 172, 196, 225, 114, 56, 143, 196, 196, 177, 27, 254, 27, 32, 230, 225, 3, 27, 254, 90, 114, 85, 196, 254, 196, 225, 172, 148, 56, 143, 114, 27, 56, 85, 225, 8, 90, 196, 114, 230, 27, 196, 230, 56, 56, 172, 225, 254, 172, 196, 201, 178, 180, 178, 178, 178, 109, 109, 254, 225, 196, 27, 27, 225, 201, 196, 254, 225, 225, 56, 225, 27, 254, 56, 13, 27, 196, 254, 196, 225, 90, 225, 206, 254, 254, 225, 56, 225, 225, 254, 3, 254, 230, 27, 196, 172, 196, 56, 254, 32, 56, 85, 196, 
143, 230, 27, 196, 201, 254, 27, 56, 114, 114, 225, 196, 225, 143, 254, 254, 196, 254, 196, 172, 143, 201, 230, 196, 196, 225, 196, 32, 254, 201, 143, 196, 235, 196, 196, 56, 114, 56, 153, 85, 109, 109, 109, 27, 85, 56, 196, 196, 196, 225, 225, 196, 8, 254, 90, 225, 143, 225, 254, 27, 8, 196, 27, 196, 114, 196, 230, 196, 254, 61, 85, 114, 3, 27, 32, 3, 254, 196, 56, 114, 143, 114, 85, 172, 90, 254, 172, 206, 196, 196, 56, 37, 61, 196, 32, 225, 27, 85, 172, 196, 114, 61, 196, 85, 148, 114, 225, 196, 225, 56, 56, 196, 225, 114, 27, 225, 196, 230, 201, 27, 27, 196, 254, 56, 254, 114, 56, 196, 85, 254, 56, 56, 143, 56, 196, 254, 143, 172, 85, 27, 225, 225, 196, 225, 114, 90, 225, 196, 27, 196, 172, 114, 85, 196, 27, 201, 56, 172, 225, 61, 27, 254, 90, 85, 225, 196, 225, 56, 196, 114, 201, 56, 172, 254, 225, 27, 196, 56, 254, 201, 172, 85, 148, 254, 27, 196, 56, 61, 143, 56, 56, 61, 201, 196, 148, 196, 114, 27, 114, 143, 196, 114, 225, 32, 225, 85, 254, 225, 196, 230, 196, 143, 56, 109, 109, 109, 148, 254, 27, 196, 254, 225, 225, 254, 3, 27, 225, 235, 254, 254, 114, 27, 61, 196, 61, 230, 201, 172, 196, 27, 196, 196, 196, 254, 172, 225, 254, 196, 114, 254, 114, 114, 254, 85, 196, 225, 230, 196, 85, 85, 196, 85, 119, 56, 114, 27, 196, 56, 254, 196, 27, 225, 85, 196, 56, 230, 225, 225, 196, 27, 71, 254, 196, 27, 201, 114, 225, 119, 254, 225, 230, 56, 225, 225, 172, 27, 27, 172, 196, 56, 225, 196, 196, 177, 119, 119, 225, 95, 27, 225, 27, 114, 85, 206, 196, 119, 196, 196, 254, 230, 172, 201, 196, 196, 27, 114, 225, 56, 196, 225, 196, 27, 196, 143, 254, 85, 56, 143, 119, 225, 114, 56, 225, 114, 196, 225, 85, 230, 225, 225, 254, 254, 254, 56, 225, 143, 254, 196, 172, 225, 201, 254, 196, 196, 3, 85, 235, 254, 3, 254, 27, 172, 196, 172, 85, 56, 254, 225, 225, 143, 32, 172, 143, 114, 27, 196, 143, 27, 85, 85, 254, 254, 254, 85, 148, 143, 225, 196, 196, 56, 148, 3, 254, 254, 56, 56, 172, 114, 85, 56, 85, 90, 85, 109, 109, 109, 109, 95, 37, 254, 225, 225, 27, 172, 114, 196, 230, 
201, 196, 85, 27, 85, 85, 254, 254, 230, 177, 254, 225, 3, 114, 172, 196, 254, 114, 196, 27, 225, 61, 201, 114, 27, 177, 114, 172, 85, 196, 27, 27, 225, 56, 153, 225, 27, 61, 143, 196, 27, 85, 27, 56, 143, 172, 3, 196, 225, 196, 196, 85, 143, 27, 196, 56, 254, 56, 225, 196, 225, 56, 56, 196, 27, 56, 148, 196, 114, 114, 225, 27, 225, 85, 27, 172, 109, 109, 109, 14, 230, 238, 182, 252, 109, 245, 37, 24, 187, 152, 238, 184, 132, 6, 43, 149, 236, 145, 152, 59, 58, 211, 73, 221, 16, 203, 163, 221, 235, 5, 133, 107, 39, 127, 52, 44, 253, 35, 83, 20, 225, 120, 17, 10, 89, 24, 157, 230, 218, 15, 51, 54, 93, 133, 26, 156, 74, 66, 81, 39, 225, 198, 192, 196, 239, 145, 226, 94, 83, 106, 202, 60, 128, 178, 180, 180, 236, 248, 207, 102, 54, 68, 141, 223, 27, 101, 55, 99, 46, 204, 114, 160, 173, 105, 90, 180, 18, 183, 239, 237, 198, 23, 173, 185, 229, 137, 223, 216, 146, 255, 140, 198, 172, 238, 94, 4, 237, 98, 20, 131, 222, 211, 218, 241, 192, 176, 80, 237, 118, 88, 228, 18, 143, 234, 236, 4, 8, 239, 36, 4, 249, 146, 65, 105, 117, 110, 171, 148, 172, 196, 114, 214, 76, 1, 16, 185, 178, 54, 205, 117, 33, 96, 251, 187, 54, 168, 157, 80, 94, 61, 18, 42, 123, 69, 118, 17, 58, 146, 238, 116, 252, 141, 38, 37, 226, 250, 115, 169, 102, 38, 3, 186, 144, 78, 169, 193, 54, 80, 93, 66, 246, 214, 53, 151, 22, 65, 153, 156, 102, 38, 227, 92, 27, 216, 74, 160, 146, 185, 233, 67, 77, 91, 208, 78, 96, 129, 35, 188, 71, 10, 31, 62, 24, 57, 202, 52, 56, 227, 110, 107, 232, 64, 228, 81, 243, 88, 53, 21, 142, 32, 56, 26, 170, 113, 252, 137, 118, 44, 69, 214, 242, 57, 181, 250, 223, 93, 66, 77, 141, 224, 216, 200, 21, 146, 217, 25, 85, 214, 161, 137, 132, 157, 71, 196, 132, 178, 39, 119, 154, 112, 218, 84, 124, 202, 214, 217, 106, 159, 180, 109, 220, 130, 2, 190, 6, 85, 183, 134, 145, 39, 137, 49, 176, 205, 141, 158, 56, 79, 238, 21, 142, 0, 24, 83, 94, 11, 18, 5, 193, 231, 111, 198, 212, 46, 81, 221, 159, 76, 90, 172, 197, 128, 143, 132, 170, 191, 252, 181, 207, 3, 160, 142, 183, 219, 13, 226, 17, 
254, 57, 171, 253, 26, 166, 90, 104, 231, 180, 55, 97, 101, 163, 213, 230, 201, 27, 77, 138, 80, 204, 85, 115, 161, 238, 184, 132, 184, 115, 66, 133, 226, 115, 229, 101, 6, 255, 39, 153, 225, 27, 61, 134, 145, 185, 9, 62, 184, 205, 109, 224, 83, 219, 37, 225, 32, 157, 157, 157, 157, 157, 157, 168, 56, 196, 85, 254, 56, 56, 157, 190, 157, 157, 157, 157, 157, 179, 168, 157, 157, 157, 157, 157, 168, 157, 190, 157, 157, 157, 157, 168, 157, 157, 157, 157, 168, 157, 157, 157, 157, 168, 157, 157, 168, 157, 190, 157, 157, 168, 157, 157, 157, 168, 157, 157, 168, 179, 168, 157, 190, 168, 157, 168, 157, 157, 168, 157, 157, 168, 157, 157, 179, 157, 157, 157, 168, 168, 179, 168, 168, 157, 168, 234, 157, 157, 157, 157, 196, 143, 56, 157, 157, 168, 157, 179, 157, 157, 179, 168, 157, 168, 157, 157, 157, 157, 157, 157, 157, 168, 190, 157, 168, 157, 157, 157, 157, 157, 157, 179, 157, 157, 168, 157, 168, 157, 157, 157, 157, 157, 157, 168, 179, 168, 168, 157, 190, 168, 168, 157, 157, 157, 157, 168, 157, 157, 157, 157, 157, 157, 157, 168, 157, 168, 234, 179, 157, 168, 157, 168, 168, 157, 168, 168, 157, 168, 157, 157, 168, 157, 168, 157, 179, 168, 179, 168, 157, 157, 157, 168, 168, 168, 157, 190, 157, 157, 201, 168, 168, 157, 157, 157, 157, 157, 157, 168, 157, 157, 157, 157, 157, 179, 157, 168, 157, 179, 168, 157, 157, 157, 157, 157, 157, 157, 157, 157, 157, 157, 179, 157, 168, 157, 157, 157, 157, 179, 179, 168, 143, 32, 172, 143, 114, 168, 168, 157, 179, 157, 168, 157, 168, 212, 157, 157, 157, 157, 179, 157, 157, 157, 157, 157, 157, 157, 157, 157, 168, 157, 157, 179, 157, 157, 157, 146, 65, 105, 117, 110, 171, 148, 172, 157, 157, 157, 179, 157, 157, 157, 168, 157, 61, 196, 61, 93, 2, 2, 2, 2, 2, 2, 2, 93, 2, 2, 2, 2, 93, 2, 2, 2, 2, 2, 2, 2, 2, 2, 93, 93, 2, 93, 2, 2, 2, 2, 2, 2, 2, 2, 2, 19, 2, 2, 2, 2, 2, 2, 2, 93, 2, 2, 93, 2, 93, 254, 143, 172, 85, 27, 225, 225, 196, 225, 114, 90, 225, 196, 27, 196, 2, 2, 2, 2, 2, 2, 93, 2, 2, 2, 2, 184, 2, 2, 2, 2, 2, 2, 2, 93, 2, 2, 19, 2, 184, 
2, 2, 2, 93, 2, 2, 2, 2, 2, 2, 2, 2, 2, 93, 93, 2, 2, 93, 2, 2, 2, 2, 2, 2, 93, 93, 2, 2, 2, 2, 2, 2, 218, 141, 141, 15, 206, 141, 141, 81, 81, 81, 81, 81, 81, 81, 81, 81, 81, 81, 81, 81, 141, 15, 141, 141, 141, 206, 141, 206, 141, 15, 81, 81, 81, 15, 141, 171, 145, 63, 141, 206, 195, 134, 207, 9, 141, 141, 141, 87, 101, 60, 163, 213, 58, 17, 206, 141, 129, 8, 195, 134, 207, 9, 143, 92, 248, 19, 181, 253, 45, 179, 118, 108, 252, 81, 102, 204, 141, 141, 141, 141, 81, 81, 81, 81, 141, 141, 141, 206, 200, 193, 141, 141, 141, 154, 141, 141, 206, 206, 206, 141, 141, 141, 141, 141, 141, 206, 141, 141, 81, 81, 81, 81, 81, 81, 81, 81, 81, 141, 141, 141, 15, 206, 180, 154, 94, 184, 141, 206, 141, 81, 81, 81, 81, 81, 81, 81, 141, 81, 81, 174, 231, 156, 81, 81, 81, 141, 141, 141, 15, 141, 206, 206, 206, 141, 141, 141, 141, 81, 81, 81, 81, 206, 141, 141, 206, 141, 141, 141, 141, 206, 141, 141, 206, 34, 215, 151, 182, 29, 237, 141, 141, 141, 206, 81, 81, 81, 81, 81, 81, 81, 206, 206, 141, 141, 141, 141, 141, 141, 81, 81, 81, 81, 206, 141, 81, 81, 81, 206, 206, 206, 141, 141, 81, 81, 81, 81, 206, 81, 81, 81, 81, 141, 141, 141, 141, 141, 141, 141, 141, 141, 15, 206, 141, 81, 81, 81, 81, 141, 141, 206, 141, 141, 141, 206, 141, 141, 81, 81, 81, 81, 141, 81, 141, 141, 81, 81, 81, 81, 81, 81, 141, 141, 81, 81, 81, 81, 81, 141, 206, 81, 81, 81, 81, 81, 81, 141, 141, 141, 141, 81, 141, 81, 81, 141, 206, 206, 14, 233, 141, 141, 141, 141, 81, 174, 231, 141, 206, 206, 141, 206, 81, 81, 81, 81, 141, 81, 81, 81, 81, 81, 81, 81, 206, 141, 81, 81, 81, 81, 81, 81, 81, 81, 81, 81, 141, 141, 15, 141, 141, 206, 141, 141, 15, 141, 141, 81, 81, 81, 81, 81, 141, 206, 81, 141, 141, 141, 141, 141, 141, 206, 141, 141, 141, 141, 141, 141, 81, 81, 81, 81, 206, 81, 81, 81, 81, 81, 81, 81, 81, 81, 206, 141, 141, 81, 81, 81, 81, 81, 81, 81, 81, 81, 81, 81, 81, 81, 141, 206, 81, 81, 81, 81, 81, 81, 81, 81, 141, 206, 141, 141, 141, 81, 206, 15, 206, 206, 141, 81, 81, 81, 206, 206, 141, 141, 141, 81, 81, 81, 
81, 141, 81, 81, 81, 81, 156, 81, 81, 206, 141, 141, 141, 141, 141, 206, 81, 81, 81, 81, 81, 81, 81, 81, 81, 81, 81, 141, 141, 81, 141, 81, 81, 81, 141, 206, 141, 141, 206, 141, 81, 141, 141, 155, 15, 141, 206, 141, 141, 81, 206, 206, 141, 141, 141, 141, 141, 141, 81, 141, 141, 141, 81, 81, 81, 141, 141, 141, 81, 81, 81, 81, 81, 81, 141, 81, 81, 81, 81, 81, 141, 141, 141, 206, 206, 81, 81, 81, 206, 206, 206, 14, 141, 206, 206, 141, 141, 141, 141, 81, 81, 81, 81, 206, 81, 81, 81, 81, 81, 81, 81, 141, 81, 81, 141, 206, 141, 141, 141, 141, 141, 141, 141, 206, 141, 188, 102, 121, 141, 141, 141, 141, 81, 81, 81, 81, 141, 141, 206, 141, 141, 141, 141, 154, 141, 141, 141, 253, 45, 179, 118, 108, 81, 81, 81, 141, 141, 141, 81, 81, 81, 81, 206, 81, 81, 81, 81, 81, 81, 81, 141, 34, 215, 151, 182, 29, 237, 141, 141, 81, 81, 81, 81, 81, 81, 141, 206, 206, 141, 141, 141, 206, 141, 81, 141, 206, 141, 141, 206, 141, 141, 15, 206, 195, 134, 207, 9, 141, 141, 141, 141, 141, 141, 15, 206, 141, 141, 81, 81, 81, 81, 81, 81, 141, 206, 14, 141, 206, 206, 141, 141, 141, 206, 141, 141, 141, 141, 81, 81, 81, 141, 15, 141, 141, 81, 81, 141, 141, 141, 141, 141, 141, 141, 206, 141, 141, 81, 206, 206, 141, 141, 141, 141, 141, 141, 141, 141, 81, 81, 81, 81, 81, 81, 81, 81, 141, 206, 141, 141, 141, 81, 206, 15, 206, 206, 141, 81, 141, 81, 81, 81, 141, 71, 52, 224, 141, 206, 206, 141, 141, 141, 141, 206, 141, 141, 141, 141, 81, 81, 81, 81, 206, 81, 81, 81, 81, 141, 141, 81, 81, 81, 81, 81, 81, 141, 141, 141, 141, 141, 81, 81, 81, 81, 141, 34, 215, 151, 182, 29, 141, 141, 141, 141, 141, 141, 141, 141, 206, 206, 206, 141, 141, 81, 81, 81, 81, 206, 81, 81, 81, 81, 141, 206, 145, 48, 188, 102, 179, 125, 86, 50, 141, 206, 206, 141, 80, 141, 141, 141, 141, 81, 81, 206, 81, 206, 206, 206, 141, 141, 141, 206, 141, 81, 141, 206, 15, 141, 206, 141, 141, 141, 141, 141, 206, 81, 81, 81, 206, 141, 81, 81, 81, 81, 141, 141, 141, 206, 141, 206, 141, 141, 141, 81, 141, 141, 141, 206, 206, 81, 81, 81, 206, 141, 
141, 141, 87, 141, 141, 141, 141, 141, 81, 81, 81, 81, 81, 81, 141, 141, 141, 141, 206, 206, 141, 141, 22, 108, 141, 81, 81, 206, 219, 206, 141, 141, 141, 81, 81, 81, 81, 141, 206, 141, 141, 141, 141, 141, 206, 141, 141, 141, 141, 141, 81, 81, 81, 81, 81, 81, 206, 80, 141, 141, 141, 154, 141, 141, 141, 141, 141, 206, 108, 141, 81, 81, 81, 81, 81, 141, 141, 141, 81, 81, 81, 81, 141, 141, 15, 141, 141, 206, 141, 141, 15, 141, 141, 141, 210, 141, 146, 205, 141, 141, 141, 81, 81, 81, 81, 81, 81, 141, 141, 141, 141, 206, 141, 141, 141, 141, 81, 81, 81, 81, 141, 141, 81, 26, 2, 74, 22, 141, 141, 141, 141, 206, 141, 141, 141, 206, 141, 81, 81, 81, 206, 81, 141, 141, 81, 81, 81, 81, 2, 74, 22, 141, 141, 141, 141, 141, 141, 141, 81, 81, 81, 81, 206, 141, 141, 206, 141, 206, 206, 141, 141, 206, 141, 141, 141, 141, 15, 206, 141, 141, 206, 195, 134, 207, 81, 81, 81, 81, 81, 141, 141, 141, 80, 141, 141, 141, 141, 141, 141, 15, 206, 141, 81, 81, 81, 141, 141, 141, 141, 141, 15, 141, 206, 141, 141, 206, 141, 141, 81, 141, 141, 81, 15, 141, 141, 81, 81, 141, 141, 141, 206, 81, 141, 141, 206, 141, 141, 141, 81, 81, 81, 81, 81, 141, 141, 141, 141, 80, 141, 81, 81, 141, 141, 81, 26, 81, 81, 81, 81, 81, 81, 206, 206, 15, 141, 206, 206, 206, 141, 141, 141, 141, 81, 141, 141, 81, 81, 81, 206, 141, 141, 141, 141, 141, 15, 141, 141, 141, 206, 141, 141, 141, 141, 141, 141, 141, 141, 206, 141, 141, 141, 141, 141, 15, 206, 141, 141, 141, 141, 141, 141, 141, 141, 141, 206, 206, 206, 141, 206, 206, 141, 206, 206, 141, 141, 15, 206, 141, 141, 141, 141, 141, 141, 141, 141, 141, 141, 141, 141, 206, 81, 141, 141, 206, 81, 141, 206, 15, 206, 141, 141, 141, 141, 141, 206, 141, 141, 141, 81, 81, 81, 206, 141, 141, 141, 141, 141, 141, 141, 206, 206, 141, 145, 141, 206, 141, 141, 141, 141, 141, 141, 141, 15, 141, 141, 141, 15, 141, 141, 141, 206, 15, 141, 15, 141, 141, 81, 206, 206, 141, 141, 206, 206, 141, 141, 141, 141, 141, 141, 141, 81, 81, 81, 81, 81, 141, 71, 52, 206, 141, 141, 141, 141, 141, 
141, 141, 141, 141, 141, 141, 141, 15, 141, 206, 141, 15, 141, 206, 15, 141, 145, 141, 206, 141, 15, 206, 206, 141, 141, 206, 206, 15, 141, 90, 85, 85, 172, 177, 85, 85, 3, 85, 85, 172, 85, 172, 85, 172, 95, 3, 85, 90, 85, 85, 85, 85, 85, 90, 3, 3, 85, 172, 85, 90})
fuzzDicts = append(fuzzDicts, []byte{55, 164, 48, 236, 138, 0, 161, 125, 86, 16, 40, 236, 140, 3, 47, 193, 140, 70, 28, 182, 7, 161, 23, 22, 85, 86, 191, 89, 56, 91, 146, 91, 161, 14, 129, 104, 70, 13, 6, 2, 45, 70, 140, 216, 244, 227, 121, 51, 72, 89, 196, 209, 112, 91, 156, 111, 124, 247, 9, 184, 37, 34, 169, 208, 189, 40, 42, 142, 68, 37, 223, 251, 58, 70, 175, 34, 3, 96, 84, 91, 65, 210, 8, 206, 72, 68, 203, 10, 217, 238, 138, 136, 8, 33, 1, 243, 0, 32, 12, 10, 135, 5, 131, 194, 185, 72, 44, 33, 150, 15, 4, 64, 1, 175, 38, 20, 218, 232, 18, 49, 200, 97, 8, 25, 99, 136, 33, 4, 0, 0, 0, 0, 0, 0, 0, 2, 0, 2, 8, 0, 0, 0, 164, 203, 233, 101, 228, 185, 76, 28, 134, 73, 16, 83, 10, 41, 132, 12, 13, 201, 12, 0, 0, 0, 1, 0, 0, 0, 4, 0, 0, 0, 8, 0, 0, 0, 85, 85, 3, 172, 3, 172, 85, 3, 3, 149, 149, 149, 105, 161, 89, 141, 224, 175, 149, 149, 149, 149, 149, 149, 149, 149, 149, 149, 149, 149, 149, 149, 153, 149, 149, 149, 149, 149, 149, 149, 87, 149, 149, 62, 149, 149, 149, 149, 149, 149, 76, 149, 186, 172, 172, 149, 149, 149, 149, 149, 149, 149, 149, 149, 149, 149, 149, 37, 81, 161, 33, 149, 149, 149, 149, 231, 149, 79, 84, 149, 149, 248, 119, 6, 177, 149, 149, 149, 149, 149, 149, 238, 149, 149, 149, 178, 100, 124, 149, 149, 178, 100, 149, 149, 149, 203, 149, 149, 149, 149, 149, 149, 84, 149, 149, 149, 149, 0, 149, 149, 149, 149, 149, 37, 81, 161, 207, 149, 149, 149, 149, 149, 149, 149, 78, 224, 149, 149, 149, 149, 106, 85, 85, 85, 85, 85, 85, 85, 90, 177, 85, 177, 85, 8, 177, 85, 85, 3, 85, 85, 85, 85, 172, 85, 95, 172, 85, 85, 3, 172, 85, 8, 172, 85, 172, 85, 3, 177, 172, 3, 90, 172, 85, 172, 177, 85, 172, 85, 85, 85, 172, 172, 172, 172, 177, 85, 85, 85, 172, 3, 3, 182, 172, 172, 85, 90, 85, 3, 172, 3, 172, 85, 90, 3, 8, 85, 172, 172, 85, 90, 182, 90, 85, 85, 85, 95, 90, 85, 85, 182, 85, 85, 172, 85, 90, 177, 90, 172, 3, 172, 3, 172, 172, 90, 85, 85, 85, 177, 85, 85, 172, 85, 85, 85, 172, 85, 90, 3, 3, 85, 3, 172, 85, 85, 85, 85, 172, 172, 85, 90, 172, 172, 3, 172, 85, 
13, 85, 3, 3, 172, 3, 13, 3, 172, 172, 3, 85, 85, 85, 85, 95, 90, 90, 177, 85, 95, 172, 85, 85, 3, 85, 85, 172, 85, 8, 85, 3, 85, 85, 172, 85, 85, 85, 85, 3, 85, 177, 85, 85, 3, 3, 172, 95, 172, 172, 8, 85, 172, 177, 172, 172, 172, 85, 8, 85, 8, 85, 85, 85, 3, 85, 85, 3, 172, 85, 3, 172, 85, 85, 3, 172, 172, 85, 85, 172, 177, 85, 254, 254, 254, 254, 72, 71, 72, 72, 254, 254, 254, 254, 254, 254, 254, 254, 254, 254, 254, 254, 254, 254, 254, 254, 254, 254, 254, 254, 254, 254, 254, 8, 254, 254, 254, 254, 254, 254, 254, 254, 254, 254, 9, 5, 254, 254, 254, 254, 254, 254, 72, 72, 254, 254, 254, 254, 254, 254, 254, 254, 254, 254, 71, 72, 72, 254, 254, 254, 254, 254, 72, 71, 71, 72, 254, 0, 254, 254, 254, 254, 254, 254, 254, 254, 254, 254, 69, 72, 70, 72, 68, 72, 72, 72, 254, 254, 254, 9, 254, 14, 11, 7, 72, 71, 70, 72, 72, 72, 70, 254, 254, 254, 254, 254, 254, 254, 254, 254, 254, 72, 71, 69, 72, 72, 72, 71, 71, 72, 70, 72, 72, 71, 72, 72, 72, 69, 71, 72, 71, 72, 71, 254, 254, 254, 254, 254, 254, 254, 254, 72, 72, 71, 71, 6, 6, 7, 14, 6, 254, 71, 72, 5, 254, 254, 9, 11, 7, 8, 2, 254, 254, 254, 254, 254, 254, 72, 71, 254, 6, 254, 254, 11, 9, 7, 2, 254, 254, 254, 72, 72, 69, 72, 72, 72, 72, 72, 71, 72, 72, 71, 67, 72, 72, 72, 71, 72, 71, 71, 11, 13, 254, 72, 72, 72, 72, 72, 72, 70, 70, 72, 71, 72, 69, 71, 72, 68, 71, 72, 72, 72, 72, 72, 70, 71, 72, 72, 72, 72, 69, 72, 70, 71, 72, 72, 72, 71, 72, 254, 12, 2, 254, 254, 12, 2, 13, 254, 11, 13, 254, 6, 254, 71, 72, 5, 254, 254, 9, 8, 14, 5, 254, 254, 254, 3, 14, 12, 8, 10, 254, 254, 254, 254, 254, 11, 13, 254, 254, 254, 254, 254, 5, 254, 254, 209, 209, 209, 209, 209, 209, 209, 209, 209, 209, 209, 209, 209, 209, 209, 209, 209, 209, 209, 209, 209, 209, 209, 209, 209, 209, 209, 209, 209, 209, 209, 209, 209, 209, 209, 209, 209, 209, 209, 209, 209, 209, 209, 209, 209, 209, 209, 209, 209, 209, 209, 209, 209, 209, 209, 209, 209, 209, 209, 209, 209, 209, 146, 2, 53, 55, 10, 25, 104, 80, 22, 7, 249, 6, 180, 255, 141, 218, 159, 36, 6, 7, 
14, 7, 14, 7, 14, 7, 14, 9, 14, 7, 14, 9, 14, 7, 14, 7, 14, 7, 14, 4, 7, 14, 7, 14, 7, 1, 9, 14, 7, 14, 7, 11, 14, 4, 7, 14, 7, 7, 14, 9, 14, 7, 7, 7, 14, 9, 7, 7, 7, 14, 9, 6, 11, 14, 7, 14, 4, 10, 11, 14, 4, 7, 14, 7, 7, 14, 9, 14, 7, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 7, 14, 9, 7, 7, 7, 14, 7, 6, 4, 7, 14, 7, 7, 14, 9, 14, 4, 7, 14, 7, 7, 14, 9, 14, 7, 7, 7, 7, 7, 14, 7, 7, 14, 13, 14, 6, 4, 7, 14, 7, 7, 14, 9, 9, 9, 9, 9, 0, 10, 11, 14, 9, 9, 14, 7, 3, 7, 11, 14, 4, 7, 14, 7, 7, 7, 14, 9, 7, 7, 7, 1, 13, 7, 7, 7, 6, 5, 2, 7, 14, 7, 14, 9, 14, 7, 7, 14, 9, 3, 14, 14, 14, 14, 7, 3, 12, 15, 11, 14, 7, 14, 13, 5, 4, 10, 11, 14, 4, 5, 2, 9, 9, 0, 10, 11, 11, 1, 9, 14, 7, 7, 7, 14, 9, 7, 7, 7, 14, 9, 7, 7, 7, 14, 7, 6, 4, 7, 14, 7, 7, 14, 9, 14, 4, 7, 7, 2, 1, 9, 14, 14, 7, 7, 14, 9, 7, 3, 7, 14, 7, 150, 150, 150, 9, 14, 7, 14, 9, 150, 150, 150, 150, 150, 150, 14, 9, 14, 7, 7, 14, 21, 9, 7, 7, 4, 5, 2, 9, 150, 21, 150, 150, 150, 150, 7, 14, 9, 6, 11, 150, 150, 150, 150, 150, 150, 14, 4, 7, 14, 7, 7, 7, 14, 9, 7, 7, 14, 14, 14, 150, 150, 150, 21, 150, 150, 150, 150, 150, 7, 7, 7, 14, 7, 150, 150, 150, 21, 21, 150, 7, 14, 9, 150, 21, 150, 150, 150, 150, 150, 150, 150, 150, 150, 21, 14, 9, 6, 11, 150, 150, 150, 150, 1, 9, 14, 7, 150, 150, 150, 150, 150, 21, 150, 7, 14, 9, 150, 150, 150, 150, 9, 7, 3, 7, 14, 7, 150, 150, 150, 150, 150, 150, 150, 150, 150, 14, 7, 7, 7, 14, 9, 150, 150, 150, 150, 150, 150, 4, 7, 14, 7, 150, 150, 150, 150, 150, 150, 150, 150, 150, 21, 150, 7, 14, 9, 6, 150, 150, 150, 150, 150, 9, 150, 3, 7, 14, 21, 150, 150, 21, 150, 21, 150, 14, 4, 7, 14, 150, 21, 150, 150, 150, 9, 150, 3, 150, 150, 150, 150, 150, 150, 150, 150, 14, 9, 14, 6, 5, 2, 7, 14, 7, 14, 150, 150, 150, 150, 150, 150, 150, 150, 3, 7, 11, 14, 4, 7, 150, 150, 150, 150, 150, 150, 150, 150, 150, 150, 150, 150, 150, 150, 21, 150, 150, 150, 150, 150, 150, 150, 150, 150, 14, 9, 150, 150, 148, 150, 150, 21, 150, 150, 150, 150, 4, 10, 11, 14, 150, 150, 150, 150, 150, 150, 14, 7, 14, 
150, 150, 150, 150, 150, 150, 150, 150, 150, 150, 150, 150, 150, 150, 150, 150, 150, 21, 21, 150, 150, 150, 21, 150, 150, 150, 150, 150, 150, 150, 148, 150, 21, 150, 148, 150, 150, 150, 150, 150, 150, 21, 150, 150, 150, 150, 150, 150, 14, 14, 14, 14, 14, 14, 14, 150, 150, 150, 21, 150, 150, 150, 21, 150, 150, 21, 150, 150, 6, 4, 7, 14, 7, 150, 150, 150, 150, 21, 150, 150, 150, 150, 150, 150, 150, 150, 150, 150, 150, 150, 150, 21, 150, 150, 150, 150, 148, 150, 150, 150, 150, 150, 150, 150, 150, 150, 150, 150, 150, 150, 150, 150, 21, 14, 14, 14, 7, 148, 150, 150, 148, 150, 150, 150, 150, 150, 150, 150, 150, 150, 150, 150, 150, 150, 150, 150, 9, 7, 7, 150, 150, 14, 9, 7, 7, 150, 21, 21, 9, 150, 150, 150, 150, 150, 150, 150, 150, 150, 150, 150, 148, 150, 150, 150, 150, 150, 150, 9, 7, 7, 7, 14, 7, 150, 150, 150, 150, 150, 150, 150, 7, 14, 9, 150, 150, 21, 14, 14, 14, 7, 150, 150, 150, 150, 150, 150, 7, 2, 1, 9, 14, 21, 150, 150, 150, 150, 14, 9, 14, 4, 7, 21, 21, 9, 14, 7, 150, 150, 150, 150, 150, 150, 7, 7, 14, 9, 150, 21, 21, 150, 21, 150, 150, 150, 21, 150, 150, 150, 150, 150, 21, 150, 150, 150, 150, 150, 150, 150, 150, 150, 150, 150, 21, 150, 150, 21, 150, 150, 150, 150, 150, 150, 150, 150, 150, 21, 150, 150, 150, 150, 150, 150, 150, 150, 150, 7, 14, 7, 7, 150, 150, 150, 150, 150, 150, 21, 150, 150, 205, 29, 91, 131, 109, 39, 93, 115, 91, 197, 93, 150, 150, 150, 150, 173, 149, 92, 163, 150, 3, 7, 14, 21, 150, 150, 150, 150, 14, 9, 14, 4, 7, 199, 114, 175, 51, 45, 33, 44, 37, 231, 72, 243, 147, 192, 115, 15, 204, 38, 65, 155, 225, 234, 156, 164, 177, 214, 60, 241, 161, 31, 28, 10, 7, 183, 147, 45, 107, 215, 94, 209, 218, 205, 7, 128, 172, 156, 45, 122, 77, 36, 190, 219, 116, 65, 102, 191, 35, 255, 177, 166, 247, 180, 128, 205, 107, 13, 105, 40, 244, 117, 92, 69, 165, 6, 32, 247, 221, 25, 229, 138, 219, 81, 8, 1, 54, 159, 181, 154, 163, 121, 239, 118, 112, 60, 3, 46, 102, 182, 191, 13, 162, 247, 194, 21, 15, 229, 0, 19, 190, 148, 18, 233, 150, 150, 150, 150, 4, 184, 
150, 1, 41, 71, 231, 205, 243, 111, 119, 226, 197, 41, 195, 26, 79, 164, 177, 214, 155, 19, 237, 25, 29, 150, 150, 14, 9, 14, 4, 186, 166, 254, 244, 69, 4, 191, 112, 248, 241, 102, 63, 194, 131, 47, 231, 193, 9, 111, 65, 102, 191, 35, 255, 177, 166, 197, 87, 241, 129, 82, 151, 173, 9, 109, 138, 150, 3, 7, 14, 21, 150, 43, 216, 146, 201, 55, 32, 196, 111, 131, 84, 153, 101, 150, 150, 150, 150, 14, 7, 7, 14, 21, 150, 150, 21, 150, 21, 150, 51, 35, 1, 9, 14, 21, 150, 150, 177, 208, 150, 150, 150, 150, 150, 14, 7, 7, 7, 14, 9, 150, 150, 116, 65, 102, 191, 35, 255, 89, 209, 78, 188, 47, 126, 170, 1, 217, 87, 122, 186, 180, 126, 122, 81, 60, 225, 89, 44, 204, 108, 250, 195, 7, 143, 212, 211, 217, 81, 129, 212, 174, 5, 2, 9, 9, 0, 10, 11, 11, 1, 9, 14, 7, 7, 7, 208, 208, 208, 208, 208, 208, 208, 208, 3, 99, 235, 235, 235, 15, 0, 0, 99, 14, 10, 14, 7, 0, 9, 2, 6, 5, 8, 3, 12, 218, 150, 99, 8, 8, 14, 6, 14, 14, 150, 8, 10, 184, 236, 201, 235, 8, 235, 167, 235, 133, 150, 201, 82, 184, 5, 82, 184, 235, 218, 167, 13, 14, 13, 235, 167, 150, 218, 218, 201, 10, 167, 201, 201, 133, 5, 7, 133, 184, 82, 3, 14, 4, 10, 13, 0, 11, 14, 218, 201, 235, 10, 5, 15, 3, 11, 9, 235, 235, 235, 9, 184, 219, 150, 167, 12, 218, 201, 218, 184, 8, 201, 99, 184, 2, 2, 6, 5, 235, 133, 1, 2, 235, 184, 31, 184, 12, 150, 167, 201, 201, 184, 201, 218, 10, 10, 6, 2, 2, 11, 3, 11, 1, 133, 116, 201, 5, 116, 167, 218, 10, 2, 235, 218, 218, 235, 31, 184, 235, 9, 15, 235, 218, 2, 1, 0, 8, 1, 5, 11, 9, 235, 133, 235, 10, 9, 11, 14, 31, 184, 65, 13, 6, 167, 218, 235, 235, 201, 235, 219, 116, 235, 201, 219, 201, 99, 8, 3, 7, 5, 6, 13, 133, 150, 218, 8, 4, 150, 218, 15, 12, 0, 11, 15, 7, 0, 201, 235, 184, 31, 235, 9, 1, 201, 235, 201, 235, 11, 6, 9, 4, 235, 235, 167, 201, 12, 218, 116, 99, 167, 116, 2, 4, 11, 5, 2, 5, 11, 235, 235, 9, 12, 13, 14, 8, 10, 184, 6, 2, 14, 7, 8, 15, 218, 184, 184, 6, 5, 99, 99, 235, 150, 5, 0, 184, 48, 9, 5, 15, 5, 201, 218, 235, 150, 150, 235, 31, 167, 150, 219, 235, 82, 12, 10, 235, 
184, 201, 9, 2, 14, 15, 14, 10, 7, 10, 4, 15, 118, 191, 228, 154, 8, 117, 228, 45, 118, 118, 118, 228, 118, 45, 228, 228, 45, 228, 45, 81, 44, 81, 154, 44, 45, 191, 8, 45, 118, 228, 228, 118, 8, 191, 191, 45, 153, 118, 45, 79, 81, 45, 228, 45, 228, 45, 80, 81, 118, 7, 45, 227, 81, 153, 154, 154, 118, 78, 116, 228, 117, 118, 228, 227, 153, 154, 118, 45, 228, 118, 228, 7, 44, 227, 118, 228, 118, 228, 8, 228, 154, 118, 154, 228, 7, 44, 45, 81, 7, 228, 81, 44, 227, 8, 189, 81, 228, 45, 45, 154, 45, 154, 44, 80, 228, 8, 45, 154, 228, 45, 118, 8, 8, 118, 228, 118, 191, 191, 117, 154, 191, 191, 228, 118, 191, 45, 228, 8, 45, 191, 227, 188, 228, 118, 228, 191, 115, 45, 118, 118, 228, 8, 191, 188, 191, 45, 81, 118, 228, 118, 191, 8, 44, 118, 44, 45, 81, 228, 45, 118, 8, 45, 7, 8, 44, 8, 228, 8, 45, 45, 191, 8, 45, 227, 7, 45, 118, 5, 45, 81, 81, 43, 45, 191, 191, 228, 118, 8, 8, 191, 45, 44, 45, 228, 45, 118, 228, 45, 228, 228, 191, 226, 8, 154, 45, 153, 8, 81, 191, 79, 117, 44, 81, 190, 12, 150, 167, 201, 201, 184, 81, 45, 227, 154, 45, 228, 228, 81, 45, 228, 118, 8, 228, 81, 154, 44, 191, 45, 8, 45, 80, 45, 228, 117, 118, 227, 43, 154, 8, 227, 45, 45, 228, 228, 118, 45, 228, 118, 228, 45, 43, 228, 228, 152, 191, 227, 154, 81, 81, 45, 228, 45, 45, 117, 45, 227, 79, 45, 228, 118, 228, 226, 154, 228, 118, 81, 228, 227, 225, 154, 8, 228, 118, 228, 8, 45, 115, 118, 7, 45, 44, 118, 118, 190, 45, 45, 117, 118, 118, 118, 191, 45, 191, 8, 228, 45, 117, 228, 228, 45, 45, 45, 191, 191, 191, 45, 227, 8, 81, 154, 45, 228, 191, 227, 227, 81, 227, 44, 117, 117, 228, 228, 118, 154, 45, 45, 45, 118, 45, 45, 117, 227, 45, 191, 118, 118, 228, 228, 228, 81, 118, 228, 154, 7, 154, 80, 228, 191, 81, 228, 8, 228, 8, 118, 227, 45, 117, 153, 5, 3, 8, 2, 201, 117, 118, 191, 45, 118, 45, 228, 228, 191, 45, 45, 228, 228, 191, 115, 227, 117, 45, 118, 154, 81, 45, 118, 154, 45, 191, 8, 8, 44, 117, 8, 228, 191, 191, 228, 45, 190, 228, 228, 228, 45, 45, 118, 118, 226, 228, 118, 81, 8, 118, 191, 45, 228, 
227, 45, 228, 154, 190, 191, 190, 228, 118, 228, 8, 117, 8, 227, 228, 81, 154, 118, 189, 118, 6, 191, 191, 45, 226, 228, 118, 118, 227, 191, 191, 191, 8, 8, 191, 8, 118, 228, 45, 81, 45, 45, 8, 228, 118, 8, 45, 191, 191, 8, 45, 118, 154, 228, 118, 81, 45, 45, 8, 45, 45, 45, 228, 191, 81, 153, 8, 191, 8, 191, 227, 190, 227, 228, 118, 228, 227, 228, 45, 191, 117, 118, 228, 45, 191, 8, 44, 45, 191, 191, 8, 8, 228, 118, 154, 118, 228, 45, 118, 228, 118, 117, 45, 228, 227, 118, 228, 228, 45, 228, 8, 118, 228, 228, 191, 81, 201, 218, 235, 235, 45, 154, 44, 153, 45, 228, 118, 118, 190, 8, 5, 235, 133, 1, 2, 191, 228, 228, 227, 227, 228, 228, 252, 252, 252, 252, 252, 225, 225, 228, 7, 44, 45, 252, 252, 0, 9, 2, 6, 5, 8, 3, 12, 218, 150, 99, 8, 252, 252, 235, 201, 218, 13, 235, 184, 167, 133, 218, 201, 4, 235, 218, 116, 48, 201, 184, 82, 184, 253, 235, 184, 201, 167, 133, 235, 116, 201, 218, 99, 218, 65, 99, 235, 167, 201, 218, 252, 252, 252, 15, 14, 14, 235, 150, 167, 8, 252, 225, 252, 184, 201, 218, 235, 201, 235, 235, 235, 9, 184, 219, 150, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 235, 9, 1, 201, 235, 201, 235, 11, 252, 235, 184, 184, 235, 235, 218, 235, 225, 252, 252, 198, 116, 2, 4, 11, 5, 2, 252, 236, 202, 218, 82, 235, 235, 167, 116, 225, 252, 225, 252, 252, 117, 8, 228, 191, 252, 252, 218, 235, 218, 184, 167, 133, 133, 201, 167, 235, 167, 201, 184, 167, 48, 218, 201, 236, 252, 252, 45, 228, 8, 45, 191, 227, 188, 7, 184, 235, 218, 167, 235, 235, 235, 14, 15, 167, 116, 201, 184, 184, 235, 99, 235, 252, 225, 252, 252, 252, 225, 252, 133, 184, 201, 184, 235, 82, 201, 167, 235, 225, 252, 252, 48, 9, 0, 11, 252, 252, 252, 252, 7, 184, 201, 218, 235, 201, 4, 252, 167, 201, 235, 14, 235, 201, 184, 218, 201, 201, 219, 201, 99, 116, 31, 201, 99, 133, 235, 1, 2, 11, 9, 9, 12, 116, 218, 235, 14, 3, 5, 218, 184, 236, 5, 184, 184, 184, 1, 252, 252, 252, 252, 252, 133, 167, 235, 235, 184, 235, 184, 167, 184, 150, 48, 218, 133, 219, 31, 201, 218, 235, 236, 235, 218, 167, 
167, 133, 133, 201, 167, 235, 167, 201, 184, 167, 48, 218, 201, 252, 252, 252, 252, 252, 252, 252, 225, 225, 252, 15, 14, 5, 150, 235, 99, 13, 4, 133, 31, 184, 218, 10, 201, 218, 184, 5, 252, 252, 252, 252, 184, 167, 0, 235, 252, 252, 252, 252, 252, 252, 8, 8, 191, 8, 118, 228, 45, 81, 45, 45, 8, 228, 118, 8, 45, 191, 191, 8, 45, 118, 252, 252, 252, 252, 45, 228, 118, 8, 228, 81, 154, 44, 191, 45, 198, 252, 4, 3, 12, 65, 218, 218, 252, 252, 252, 252, 235, 184, 184, 235, 201, 235, 167, 133, 235, 252, 252, 252, 198, 252, 225, 198, 252, 225, 9, 10, 184, 252, 252, 252, 252, 4, 48, 235, 99, 0, 184, 235, 184, 15, 12, 218, 167, 167, 184, 1, 10, 5, 0, 11, 3, 15, 10, 235, 235, 184, 6, 1, 2, 252, 252, 252, 252, 252, 225, 252, 252, 252, 228, 227, 45, 228, 154, 190, 191, 190, 228, 118, 228, 8, 117, 8, 235, 6, 4, 0, 252, 252, 252, 252, 225, 252, 252, 252, 225, 252, 252, 201, 184, 11, 0, 218, 133, 219, 218, 252, 252, 99, 48, 235, 235, 184, 201, 65, 235, 116, 201, 235, 201, 133, 31, 82, 235, 218, 235, 65, 48, 150, 167, 150, 116, 82, 235, 201, 201, 167, 201, 218, 150, 218, 65, 252, 184, 219, 150, 167, 12, 252, 252, 225, 252, 252, 228, 118, 191, 8, 44, 118, 44, 45, 81, 228, 45, 118, 8, 45, 7, 8, 44, 8, 228, 8, 45, 45, 191, 133, 82, 184, 235, 184, 5, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 225, 252, 252, 252, 252, 252, 167, 201, 218, 252, 235, 218, 235, 167, 167, 235, 31, 65, 235, 184, 133, 150, 2, 1, 1, 7, 201, 133, 184, 7, 1, 15, 99, 235, 201, 218, 133, 218, 99, 8, 218, 235, 133, 235, 4, 4, 184, 150, 185, 2, 4, 117, 45, 227, 79, 45, 116, 32, 157, 159, 206, 71, 36, 114, 116, 73, 159, 116, 116, 114, 71, 243, 157, 71, 73, 157, 116, 157, 73, 32, 71, 114, 114, 116, 114, 71, 114, 159, 202, 116, 200, 114, 73, 116, 200, 75, 249, 243, 116, 30, 73, 200, 71, 114, 114, 116, 114, 71, 116, 114, 159, 161, 71, 243, 71, 116, 71, 116, 243, 243, 116, 71, 71, 71, 36, 32, 116, 116, 116, 116, 73, 251, 32, 114, 77, 34, 116, 116, 116, 71, 159, 32, 243, 71, 116, 114, 116, 116, 
202, 159, 157, 116, 71, 116, 38, 243, 116, 114, 157, 116, 159, 200, 30, 114, 157, 159, 157, 243, 30, 36, 116, 32, 116, 114, 157, 114, 116, 161, 114, 114, 71, 32, 157, 73, 71, 116, 114, 157, 17, 116, 116, 116, 116, 157, 251, 32, 200, 114, 200, 243, 116, 116, 114, 157, 36, 116, 200, 116, 202, 77, 114, 71, 118, 30, 116, 32, 202, 159, 114, 114, 114, 116, 116, 157, 71, 32, 32, 71, 116, 30, 157, 200, 157, 204, 32, 36, 157, 116, 200, 118, 75, 159, 114, 71, 157, 202, 114, 114, 114, 157, 157, 116, 71, 73, 157, 116, 200, 159, 157, 116, 116, 114, 116, 243, 116, 71, 75, 116, 114, 116, 116, 71, 71, 116, 32, 116, 114, 116, 116, 208, 118, 116, 157, 116, 114, 116, 118, 77, 114, 114, 116, 157, 124, 32, 71, 71, 157, 116, 116, 120, 30, 114, 116, 114, 157, 247, 243, 116, 73, 116, 243, 159, 206, 71, 71, 36, 36, 243, 30, 73, 116, 116, 243, 118, 200, 200, 114, 159, 81, 159, 114, 30, 114, 206, 30, 32, 247, 116, 116, 71, 116, 116, 71, 32, 202, 114, 114, 157, 116, 118, 243, 71, 116, 71, 116, 116, 36, 17, 77, 157, 36, 36, 116, 159, 157, 116, 116, 34, 161, 36, 245, 116, 116, 116, 116, 32, 157, 165, 159, 71, 32, 32, 116, 116, 206, 30, 120, 114, 36, 36, 36, 159, 114, 114, 114, 200, 116, 116, 159, 157, 71, 116, 32, 157, 202, 36, 116, 157, 116, 116, 116, 114, 71, 114, 200, 202, 116, 206, 71, 116, 116, 71, 71, 116, 116, 116, 36, 114, 71, 32, 243, 32, 71, 157, 71, 200, 245, 116, 32, 71, 157, 71, 36, 32, 116, 116, 114, 157, 200, 77, 114, 114, 71, 30, 118, 32, 157, 75, 114, 116, 116, 251, 32, 116, 114, 36, 116, 36, 114, 114, 30, 71, 30, 116, 30, 114, 116, 116, 73, 116, 30, 73, 71, 30, 157, 157, 114, 116, 200, 73, 116, 157, 71, 118, 116, 32, 157, 73, 116, 116, 32, 157, 159, 243, 71, 71, 71, 210, 114, 73, 116, 159, 243, 36, 116, 114, 71, 71, 157, 71, 34, 200, 157, 200, 157, 30, 159, 202, 116, 71, 34, 114, 73, 71, 116, 71, 71, 243, 202, 116, 114, 116, 114, 159, 30, 116, 73, 157, 245, 116, 200, 114, 114, 200, 116, 114, 116, 116, 116, 200, 200, 73, 71, 114, 251, 32, 114, 157, 36, 116, 200, 157, 124, 114, 
71, 157, 251, 36, 243, 114, 157, 32, 243, 116, 116, 200, 202, 120, 159, 159, 206, 75, 200, 204, 116, 71, 71, 114, 73, 71, 200, 118, 71, 32, 116, 159, 73, 116, 116, 73, 71, 30, 157, 157, 73, 200, 200, 157, 200, 247, 243, 73, 71, 75, 36, 71, 32, 36, 157, 116, 245, 30, 73, 71, 116, 159, 116, 71, 116, 32, 157, 159, 30, 202, 32, 116, 116, 159, 32, 159, 159, 200, 32, 36, 114, 32, 71, 200, 71, 114, 243, 200, 73, 159, 243, 75, 30, 200, 32, 116, 71, 161, 116, 116, 116, 71, 159, 116, 71, 32, 159, 243, 116, 159, 118, 71, 157, 116, 36, 36, 114, 116, 32, 157, 73, 30, 73, 116, 116, 116, 157, 36, 116, 157, 202, 30, 114, 157, 251, 243, 157, 75, 114, 114, 71, 167, 116, 243, 157, 243, 157, 116, 202, 71, 32, 114, 73, 157, 116, 157, 116, 157, 71, 245, 32, 114, 116, 114, 200, 247, 243, 116, 116, 116, 32, 71, 114, 114, 157, 159, 116, 243, 32, 36, 36, 36, 243, 204, 71, 34, 159, 251, 157, 71, 32, 200, 114, 157, 73, 71, 202, 116, 114, 249, 73, 32, 157, 73, 116, 116, 157, 116, 71, 200, 73, 157, 243, 118, 114, 157, 251, 30, 116, 71, 116, 71, 116, 245, 114, 157, 243, 116, 71, 116, 114, 116, 200, 71, 114, 116, 116, 71, 202, 116, 73, 71, 116, 71, 159, 114, 71, 73, 71, 75, 116, 116, 116, 116, 116, 157, 71, 247, 245, 34, 157, 116, 114, 157, 71, 116, 200, 73, 157, 71, 157, 157, 249, 200, 116, 116, 116, 116, 71, 116, 116, 157, 157, 116, 114, 157, 116, 71, 157, 116, 71, 116, 116, 157, 116, 114, 116, 116, 30, 202, 116, 73, 30, 116, 157, 116, 114, 157, 251, 114, 159, 114, 122, 30, 32, 116, 73, 116, 157, 71, 32, 116, 116, 71, 114, 157, 116, 167, 116, 116, 30, 71, 114, 116, 243, 116, 71, 116, 73, 200, 161, 116, 30, 116, 114, 157, 114, 114, 116, 157, 243, 17, 73, 71, 116, 120, 34, 116, 202, 243, 71, 71, 116, 32, 116, 116, 116, 116, 245, 30, 71, 73, 71, 116, 71, 73, 116, 157, 71, 32, 116, 114, 157, 116, 167, 116, 32, 116, 116, 73, 116, 71, 36, 116, 32, 243, 30, 116, 200, 243, 114, 157, 159, 30, 157, 116, 118, 114, 118, 200, 206, 30, 71, 157, 32, 118, 71, 116, 71, 36, 116, 114, 157, 30, 114, 114, 116, 200, 
161, 114, 114, 73, 32, 116, 116, 116, 73, 116, 116, 71, 30, 116, 116, 116, 116, 116, 116, 157, 116, 200, 116, 116, 116, 116, 71, 71, 206, 116, 73, 114, 116, 116, 116, 116, 114, 73, 157, 114, 116, 116, 116, 116, 71, 116, 116, 71, 71, 34, 114, 32, 116, 157, 71, 116, 73, 114, 116, 116, 116, 71, 116, 243, 116, 114, 206, 71, 116, 71, 116, 157, 30, 202, 243, 32, 159, 116, 116, 71, 116, 157, 114, 71, 114, 157, 249, 116, 116, 202, 114, 75, 73, 116, 73, 71, 159, 116, 71, 116, 32, 157, 159, 30, 71, 157, 71, 32, 157, 161, 114, 30, 30, 116, 116, 71, 71, 36, 116, 245, 36, 32, 116, 30, 202, 206, 159, 30, 202, 157, 71, 159, 116, 116, 116, 157, 200, 75, 30, 30, 114, 159, 30, 71, 71, 114, 200, 200, 159, 245, 116, 116, 116, 30, 71, 30, 114, 116, 200, 71, 73, 71, 73, 116, 73, 75, 159, 206, 32, 200, 245, 34, 116, 73, 157, 114, 157, 30, 116, 114, 159, 116, 159, 243, 71, 200, 159, 202, 116, 71, 243, 243, 206, 71, 157, 200, 30, 114, 157, 159, 116, 116, 200, 245, 83, 243, 36, 114, 116, 71, 114, 116, 30, 71, 30, 71, 116, 157, 114, 118, 116, 114, 116, 116, 243, 157, 116, 114, 157, 251, 116, 116, 157, 251, 32, 32, 245, 114, 73, 114, 159, 30, 32, 116, 116, 114, 157, 116, 116, 30, 73, 71, 206, 71, 116, 116, 71, 32, 114, 159, 79, 116, 116, 116, 116, 157, 116, 167, 116, 116, 116, 116, 116, 200, 73, 114, 73, 116, 116, 243, 202, 71, 73, 32, 116, 247, 251, 32, 116, 251, 118, 30, 116, 77, 208, 157, 32, 116, 116, 116, 73, 200, 251, 116, 71, 157, 30, 157, 251, 32, 202, 202, 71, 159, 243, 159, 30, 71, 200, 71, 116, 157, 34, 116, 116, 116, 71, 36, 71, 71, 116, 159, 114, 202, 116, 116, 251, 116, 114, 206, 204, 71, 157, 159, 30, 202, 32, 116, 116, 159, 71, 30, 243, 114, 157, 114, 114, 116, 32, 157, 165, 159, 206, 32, 114, 116, 159, 159, 202, 116, 71, 116, 114, 118, 71, 114, 116, 114, 114, 243, 32, 116, 116, 71, 116, 116, 116, 116, 116, 116, 116, 71, 157, 116, 157, 71, 32, 118, 116, 116, 71, 116, 73, 114, 116, 116, 71, 116, 114, 116, 71, 116, 71, 116, 114, 157, 159, 202, 116, 200, 116, 71, 157, 118, 30, 
245, 114, 71, 30, 157, 116, 71, 71, 83, 32, 114, 116, 114, 71, 243, 245, 32, 114, 114, 157, 73, 167, 116, 114, 73, 17, 116, 202, 71, 116, 202, 116, 243, 71, 116, 159, 71, 243, 17, 210, 73, 200, 114, 116, 163, 73, 32, 116, 116, 116, 116, 157, 157, 200, 30, 73, 247, 200, 114, 157, 73, 71, 114, 157, 159, 202, 116, 114, 71, 116, 120, 157, 200, 116, 202, 157, 71, 114, 200, 77, 114, 116, 116, 71, 73, 243, 32, 73, 243, 200, 249, 116, 114, 116, 157, 71, 71, 157, 251, 36, 116, 157, 157, 243, 157, 116, 71, 73, 157, 118, 200, 116, 157, 114, 157, 71, 32, 116, 114, 157, 116, 114, 116, 116, 116, 36, 116, 71, 157, 116, 116, 114, 73, 159, 202, 116, 116, 114, 116, 116, 247, 116, 116, 204, 73, 71, 116, 32, 157, 159, 243, 71, 71, 157, 114, 116, 114, 71, 114, 114, 157, 118, 157, 30, 116, 116, 116, 116, 71, 71, 204, 116, 157, 116, 116, 114, 73, 118, 71, 157, 163, 36, 116, 116, 71, 116, 114, 200, 114, 116, 75, 200, 75, 116, 116, 116, 161, 81, 114, 116, 116, 124, 71, 116, 157, 34, 157, 116, 36, 71, 251, 243, 200, 116, 114, 202, 204, 243, 120, 157, 116, 167, 116, 32, 116, 114, 71, 157, 159, 206, 71, 116, 71, 161, 71, 200, 202, 116, 116, 116, 120, 157, 32, 116, 71, 165, 206, 116, 116, 73, 157, 114, 251, 32, 157, 32, 243, 71, 30, 114, 116, 71, 30, 157, 71, 116, 71, 116, 118, 243, 157, 116, 202, 157, 202, 73, 116, 116, 116, 114, 116, 71, 251, 243, 116, 200, 116, 71, 71, 116, 73, 71, 116, 71, 114, 157, 128, 118, 71, 116, 114, 114, 71, 73, 116, 116, 157, 71, 17, 71, 114, 157, 116, 167, 116, 32, 116, 243, 202, 157, 157, 157, 30, 116, 73, 157, 116, 71, 116, 157, 116, 114, 116, 116, 116, 116, 71, 71, 32, 116, 73, 116, 157, 200, 157, 116, 114, 116, 114, 30, 30, 116, 71, 163, 73, 116, 116, 116, 116, 114, 116, 116, 114, 73, 116, 73, 159, 116, 243, 118, 251, 247, 247, 71, 75, 30, 73, 116, 116, 116, 32, 114, 114, 71, 32, 251, 200, 202, 114, 116, 114, 116, 202, 114, 159, 17, 116, 30, 114, 114, 116, 73, 116, 71, 32, 114, 157, 251, 116, 116, 118, 114, 71, 30, 114, 34, 116, 36, 116, 114, 157, 251, 32, 
116, 116, 208, 206, 159, 251, 32, 204, 161, 71, 73, 71, 206, 114, 157, 116, 116, 243, 243, 116, 71, 116, 243, 247, 30, 30, 114, 116, 71, 116, 126, 202, 114, 116, 200, 243, 114, 157, 116, 116, 114, 116, 114, 71, 114, 116, 116, 114, 206, 114, 157, 73, 167, 116, 243, 116, 251, 32, 71, 71, 157, 71, 116, 116, 71, 200, 202, 114, 114, 116, 73, 116, 116, 116, 116, 71, 116, 243, 116, 251, 32, 116, 206, 159, 251, 32, 73, 116, 116, 157, 116, 114, 116, 116, 116, 116, 116, 116, 71, 36, 114, 116, 71, 116, 71, 202, 200, 71, 32, 116, 116, 73, 161, 30, 114, 157, 251, 114, 116, 71, 114, 116, 71, 73, 32, 116, 245, 71, 200, 75, 200, 116, 116, 116, 116, 71, 114, 116, 116, 116, 116, 116, 81, 116, 71, 114, 116, 245, 200, 116, 116, 71, 116, 157, 200, 159, 245, 116, 116, 116, 30, 71, 30, 114, 116, 157, 114, 161, 245, 167, 159, 200, 32, 73, 71, 206, 71, 114, 200, 71, 71, 200, 71, 114, 71, 73, 116, 114, 157, 251, 32, 114, 77, 200, 32, 245, 200, 202, 116, 243, 71, 116, 73, 245, 71, 245, 200, 30, 251, 32, 157, 157, 157, 32, 157, 200, 114, 71, 159, 32, 127, 200, 157, 71, 210, 245, 120, 157, 253, 73, 200, 71, 114, 157, 116, 200, 77, 114, 116, 116, 71, 73, 157, 116, 114, 116, 30, 159, 118, 116, 32, 157, 200, 157, 200, 71, 75, 73, 116, 116, 243, 157, 116, 114, 114, 114, 116, 200, 71, 245, 118, 157, 159, 71, 157, 116, 247, 30, 71, 116, 71, 30, 30, 204, 245, 157, 71, 157, 200, 71, 116, 157, 36, 116, 71, 30, 116, 116, 116, 114, 73, 116, 243, 243, 116, 73, 157, 116, 75, 114, 73, 116, 71, 71, 206, 159, 163, 157, 159, 71, 36, 159, 30, 202, 157, 116, 114, 157, 159, 206, 71, 116, 71, 116, 243, 71, 32, 71, 73, 157, 116, 114, 116, 157, 200, 30, 32, 71, 118, 114, 116, 157, 157, 116, 30, 114, 116, 116, 32, 114, 114, 71, 159, 159, 157, 36, 159, 200, 116, 202, 114, 114, 116, 71, 116, 243, 243, 116, 71, 116, 73, 114, 32, 114, 32, 245, 114, 79, 157, 200, 114, 116, 114, 243, 157, 200, 71, 157, 116, 36, 71, 116, 71, 30, 200, 157, 71, 32, 116, 114, 157, 251, 114, 116, 73, 73, 243, 200, 116, 116, 116, 114, 30, 116, 
157, 243, 157, 202, 71, 200, 159, 114, 73, 116, 116, 71, 114, 243, 71, 114, 114, 116, 114, 159, 30, 116, 30, 157, 116, 243, 200, 114, 71, 243, 157, 116, 116, 200, 71, 116, 159, 114, 204, 116, 114, 243, 161, 157, 32, 114, 73, 159, 32, 114, 114, 116, 116, 116, 200, 34, 116, 30, 116, 71, 30, 75, 116, 116, 200, 36, 116, 116, 116, 71, 157, 116, 159, 157, 32, 32, 116, 116, 71, 116, 71, 116, 116, 114, 245, 157, 251, 116, 157, 116, 32, 157, 202, 116, 114, 73, 71, 116, 202, 243, 245, 116, 251, 116, 163, 30, 114, 34, 114, 73, 71, 116, 157, 71, 243, 202, 116, 114, 116, 114, 159, 30, 116, 200, 73, 114, 202, 163, 157, 243, 159, 159, 159, 116, 32, 36, 71, 71, 30, 116, 114, 116, 116, 118, 114, 71, 114, 73, 116, 251, 32, 116, 114, 243, 206, 118, 17, 116, 159, 116, 71, 116, 71, 116, 202, 157, 116, 116, 116, 116, 34, 157, 114, 71, 200, 202, 116, 243, 71, 116, 243, 157, 251, 32, 114, 243, 243, 36, 36, 116, 114, 157, 251, 32, 116, 36, 36, 116, 114, 157, 200, 116, 71, 116, 114, 32, 159, 116, 116, 114, 116, 71, 116, 116, 116, 116, 116, 116, 116, 116, 71, 116, 114, 116, 114, 157, 251, 32, 75, 36, 36, 116, 114, 116, 200, 159, 202, 71, 116, 116, 77, 200, 30, 163, 157, 161, 114, 114, 73, 73, 71, 116, 30, 114, 116, 116, 116, 116, 71, 200, 159, 165, 159, 206, 32, 114, 159, 75, 200, 116, 243, 71, 116, 73, 114, 116, 245, 200, 116, 71, 116, 116, 200, 118, 200, 71, 71, 36, 116, 73, 73, 116, 116, 71, 114, 75, 30, 73, 116, 116, 116, 116, 30, 243, 159, 71, 116, 251, 116, 36, 116, 114, 159, 71, 36, 71, 243, 159, 71, 116, 114, 116, 206, 71, 114, 157, 73, 116, 116, 71, 116, 71, 114, 243, 32, 73, 73, 30, 71, 116, 71, 116, 167, 116, 157, 157, 32, 30, 114, 157, 116, 71, 114, 157, 200, 116, 114, 114, 116, 73, 116, 116, 159, 114, 114, 116, 73, 114, 116, 206, 36, 116, 73, 200, 116, 73, 114, 114, 36, 114, 32, 114, 114, 157, 114, 114, 71, 71, 206, 114, 17, 116, 116, 73, 30, 157, 116, 116, 116, 116, 71, 245, 116, 30, 73, 71, 159, 116, 71, 116, 202, 73, 71, 116, 71, 206, 116, 73, 114, 36, 71, 116, 71, 116, 114, 
118, 116, 157, 120, 71, 116, 32, 116, 73, 73, 247, 114, 204, 116, 114, 204, 116, 114, 116, 116, 116, 77, 116, 114, 114, 200, 114, 116, 116, 243, 159, 114, 36, 36, 73, 116, 116, 71, 116, 73, 71, 71, 71, 71, 36, 32, 116, 116, 116, 159, 116, 30, 116, 157, 116, 116, 71, 30, 243, 206, 200, 114, 73, 114, 73, 116, 251, 32, 116, 114, 243, 206, 71, 114, 73, 71, 114, 114, 157, 114, 114, 114, 114, 157, 116, 114, 243, 114, 116, 200, 161, 157, 116, 114, 71, 245, 30, 159, 32, 200, 116, 118, 243, 116, 73, 71, 157, 71, 116, 116, 114, 200, 114, 157, 71, 32, 116, 71, 116, 157, 114, 116, 114, 116, 71, 116, 116, 157, 116, 116, 116, 73, 32, 157, 116, 116, 71, 116, 116, 116, 116, 114, 116, 73, 73, 157, 114, 157, 71, 32, 114, 32, 157, 116, 200, 75, 116, 36, 36, 116, 116, 245, 116, 71, 30, 73, 30, 116, 243, 114, 71, 114, 116, 116, 116, 116, 116, 116, 30, 249, 116, 116, 73, 71, 116, 71, 116, 114, 73, 71, 116, 36, 36, 36, 157, 157, 73, 116, 116, 116, 167, 116, 116, 157, 243, 157, 116, 116, 116, 163, 30, 36, 32, 116, 36, 116, 71, 157, 116, 114, 116, 157, 200, 200, 200, 114, 118, 32, 73, 116, 114, 73, 243, 30, 73, 71, 116, 159, 116, 71, 116, 32, 157, 159, 30, 202, 32, 116, 116, 159, 71, 30, 243, 114, 157, 114, 114, 116, 32, 157, 165, 159, 206, 32, 114, 116, 159, 159, 202, 116, 71, 116, 114, 118, 71, 114, 116, 114, 114, 243, 32, 116, 116, 71, 116, 116, 116, 116, 116, 116, 116, 71, 157, 116, 157, 71, 32, 118, 71, 114, 114, 32, 116, 114, 157, 75, 202, 161, 114, 114, 71, 157, 243, 114, 243, 73, 245, 116, 114, 71, 30, 157, 71, 118, 71, 118, 204, 116, 114, 157, 30, 75, 243, 161, 75, 114, 116, 159, 116, 36, 116, 30, 159, 71, 116, 116, 114, 157, 243, 243, 71, 116, 157, 36, 36, 116, 159, 114, 200, 114, 157, 32, 114, 251, 243, 71, 200, 71, 118, 114, 116, 157, 116, 114, 157, 71, 159, 34, 75, 206, 71, 116, 116, 71, 71, 116, 116, 116, 36, 114, 71, 32, 243, 32, 71, 157, 71, 200, 245, 116, 32, 71, 157, 71, 36, 32, 116, 116, 114, 157, 200, 77, 114, 114, 71, 30, 118, 32, 157, 75, 114, 116, 116, 116, 73, 159, 
116, 116, 243, 116, 116, 167, 116, 71, 116, 116, 157, 114, 116, 200, 73, 116, 71, 116, 116, 71, 116, 116, 116, 71, 71, 32, 116, 73, 116, 157, 71, 32, 157, 116, 71, 73, 81, 159, 114, 30, 157, 30, 114, 32, 71, 71, 116, 116, 118, 116, 116, 114, 159, 30, 116, 73, 30, 116, 116, 114, 116, 116, 71, 71, 116, 32, 116, 114, 116, 122, 243, 77, 34, 247, 243, 163, 159, 157, 71, 116, 30, 200, 200, 200, 157, 251, 118, 30, 116, 77, 208, 157, 200, 114, 118, 116, 32, 243, 247, 157, 157, 73, 114, 36, 36, 36, 114, 251, 32, 202, 116, 71, 36, 200, 116, 116, 116, 116, 157, 114, 73, 71, 245, 157, 71, 243, 202, 157, 71, 116, 243, 116, 30, 161, 200, 73, 202, 200, 116, 120, 157, 116, 73, 200, 116, 71, 75, 200, 71, 71, 116, 32, 200, 30, 116, 202, 116, 116, 157, 114, 116, 116, 114, 157, 116, 114, 157, 161, 32, 116, 116, 71, 157, 118, 204, 71, 71, 116, 243, 116, 73, 159, 159, 71, 116, 71, 116, 116, 116, 114, 75, 116, 202, 71, 159, 71, 32, 73, 243, 200, 202, 116, 71, 159, 71, 32, 73, 114, 157, 251, 32, 157, 157, 116, 114, 30, 116, 116, 116, 73, 79, 157, 243, 71, 116, 167, 30, 202, 32, 118, 71, 114, 32, 116, 114, 157, 251, 114, 114, 32, 116, 114, 73, 71, 71, 114, 116, 114, 116, 114, 116, 30, 32, 116, 202, 71, 157, 157, 249, 116, 116, 116, 116, 157, 114, 116, 243, 116, 71, 200, 71, 116, 116, 167, 71, 114, 116, 116, 118, 114, 71, 114, 116, 36, 71, 36, 36, 32, 157, 116, 200, 32, 200, 116, 73, 159, 243, 159, 116, 71, 114, 73, 71, 73, 71, 116, 71, 32, 118, 114, 161, 116, 114, 30, 114, 114, 114, 116, 116, 118, 116, 116, 243, 116, 116, 116, 245, 157, 200, 157, 116, 114, 116, 116, 157, 251, 114, 116, 114, 200, 200, 114, 159, 159, 159, 206, 243, 32, 116, 116, 116, 116, 30, 36, 165, 159, 206, 32, 114, 159, 75, 200, 116, 73, 245, 71, 251, 114, 116, 114, 251, 200, 159, 114, 71, 114, 200, 202, 163, 116, 245, 114, 71, 208, 157, 77, 71, 114, 200, 114, 32, 114, 114, 114, 116, 73, 116, 116, 200, 114, 75, 116, 114, 36, 116, 114, 116, 30, 200, 75, 243, 159, 251, 116, 116, 116, 157, 243, 36, 114, 116, 116, 36, 36, 
116, 114, 157, 251, 30, 116, 71, 116, 116, 116, 71, 116, 36, 116, 114, 243, 118, 75, 116, 116, 116, 71, 71, 208, 159, 251, 32, 116, 157, 251, 32, 157, 116, 32, 157, 249, 157, 251, 32, 116, 32, 247, 157, 32, 36, 114, 116, 73, 116, 71, 116, 32, 36, 36, 36, 36, 116, 114, 116, 71, 116, 116, 116, 116, 71, 116, 159, 116, 116, 36, 36, 116, 116, 116, 116, 116, 71, 116, 200, 71, 114, 71, 116, 30, 120, 114, 36, 36, 36, 36, 116, 32, 116, 114, 157, 114, 116, 114, 71, 208, 206, 159, 251, 32, 116, 114, 116, 116, 116, 116, 116, 116, 116, 116, 120, 157, 32, 116, 71, 116, 157, 165, 159, 206, 32, 116, 116, 159, 71, 116, 71, 116, 36, 116, 114, 243, 159, 71, 116, 114, 116, 116, 114, 200, 116, 157, 116, 36, 71, 116, 251, 116, 116, 114, 116, 71, 116, 116, 116, 116, 116, 116, 116, 116, 71, 116, 159, 206, 71, 116, 30, 116, 116, 206, 163, 116, 116, 116, 159, 73, 116, 30, 73, 71, 116, 116, 116, 32, 36, 36, 116, 114, 116, 116, 157, 116, 114, 114, 116, 202, 71, 159, 157, 32, 116, 71, 71, 243, 114, 114, 116, 73, 116, 200, 157, 157, 73, 73, 161, 116, 71, 116, 114, 116, 116, 73, 204, 116, 114, 157, 71, 116, 116, 114, 116, 30, 73, 71, 116, 159, 116, 116, 116, 159, 71, 34, 157, 116, 200, 116, 71, 116, 157, 116, 116, 71, 71, 30, 114, 200, 118, 116, 32, 157, 73, 116, 71, 116, 116, 157, 116, 114, 116, 116, 118, 116, 116, 116, 116, 71, 116, 116, 30, 114, 122, 159, 202, 116, 243, 116, 116, 114, 114, 161, 114, 206, 71, 116, 116, 202, 157, 243, 247, 36, 71, 245, 71, 251, 114, 159, 159, 34, 38, 243, 116, 200, 200, 251, 75, 157, 157, 73, 157, 243, 202, 243, 157, 116, 116, 243, 116, 114, 116, 114, 157, 200, 30, 114, 116, 116, 159, 157, 75, 200, 73, 79, 200, 157, 200, 243, 116, 114, 116, 159, 157, 116, 114, 30, 114, 116, 116, 116, 245, 200, 71, 157, 157, 114, 157, 251, 32, 161, 157, 116, 159, 159, 116, 116, 200, 71, 32, 73, 116, 116, 71, 114, 114, 73, 30, 116, 116, 116, 118, 116, 157, 71, 116, 157, 206, 77, 71, 30, 249, 32, 114, 73, 114, 251, 32, 251, 200, 202, 114, 116, 114, 116, 202, 114, 159, 114, 116, 
116, 120, 116, 30, 71, 206, 116, 118, 73, 71, 116, 116, 114, 73, 73, 159, 202, 116, 71, 116, 116, 116, 120, 30, 73, 71, 116, 200, 243, 71, 243, 202, 243, 73, 116, 159, 71, 202, 159, 116, 116, 36, 36, 116, 157, 73, 206, 251, 116, 200, 36, 157, 116, 245, 116, 116, 114, 120, 157, 32, 116, 114, 73, 202, 114, 157, 251, 32, 116, 245, 71, 116, 159, 30, 71, 114, 159, 79, 30, 71, 73, 114, 71, 71, 202, 116, 116, 116, 114, 157, 159, 30, 30, 116, 34, 116, 116, 36, 157, 116, 167, 116, 71, 116, 200, 71, 116, 159, 114, 204, 116, 114, 243, 161, 157, 32, 114, 73, 159, 32, 114, 114, 116, 116, 116, 200, 34, 116, 30, 116, 71, 30, 75, 116, 116, 71, 114, 73, 159, 73, 157, 118, 169, 116, 30, 116, 75, 159, 116, 73, 116, 116, 73, 202, 114, 73, 73, 114, 157, 251, 200, 73, 159, 202, 73, 159, 36, 36, 243, 157, 34, 36, 114, 116, 116, 71, 200, 159, 71, 200, 157, 157, 116, 245, 116, 243, 243, 71, 116, 116, 157, 116, 114, 116, 200, 200, 114, 118, 116, 114, 116, 157, 159, 116, 118, 114, 118, 71, 116, 114, 114, 71, 114, 75, 114, 32, 71, 200, 75, 71, 73, 116, 32, 247, 114, 116, 30, 71, 32, 200, 116, 114, 30, 75, 243, 71, 116, 202, 200, 200, 71, 206, 159, 251, 30, 157, 114, 116, 157, 114, 116, 116, 116, 71, 116, 32, 157, 71, 116, 200, 157, 200, 116, 71, 157, 118, 204, 71, 243, 116, 159, 159, 206, 116, 116, 71, 71, 206, 71, 114, 157, 73, 116, 116, 71, 116, 71, 159, 161, 114, 114, 71, 71, 206, 116, 200, 116, 114, 157, 251, 32, 116, 116, 157, 251, 32, 32, 114, 159, 32, 157, 116, 157, 71, 32, 116, 71, 116, 116, 116, 71, 159, 114, 71, 116, 200, 71, 116, 116, 157, 71, 32, 118, 71, 71, 116, 32, 73, 71, 116, 200, 116, 71, 116, 116, 116, 116, 116, 116, 71, 116, 73, 157, 32, 157, 200, 30, 75, 200, 116, 200, 116, 200, 116, 30, 73, 71, 116, 159, 116, 71, 116, 202, 73, 71, 116, 71, 159, 114, 114, 116, 73, 116, 116, 116, 30, 202, 157, 116, 71, 118, 116, 32, 157, 73, 32, 116, 114, 157, 253, 30, 116, 116, 71, 116, 32, 157, 73, 116, 157, 71, 32, 71, 116, 116, 157, 245, 116, 116, 71, 116, 159, 114, 202, 116, 116, 251, 
32, 116, 114, 206, 204, 71, 116, 71, 114, 114, 36, 116, 116, 116, 116, 202, 71, 159, 32, 116, 114, 157, 251, 32, 114, 157, 159, 116, 243, 116, 73, 73, 71, 116, 114, 157, 161, 157, 75, 122, 73, 71, 71, 116, 116, 71, 116, 71, 116, 157, 251, 32, 116, 245, 71, 116, 116, 116, 71, 71, 116, 116, 114, 114, 116, 243, 116, 202, 116, 73, 73, 116, 114, 251, 32, 157, 159, 202, 116, 116, 116, 116, 30, 116, 116, 200, 114, 157, 116, 243, 116, 73, 73, 71, 116, 71, 71, 157, 200, 71, 116, 32, 36, 73, 159, 116, 157, 200, 32, 36, 36, 36, 36, 116, 36, 114, 114, 116, 116, 71, 114, 157, 36, 116, 71, 159, 114, 114, 116, 157, 114, 116, 71, 30, 116, 30, 30, 202, 116, 116, 157, 71, 116, 116, 157, 116, 114, 114, 71, 243, 206, 71, 157, 243, 116, 157, 157, 116, 73, 243, 159, 157, 124, 116, 114, 157, 157, 251, 243, 157, 114, 157, 251, 32, 116, 116, 159, 71, 157, 116, 247, 116, 71, 116, 36, 116, 71, 157, 116, 157, 116, 114, 157, 73, 116, 116, 200, 159, 116, 38, 157, 116, 116, 30, 73, 71, 206, 71, 116, 159, 157, 159, 202, 73, 202, 159, 120, 36, 116, 116, 118, 157, 116, 114, 71, 159, 114, 116, 118, 114, 116, 116, 73, 116, 116, 157, 116, 116, 116, 30, 114, 71, 157, 200, 116, 116, 116, 116, 73, 30, 34, 36, 36, 116, 200, 30, 32, 116, 114, 206, 243, 71, 114, 157, 251, 116, 116, 114, 200, 202, 251, 32, 116, 73, 114, 114, 30, 243, 116, 159, 71, 116, 120, 157, 32, 116, 243, 159, 30, 32, 206, 116, 116, 116, 30, 73, 200, 116, 200, 157, 243, 247, 159, 206, 204, 116, 116, 116, 71, 200, 159, 116, 116, 116, 116, 116, 243, 116, 116, 116, 245, 116, 71, 116, 157, 34, 243, 71, 116, 116, 157, 159, 200, 32, 116, 116, 114, 159, 71, 157, 71, 32, 157, 161, 114, 30, 30, 34, 159, 163, 247, 159, 114, 245, 114, 32, 200, 157, 159, 200, 245, 114, 204, 116, 114, 157, 200, 200, 116, 116, 116, 202, 116, 116, 30, 116, 114, 157, 116, 243, 114, 200, 243, 75, 165, 200, 245, 116, 34, 73, 157, 157, 116, 71, 114, 157, 116, 118, 243, 71, 116, 71, 71, 116, 116, 71, 114, 32, 116, 81, 116, 71, 159, 30, 245, 157, 167, 36, 114, 116, 114, 30, 
116, 116, 114, 243, 116, 161, 200, 116, 73, 32, 116, 157, 114, 36, 157, 157, 73, 116, 116, 116, 159, 157, 116, 247, 122, 114, 159, 116, 159, 36, 243, 114, 73, 34, 73, 249, 71, 36, 243, 114, 116, 73, 116, 157, 159, 30, 116, 116, 114, 157, 114, 114, 206, 71, 243, 73, 114, 157, 114, 116, 71, 116, 114, 116, 116, 114, 114, 114, 159, 202, 116, 32, 157, 116, 73, 116, 71, 200, 202, 157, 71, 32, 30, 116, 71, 32, 243, 157, 73, 116, 251, 32, 116, 114, 243, 114, 114, 116, 157, 36, 34, 118, 120, 157, 114, 73, 114, 71, 251, 30, 200, 114, 73, 114, 73, 114, 44, 116, 116, 114, 157, 200, 116, 36, 157, 157, 200, 34, 157, 116, 200, 116, 71, 116, 157, 116, 114, 116, 116, 114, 157, 32, 116, 114, 114, 34, 71, 32, 116, 71, 116, 71, 116, 116, 71, 116, 116, 116, 116, 116, 114, 116, 200, 200, 247, 36, 116, 114, 114, 116, 114, 200, 116, 116, 116, 114, 157, 116, 71, 243, 243, 114, 30, 116, 32, 116, 116, 243, 114, 243, 116, 157, 161, 114, 114, 73, 73, 71, 116, 30, 71, 116, 116, 159, 116, 32, 34, 243, 157, 114, 71, 116, 157, 247, 30, 71, 245, 243, 116, 126, 245, 116, 71, 200, 75, 249, 243, 116, 30, 73, 200, 200, 116, 116, 167, 116, 116, 167, 116, 157, 71, 114, 71, 116, 200, 243, 114, 157, 32, 243, 116, 116, 200, 202, 120, 159, 159, 206, 75, 200, 204, 116, 71, 71, 114, 73, 71, 200, 118, 71, 32, 116, 159, 73, 116, 116, 73, 71, 30, 157, 157, 73, 200, 200, 157, 200, 247, 243, 73, 71, 75, 36, 71, 32, 36, 157, 116, 245, 71, 116, 114, 114, 157, 114, 116, 71, 32, 116, 30, 161, 71, 114, 157, 73, 200, 116, 116, 32, 157, 73, 116, 122, 116, 114, 71, 116, 34, 71, 116, 116, 200, 202, 157, 116, 116, 116, 200, 159, 159, 116, 157, 116, 114, 73, 245, 73, 116, 157, 32, 36, 116, 114, 206, 204, 71, 116, 116, 71, 32, 157, 114, 247, 30, 200, 247, 75, 200, 71, 159, 157, 116, 32, 114, 73, 30, 71, 114, 32, 73, 116, 30, 32, 202, 116, 116, 116, 116, 71, 116, 116, 159, 32, 71, 200, 116, 251, 116, 161, 159, 200, 73, 157, 114, 116, 114, 116, 116, 116, 116, 243, 77, 114, 202, 71, 71, 206, 71, 116, 116, 116, 32, 116, 71, 116, 
116, 157, 116, 116, 116, 116, 116, 116, 30, 243, 116, 200, 120, 116, 122, 71, 75, 200, 157, 116, 159, 32, 71, 71, 116, 157, 114, 116, 114, 116, 71, 116, 116, 157, 116, 116, 116, 157, 71, 73, 116, 116, 202, 116, 116, 200, 243, 36, 157, 157, 251, 114, 71, 118, 116, 116, 116, 71, 116, 114, 116, 200, 114, 83, 251, 32, 116, 116, 114, 204, 30, 159, 32, 73, 167, 116, 243, 157, 116, 114, 116, 77, 75, 202, 114, 116, 157, 116, 157, 116, 116, 116, 159, 71, 200, 114, 118, 114, 116, 71, 116, 71, 159, 245, 116, 116, 200, 116, 116, 114, 251, 32, 114, 157, 36, 116, 200, 157, 124, 114, 71, 157, 251, 36, 116, 157, 118, 243, 30, 251, 32, 116, 114, 71, 116, 71, 116, 200, 75, 36, 36, 36, 116, 200, 163, 206, 116, 116, 116, 114, 36, 71, 73, 32, 118, 71, 116, 71, 200, 36, 245, 114, 157, 71, 157, 73, 200, 116, 116, 116, 116, 116, 157, 116, 200, 116, 114, 245, 157, 71, 243, 71, 116, 114, 75, 36, 161, 116, 30, 116, 202, 159, 71, 157, 116, 247, 71, 114, 75, 116, 73, 71, 157, 71, 200, 116, 157, 243, 116, 243, 159, 30, 32, 116, 116, 114, 116, 116, 118, 114, 71, 114, 73, 116, 251, 32, 116, 114, 243, 206, 118, 200, 116, 114, 116, 116, 30, 202, 73, 114, 200, 71, 75, 200, 116, 71, 116, 116, 200, 114, 71, 32, 116, 114, 73, 116, 71, 36, 116, 116, 71, 206, 71, 200, 114, 118, 71, 30, 71, 116, 159, 114, 116, 157, 71, 114, 73, 202, 73, 116, 157, 243, 116, 200, 243, 71, 116, 159, 114, 202, 116, 75, 200, 251, 116, 71, 73, 71, 73, 114, 116, 116, 116, 71, 30, 30, 36, 71, 245, 71, 251, 114, 157, 116, 251, 206, 36, 157, 202, 245, 114, 116, 202, 114, 157, 73, 200, 116, 36, 116, 120, 116, 116, 114, 116, 120, 71, 73, 245, 157, 116, 71, 157, 169, 202, 116, 71, 114, 75, 159, 118, 73, 114, 157, 114, 71, 161, 71, 30, 116, 116, 116, 116, 71, 75, 114, 71, 204, 30, 32, 116, 116, 116, 116, 116, 116, 243, 200, 157, 30, 157, 116, 116, 116, 116, 71, 118, 116, 157, 71, 32, 36, 116, 116, 71, 206, 118, 157, 71, 200, 114, 116, 116, 200, 75, 159, 249, 114, 116, 116, 114, 206, 116, 116, 71, 116, 71, 116, 167, 116, 157, 71, 32, 
71, 116, 200, 116, 118, 204, 116, 71, 116, 116, 157, 71, 116, 32, 116, 114, 247, 204, 114, 157, 114, 200, 202, 114, 116, 157, 71, 157, 200, 157, 200, 30, 116, 200, 71, 200, 202, 157, 116, 245, 73, 114, 202, 116, 114, 77, 30, 114, 157, 251, 32, 116, 202, 32, 116, 116, 116, 116, 30, 36, 32, 116, 36, 116, 73, 116, 116, 116, 116, 167, 71, 71, 243, 202, 114, 200, 116, 71, 157, 73, 30, 114, 114, 163, 71, 206, 71, 116, 114, 200, 114, 114, 36, 36, 71, 73, 71, 114, 157, 116, 116, 157, 126, 157, 116, 32, 204, 71, 118, 71, 159, 71, 116, 71, 157, 116, 116, 114, 73, 116, 251, 32, 116, 157, 157, 118, 71, 243, 114, 30, 30, 34, 159, 163, 247, 71, 32, 114, 159, 251, 32, 116, 116, 32, 75, 157, 157, 71, 245, 71, 251, 114, 116, 114, 116, 116, 71, 202, 71, 71, 206, 116, 116, 116, 200, 118, 116, 116, 200, 202, 157, 202, 73, 116, 116, 114, 157, 71, 116, 116, 30, 157, 114, 116, 114, 157, 118, 116, 116, 116, 116, 251, 32, 116, 116, 116, 114, 157, 200, 30, 243, 75, 116, 116, 116, 114, 73, 71, 116, 116, 202, 71, 200, 71, 159, 114, 200, 245, 116, 75, 200, 243, 157, 116, 73, 71, 116, 118, 247, 200, 30, 75, 71, 116, 71, 157, 116, 114, 116, 118, 73, 157, 30, 243, 71, 247, 73, 200, 114, 157, 251, 32, 159, 243, 116, 161, 159, 118, 116, 116, 71, 30, 116, 71, 159, 75, 251, 157, 157, 116, 114, 116, 116, 243, 30, 30, 157, 243, 114, 157, 157, 71, 245, 159, 32, 245, 116, 114, 71, 116, 114, 157, 71, 116, 116, 116, 116, 116, 71, 118, 116, 120, 157, 32, 32, 71, 116, 116, 73, 114, 73, 114, 73, 30, 116, 116, 71, 116, 116, 71, 32, 202, 114, 30, 249, 114, 73, 200, 200, 159, 202, 114, 73, 71, 114, 75, 157, 157, 251, 32, 114, 30, 73, 114, 163, 114, 114, 116, 71, 116, 116, 243, 202, 75, 200, 116, 200, 116, 200, 116, 30, 116, 114, 157, 200, 114, 116, 116, 114, 114, 71, 200, 71, 75, 36, 32, 116, 75, 200, 71, 159, 157, 116, 114, 73, 30, 71, 114, 32, 159, 202, 73, 200, 32, 200, 116, 159, 71, 202, 159, 116, 73, 71, 116, 208, 206, 159, 159, 206, 32, 157, 116, 200, 200, 116, 251, 32, 71, 71, 77, 114, 71, 118, 116, 116, 
116, 159, 116, 116, 116, 114, 116, 116, 118, 73, 73, 208, 116, 116, 116, 114, 159, 159, 159, 206, 30, 32, 116, 200, 161, 114, 114, 73, 73, 116, 157, 116, 116, 114, 116, 114, 247, 157, 32, 157, 116, 73, 116, 71, 200, 202, 157, 71, 32, 30, 116, 71, 32, 36, 36, 116, 245, 71, 245, 167, 243, 71, 34, 71, 114, 73, 159, 30, 157, 30, 118, 71, 71, 118, 116, 114, 157, 251, 32, 116, 116, 157, 159, 116, 157, 161, 114, 114, 73, 114, 157, 204, 157, 114, 157, 251, 116, 116, 116, 71, 77, 71, 71, 116, 116, 114, 116, 116, 157, 116, 116, 73, 116, 71, 243, 71, 116, 114, 75, 36, 114, 157, 36, 116, 32, 243, 30, 116, 71, 114, 116, 200, 245, 71, 116, 32, 36, 73, 159, 202, 116, 116, 116, 159, 206, 32, 116, 71, 32, 116, 116, 167, 200, 114, 157, 73, 116, 245, 114, 157, 245, 157, 114, 116, 114, 116, 114, 71, 116, 116, 116, 200, 30, 36, 36, 116, 157, 116, 202, 116, 116, 116, 116, 116, 116, 116, 159, 206, 200, 116, 73, 71, 30, 200, 116, 114, 157, 251, 157, 114, 71, 243, 202, 157, 116, 116, 116, 116, 251, 32, 116, 114, 116, 116, 114, 157, 251, 32, 114, 116, 243, 251, 32, 71, 157, 71, 32, 200, 32, 245, 114, 73, 116, 73, 200, 157, 114, 114, 118, 71, 30, 36, 116, 116, 202, 73, 167, 116, 73, 114, 71, 157, 159, 159, 157, 116, 116, 114, 159, 71, 206, 114, 116, 71, 116, 202, 159, 30, 71, 114, 243, 251, 32, 116, 114, 157, 116, 114, 157, 116, 116, 71, 159, 200, 114, 159, 75, 32, 116, 114, 243, 71, 116, 116, 116, 159, 71, 157, 161, 120, 243, 206, 114, 206, 30, 32, 116, 71, 116, 38, 243, 71, 114, 159, 157, 116, 200, 243, 114, 243, 200, 114, 116, 71, 73, 114, 116, 116, 116, 73, 116, 116, 71, 116, 32, 243, 116, 30, 116, 71, 200, 159, 202, 116, 71, 32, 116, 157, 157, 243, 116, 73, 163, 71, 206, 114, 116, 157, 247, 116, 116, 116, 71, 157, 157, 249, 71, 36, 116, 36, 71, 245, 71, 251, 114, 157, 116, 251, 32, 116, 200, 202, 114, 116, 200, 116, 157, 30, 73, 200, 73, 30, 159, 200, 32, 116, 157, 116, 116, 116, 71, 30, 200, 73, 116, 157, 120, 73, 114, 159, 114, 30, 157, 243, 114, 157, 71, 200, 116, 116, 75, 202, 116, 
116, 116, 71, 116, 163, 116, 116, 202, 243, 71, 71, 116, 116, 116, 116, 114, 118, 71, 114, 116, 114, 114, 243, 116, 116, 116, 116, 116, 114, 200, 36, 36, 36, 116, 71, 206, 71, 116, 36, 116, 32, 116, 114, 116, 32, 200, 75, 116, 30, 116, 30, 114, 116, 163, 83, 32, 114, 116, 114, 243, 243, 114, 77, 243, 157, 71, 116, 116, 247, 116, 116, 114, 83, 157, 245, 116, 116, 116, 116, 114, 159, 30, 116, 118, 116, 116, 116, 249, 114, 116, 114, 30, 116, 157, 200, 114, 243, 157, 114, 159, 30, 116, 251, 116, 249, 243, 157, 71, 116, 71, 30, 116, 251, 32, 116, 30, 114, 157, 200, 157, 73, 71, 120, 116, 200, 124, 71, 157, 71, 157, 114, 157, 73, 116, 116, 114, 116, 116, 30, 116, 116, 116, 116, 116, 157, 116, 200, 116, 114, 245, 157, 71, 32, 116, 71, 71, 73, 245, 116, 116, 116, 114, 73, 116, 71, 116, 114, 116, 71, 36, 243, 114, 73, 116, 114, 116, 71, 36, 243, 114, 73, 34, 73, 249, 71, 36, 243, 114, 73, 34, 73, 249, 71, 202, 75, 71, 243, 71, 116, 200, 114, 71, 75, 77, 71, 200, 71, 247, 116, 71, 157, 118, 30, 245, 161, 71, 30, 243, 71, 159, 116, 36, 73, 34, 32, 114, 157, 71, 71, 116, 157, 30, 118, 32, 73, 116, 114, 114, 200, 159, 114, 116, 71, 157, 243, 116, 204, 71, 157, 251, 71, 243, 73, 116, 157, 71, 32, 251, 243, 157, 114, 157, 251, 32, 157, 114, 116, 245, 159, 73, 116, 116, 157, 71, 73, 157, 243, 71, 116, 251, 32, 243, 243, 36, 157, 116, 200, 71, 32, 159, 243, 116, 161, 204, 116, 114, 116, 200, 114, 116, 116, 116, 116, 32, 202, 159, 114, 114, 30, 116, 116, 30, 159, 32, 116, 243, 251, 243, 116, 71, 36, 116, 114, 30, 116, 114, 73, 114, 73, 157, 30, 73, 245, 83, 157, 116, 114, 116, 73, 243, 30, 116, 114, 116, 157, 116, 114, 116, 73, 116, 116, 116, 30, 114, 116, 71, 116, 116, 32, 116, 116, 159, 71, 157, 73, 204, 71, 36, 36, 116, 116, 73, 73, 36, 71, 71, 32, 116, 73, 114, 73, 200, 159, 200, 114, 116, 114, 116, 200, 71, 75, 200, 114, 157, 157, 157, 30, 116, 73, 157, 114, 73, 114, 116, 116, 157, 71, 157, 200, 114, 116, 114, 71, 200, 73, 202, 30, 30, 157, 116, 243, 114, 114, 157, 251, 32, 
116, 116, 202, 71, 159, 157, 71, 157, 32, 73, 38, 71, 116, 245, 116, 157, 114, 71, 30, 30, 200, 71, 157, 73, 161, 116, 157, 71, 157, 114, 116, 116, 116, 114, 73, 116, 116, 118, 30, 32, 157, 200, 249, 114, 30, 71, 157, 32, 73, 116, 116, 157, 202, 114, 116, 116, 114, 114, 32, 118, 71, 243, 116, 71, 116, 116, 114, 116, 75, 116, 30, 73, 71, 116, 159, 116, 36, 116, 36, 114, 116, 116, 116, 36, 157, 116, 114, 116, 114, 200, 73, 114, 73, 116, 116, 116, 114, 75, 114, 161, 116, 116, 116, 114, 116, 116, 116, 167, 116, 116, 116, 116, 114, 157, 116, 116, 116, 116, 73, 114, 73, 73, 116, 114, 116, 116, 159, 71, 71, 32, 116, 245, 116, 114, 245, 116, 159, 120, 114, 116, 116, 159, 114, 32, 118, 114, 157, 114, 71, 30, 118, 71, 116, 71, 116, 157, 200, 73, 114, 71, 116, 200, 71, 116, 202, 157, 116, 114, 83, 157, 71, 116, 116, 114, 251, 32, 116, 114, 116, 116, 116, 167, 243, 157, 243, 157, 116, 116, 116, 208, 243, 32, 71, 157, 71, 77, 71, 71, 116, 116, 116, 157, 116, 114, 206, 208, 200, 32, 116, 38, 71, 71, 157, 118, 116, 116, 116, 71, 116, 114, 116, 73, 116, 118, 114, 245, 116, 116, 116, 116, 116, 116, 114, 114, 73, 116, 73, 116, 71, 202, 73, 116, 71, 36, 159, 116, 114, 157, 200, 71, 200, 200, 71, 200, 159, 114, 122, 159, 71, 159, 206, 71, 36, 114, 116, 114, 116, 71, 116, 114, 116, 114, 71, 73, 116, 71, 114, 116, 73, 71, 159, 118, 116, 116, 114, 73, 245, 73, 73, 36, 73, 73, 116, 116, 71, 116, 120, 116, 200, 159, 202, 71, 167, 116, 116, 71, 116, 116, 157, 161, 114, 114, 73, 73, 71, 116, 30, 114, 116, 116, 206, 245, 114, 116, 114, 157, 200, 114, 116, 204, 165, 116, 116, 116, 114, 157, 157, 114, 116, 116, 157, 118, 245, 71, 116, 116, 116, 71, 200, 116, 116, 116, 114, 71, 245, 71, 251, 114, 73, 157, 116, 116, 71, 116, 75, 116, 116, 116, 71, 159, 73, 116, 73, 114, 116, 114, 157, 116, 247, 114, 75, 116, 116, 114, 157, 200, 30, 161, 202, 32, 157, 116, 116, 118, 114, 114, 71, 247, 71, 116, 71, 202, 116, 73, 71, 30, 116, 73, 159, 159, 202, 116, 71, 71, 200, 116, 116, 71, 116, 116, 116, 165, 
206, 202, 116, 71, 71, 243, 159, 116, 116, 116, 79, 118, 116, 116, 116, 30, 114, 243, 71, 200, 116, 200, 73, 71, 116, 116, 167, 116, 116, 71, 116, 116, 116, 116, 118, 116, 116, 116, 116, 116, 157, 116, 116, 157, 114, 157, 116, 157, 71, 32, 114, 202, 116, 116, 208, 116, 116, 118, 114, 159, 116, 116, 116, 114, 157, 34, 73, 116, 200, 161, 114, 114, 73, 73, 116, 116, 116, 71, 116, 32, 36, 114, 32, 114, 71, 32, 77, 71, 71, 116, 114, 73, 114, 157, 157, 73, 157, 245, 32, 116, 116, 116, 251, 32, 116, 157, 157, 245, 116, 71, 200, 159, 71, 116, 71, 71, 83, 32, 245, 157, 114, 116, 114, 116, 114, 71, 116, 120, 157, 157, 116, 114, 157, 116, 157, 116, 202, 116, 120, 73, 71, 73, 202, 116, 159, 206, 200, 116, 159, 116, 34, 116, 116, 159, 71, 157, 71, 71, 116, 32, 200, 71, 116, 116, 32, 114, 157, 251, 71, 208, 36, 30, 114, 157, 251, 32, 116, 71, 243, 243, 71, 36, 157, 32, 200, 159, 114, 71, 157, 202, 114, 114, 114, 157, 157, 251, 116, 116, 116, 114, 116, 116, 116, 114, 116, 36, 116, 114, 157, 71, 116, 157, 251, 200, 114, 71, 71, 206, 159, 251, 30, 157, 114, 116, 157, 30, 73, 245, 83, 243, 36, 243, 204, 243, 73, 116, 159, 71, 202, 159, 116, 116, 36, 200, 73, 116, 116, 71, 116, 116, 243, 114, 71, 200, 71, 116, 32, 71, 36, 116, 114, 157, 251, 30, 157, 159, 116, 114, 30, 116, 71, 118, 114, 157, 114, 71, 30, 114, 116, 73, 245, 157, 71, 32, 114, 202, 116, 116, 208, 116, 202, 116, 116, 116, 200, 157, 73, 251, 116, 116, 116, 116, 157, 36, 116, 114, 157, 71, 116, 159, 114, 202, 116, 114, 77, 157, 116, 116, 200, 114, 157, 157, 73, 116, 116, 116, 116, 202, 116, 71, 200, 71, 114, 116, 116, 114, 116, 116, 71, 116, 71, 32, 71, 32, 116, 71, 210, 114, 159, 116, 116, 32, 161, 71, 116, 243, 243, 114, 243, 116, 114, 116, 159, 116, 116, 71, 159, 116, 157, 157, 251, 116, 116, 116, 116, 71, 71, 116, 116, 157, 124, 32, 114, 116, 114, 114, 71, 30, 32, 202, 30, 157, 116, 114, 159, 30, 202, 116, 157, 200, 116, 116, 116, 116, 116, 159, 202, 116, 71, 116, 116, 71, 116, 32, 71, 118, 116, 116, 71, 116, 116, 
200, 243, 116, 116, 114, 116, 71, 116, 73, 36, 36, 243, 71, 73, 157, 114, 71, 30, 157, 114, 116, 157, 71, 71, 116, 116, 243, 118, 116, 32, 116, 114, 157, 116, 116, 114, 116, 116, 116, 116, 114, 157, 157, 116, 157, 71, 32, 30, 157, 200, 116, 71, 116, 114, 159, 116, 114, 243, 157, 116, 71, 116, 116, 116, 73, 114, 73, 73, 116, 71, 118, 114, 116, 114, 157, 251, 32, 75, 36, 36, 116, 114, 157, 251, 30, 71, 32, 116, 71, 116, 157, 114, 116, 114, 116, 114, 116, 116, 114, 157, 116, 116, 200, 118, 71, 157, 116, 30, 71, 73, 32, 118, 71, 116, 71, 36, 116, 157, 71, 32, 118, 114, 73, 114, 73, 116, 71, 116, 116, 157, 114, 116, 116, 116, 116, 116, 116, 71, 71, 159, 157, 116, 157, 75, 36, 116, 116, 116, 116, 114, 157, 116, 167, 116, 243, 116, 251, 32, 71, 71, 114, 30, 73, 71, 30, 157, 157, 73, 36, 114, 116, 116, 200, 75, 243, 157, 159, 114, 71, 30, 157, 157, 206, 114, 73, 32, 116, 32, 157, 116, 243, 116, 73, 116, 159, 32, 116, 202, 243, 118, 32, 116, 116, 116, 116, 200, 114, 159, 116, 116, 114, 157, 159, 159, 157, 116, 116, 116, 71, 30, 202, 157, 200, 32, 116, 116, 71, 71, 34, 243, 200, 116, 116, 71, 116, 243, 118, 200, 200, 114, 159, 116, 116, 114, 157, 200, 114, 75, 114, 159, 30, 32, 157, 157, 157, 243, 116, 114, 116, 71, 116, 116, 116, 116, 71, 114, 116, 116, 116, 116, 202, 114, 73, 116, 71, 245, 116, 116, 116, 202, 200, 71, 30, 71, 249, 114, 73, 202, 116, 116, 116, 32, 73, 71, 114, 243, 243, 116, 73, 116, 71, 32, 114, 116, 32, 116, 157, 71, 32, 118, 116, 32, 200, 36, 116, 114, 116, 116, 157, 30, 75, 116, 71, 71, 157, 200, 114, 116, 71, 116, 71, 116, 114, 32, 116, 36, 36, 116, 114, 157, 200, 116, 71, 116, 114, 32, 159, 116, 116, 114, 116, 71, 157, 157, 157, 251, 32, 157, 116, 157, 116, 167, 116, 157, 114, 157, 167, 116, 157, 243, 116, 71, 114, 36, 36, 36, 116, 71, 116, 116, 114, 157, 116, 32, 165, 116, 118, 114, 71, 157, 32, 118, 116, 73, 116, 71, 36, 159, 30, 202, 157, 202, 30, 114, 116, 157, 32, 204, 71, 206, 71, 116, 114, 116, 114, 157, 251, 114, 116, 71, 202, 30, 36, 36, 116, 
114, 114, 71, 75, 200, 30, 116, 116, 30, 159, 32, 116, 116, 73, 30, 116, 251, 116, 247, 157, 71, 32, 32, 71, 116, 30, 157, 200, 157, 204, 32, 36, 157, 116, 200, 114, 116, 116, 73, 71, 157, 30, 71, 116, 157, 114, 116, 114, 116, 116, 116, 71, 114, 73, 114, 243, 114, 200, 71, 118, 114, 159, 116, 116, 116, 114, 157, 34, 73, 116, 116, 71, 116, 116, 114, 116, 116, 200, 157, 157, 116, 71, 116, 116, 116, 32, 36, 36, 75, 114, 116, 114, 116, 116, 116, 116, 116, 159, 71, 157, 161, 116, 116, 251, 32, 71, 245, 116, 116, 116, 30, 116, 116, 71, 118, 73, 116, 157, 114, 116, 159, 71, 157, 251, 157, 114, 71, 30, 157, 116, 71, 116, 32, 114, 116, 114, 116, 116, 157, 116, 116, 116, 159, 159, 157, 200, 114, 116, 116, 114, 116, 116, 245, 116, 114, 157, 251, 36, 116, 157, 157, 73, 200, 36, 116, 32, 116, 114, 157, 36, 116, 30, 114, 157, 200, 157, 114, 116, 30, 120, 116, 116, 116, 202, 71, 118, 71, 71, 116, 114, 116, 116, 202, 200, 71, 30, 71, 249, 36, 114, 116, 73, 200, 71, 116, 114, 73, 71, 206, 116, 118, 114, 114, 159, 116, 202, 157, 116, 116, 30, 71, 114, 116, 116, 114, 116, 114, 116, 36, 157, 114, 116, 116, 116, 116, 200, 36, 116, 114, 71, 116, 32, 157, 73, 116, 116, 245, 116, 116, 116, 71, 116, 116, 243, 116, 116, 116, 116, 114, 116, 116, 114, 116, 71, 116, 114, 116, 73, 116, 116, 116, 30, 114, 116, 71, 116, 116, 116, 73, 116, 71, 243, 71, 116, 114, 75, 36, 114, 157, 116, 116, 116, 116, 126, 202, 114, 116, 200, 243, 114, 243, 34, 73, 157, 118, 200, 200, 157, 200, 159, 30, 30, 157, 116, 114, 116, 167, 116, 157, 71, 30, 114, 116, 71, 116, 126, 202, 114, 116, 200, 243, 114, 157, 116, 116, 114, 116, 114, 71, 114, 116, 116, 114, 206, 114, 157, 73, 167, 116, 243, 116, 251, 32, 71, 71, 157, 71, 116, 116, 71, 200, 202, 114, 114, 116, 73, 116, 116, 116, 116, 71, 116, 243, 116, 251, 32, 116, 206, 159, 251, 32, 73, 116, 116, 157, 116, 114, 116, 116, 116, 116, 116, 116, 71, 116, 73, 116, 116, 30, 71, 243, 243, 200, 71, 116, 32, 157, 36, 36, 116, 114, 157, 251, 32, 116, 71, 116, 116, 71, 159, 206, 
32, 116, 71, 32, 71, 116, 116, 114, 114, 157, 73, 71, 200, 71, 75, 36, 71, 116, 71, 116, 32, 157, 116, 200, 73, 202, 243, 71, 71, 116, 32, 116, 116, 116, 71, 114, 157, 116, 71, 243, 71, 116, 116, 116, 161, 30, 75, 116, 71, 159, 116, 157, 157, 251, 243, 157, 114, 157, 251, 32, 75, 114, 161, 116, 116, 116, 114, 116, 116, 116, 167, 116, 116, 32, 243, 71, 71, 116, 157, 251, 32, 116, 157, 116, 159, 71, 157, 116, 249, 251, 32, 116, 114, 157, 116, 200, 32, 116, 114, 251, 32, 116, 157, 128, 118, 71, 116, 114, 114, 71, 73, 116, 116, 157, 71, 243, 157, 116, 32, 36, 73, 251, 32, 114, 157, 245, 116, 245, 116, 71, 71, 157, 251, 71, 247, 245, 114, 73, 200, 200, 157, 73, 116, 116, 116, 73, 116, 30, 73, 251, 32, 116, 38, 243, 116, 114, 73, 114, 71, 204, 116, 114, 114, 73, 73, 159, 202, 116, 71, 116, 116, 116, 118, 116, 116, 116, 71, 114, 243, 114, 200, 114, 73, 116, 71, 116, 32, 167, 206, 32, 116, 114, 157, 71, 157, 116, 116, 114, 116, 114, 71, 114, 116, 116, 114, 116, 30, 116, 116, 116, 71, 30, 116, 251, 32, 71, 71, 157, 71, 116, 116, 71, 202, 159, 114, 114, 114, 116, 116, 116, 114, 30, 116, 251, 32, 71, 71, 77, 116, 71, 71, 251, 73, 71, 71, 32, 36, 157, 157, 243, 116, 157, 73, 200, 71, 116, 116, 116, 157, 73, 116, 116, 200, 159, 116, 116, 116, 116, 167, 116, 116, 71, 73, 116, 71, 200, 116, 200, 73, 157, 114, 73, 157, 32, 157, 200, 30, 75, 200, 116, 200, 116, 200, 116, 30, 73, 71, 116, 159, 116, 71, 116, 202, 73, 71, 116, 71, 159, 114, 114, 116, 73, 116, 116, 116, 30, 202, 157, 116, 30, 75, 71, 36, 116, 114, 157, 71, 116, 159, 114, 71, 157, 251, 114, 200, 114, 116, 116, 157, 120, 247, 204, 114, 116, 73, 71, 73, 202, 120, 159, 159, 206, 75, 71, 200, 159, 159, 159, 116, 34, 116, 116, 159, 71, 157, 71, 71, 116, 32, 200, 71, 116, 116, 157, 206, 71, 116, 116, 116, 71, 73, 157, 116, 200, 77, 114, 116, 116, 71, 73, 36, 243, 32, 73, 116, 30, 202, 71, 71, 71, 114, 116, 116, 32, 71, 200, 202, 116, 243, 71, 116, 159, 157, 116, 118, 204, 116, 114, 157, 30, 157, 71, 32, 73, 30, 114, 116, 32, 
245, 157, 71, 157, 200, 71, 116, 157, 36, 36, 116, 157, 116, 116, 116, 116, 71, 116, 79, 243, 32, 30, 73, 73, 116, 116, 71, 114, 75, 30, 73, 116, 116, 116, 116, 30, 243, 159, 71, 116, 251, 116, 36, 116, 114, 159, 71, 36, 71, 243, 159, 71, 116, 114, 116, 206, 71, 114, 157, 73, 116, 116, 71, 116, 71, 201, 201, 201, 201, 201, 201, 201, 201, 36, 36, 36, 36, 36, 116, 116, 116, 116, 116, 71, 116, 116, 116, 116, 116, 71, 116, 116, 116, 116, 116, 71, 116, 116, 116, 116, 116, 116, 116, 116, 167, 116, 116, 167, 116, 157, 71, 32, 116, 71, 116, 116, 116, 71, 116, 32, 36, 36, 36, 36, 116, 114, 157, 251, 32, 116, 71, 116, 116, 116, 71, 116, 36, 116, 114, 157, 251, 32, 116, 116, 71, 116, 116, 116, 71, 36, 116, 114, 157, 159, 116, 32, 36, 36, 36, 116, 116, 116, 116, 71, 116, 116, 116, 116, 116, 114, 116, 114, 157, 251, 32, 116, 116, 71, 116, 114, 157, 251, 32, 116, 116, 116, 116, 114, 116, 116, 116, 114, 116, 116, 114, 157, 251, 32, 116, 116, 71, 116, 114, 157, 116, 116, 114, 116, 114, 116, 116, 116, 116, 116, 116, 116, 116, 116, 71, 116, 116, 116, 116, 116, 71, 116, 32, 36, 114, 116, 73, 73, 36, 116, 114, 157, 251, 32, 116, 71, 116, 159, 36, 36, 116, 116, 116, 116, 116, 114, 116, 116, 114, 206, 71, 116, 116, 116, 116, 114, 116, 116, 114, 116, 116, 116, 116, 116, 71, 116, 114, 157, 251, 32, 157, 116, 116, 116, 71, 116, 116, 116, 116, 116, 71, 116, 32, 36, 114, 116, 73, 73, 71, 116, 114, 157, 116, 116, 116, 116, 116, 114, 116, 114, 157, 251, 32, 202, 116, 116, 116, 71, 116, 116, 116, 116, 116, 71, 116, 157, 34, 157, 116, 114, 116, 116, 118, 114, 116, 116, 116, 116, 116, 71, 116, 114, 157, 251, 32, 157, 116, 116, 71, 116, 116, 116, 116, 114, 116, 73, 73, 36, 116, 114, 157, 36, 36, 116, 116, 116, 73, 73, 36, 71, 36, 36, 36, 116, 116, 116, 116, 157, 116, 167, 116, 157, 71, 32, 116, 71, 116, 116, 71, 116, 32, 73, 71, 116, 32, 36, 30, 114, 157, 251, 32, 116, 114, 157, 116, 200, 32, 116, 71, 116, 116, 71, 116, 116, 71, 116, 114, 157, 251, 32, 116, 116, 116, 116, 114, 116, 116, 116, 32, 
73, 71, 116, 32, 116, 116, 116, 116, 159, 71, 157, 116, 114, 116, 116, 118, 114, 71, 71, 206, 71, 116, 116, 116, 202, 116, 116, 116, 116, 116, 114, 116, 116, 116, 71, 116, 114, 157, 73, 200, 157, 200, 157, 200, 157, 200, 71, 75, 36, 32, 116, 116, 116, 116, 114, 73, 114, 73, 114, 73, 114, 73, 116, 116, 116, 157, 116, 167, 116, 157, 71, 32, 116, 71, 116, 116, 71, 116, 251, 32, 116, 114, 116, 116, 114, 206, 71, 116, 116, 202, 157, 251, 32, 116, 116, 71, 116, 200, 124, 116, 114, 157, 159, 116, 116, 116, 202, 116, 116, 116, 116, 116, 114, 159, 118, 114, 71, 71, 206, 71, 116, 116, 116, 116, 116, 116, 71, 73, 114, 116, 116, 116, 71, 116, 32, 157, 73, 30, 157, 116, 114, 157, 251, 32, 157, 159, 202, 116, 73, 73, 116, 114, 157, 116, 116, 116, 116, 116, 114, 116, 114, 157, 251, 116, 71, 116, 116, 116, 116, 116, 114, 116, 114, 157, 251, 32, 200, 116, 116, 116, 116, 116, 159, 118, 114, 71, 71, 206, 116, 116, 116, 159, 71, 157, 157, 249, 116, 116, 116, 116, 116, 114, 159, 32, 157, 116, 116, 116, 71, 116, 116, 116, 116, 116, 71, 36, 116, 116, 116, 116, 116, 71, 116, 32, 157, 73, 116, 116, 116, 116, 114, 32, 73, 71, 116, 202, 157, 116, 116, 116, 116, 73, 114, 116, 116, 118, 73, 116, 116, 116, 157, 116, 167, 36, 71, 36, 36, 32, 157, 159, 202, 116, 32, 71, 116, 116, 116, 71, 36, 116, 114, 157, 116, 71, 73, 116, 157, 71, 32, 116, 114, 157, 251, 32, 114, 157, 159, 116, 243, 116, 73, 73, 71, 116, 114, 157, 161, 157, 251, 116, 71, 116, 116, 200, 114, 159, 30, 116, 116, 116, 71, 116, 157, 34, 157, 116, 200, 116, 116, 116, 116, 71, 71, 206, 116, 73, 114, 116, 116, 118, 204, 116, 116, 116, 120, 30, 114, 116, 73, 114, 116, 116, 118, 116, 36, 116, 114, 157, 116, 114, 157, 251, 32, 114, 157, 159, 206, 71, 116, 116, 200, 71, 116, 157, 34, 116, 116, 116, 71, 36, 116, 116, 116, 116, 206, 116, 73, 114, 116, 71, 116, 116, 116, 116, 71, 116, 159, 116, 116, 116, 114, 116, 116, 116, 116, 116, 116, 71, 116, 114, 75, 36, 32, 116, 116, 116, 116, 114, 116, 114, 116, 114, 116, 200, 116, 116, 116, 114, 
157, 200, 157, 116, 167, 116, 157, 71, 32, 116, 116, 116, 71, 36, 116, 114, 157, 116, 71, 73, 116, 157, 71, 32, 116, 114, 157, 251, 114, 116, 73, 73, 159, 202, 116, 71, 116, 116, 71, 116, 32, 71, 118, 116, 116, 71, 116, 116, 157, 114, 116, 116, 116, 116, 167, 116, 116, 71, 73, 116, 157, 71, 32, 116, 114, 30, 159, 116, 116, 116, 157, 114, 116, 114, 116, 116, 116, 116, 116, 116, 71, 116, 114, 116, 32, 157, 73, 71, 116, 36, 116, 114, 157, 251, 32, 116, 116, 157, 251, 32, 32, 157, 71, 116, 114, 75, 36, 32, 116, 116, 116, 116, 114, 116, 116, 116, 116, 71, 116, 157, 34, 204, 116, 114, 157, 251, 243, 116, 200, 116, 116, 116, 114, 157, 200, 157, 116, 251, 32, 157, 116, 116, 71, 116, 116, 114, 157, 116, 114, 116, 116, 118, 116, 116, 116, 116, 116, 71, 116, 116, 157, 114, 157, 71, 32, 116, 114, 157, 116, 114, 116, 116, 118, 73, 159, 202, 116, 71, 116, 116, 71, 116, 32, 71, 118, 116, 116, 71, 116, 116, 157, 114, 116, 116, 116, 116, 167, 116, 116, 71, 116, 116, 118, 114, 71, 71, 206, 30, 32, 116, 116, 114, 116, 200, 71, 116, 157, 34, 116, 116, 116, 116, 116, 116, 159, 116, 116, 116, 114, 116, 116, 116, 116, 247, 116, 116, 116, 116, 116, 116, 116, 116, 114, 83, 251, 32, 116, 116, 116, 116, 114, 36, 116, 116, 116, 116, 71, 116, 116, 116, 200, 36, 116, 114, 32, 116, 116, 71, 116, 116, 116, 116, 71, 71, 157, 116, 116, 120, 30, 114, 116, 114, 157, 116, 114, 116, 116, 71, 116, 114, 116, 71, 116, 157, 34, 157, 116, 114, 116, 116, 118, 114, 116, 116, 161, 159, 118, 114, 71, 73, 32, 116, 116, 116, 114, 71, 243, 116, 116, 116, 116, 200, 73, 157, 245, 32, 116, 116, 116, 71, 36, 116, 114, 114, 73, 114, 73, 116, 116, 118, 114, 71, 114, 157, 251, 114, 73, 73, 159, 202, 116, 71, 116, 116, 116, 116, 114, 32, 73, 71, 116, 202, 157, 116, 116, 116, 116, 73, 114, 116, 120, 30, 114, 116, 36, 116, 114, 157, 251, 32, 116, 116, 157, 251, 32, 32, 245, 114, 73, 114, 159, 30, 32, 116, 116, 114, 116, 200, 71, 116, 157, 34, 116, 71, 71, 71, 116, 116, 116, 116, 114, 73, 157, 159, 206, 71, 114, 73, 71, 206, 
71, 116, 116, 71, 32, 116, 114, 157, 116, 114, 116, 116, 116, 157, 116, 167, 116, 116, 116, 116, 116, 200, 73, 157, 245, 32, 116, 116, 32, 114, 73, 114, 116, 116, 116, 116, 120, 157, 32, 116, 71, 116, 116, 159, 202, 116, 73, 73, 116, 157, 159, 251, 32, 116, 114, 116, 116, 114, 206, 71, 116, 200, 71, 116, 116, 71, 116, 251, 157, 159, 202, 116, 71, 116, 116, 116, 71, 36, 116, 114, 157, 116, 116, 116, 157, 114, 116, 114, 116, 116, 114, 36, 36, 116, 116, 116, 116, 157, 116, 114, 157, 251, 32, 114, 243, 243, 71, 36, 32, 116, 36, 116, 116, 116, 116, 116, 114, 157, 251, 32, 202, 116, 71, 36, 116, 114, 157, 71, 71, 116, 116, 116, 116, 114, 73, 157, 159, 206, 71, 71, 116, 71, 116, 159, 36, 36, 116, 116, 116, 157, 114, 71, 73, 116, 157, 71, 32, 116, 114, 157, 251, 32, 116, 116, 159, 71, 157, 116, 30, 71, 73, 71, 116, 116, 114, 157, 116, 114, 116, 73, 116, 114, 83, 251, 32, 71, 157, 71, 32, 116, 71, 116, 116, 71, 116, 32, 204, 116, 71, 116, 116, 71, 200, 159, 202, 116, 71, 159, 116, 157, 34, 116, 71, 71, 116, 116, 116, 116, 114, 73, 157, 159, 206, 71, 114, 73, 71, 206, 116, 118, 114, 71, 114, 157, 251, 114, 73, 73, 159, 202, 116, 71, 116, 116, 114, 116, 120, 30, 73, 71, 116, 200, 243, 71, 116, 159, 159, 116, 116, 159, 71, 157, 116, 30, 71, 73, 71, 116, 116, 114, 157, 116, 114, 116, 116, 200, 36, 36, 36, 32, 116, 116, 251, 32, 202, 116, 71, 36, 32, 116, 116, 116, 200, 200, 114, 159, 30, 116, 251, 32, 71, 157, 71, 34, 157, 116, 200, 116, 71, 116, 116, 157, 116, 114, 116, 108, 123, 108, 123, 123, 108, 108, 108, 123, 108, 123, 108, 108, 108, 108, 123, 108, 108, 180, 41, 8, 23, 108, 108, 108, 108, 138, 108, 108, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 
114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 
114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 
22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 114, 114, 114, 114, 22, 22, 22, 22, 22, 114, 114, 114, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 174, 174, 174, 209, 36, 36, 36, 36, 36, 114, 114, 114, 114, 114, 114, 114, 36, 36, 36, 209, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 209, 36, 126, 36, 36, 114, 114, 114, 114, 36, 36, 36, 209, 209, 36, 36, 36, 36, 22, 22, 22, 22, 22, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 209, 36, 36, 36, 36, 209, 36, 36, 36, 36, 
36, 209, 36, 36, 36, 36, 36, 36, 36, 36, 36, 22, 22, 22, 36, 36, 36, 36, 36, 209, 36, 36, 114, 114, 114, 209, 36, 209, 36, 209, 36, 36, 36, 36, 36, 36, 36, 36, 36, 209, 36, 36, 36, 36, 209, 36, 126, 209, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 209, 36, 36, 36, 126, 36, 36, 36, 36, 36, 36, 126, 36, 36, 126, 36, 36, 36, 209, 209, 36, 36, 36, 36, 36, 36, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 78, 195, 245, 59, 217, 14, 252, 44, 26, 12, 204, 59, 197, 142, 29, 197, 226, 175, 153, 217, 212, 98, 72, 19, 172, 89, 208, 43, 201, 108, 168, 138, 233, 88, 222, 65, 209, 31, 219, 186, 171, 126, 201, 32, 28, 155, 152, 86, 47, 225, 214, 5, 41, 87, 218, 40, 219, 12, 182, 245, 144, 149, 218, 88, 138, 18, 23, 157, 207, 218, 63, 171, 193, 220, 201, 220, 193, 66, 238, 192, 83, 47, 65, 43, 37, 76, 151, 159, 213, 58, 37, 142, 81, 216, 39, 134, 47, 167, 133, 158, 180, 122, 162, 227, 84, 83, 104, 58, 235, 206, 64, 122, 174, 54, 91, 80, 157, 33, 21, 120, 18, 118, 61, 122, 48, 22, 122, 144, 76, 233, 62, 
147, 232, 105, 138, 217, 233, 34, 205, 1, 210, 151, 77, 116, 61, 101, 184, 187, 203, 254, 62, 66, 234, 95, 67, 203, 31, 0, 6, 116, 8, 5, 0, 172, 228, 252, 22, 1, 242, 247, 126, 255, 13, 38, 145, 189, 161, 254, 56, 54, 70, 189, 110, 93, 52, 210, 113, 108, 149, 244, 54, 218, 169, 194, 160, 86, 227, 129, 134, 5, 107, 215, 232, 14, 253, 136, 195, 147, 239, 137, 214, 91, 174, 242, 172, 88, 7, 210, 14, 143, 136, 24, 154, 75, 33, 176, 210, 70, 183, 31, 146, 243, 123, 21, 158, 57, 149, 174, 191, 143, 169, 216, 31, 127, 98, 246, 166, 161, 32, 139, 196, 150, 150, 233, 124, 156, 67, 90, 169, 246, 181, 149, 119, 46, 103, 140, 15, 131, 209, 103, 172, 27, 198, 170, 71, 36, 56, 251, 205, 158, 106, 154, 9, 15, 48, 104, 237, 222, 140, 34, 198, 60, 198, 206, 247, 49, 238, 93, 65, 170, 87, 188, 183, 166, 154, 43, 49, 44, 83, 44, 26, 31, 92, 234, 67, 174, 144, 124, 121, 73, 143, 176, 3, 197, 50, 239, 110, 84, 164, 161, 253, 22, 226, 89, 138, 67, 205, 63, 128, 230, 207, 115, 81, 221, 70, 2, 196, 162, 214, 11, 89, 150, 202, 169, 247, 60, 117, 142, 243, 203, 85, 96, 41, 16, 47, 121, 182, 51, 172, 125, 180, 120, 79, 86, 233, 209, 186, 81, 154, 174, 9, 60, 57, 166, 190, 107, 179, 121, 100, 150, 119, 181, 198, 45, 36, 184, 66, 30, 54, 239, 14, 196, 71, 248, 184, 166, 252, 8, 132, 40, 235, 170, 99, 124, 174, 165, 24, 171, 186, 186, 191, 94, 44, 188, 204, 62, 219, 192, 30, 143, 20, 58, 67, 14, 86, 52, 15, 253, 142, 137, 61, 24, 171, 220, 177, 194, 133, 56, 202, 110, 202, 120, 66, 22, 97, 110, 169, 180, 172, 47, 96, 12, 102, 122, 108, 214, 183, 41, 82, 108, 192, 10, 159, 104, 26, 152, 127, 122, 112, 217, 180, 2, 186, 60, 107, 206, 108, 78, 78, 16, 113, 232, 230, 68, 17, 153, 65, 90, 243, 116, 169, 69, 112, 24, 220, 170, 146, 19, 208, 89, 129, 30, 50, 138, 81, 223, 3, 98, 114, 203, 91, 32, 143, 28, 186, 234, 116, 62, 81, 142, 177, 54, 9, 189, 132, 14, 102, 72, 203, 1, 27, 55, 91, 212, 180, 209, 187, 201, 206, 64, 219, 24, 143, 74, 197, 247, 220, 21, 137, 80, 64, 225, 174, 57, 189, 78, 198, 143, 
133, 216, 148, 115, 7, 197, 144, 102, 167, 163, 153, 182, 77, 141, 238, 82, 220, 1, 115, 153, 153, 26, 118, 50, 158, 189, 61, 111, 182, 52, 0, 246, 213, 245, 7, 123, 25, 118, 148, 224, 206, 243, 34, 204, 206, 128, 154, 116, 235, 229, 255, 193, 254, 111, 130, 0, 27, 55, 94, 251, 78, 70, 28, 63, 170, 14, 92, 159, 199, 218, 131, 89, 124, 242, 87, 184, 158, 222, 48, 208, 246, 49, 78, 33, 143, 251, 211, 195, 1, 239, 206, 47, 37, 12, 65, 56, 142, 238, 112, 172, 107, 188, 139, 122, 64, 54, 11, 71, 220, 255, 223, 63, 27, 206, 129, 204, 56, 32, 34, 63, 84, 78, 245, 199, 48, 143, 181, 186, 45, 94, 191, 157, 19, 58, 22, 214, 244, 209, 106, 82, 68, 225, 65, 134, 253, 7, 230, 159, 233, 58, 48, 173, 136, 28, 73, 116, 245, 37, 179, 20, 16, 121, 42, 124, 171, 214, 109, 244, 117, 149, 244, 19, 222, 121, 26, 175, 115, 188, 213, 190, 142, 190, 137, 188, 22, 30, 127, 227, 46, 147, 128, 195, 230, 207, 76, 47, 182, 172, 171, 47, 202, 172, 37, 207, 93, 0, 226, 20, 79, 121, 103, 129, 196, 103, 57, 170, 91, 124, 22, 133, 69, 231, 34, 99, 249, 99, 245, 21, 47, 249, 194, 148, 91, 201, 104, 162, 167, 171, 169, 200, 67, 160, 91, 170, 144, 92, 240, 71, 243, 255, 137, 169, 179, 92, 6, 147, 91, 73, 177, 72, 111, 16, 111, 118, 153, 5, 234, 238, 60, 164, 140, 122, 131, 86, 27, 105, 140, 212, 106, 180, 235, 45, 89, 148, 161, 73, 168, 12, 124, 72, 110, 3, 202, 247, 1, 195, 183, 79, 200, 56, 41, 46, 175, 35, 54, 91, 28, 179, 104, 109, 10, 227, 252, 211, 182, 118, 100, 165, 25, 202, 101, 64, 113, 202, 103, 96, 7, 95, 107, 242, 203, 184, 239, 190, 152, 109, 149, 84, 64, 60, 40, 238, 30, 109, 0, 116, 45, 47, 101, 236, 170, 39, 210, 219, 139, 79, 44, 240, 215, 198, 3, 143, 128, 218, 121, 130, 34, 207, 138, 224, 129, 35, 49, 123, 222, 56, 203, 110, 218, 250, 142, 70, 50, 213, 139, 49, 236, 74, 8, 16, 149, 157, 143, 154, 131, 133, 193, 103, 225, 241, 251, 85, 187, 170, 195, 92, 119, 91, 90, 114, 191, 215, 79, 177, 109, 161, 214, 43, 32, 31, 126, 74, 214, 160, 48, 11, 48, 99, 10, 195, 4, 179, 82, 237, 188, 
194, 136, 79, 41, 90, 190, 18, 166, 54, 139, 93, 16, 81, 32, 193, 142, 162, 93, 75, 48, 182, 94, 77, 73, 114, 219, 229, 241, 49, 121, 253, 145, 102, 200, 227, 179, 51, 251, 157, 77, 121, 243, 54, 157, 135, 197, 231, 133, 126, 153, 118, 114, 183, 64, 205, 88, 218, 90, 34, 200, 234, 176, 196, 218, 125, 108, 214, 161, 81, 194, 225, 32, 96, 180, 3, 163, 94, 54, 146, 115, 130, 93, 71, 16, 23, 161, 121, 71, 68, 39, 31, 194, 141, 241, 24, 254, 217, 189, 66, 148, 202, 14, 96, 226, 101, 238, 131, 116, 38, 160, 251, 98, 96, 136, 195, 178, 88, 217, 18, 38, 221, 81, 66, 193, 24, 204, 170, 151, 86, 193, 225, 236, 134, 71, 167, 83, 205, 94, 160, 143, 93, 187, 210, 228, 215, 55, 209, 239, 48, 203, 125, 101, 173, 8, 92, 81, 54, 74, 157, 213, 69, 85, 63, 221, 158, 169, 58, 171, 104, 60, 148, 243, 74, 161, 48, 65, 154, 56, 186, 220, 192, 48, 15, 74, 70, 45, 255, 157, 237, 130, 36, 247, 202, 27, 0, 169, 76, 76, 16, 199, 57, 14, 206, 47, 182, 45, 109, 245, 50, 25, 61, 200, 240, 198, 85, 246, 173, 109, 188, 66, 53, 47, 61, 48, 18, 174, 185, 77, 43, 89, 172, 20, 85, 112, 129, 220, 29, 23, 103, 202, 202, 202, 202, 202, 202, 202, 202, 178, 179, 178, 196, 109, 109, 109, 109, 109, 109, 109, 225, 225, 178, 148, 196, 179, 178, 178, 114, 225, 85, 109, 109, 109, 109, 225, 178, 56, 109, 109, 109, 254, 109, 109, 109, 109, 27, 193, 58, 109, 109, 109, 254, 114, 109, 109, 109, 109, 109, 109, 109, 109, 109, 109, 109, 109, 109, 109, 109, 109, 109, 109, 178, 178, 109, 109, 85, 196, 254, 254, 225, 172, 196, 172, 71, 85, 56, 254, 172, 172, 196, 225, 114, 56, 143, 196, 196, 177, 27, 254, 27, 32, 230, 225, 3, 27, 254, 90, 114, 85, 196, 254, 196, 225, 172, 148, 56, 143, 114, 27, 56, 85, 225, 8, 90, 196, 114, 230, 27, 196, 230, 56, 56, 172, 225, 254, 172, 196, 201, 178, 180, 178, 178, 178, 109, 109, 254, 225, 196, 27, 27, 225, 201, 196, 254, 225, 225, 56, 225, 27, 254, 56, 13, 27, 196, 254, 196, 225, 90, 225, 206, 254, 254, 225, 56, 225, 225, 254, 3, 254, 230, 27, 196, 172, 196, 56, 254, 32, 56, 85, 196, 
143, 230, 27, 196, 201, 254, 27, 56, 114, 114, 225, 196, 225, 143, 254, 254, 196, 254, 196, 172, 143, 201, 230, 196, 196, 225, 196, 32, 254, 201, 143, 196, 235, 196, 196, 56, 114, 56, 153, 85, 109, 109, 109, 27, 85, 56, 196, 196, 196, 225, 225, 196, 8, 254, 90, 225, 143, 225, 254, 27, 8, 196, 27, 196, 114, 196, 230, 196, 254, 61, 85, 114, 3, 27, 32, 3, 254, 196, 56, 114, 143, 114, 85, 172, 90, 254, 172, 206, 196, 196, 56, 37, 61, 196, 32, 225, 27, 85, 172, 196, 114, 61, 196, 85, 148, 114, 225, 196, 225, 56, 56, 196, 225, 114, 27, 225, 196, 230, 201, 27, 27, 196, 254, 56, 254, 114, 56, 196, 85, 254, 56, 56, 143, 56, 196, 254, 143, 172, 85, 27, 225, 225, 196, 225, 114, 90, 225, 196, 27, 196, 172, 114, 85, 196, 27, 201, 56, 172, 225, 61, 27, 254, 90, 85, 225, 196, 225, 56, 196, 114, 201, 56, 172, 254, 225, 27, 196, 56, 254, 201, 172, 85, 148, 254, 27, 196, 56, 61, 143, 56, 56, 61, 201, 196, 148, 196, 114, 27, 114, 143, 196, 114, 225, 32, 225, 85, 254, 225, 196, 230, 196, 143, 56, 109, 109, 109, 148, 254, 27, 196, 254, 225, 225, 254, 3, 27, 225, 235, 254, 254, 114, 27, 61, 196, 61, 230, 201, 172, 196, 27, 196, 196, 196, 254, 172, 225, 254, 196, 114, 254, 114, 114, 254, 85, 196, 225, 230, 196, 85, 85, 196, 85, 119, 56, 114, 27, 196, 56, 254, 196, 27, 225, 85, 196, 56, 230, 225, 225, 196, 27, 71, 254, 196, 27, 201, 114, 225, 119, 254, 225, 230, 56, 225, 225, 172, 27, 27, 172, 196, 56, 225, 196, 196, 177, 119, 119, 225, 95, 27, 225, 27, 114, 85, 206, 196, 119, 196, 196, 254, 230, 172, 201, 196, 196, 27, 114, 225, 56, 196, 225, 196, 27, 196, 143, 254, 85, 56, 143, 119, 225, 114, 56, 225, 114, 196, 225, 85, 230, 225, 225, 254, 254, 254, 56, 225, 143, 254, 196, 172, 225, 201, 254, 196, 196, 3, 85, 235, 254, 3, 254, 27, 172, 196, 172, 85, 56, 254, 225, 225, 143, 32, 172, 143, 114, 27, 196, 143, 27, 85, 85, 254, 254, 254, 85, 148, 143, 225, 196, 196, 56, 148, 3, 254, 254, 56, 56, 172, 114, 85, 56, 85, 90, 85, 109, 109, 109, 109, 95, 37, 254, 225, 225, 27, 172, 114, 196, 230, 
201, 196, 85, 27, 85, 85, 254, 254, 230, 177, 254, 225, 3, 114, 172, 196, 254, 114, 196, 27, 225, 61, 201, 114, 27, 177, 114, 172, 85, 196, 27, 27, 225, 56, 153, 225, 27, 61, 143, 196, 27, 85, 27, 56, 143, 172, 3, 196, 225, 196, 196, 85, 143, 27, 196, 56, 254, 56, 225, 196, 225, 56, 56, 196, 27, 56, 148, 196, 114, 114, 225, 27, 225, 85, 27, 172, 109, 109, 109, 14, 230, 238, 182, 252, 109, 245, 37, 24, 187, 152, 238, 184, 132, 6, 43, 149, 236, 145, 152, 59, 58, 211, 73, 221, 16, 203, 163, 221, 235, 5, 133, 107, 39, 127, 52, 44, 253, 35, 83, 20, 225, 120, 17, 10, 89, 24, 157, 230, 218, 15, 51, 54, 93, 133, 26, 156, 74, 66, 81, 39, 225, 198, 192, 196, 239, 145, 226, 94, 83, 106, 202, 60, 128, 178, 180, 180, 236, 248, 207, 102, 54, 68, 141, 223, 27, 101, 55, 99, 46, 204, 114, 160, 173, 105, 90, 180, 18, 183, 239, 237, 198, 23, 173, 185, 229, 137, 223, 216, 146, 255, 140, 198, 172, 238, 94, 4, 237, 98, 20, 131, 222, 211, 218, 241, 192, 176, 80, 237, 118, 88, 228, 18, 143, 234, 236, 4, 8, 239, 36, 4, 249, 146, 65, 105, 117, 110, 171, 148, 172, 196, 114, 214, 76, 1, 16, 185, 178, 54, 205, 117, 33, 96, 251, 187, 54, 168, 157, 80, 94, 61, 18, 42, 123, 69, 118, 17, 58, 146, 238, 116, 252, 141, 38, 37, 226, 250, 115, 169, 102, 38, 3, 186, 144, 78, 169, 193, 54, 80, 93, 66, 246, 214, 53, 151, 22, 65, 153, 156, 102, 38, 227, 92, 27, 216, 74, 160, 146, 185, 233, 67, 77, 91, 208, 78, 96, 129, 35, 188, 71, 10, 31, 62, 24, 57, 202, 52, 56, 227, 110, 107, 232, 64, 228, 81, 243, 88, 53, 21, 142, 32, 56, 26, 170, 113, 252, 137, 118, 44, 69, 214, 242, 57, 181, 250, 223, 93, 66, 77, 141, 224, 216, 200, 21, 146, 217, 25, 85, 214, 161, 137, 132, 157, 71, 196, 132, 178, 39, 119, 154, 112, 218, 84, 124, 202, 214, 217, 106, 159, 180, 109, 220, 130, 2, 190, 6, 85, 183, 134, 145, 39, 137, 49, 176, 205, 141, 158, 56, 79, 238, 21, 142, 0, 24, 83, 94, 11, 18, 5, 193, 231, 111, 198, 212, 46, 81, 221, 159, 76, 90, 172, 197, 128, 143, 132, 170, 191, 252, 181, 207, 3, 160, 142, 183, 219, 13, 226, 17, 
254, 57, 171, 253, 26, 166, 90, 104, 231, 180, 55, 97, 101, 163, 213, 230, 201, 27, 77, 138, 80, 204, 85, 115, 161, 238, 184, 132, 184, 115, 66, 133, 226, 115, 229, 101, 6, 255, 39, 153, 225, 27, 61, 134, 145, 185, 9, 62, 184, 205, 109, 224, 83, 219, 37, 225, 32, 157, 157, 157, 157, 157, 157, 168, 56, 196, 85, 254, 56, 56, 157, 190, 157, 157, 157, 157, 157, 179, 168, 157, 157, 157, 157, 157, 168, 157, 190, 157, 157, 157, 157, 168, 157, 157, 157, 157, 168, 157, 157, 157, 157, 168, 157, 157, 168, 157, 190, 157, 157, 168, 157, 157, 157, 168, 157, 157, 168, 179, 168, 157, 190, 168, 157, 168, 157, 157, 168, 157, 157, 168, 157, 157, 179, 157, 157, 157, 168, 168, 179, 168, 168, 157, 168, 234, 157, 157, 157, 157, 196, 143, 56, 157, 157, 168, 157, 179, 157, 157, 179, 168, 157, 168, 157, 157, 157, 157, 157, 157, 157, 168, 190, 157, 168, 157, 157, 157, 157, 157, 157, 179, 157, 157, 168, 157, 168, 157, 157, 157, 157, 157, 157, 168, 179, 168, 168, 157, 190, 168, 168, 157, 157, 157, 157, 168, 157, 157, 157, 157, 157, 157, 157, 168, 157, 168, 234, 179, 157, 168, 157, 168, 168, 157, 168, 168, 157, 168, 157, 157, 168, 157, 168, 157, 179, 168, 179, 168, 157, 157, 157, 168, 168, 168, 157, 190, 157, 157, 201, 168, 168, 157, 157, 157, 157, 157, 157, 168, 157, 157, 157, 157, 157, 179, 157, 168, 157, 179, 168, 157, 157, 157, 157, 157, 157, 157, 157, 157, 157, 157, 179, 157, 168, 157, 157, 157, 157, 179, 179, 168, 143, 32, 172, 143, 114, 168, 168, 157, 179, 157, 168, 157, 168, 212, 157, 157, 157, 157, 179, 157, 157, 157, 157, 157, 157, 157, 157, 157, 168, 157, 157, 179, 157, 157, 157, 146, 65, 105, 117, 110, 171, 148, 172, 157, 157, 157, 179, 157, 157, 157, 168, 157, 61, 196, 61, 93, 2, 2, 2, 2, 2, 2, 2, 93, 2, 2, 2, 2, 93, 2, 2, 2, 2, 2, 2, 2, 2, 2, 93, 93, 2, 93, 2, 2, 2, 2, 2, 2, 2, 2, 2, 19, 2, 2, 2, 2, 2, 2, 2, 93, 2, 2, 93, 2, 93, 254, 143, 172, 85, 27, 225, 225, 196, 225, 114, 90, 225, 196, 27, 196, 2, 2, 2, 2, 2, 2, 93, 2, 2, 2, 2, 184, 2, 2, 2, 2, 2, 2, 2, 93, 2, 2, 19, 2, 184, 
2, 2, 2, 93, 2, 2, 2, 2, 2, 2, 2, 2, 2, 93, 93, 2, 2, 93, 2, 2, 2, 2, 2, 2, 93, 93, 2, 2, 2, 2, 2, 2, 218, 141, 141, 15, 206, 141, 141, 81, 81, 81, 81, 81, 81, 81, 81, 81, 81, 81, 81, 81, 141, 15, 141, 141, 141, 206, 141, 206, 141, 15, 81, 81, 81, 15, 141, 171, 145, 63, 141, 206, 195, 134, 207, 9, 141, 141, 141, 87, 101, 60, 163, 213, 58, 17, 206, 141, 129, 8, 195, 134, 207, 9, 143, 92, 248, 19, 181, 253, 45, 179, 118, 108, 252, 81, 102, 204, 141, 141, 141, 141, 81, 81, 81, 81, 141, 141, 141, 206, 200, 193, 141, 141, 141, 154, 141, 141, 206, 206, 206, 141, 141, 141, 141, 141, 141, 206, 141, 141, 81, 81, 81, 81, 81, 81, 81, 81, 81, 141, 141, 141, 15, 206, 180, 154, 94, 184, 141, 206, 141, 81, 81, 81, 81, 81, 81, 81, 141, 81, 81, 174, 231, 156, 81, 81, 81, 141, 141, 141, 15, 141, 206, 206, 206, 141, 141, 141, 141, 81, 81, 81, 81, 206, 141, 141, 206, 141, 141, 141, 141, 206, 141, 141, 206, 34, 215, 151, 182, 29, 237, 141, 141, 141, 206, 81, 81, 81, 81, 81, 81, 81, 206, 206, 141, 141, 141, 141, 141, 141, 81, 81, 81, 81, 206, 141, 81, 81, 81, 206, 206, 206, 141, 141, 81, 81, 81, 81, 206, 81, 81, 81, 81, 141, 141, 141, 141, 141, 141, 141, 141, 141, 15, 206, 141, 81, 81, 81, 81, 141, 141, 206, 141, 141, 141, 206, 141, 141, 81, 81, 81, 81, 141, 81, 141, 141, 81, 81, 81, 81, 81, 81, 141, 141, 81, 81, 81, 81, 81, 141, 206, 81, 81, 81, 81, 81, 81, 141, 141, 141, 141, 81, 141, 81, 81, 141, 206, 206, 14, 233, 141, 141, 141, 141, 81, 174, 231, 141, 206, 206, 141, 206, 81, 81, 81, 81, 141, 81, 81, 81, 81, 81, 81, 81, 206, 141, 81, 81, 81, 81, 81, 81, 81, 81, 81, 81, 141, 141, 15, 141, 141, 206, 141, 141, 15, 141, 141, 81, 81, 81, 81, 81, 141, 206, 81, 141, 141, 141, 141, 141, 141, 206, 141, 141, 141, 141, 141, 141, 81, 81, 81, 81, 206, 81, 81, 81, 81, 81, 81, 81, 81, 81, 206, 141, 141, 81, 81, 81, 81, 81, 81, 81, 81, 81, 81, 81, 81, 81, 141, 206, 81, 81, 81, 81, 81, 81, 81, 81, 141, 206, 141, 141, 141, 81, 206, 15, 206, 206, 141, 81, 81, 81, 206, 206, 141, 141, 141, 81, 81, 81, 
81, 141, 81, 81, 81, 81, 156, 81, 81, 206, 141, 141, 141, 141, 141, 206, 81, 81, 81, 81, 81, 81, 81, 81, 81, 81, 81, 141, 141, 81, 141, 81, 81, 81, 141, 206, 141, 141, 206, 141, 81, 141, 141, 155, 15, 141, 206, 141, 141, 81, 206, 206, 141, 141, 141, 141, 141, 141, 81, 141, 141, 141, 81, 81, 81, 141, 141, 141, 81, 81, 81, 81, 81, 81, 141, 81, 81, 81, 81, 81, 141, 141, 141, 206, 206, 81, 81, 81, 206, 206, 206, 14, 141, 206, 206, 141, 141, 141, 141, 81, 81, 81, 81, 206, 81, 81, 81, 81, 81, 81, 81, 141, 81, 81, 141, 206, 141, 141, 141, 141, 141, 141, 141, 206, 141, 188, 102, 121, 141, 141, 141, 141, 81, 81, 81, 81, 141, 141, 206, 141, 141, 141, 141, 154, 141, 141, 141, 253, 45, 179, 118, 108, 81, 81, 81, 141, 141, 141, 81, 81, 81, 81, 206, 81, 81, 81, 81, 81, 81, 81, 141, 34, 215, 151, 182, 29, 237, 141, 141, 81, 81, 81, 81, 81, 81, 141, 206, 206, 141, 141, 141, 206, 141, 81, 141, 206, 141, 141, 206, 141, 141, 15, 206, 195, 134, 207, 9, 141, 141, 141, 141, 141, 141, 15, 206, 141, 141, 81, 81, 81, 81, 81, 81, 141, 206, 14, 141, 206, 206, 141, 141, 141, 206, 141, 141, 141, 141, 81, 81, 81, 141, 15, 141, 141, 81, 81, 141, 141, 141, 141, 141, 141, 141, 206, 141, 141, 81, 206, 206, 141, 141, 141, 141, 141, 141, 141, 141, 81, 81, 81, 81, 81, 81, 81, 81, 141, 206, 141, 141, 141, 81, 206, 15, 206, 206, 141, 81, 141, 81, 81, 81, 141, 71, 52, 224, 141, 206, 206, 141, 141, 141, 141, 206, 141, 141, 141, 141, 81, 81, 81, 81, 206, 81, 81, 81, 81, 141, 141, 81, 81, 81, 81, 81, 81, 141, 141, 141, 141, 141, 81, 81, 81, 81, 141, 34, 215, 151, 182, 29, 141, 141, 141, 141, 141, 141, 141, 141, 206, 206, 206, 141, 141, 81, 81, 81, 81, 206, 81, 81, 81, 81, 141, 206, 145, 48, 188, 102, 179, 125, 86, 50, 141, 206, 206, 141, 80, 141, 141, 141, 141, 81, 81, 206, 81, 206, 206, 206, 141, 141, 141, 206, 141, 81, 141, 206, 15, 141, 206, 141, 141, 141, 141, 141, 206, 81, 81, 81, 206, 141, 81, 81, 81, 81, 141, 141, 141, 206, 141, 206, 141, 141, 141, 81, 141, 141, 141, 206, 206, 81, 81, 81, 206, 141, 
141, 141, 87, 141, 141, 141, 141, 141, 81, 81, 81, 81, 81, 81, 141, 141, 141, 141, 206, 206, 141, 141, 22, 108, 141, 81, 81, 206, 219, 206, 141, 141, 141, 81, 81, 81, 81, 141, 206, 141, 141, 141, 141, 141, 206, 141, 141, 141, 141, 141, 81, 81, 81, 81, 81, 81, 206, 80, 141, 141, 141, 154, 141, 141, 141, 141, 141, 206, 108, 141, 81, 81, 81, 81, 81, 141, 141, 141, 81, 81, 81, 81, 141, 141, 15, 141, 141, 206, 141, 141, 15, 141, 141, 141, 210, 141, 146, 205, 141, 141, 141, 81, 81, 81, 81, 81, 81, 141, 141, 141, 141, 206, 141, 141, 141, 141, 81, 81, 81, 81, 141, 141, 81, 26, 2, 74, 22, 141, 141, 141, 141, 206, 141, 141, 141, 206, 141, 81, 81, 81, 206, 81, 141, 141, 81, 81, 81, 81, 2, 74, 22, 141, 141, 141, 141, 141, 141, 141, 81, 81, 81, 81, 206, 141, 141, 206, 141, 206, 206, 141, 141, 206, 141, 141, 141, 141, 15, 206, 141, 141, 206, 195, 134, 207, 81, 81, 81, 81, 81, 141, 141, 141, 80, 141, 141, 141, 141, 141, 141, 15, 206, 141, 81, 81, 81, 141, 141, 141, 141, 141, 15, 141, 206, 141, 141, 206, 141, 141, 81, 141, 141, 81, 15, 141, 141, 81, 81, 141, 141, 141, 206, 81, 141, 141, 206, 141, 141, 141, 81, 81, 81, 81, 81, 141, 141, 141, 141, 80, 141, 81, 81, 141, 141, 81, 26, 81, 81, 81, 81, 81, 81, 206, 206, 15, 141, 206, 206, 206, 141, 141, 141, 141, 81, 141, 141, 81, 81, 81, 206, 141, 141, 141, 141, 141, 15, 141, 141, 141, 206, 141, 141, 141, 141, 141, 141, 141, 141, 206, 141, 141, 141, 141, 141, 15, 206, 141, 141, 141, 141, 141, 141, 141, 141, 141, 206, 206, 206, 141, 206, 206, 141, 206, 206, 141, 141, 15, 206, 141, 141, 141, 141, 141, 141, 141, 141, 141, 141, 141, 141, 206, 81, 141, 141, 206, 81, 141, 206, 15, 206, 141, 141, 141, 141, 141, 206, 141, 141, 141, 81, 81, 81, 206, 141, 141, 141, 141, 141, 141, 141, 206, 206, 141, 145, 141, 206, 141, 141, 141, 141, 141, 141, 141, 15, 141, 141, 141, 15, 141, 141, 141, 206, 15, 141, 15, 141, 141, 81, 206, 206, 141, 141, 206, 206, 141, 141, 141, 141, 141, 141, 141, 81, 81, 81, 81, 81, 141, 71, 52, 206, 141, 141, 141, 141, 141, 
141, 141, 141, 141, 141, 141, 141, 15, 141, 206, 141, 15, 141, 206, 15, 141, 145, 141, 206, 141, 15, 206, 206, 141, 141, 206, 206, 15, 141, 90, 85, 85, 172, 177, 85, 85, 3, 85, 85, 172, 85, 172, 85, 172, 95, 3, 85, 90, 85, 85, 85, 85, 85, 90, 3, 3, 85, 172, 85, 90})
}
// Copyright 2019+ Klaus Post. All rights reserved.
// License information can be found in the LICENSE file.
// Based on work by Yann Collet, released under BSD License.
package zstd
// Multiplicative hashing constants used by hashLen for hashing the
// lowest 3 to 8 bytes of a uint64 (one prime per input width).
const (
prime3bytes = 506832829
prime4bytes = 2654435761
prime5bytes = 889523592379
prime6bytes = 227718039650203
prime7bytes = 58295818150454627
prime8bytes = 0xcf1bbcdcb7a56463
)
// hashLen returns a hash of the lowest mls bytes of with length output bits.
// mls must be >=3 and <=8. Any other value will return hash for 4 bytes.
// length should always be < 32.
// Preferably length and mls should be a constant for inlining.
func hashLen(u uint64, length, mls uint8) uint32 {
switch mls {
case 3:
return (uint32(u<<8) * prime3bytes) >> (32 - length)
case 5:
return uint32(((u << (64 - 40)) * prime5bytes) >> (64 - length))
case 6:
return uint32(((u << (64 - 48)) * prime6bytes) >> (64 - length))
case 7:
return uint32(((u << (64 - 56)) * prime7bytes) >> (64 - length))
case 8:
return uint32((u * prime8bytes) >> (64 - length))
default:
return (uint32(u) * prime4bytes) >> (32 - length)
}
}
// Copyright 2019+ Klaus Post. All rights reserved.
// License information can be found in the LICENSE file.
// Based on work by Yann Collet, released under BSD License.
package zstd
import (
"github.com/klauspost/compress/huff0"
)
// history contains the information transferred between blocks.
type history struct {
// Literal decompression
huffTree *huff0.Scratch
// Sequence decompression
decoders sequenceDecs
// recentOffsets holds the three repeat offsets; reset seeds them to {1, 4, 8}.
recentOffsets [3]int
// History buffer...
b []byte
// ignoreBuffer is meant to ignore a number of bytes
// when checking for matches in history
ignoreBuffer int
// windowSize is the maximum number of history bytes kept by append/ensureBlock.
windowSize int
// allocFrameBuffer is the minimum capacity ensureBlock guarantees for b. // needed?
allocFrameBuffer int
// error — presumably flags a failed frame decode; only cleared here by reset. Confirm at call sites.
error bool
// dict is the attached dictionary, if any; when set, it owns huffTree (see setDict/freeHuffDecoder).
dict *dict
}
// reset returns the history to the initial state of a new frame.
// The history must already have been initialized to the desired size.
func (h *history) reset() {
	// Keep the buffer's capacity but drop its contents.
	h.b = h.b[:0]
	h.ignoreBuffer = 0
	h.error = false
	h.dict = nil
	h.recentOffsets = [3]int{1, 4, 8}

	// Return pooled decoders before discarding the state;
	// only the bit reader is carried over for reuse.
	h.decoders.freeDecoders()
	h.decoders = sequenceDecs{br: h.decoders.br}

	// Return the Huffman table to the pool unless the dictionary owns it.
	h.freeHuffDecoder()
	h.huffTree = nil
}
// freeHuffDecoder returns the literal Huffman decoder to the pool and
// clears the reference, unless the table is owned by the attached dictionary.
func (h *history) freeHuffDecoder() {
	tree := h.huffTree
	if tree == nil {
		return
	}
	if h.dict != nil && h.dict.litEnc == tree {
		// The table belongs to the dictionary; leave it in place.
		return
	}
	huffDecoderPool.Put(tree)
	h.huffTree = nil
}
// setDict wires the given dictionary into the history so the next block
// can use its sequence decoders, repeat offsets, content and literal
// encoder. A nil dict is a no-op.
func (h *history) setDict(dict *dict) {
	if dict == nil {
		return
	}

	h.dict = dict
	h.huffTree = dict.litEnc
	h.recentOffsets = dict.offsets

	d := &h.decoders
	d.litLengths = dict.llDec
	d.offsets = dict.ofDec
	d.matchLengths = dict.mlDec
	d.dict = dict.content
}
// append adds b to the history, keeping at most windowSize bytes.
// It will make room for the data, provided the buffer was allocated
// with enough extra space.
func (h *history) append(b []byte) {
	if len(b) >= h.windowSize {
		// b alone fills the whole window: replace all history
		// with the tail of b.
		h.b = h.b[:h.windowSize]
		copy(h.b, b[len(b)-h.windowSize:])
		return
	}

	// Fast path: enough spare capacity to append in place.
	if cap(h.b)-len(h.b) > len(b) {
		h.b = append(h.b, b...)
		return
	}

	// Slide existing history down so that after adding b exactly
	// windowSize bytes remain. len(b) < windowSize holds here.
	keep := h.windowSize - len(b)
	copy(h.b, h.b[len(h.b)-keep:])
	h.b = h.b[:h.windowSize]
	copy(h.b[keep:], b)
}
// ensureBlock guarantees capacity for at least one more block,
// allocating or compacting the history buffer as needed.
func (h *history) ensureBlock() {
	if cap(h.b) < h.allocFrameBuffer {
		h.b = make([]byte, 0, h.allocFrameBuffer)
		return
	}

	if free := cap(h.b) - len(h.b); free >= h.windowSize || free > maxCompressedBlockSize {
		// Plenty of room already.
		return
	}

	// Compact: keep only the last windowSize bytes of history.
	// Assumes len(h.b) >= windowSize at this point (buffer sizing
	// via allocFrameBuffer is expected to guarantee this).
	copy(h.b, h.b[len(h.b)-h.windowSize:])
	h.b = h.b[:h.windowSize]
}
// appendKeep adds b to the history buffer, growing it as needed.
// Unlike append, nothing is ever discarded.
func (h *history) appendKeep(b []byte) {
	grown := append(h.b, b...)
	h.b = grown
}
// Copyright 2019+ Klaus Post. All rights reserved.
// License information can be found in the LICENSE file.
// Based on work by Yann Collet, released under BSD License.
package zstd
import (
"errors"
"fmt"
"io"
)
// seq is a single sequence: literals followed by a match.
type seq struct {
litLen uint32
// matchLen is stored with zstdMinMatch subtracted; String adds it back.
matchLen uint32
// offset encoding (see String): 0 is invalid, 1-3 are repeat offsets,
// values > 3 mean offset-3 is a new absolute offset.
offset uint32
// Codes are stored here for the encoder
// so they only have to be looked up once.
llCode, mlCode, ofCode uint8
}
// seqVals is a resolved sequence as plain ints:
// ll = literal length, mo = match offset (both used by execute);
// ml is presumably the match length — confirm against callers.
type seqVals struct {
ll, ml, mo int
}
// String returns a human-readable description of the sequence,
// decoding the offset field (0 invalid, 1-3 repeat, >3 new offset-3).
func (s seq) String() string {
	switch {
	case s.offset == 0:
		return fmt.Sprint("litLen:", s.litLen, ", matchLen:", s.matchLen+zstdMinMatch, ", offset: INVALID (0)")
	case s.offset <= 3:
		return fmt.Sprint("litLen:", s.litLen, ", matchLen:", s.matchLen+zstdMinMatch, ", offset:", s.offset, " (repeat)")
	default:
		return fmt.Sprint("litLen:", s.litLen, ", matchLen:", s.matchLen+zstdMinMatch, ", offset:", s.offset-3, " (new)")
	}
}
// seqCompMode signals how a sequence-section FSE table is represented
// in the block header. The four modes follow the zstd format
// (RFC 8878) — see that spec for exact semantics.
type seqCompMode uint8
const (
compModePredefined seqCompMode = iota
compModeRLE
compModeFSE
compModeRepeat
)
// sequenceDec decodes one of the three sequence value streams
// (literal lengths, offsets or match lengths).
type sequenceDec struct {
// decoder keeps track of the current state and updates it from the bitstream.
fse *fseDecoder
state fseState
// repeat — presumably set when the previous table is reused
// (compModeRepeat); confirm at call sites.
repeat bool
}
// init seeds the decoder state from the bitstream.
// It returns an error if no FSE table has been assigned.
func (s *sequenceDec) init(br *bitReader) error {
	f := s.fse
	if f == nil {
		return errors.New("sequence decoder not defined")
	}
	s.state.init(br, f.actualTableLog, f.dt[:1<<f.actualTableLog])
	return nil
}
// sequenceDecs contains all 3 sequence decoders and their state.
type sequenceDecs struct {
litLengths sequenceDec
offsets sequenceDec
matchLengths sequenceDec
// prevOffset holds the three repeat offsets, seeded from history.recentOffsets in initialize.
prevOffset [3]int
// dict is the dictionary content, if any (copied from history.dict.content).
dict []byte
// literals remaining to be copied out by execute.
literals []byte
// out is the output buffer that execute appends decoded data to.
out []byte
nSeqs int
br *bitReader
// seqSize — presumably the total byte size of the decoded sequences;
// execute uses it to size the output. Confirm where it is set.
seqSize int
windowSize int
// maxBits is the sum of the three decoders' fse.maxBits (set in initialize).
maxBits uint8
maxSyncLen uint64
}
// initialize sets up all 3 sequence decoders from the stream input and
// copies decoding state (repeat offsets, window size, dictionary content)
// from the history.
//
// Errors from the individual decoders are wrapped with %w — the message
// text is unchanged, but callers can now unwrap the cause with
// errors.Is/errors.As instead of losing the chain to string concatenation.
func (s *sequenceDecs) initialize(br *bitReader, hist *history, out []byte) error {
	if err := s.litLengths.init(br); err != nil {
		return fmt.Errorf("litLengths:%w", err)
	}
	if err := s.offsets.init(br); err != nil {
		return fmt.Errorf("offsets:%w", err)
	}
	if err := s.matchLengths.init(br); err != nil {
		return fmt.Errorf("matchLengths:%w", err)
	}
	s.br = br
	s.prevOffset = hist.recentOffsets
	// maxBits is the worst-case bit cost of reading one state update
	// from each of the three decoders.
	s.maxBits = s.litLengths.fse.maxBits + s.offsets.fse.maxBits + s.matchLengths.fse.maxBits
	s.windowSize = hist.windowSize
	s.out = out
	s.dict = nil
	if hist.dict != nil {
		s.dict = hist.dict.content
	}
	return nil
}
// freeDecoders returns any non-predefined FSE decoders to the shared
// pool and clears the references so they cannot be used again by accident.
func (s *sequenceDecs) freeDecoders() {
	release := func(fp **fseDecoder) {
		f := *fp
		if f == nil || f.preDefined {
			return
		}
		fseDecoderPool.Put(f)
		*fp = nil
	}
	release(&s.litLengths.fse)
	release(&s.offsets.fse)
	release(&s.matchLengths.fse)
}
// execute will execute the decoded sequence with the provided history.
// The sequence must be evaluated before being sent.
//
// This path handles dictionary matches; when no dictionary is set it
// delegates to the (possibly assembler-backed) executeSimple.
// Each sequence emits its literals, then copies the match from the
// dictionary, the history, or the output produced so far.
func (s *sequenceDecs) execute(seqs []seqVals, hist []byte) error {
	if len(s.dict) == 0 {
		return s.executeSimple(seqs, hist)
	}
	// Ensure we have enough output size...
	// Grow-then-retruncate extends capacity without changing length.
	if len(s.out)+s.seqSize > cap(s.out) {
		addBytes := s.seqSize + len(s.out)
		s.out = append(s.out, make([]byte, addBytes)...)
		s.out = s.out[:len(s.out)-addBytes]
	}
	if debugDecoder {
		printf("Execute %d seqs with hist %d, dict %d, literals: %d into %d bytes\n", len(seqs), len(hist), len(s.dict), len(s.literals), s.seqSize)
	}
	// t is the write position within out.
	var t = len(s.out)
	out := s.out[:t+s.seqSize]
	for _, seq := range seqs {
		// Add literals
		copy(out[t:], s.literals[:seq.ll])
		t += seq.ll
		s.literals = s.literals[seq.ll:]
		// Copy from dictionary...
		// Offsets beyond the current output+history reach into the dictionary.
		if seq.mo > t+len(hist) || seq.mo > s.windowSize {
			if len(s.dict) == 0 {
				return fmt.Errorf("match offset (%d) bigger than current history (%d)", seq.mo, t+len(hist))
			}
			// we may be in dictionary.
			dictO := len(s.dict) - (seq.mo - (t + len(hist)))
			if dictO < 0 || dictO >= len(s.dict) {
				return fmt.Errorf("match offset (%d) bigger than current history+dict (%d)", seq.mo, t+len(hist)+len(s.dict))
			}
			end := dictO + seq.ml
			if end > len(s.dict) {
				// Match straddles dictionary end; copy the dictionary part
				// and fall through to history/output for the remainder.
				n := len(s.dict) - dictO
				copy(out[t:], s.dict[dictO:])
				t += n
				seq.ml -= n
			} else {
				copy(out[t:], s.dict[dictO:end])
				t += end - dictO
				continue
			}
		}
		// Copy from history.
		if v := seq.mo - t; v > 0 {
			// v is the start position in history from end.
			start := len(hist) - v
			if seq.ml > v {
				// Some goes into current block.
				// Copy remainder of history
				copy(out[t:], hist[start:])
				t += v
				seq.ml -= v
			} else {
				copy(out[t:], hist[start:start+seq.ml])
				t += seq.ml
				continue
			}
		}
		// We must be in current buffer now
		if seq.ml > 0 {
			start := t - seq.mo
			if seq.ml <= t-start {
				// No overlap
				copy(out[t:], out[start:start+seq.ml])
				t += seq.ml
				continue
			} else {
				// Overlapping copy
				// Extend destination slice and copy one byte at the time.
				// A plain copy would read bytes not yet written.
				src := out[start : start+seq.ml]
				dst := out[t:]
				dst = dst[:len(src)]
				t += len(src)
				// Destination is the space we just added.
				for i := range src {
					dst[i] = src[i]
				}
			}
		}
	}
	// Add final literals
	copy(out[t:], s.literals)
	if debugDecoder {
		t += len(s.literals)
		if t != len(out) {
			panic(fmt.Errorf("length mismatch, want %d, got %d, ss: %d", len(out), t, s.seqSize))
		}
	}
	s.out = out
	return nil
}
// decodeSync decodes sequences from the stream with the provided history
// and executes them immediately, writing straight into s.out.
// It first tries the (assembler-backed) decodeSyncSimple fast path; the
// generic Go loop below is the fallback. The loop manually inlines the
// per-sequence state reads and the repeat-offset adjustment for speed.
func (s *sequenceDecs) decodeSync(hist []byte) error {
	supported, err := s.decodeSyncSimple(hist)
	if supported {
		return err
	}
	br := s.br
	seqs := s.nSeqs
	startSize := len(s.out)
	// Grab full sizes tables, to avoid bounds checks.
	llTable, mlTable, ofTable := s.litLengths.fse.dt[:maxTablesize], s.matchLengths.fse.dt[:maxTablesize], s.offsets.fse.dt[:maxTablesize]
	llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state
	out := s.out
	maxBlockSize := maxCompressedBlockSize
	if s.windowSize < maxBlockSize {
		maxBlockSize = s.windowSize
	}
	if debugDecoder {
		println("decodeSync: decoding", seqs, "sequences", br.remain(), "bits remain on stream")
	}
	for i := seqs - 1; i >= 0; i-- {
		if br.overread() {
			printf("reading sequence %d, exceeded available data. Overread by %d\n", seqs-i, -br.remain())
			return io.ErrUnexpectedEOF
		}
		var ll, mo, ml int
		if br.cursor > 4+((maxOffsetBits+16+16)>>3) {
			// Enough buffered input to take the branch-free fast path.
			// inlined function:
			// ll, mo, ml = s.nextFast(br, llState, mlState, ofState)
			// Final will not read from stream.
			var llB, mlB, moB uint8
			ll, llB = llState.final()
			ml, mlB = mlState.final()
			mo, moB = ofState.final()
			// extra bits are stored in reverse order.
			br.fillFast()
			mo += br.getBits(moB)
			if s.maxBits > 32 {
				br.fillFast()
			}
			ml += br.getBits(mlB)
			ll += br.getBits(llB)
			if moB > 1 {
				// Real offset: shift repeat-offset history.
				s.prevOffset[2] = s.prevOffset[1]
				s.prevOffset[1] = s.prevOffset[0]
				s.prevOffset[0] = mo
			} else {
				// mo = s.adjustOffset(mo, ll, moB)
				// Inlined for rather big speedup
				if ll == 0 {
					// There is an exception though, when current sequence's literals_length = 0.
					// In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2,
					// an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte.
					mo++
				}
				if mo == 0 {
					mo = s.prevOffset[0]
				} else {
					var temp int
					if mo == 3 {
						temp = s.prevOffset[0] - 1
					} else {
						temp = s.prevOffset[mo]
					}
					if temp == 0 {
						// 0 is not valid; input is corrupted; force offset to 1
						println("WARNING: temp was 0")
						temp = 1
					}
					if mo != 1 {
						s.prevOffset[2] = s.prevOffset[1]
					}
					s.prevOffset[1] = s.prevOffset[0]
					s.prevOffset[0] = temp
					mo = temp
				}
			}
			br.fillFast()
		} else {
			// Slow path with bounds-checked reads.
			ll, mo, ml = s.next(br, llState, mlState, ofState)
			br.fill()
		}
		if debugSequences {
			println("Seq", seqs-i-1, "Litlen:", ll, "mo:", mo, "(abs) ml:", ml)
		}
		if ll > len(s.literals) {
			return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, len(s.literals))
		}
		size := ll + ml + len(out)
		if size-startSize > maxBlockSize {
			return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
		}
		if size > cap(out) {
			// Not enough size, which can happen under high volume block streaming conditions
			// but could be if destination slice is too small for sync operations.
			// over-allocating here can create a large amount of GC pressure so we try to keep
			// it as contained as possible
			used := len(out) - startSize
			addBytes := 256 + ll + ml + used>>2
			// Clamp to max block size.
			if used+addBytes > maxBlockSize {
				addBytes = maxBlockSize - used
			}
			out = append(out, make([]byte, addBytes)...)
			out = out[:len(out)-addBytes]
		}
		if ml > maxMatchLen {
			return fmt.Errorf("match len (%d) bigger than max allowed length", ml)
		}
		// Add literals
		out = append(out, s.literals[:ll]...)
		s.literals = s.literals[ll:]
		if mo == 0 && ml > 0 {
			return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml)
		}
		// Offsets beyond the current output+history may reach into the dictionary.
		if mo > len(out)+len(hist) || mo > s.windowSize {
			if len(s.dict) == 0 {
				return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(out)+len(hist)-startSize)
			}
			// we may be in dictionary.
			dictO := len(s.dict) - (mo - (len(out) + len(hist)))
			if dictO < 0 || dictO >= len(s.dict) {
				return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(out)+len(hist)-startSize)
			}
			end := dictO + ml
			if end > len(s.dict) {
				// Dictionary part only; the remainder comes from history/output.
				out = append(out, s.dict[dictO:]...)
				ml -= len(s.dict) - dictO
			} else {
				out = append(out, s.dict[dictO:end]...)
				mo = 0
				ml = 0
			}
		}
		// Copy from history.
		// TODO: Blocks without history could be made to ignore this completely.
		if v := mo - len(out); v > 0 {
			// v is the start position in history from end.
			start := len(hist) - v
			if ml > v {
				// Some goes into current block.
				// Copy remainder of history
				out = append(out, hist[start:]...)
				ml -= v
			} else {
				out = append(out, hist[start:start+ml]...)
				ml = 0
			}
		}
		// We must be in current buffer now
		if ml > 0 {
			start := len(out) - mo
			if ml <= len(out)-start {
				// No overlap
				out = append(out, out[start:start+ml]...)
			} else {
				// Overlapping copy
				// Extend destination slice and copy one byte at the time.
				// A plain copy/append would read bytes not yet written.
				out = out[:len(out)+ml]
				src := out[start : start+ml]
				// Destination is the space we just added.
				dst := out[len(out)-ml:]
				dst = dst[:len(src)]
				for i := range src {
					dst[i] = src[i]
				}
			}
		}
		if i == 0 {
			// This is the last sequence, so we shouldn't update state.
			break
		}
		// Manually inlined, ~ 5-20% faster
		// Update all 3 states at once. Approx 20% faster.
		nBits := llState.nbBits() + mlState.nbBits() + ofState.nbBits()
		if nBits == 0 {
			llState = llTable[llState.newState()&maxTableMask]
			mlState = mlTable[mlState.newState()&maxTableMask]
			ofState = ofTable[ofState.newState()&maxTableMask]
		} else {
			bits := br.get32BitsFast(nBits)
			lowBits := uint16(bits >> ((ofState.nbBits() + mlState.nbBits()) & 31))
			llState = llTable[(llState.newState()+lowBits)&maxTableMask]
			lowBits = uint16(bits >> (ofState.nbBits() & 31))
			lowBits &= bitMask[mlState.nbBits()&15]
			mlState = mlTable[(mlState.newState()+lowBits)&maxTableMask]
			lowBits = uint16(bits) & bitMask[ofState.nbBits()&15]
			ofState = ofTable[(ofState.newState()+lowBits)&maxTableMask]
		}
	}
	if size := len(s.literals) + len(out) - startSize; size > maxBlockSize {
		return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
	}
	// Add final literals
	s.out = append(out, s.literals...)
	return br.close()
}
// bitMask[n] has the lowest n bits set; used to mask out extracted
// bit fields when updating the three FSE states at once.
var bitMask [16]uint16

func init() {
	for i := 0; i < len(bitMask); i++ {
		bitMask[i] = uint16(1<<uint(i)) - 1
	}
}
// next reads the extra bits for the current literal length, match length
// and offset states and returns the raw decoded values. The offset is
// adjusted for repeat offsets before returning. This is the safe
// (bounds-checked) counterpart of the inlined fast path in decodeSync.
func (s *sequenceDecs) next(br *bitReader, llState, mlState, ofState decSymbol) (ll, mo, ml int) {
	// Final will not read from stream.
	ll, llB := llState.final()
	ml, mlB := mlState.final()
	mo, moB := ofState.final()
	// extra bits are stored in reverse order.
	br.fill()
	mo += br.getBits(moB)
	// A refill is only needed when all three reads together can exceed
	// the buffered 32 bits.
	if s.maxBits > 32 {
		br.fill()
	}
	// matchlength+literal length, max 32 bits
	ml += br.getBits(mlB)
	ll += br.getBits(llB)
	mo = s.adjustOffset(mo, ll, moB)
	return
}
// adjustOffset resolves the decoded offset value against the
// repeat-offset history. Offsets read with more than one bit are real
// offsets and simply push onto the history; small values select (and
// reorder) one of the three previous offsets per the zstd spec.
func (s *sequenceDecs) adjustOffset(offset, litLen int, offsetB uint8) int {
	if offsetB > 1 {
		// A real offset: push it onto the repeat-offset history.
		s.prevOffset[2] = s.prevOffset[1]
		s.prevOffset[1] = s.prevOffset[0]
		s.prevOffset[0] = offset
		return offset
	}
	if litLen == 0 {
		// There is an exception though, when current sequence's literals_length = 0.
		// In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2,
		// an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte.
		offset++
	}
	var chosen int
	switch offset {
	case 0:
		return s.prevOffset[0]
	case 3:
		chosen = s.prevOffset[0] - 1
	default:
		chosen = s.prevOffset[offset]
	}
	if chosen == 0 {
		// 0 is not valid; input is corrupted; force offset to 1
		println("temp was 0")
		chosen = 1
	}
	// Reorder the history: the chosen offset moves to the front.
	if offset != 1 {
		s.prevOffset[2] = s.prevOffset[1]
	}
	s.prevOffset[1] = s.prevOffset[0]
	s.prevOffset[0] = chosen
	return chosen
}
//go:build amd64 && !appengine && !noasm && gc
// +build amd64,!appengine,!noasm,gc
package zstd
import (
"fmt"
"io"
"github.com/klauspost/compress/internal/cpuinfo"
)
// decodeSyncAsmContext is the shared in/out parameter block for the
// sequenceDecs_decodeSync_* assembly routines. Field layout must match
// the offsets expected by the generated assembly — do not reorder.
type decodeSyncAsmContext struct {
	llTable     []decSymbol // literal length FSE table (full size)
	mlTable     []decSymbol // match length FSE table (full size)
	ofTable     []decSymbol // offset FSE table (full size)
	llState     uint64      // current literal length FSE state
	mlState     uint64      // current match length FSE state
	ofState     uint64      // current offset FSE state
	iteration   int         // sequences left to decode
	litRemain   int         // literal bytes remaining
	out         []byte      // output buffer
	outPosition int         // write position in out
	literals    []byte      // literal source buffer
	litPosition int         // read position in literals
	history     []byte      // history window for back-references
	windowSize  int         // maximum back-reference distance
	ll          int         // set on error (not for all errors, please refer to _generate/gen.go)
	ml          int         // set on error (not for all errors, please refer to _generate/gen.go)
	mo          int         // set on error (not for all errors, please refer to _generate/gen.go)
}
// sequenceDecs_decodeSync_amd64 implements the main loop of sequenceDecs.decodeSync in x86 asm.
//
// Please refer to seqdec_generic.go for the reference implementation.
// It returns one of the error codes defined below (noError on success).
//
//go:noescape
func sequenceDecs_decodeSync_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int

// sequenceDecs_decodeSync_bmi2 implements the main loop of sequenceDecs.decodeSync in x86 asm with BMI2 extensions.
//
//go:noescape
func sequenceDecs_decodeSync_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int

// sequenceDecs_decodeSync_safe_amd64 does the same as above, but does not write more than output buffer.
//
//go:noescape
func sequenceDecs_decodeSync_safe_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int

// sequenceDecs_decodeSync_safe_bmi2 does the same as above, but does not write more than output buffer.
//
//go:noescape
func sequenceDecs_decodeSync_safe_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
// decodeSyncSimple decodes sequences from the stream with the provided
// history but without a dictionary, using the assembly fast path.
// It reports supported=false when the caller must fall back to the
// generic Go implementation (dictionary in use, or output capacity
// constraints rule out the asm path).
func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) {
	if len(s.dict) > 0 {
		return false, nil
	}
	if s.maxSyncLen == 0 && cap(s.out)-len(s.out) < maxCompressedBlockSize {
		return false, nil
	}
	// FIXME: Using unsafe memory copies leads to rare, random crashes
	// with fuzz testing. It is therefore disabled for now.
	const useSafe = true
	/*
		useSafe := false
		if s.maxSyncLen == 0 && cap(s.out)-len(s.out) < maxCompressedBlockSizeAlloc {
			useSafe = true
		}
		if s.maxSyncLen > 0 && cap(s.out)-len(s.out)-compressedBlockOverAlloc < int(s.maxSyncLen) {
			useSafe = true
		}
		if cap(s.literals) < len(s.literals)+compressedBlockOverAlloc {
			useSafe = true
		}
	*/
	br := s.br
	maxBlockSize := maxCompressedBlockSize
	if s.windowSize < maxBlockSize {
		maxBlockSize = s.windowSize
	}
	// Marshal decoder state into the context struct the asm expects.
	ctx := decodeSyncAsmContext{
		llTable:     s.litLengths.fse.dt[:maxTablesize],
		mlTable:     s.matchLengths.fse.dt[:maxTablesize],
		ofTable:     s.offsets.fse.dt[:maxTablesize],
		llState:     uint64(s.litLengths.state.state),
		mlState:     uint64(s.matchLengths.state.state),
		ofState:     uint64(s.offsets.state.state),
		iteration:   s.nSeqs - 1,
		litRemain:   len(s.literals),
		out:         s.out,
		outPosition: len(s.out),
		literals:    s.literals,
		windowSize:  s.windowSize,
		history:     hist,
	}
	s.seqSize = 0
	startSize := len(s.out)
	// Pick the variant matching CPU features and safety requirements.
	var errCode int
	if cpuinfo.HasBMI2() {
		if useSafe {
			errCode = sequenceDecs_decodeSync_safe_bmi2(s, br, &ctx)
		} else {
			errCode = sequenceDecs_decodeSync_bmi2(s, br, &ctx)
		}
	} else {
		if useSafe {
			errCode = sequenceDecs_decodeSync_safe_amd64(s, br, &ctx)
		} else {
			errCode = sequenceDecs_decodeSync_amd64(s, br, &ctx)
		}
	}
	// Translate asm error codes into Go errors.
	switch errCode {
	case noError:
		break
	case errorMatchLenOfsMismatch:
		return true, fmt.Errorf("zero matchoff and matchlen (%d) > 0", ctx.ml)
	case errorMatchLenTooBig:
		return true, fmt.Errorf("match len (%d) bigger than max allowed length", ctx.ml)
	case errorMatchOffTooBig:
		return true, fmt.Errorf("match offset (%d) bigger than current history (%d)",
			ctx.mo, ctx.outPosition+len(hist)-startSize)
	case errorNotEnoughLiterals:
		return true, fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available",
			ctx.ll, ctx.litRemain+ctx.ll)
	case errorOverread:
		return true, io.ErrUnexpectedEOF
	case errorNotEnoughSpace:
		size := ctx.outPosition + ctx.ll + ctx.ml
		if debugDecoder {
			println("msl:", s.maxSyncLen, "cap", cap(s.out), "bef:", startSize, "sz:", size-startSize, "mbs:", maxBlockSize, "outsz:", cap(s.out)-startSize)
		}
		return true, fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
	default:
		return true, fmt.Errorf("sequenceDecs_decode returned erroneous code %d", errCode)
	}
	s.seqSize += ctx.litRemain
	if s.seqSize > maxBlockSize {
		return true, fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
	}
	err := br.close()
	if err != nil {
		printf("Closing sequences: %v, %+v\n", err, *br)
		return true, err
	}
	// Unmarshal positions back from the context.
	s.literals = s.literals[ctx.litPosition:]
	t := ctx.outPosition
	s.out = s.out[:t]
	// Add final literals
	s.out = append(s.out, s.literals...)
	if debugDecoder {
		t += len(s.literals)
		if t != len(s.out) {
			panic(fmt.Errorf("length mismatch, want %d, got %d", len(s.out), t))
		}
	}
	return true, nil
}
// --------------------------------------------------------------------------------

// decodeAsmContext is the parameter block for the sequenceDecs_decode_*
// assembly routines (decode without execute). Field layout must match
// the offsets expected by the generated assembly — do not reorder.
type decodeAsmContext struct {
	llTable   []decSymbol // literal length FSE table (full size)
	mlTable   []decSymbol // match length FSE table (full size)
	ofTable   []decSymbol // offset FSE table (full size)
	llState   uint64      // current literal length FSE state
	mlState   uint64      // current match length FSE state
	ofState   uint64      // current offset FSE state
	iteration int         // sequences left to decode
	seqs      []seqVals   // destination for decoded sequences
	litRemain int         // literal bytes remaining
}
// Error codes returned by the assembly decode routines.
const noError = 0

// error reported when mo == 0 && ml > 0
const errorMatchLenOfsMismatch = 1

// error reported when ml > maxMatchLen
const errorMatchLenTooBig = 2

// error reported when mo > available history or mo > s.windowSize
const errorMatchOffTooBig = 3

// error reported when the sum of literal lengths exceeds the literal buffer size
const errorNotEnoughLiterals = 4

// error reported when capacity of `out` is too small
const errorNotEnoughSpace = 5

// error reported when bits are overread.
const errorOverread = 6
// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm.
//
// Please refer to seqdec_generic.go for the reference implementation.
// Returns one of the error codes above (noError on success).
//
//go:noescape
func sequenceDecs_decode_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int

// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm.
// Variant for streams where all three states fit in <= 56 bits per step.
//
// Please refer to seqdec_generic.go for the reference implementation.
//
//go:noescape
func sequenceDecs_decode_56_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int

// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm with BMI2 extensions.
//
//go:noescape
func sequenceDecs_decode_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int

// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm with BMI2 extensions.
//
//go:noescape
func sequenceDecs_decode_56_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
// decode decodes sequences from the stream into seqs without executing
// them (no history needed), dispatching to the fastest available
// assembly routine for the current CPU.
func (s *sequenceDecs) decode(seqs []seqVals) error {
	br := s.br
	maxBlockSize := maxCompressedBlockSize
	if s.windowSize < maxBlockSize {
		maxBlockSize = s.windowSize
	}
	// Marshal decoder state into the context struct the asm expects.
	ctx := decodeAsmContext{
		llTable:   s.litLengths.fse.dt[:maxTablesize],
		mlTable:   s.matchLengths.fse.dt[:maxTablesize],
		ofTable:   s.offsets.fse.dt[:maxTablesize],
		llState:   uint64(s.litLengths.state.state),
		mlState:   uint64(s.matchLengths.state.state),
		ofState:   uint64(s.offsets.state.state),
		seqs:      seqs,
		iteration: len(seqs) - 1,
		litRemain: len(s.literals),
	}
	if debugDecoder {
		println("decode: decoding", len(seqs), "sequences", br.remain(), "bits remain on stream")
	}
	s.seqSize = 0
	// The 56-bit variants can refill once per sequence; usable when one
	// step's state+extra bits fit in 56 bits.
	lte56bits := s.maxBits+s.offsets.fse.actualTableLog+s.matchLengths.fse.actualTableLog+s.litLengths.fse.actualTableLog <= 56
	var errCode int
	if cpuinfo.HasBMI2() {
		if lte56bits {
			errCode = sequenceDecs_decode_56_bmi2(s, br, &ctx)
		} else {
			errCode = sequenceDecs_decode_bmi2(s, br, &ctx)
		}
	} else {
		if lte56bits {
			errCode = sequenceDecs_decode_56_amd64(s, br, &ctx)
		} else {
			errCode = sequenceDecs_decode_amd64(s, br, &ctx)
		}
	}
	if errCode != 0 {
		// i is the index of the sequence that caused the failure.
		i := len(seqs) - ctx.iteration - 1
		switch errCode {
		case errorMatchLenOfsMismatch:
			ml := ctx.seqs[i].ml
			return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml)
		case errorMatchLenTooBig:
			ml := ctx.seqs[i].ml
			return fmt.Errorf("match len (%d) bigger than max allowed length", ml)
		case errorNotEnoughLiterals:
			ll := ctx.seqs[i].ll
			return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, ctx.litRemain+ll)
		case errorOverread:
			return io.ErrUnexpectedEOF
		}
		return fmt.Errorf("sequenceDecs_decode_amd64 returned erroneous code %d", errCode)
	}
	if ctx.litRemain < 0 {
		return fmt.Errorf("literal count is too big: total available %d, total requested %d",
			len(s.literals), len(s.literals)-ctx.litRemain)
	}
	s.seqSize += ctx.litRemain
	if s.seqSize > maxBlockSize {
		return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
	}
	if debugDecoder {
		println("decode: ", br.remain(), "bits remain on stream. code:", errCode)
	}
	err := br.close()
	if err != nil {
		printf("Closing sequences: %v, %+v\n", err, *br)
	}
	return err
}
// --------------------------------------------------------------------------------

// executeAsmContext is the parameter block for the
// sequenceDecs_executeSimple_* assembly routines. Field layout must
// match the offsets expected by the generated assembly — do not reorder.
type executeAsmContext struct {
	seqs        []seqVals // decoded sequences to execute
	seqIndex    int       // index of the sequence being executed (set on failure)
	out         []byte    // output buffer
	history     []byte    // history window for back-references
	literals    []byte    // literal source buffer
	outPosition int       // write position in out
	litPosition int       // read position in literals
	windowSize  int       // maximum back-reference distance
}
// sequenceDecs_executeSimple_amd64 implements the main loop of sequenceDecs.executeSimple in x86 asm.
//
// Returns false if a match offset is too big.
//
// Please refer to seqdec_generic.go for the reference implementation.
//
//go:noescape
func sequenceDecs_executeSimple_amd64(ctx *executeAsmContext) bool

// Same as above, but with safe memcopies (never reads/writes past buffer ends).
//
//go:noescape
func sequenceDecs_executeSimple_safe_amd64(ctx *executeAsmContext) bool
// executeSimple handles cases when dictionary is not used.
// It executes all decoded sequences into s.out via the assembly routine.
func (s *sequenceDecs) executeSimple(seqs []seqVals, hist []byte) error {
	// Ensure we have enough output size...
	// Grow-then-retruncate extends capacity without changing length.
	if len(s.out)+s.seqSize+compressedBlockOverAlloc > cap(s.out) {
		addBytes := s.seqSize + len(s.out) + compressedBlockOverAlloc
		s.out = append(s.out, make([]byte, addBytes)...)
		s.out = s.out[:len(s.out)-addBytes]
	}
	if debugDecoder {
		printf("Execute %d seqs with literals: %d into %d bytes\n", len(seqs), len(s.literals), s.seqSize)
	}
	// t is the write position within out.
	var t = len(s.out)
	out := s.out[:t+s.seqSize]
	ctx := executeAsmContext{
		seqs:        seqs,
		seqIndex:    0,
		out:         out,
		history:     hist,
		outPosition: t,
		litPosition: 0,
		literals:    s.literals,
		windowSize:  s.windowSize,
	}
	// The unsafe variant may copy past slice ends; only use it when the
	// literals buffer has the required over-allocation.
	var ok bool
	if cap(s.literals) < len(s.literals)+compressedBlockOverAlloc {
		ok = sequenceDecs_executeSimple_safe_amd64(&ctx)
	} else {
		ok = sequenceDecs_executeSimple_amd64(&ctx)
	}
	if !ok {
		return fmt.Errorf("match offset (%d) bigger than current history (%d)",
			seqs[ctx.seqIndex].mo, ctx.outPosition+len(hist))
	}
	s.literals = s.literals[ctx.litPosition:]
	t = ctx.outPosition
	// Add final literals
	copy(out[t:], s.literals)
	if debugDecoder {
		t += len(s.literals)
		if t != len(out) {
			panic(fmt.Errorf("length mismatch, want %d, got %d, ss: %d", len(out), t, s.seqSize))
		}
	}
	s.out = out
	return nil
}
package zstd
import (
"bytes"
"encoding/csv"
"fmt"
"io"
"os"
"reflect"
"strconv"
"testing"
"time"
"github.com/klauspost/compress/zip"
)
// TestSequenceDecsAdjustOffset exercises sequenceDecs.adjustOffset with
// real offsets, repeat offsets (including the litLen==0 shift) and the
// corrupted-input (offset 3 with prevOffset[0]==1) case.
func TestSequenceDecsAdjustOffset(t *testing.T) {
	type result struct {
		offset     int
		prevOffset [3]int
	}
	type testcase struct {
		offset     int
		litLen     int
		offsetB    uint8
		prevOffset [3]int
		res        result
	}
	cases := []testcase{
		{
			// offsetB > 1: real offset, pushed onto history.
			offset: 444, litLen: 0, offsetB: 42,
			prevOffset: [3]int{111, 222, 333},
			res:        result{offset: 444, prevOffset: [3]int{444, 111, 222}},
		},
		{
			// offset 0: reuse most recent offset, history unchanged.
			offset: 0, litLen: 1, offsetB: 0,
			prevOffset: [3]int{111, 222, 333},
			res:        result{offset: 111, prevOffset: [3]int{111, 222, 333}},
		},
		{
			// offset -1 with litLen 0 is shifted to 0.
			offset: -1, litLen: 0, offsetB: 0,
			prevOffset: [3]int{111, 222, 333},
			res:        result{offset: 111, prevOffset: [3]int{111, 222, 333}},
		},
		{
			// offset 1: second repeat offset, swapped to front.
			offset: 1, litLen: 1, offsetB: 0,
			prevOffset: [3]int{111, 222, 333},
			res:        result{offset: 222, prevOffset: [3]int{222, 111, 333}},
		},
		{
			// offset 2: third repeat offset moves to front.
			offset: 2, litLen: 1, offsetB: 0,
			prevOffset: [3]int{111, 222, 333},
			res:        result{offset: 333, prevOffset: [3]int{333, 111, 222}},
		},
		{
			// offset 3: prevOffset[0] - 1.
			offset: 3, litLen: 1, offsetB: 0,
			prevOffset: [3]int{111, 222, 333},
			res:        result{offset: 110, prevOffset: [3]int{110, 111, 222}},
		},
		{
			// offset 3 with prevOffset[0]==1 would compute 0; forced to 1.
			offset: 3, litLen: 1, offsetB: 0,
			prevOffset: [3]int{1, 222, 333},
			res:        result{offset: 1, prevOffset: [3]int{1, 1, 222}},
		},
	}
	for i, tc := range cases {
		// given
		var sd sequenceDecs
		sd.prevOffset = tc.prevOffset
		// when
		got := sd.adjustOffset(tc.offset, tc.litLen, tc.offsetB)
		// then
		if got != tc.res.offset {
			t.Logf("result: %d", got)
			t.Logf("expected: %d", tc.res.offset)
			t.Errorf("testcase #%d: wrong function result", i)
		}
		if sd.prevOffset != tc.res.prevOffset {
			t.Logf("result: %v", sd.prevOffset)
			t.Logf("expected: %v", tc.res.prevOffset)
			t.Errorf("testcase #%d: sd.prevOffset got wrongly updated", i)
		}
	}
}
// testSequence holds the parameters encoded in a test data file name:
// sequence count, literal count, initial repeat offsets and window size.
type testSequence struct {
	n, lits, win int
	prevOffsets  [3]int
}
// parse extracts the sequence parameters from the test file name fn.
// It reports false (and prints a diagnostic) when the name does not
// match the expected "n-..-lits-..-prev-..-..-..-win-...blk" pattern.
func (s *testSequence) parse(fn string) (ok bool) {
	const pattern = "n-%d-lits-%d-prev-%d-%d-%d-win-%d.blk"
	n, err := fmt.Sscanf(fn, pattern, &s.n, &s.lits, &s.prevOffsets[0], &s.prevOffsets[1], &s.prevOffsets[2], &s.win)
	if err != nil || n != 6 {
		fmt.Println("Unable to parse:", err, n)
		return false
	}
	return true
}
// readDecoders constructs a sequenceDecs from serialized FSE decoder
// tables in buf (written in lit/match/offset order), using the counts,
// offsets and window size from ref. The bit reader is left uninitialized;
// callers init it with the remaining buf contents.
func readDecoders(tb testing.TB, buf *bytes.Buffer, ref testSequence) sequenceDecs {
	s := sequenceDecs{
		litLengths:   sequenceDec{fse: &fseDecoder{}},
		offsets:      sequenceDec{fse: &fseDecoder{}},
		matchLengths: sequenceDec{fse: &fseDecoder{}},
		prevOffset:   ref.prevOffsets,
		dict:         nil,
		literals:     make([]byte, ref.lits, ref.lits+compressedBlockOverAlloc),
		out:          nil,
		nSeqs:        ref.n,
		br:           nil,
		seqSize:      0,
		windowSize:   ref.win,
		maxBits:      0,
	}
	// Tables are serialized in this fixed order.
	s.litLengths.fse.mustReadFrom(buf)
	s.matchLengths.fse.mustReadFrom(buf)
	s.offsets.fse.mustReadFrom(buf)
	s.maxBits = s.litLengths.fse.maxBits + s.offsets.fse.maxBits + s.matchLengths.fse.maxBits
	s.br = &bitReader{}
	return s
}
// Test_seqdec_decode_regression decodes every archived regression stream
// and verifies that decoding completes without error.
func Test_seqdec_decode_regression(t *testing.T) {
	zr := testCreateZipReader("testdata/decode-regression.zip", t)
	for _, tt := range zr.File {
		t.Run(tt.Name, func(t *testing.T) {
			f, err := tt.Open()
			if err != nil {
				t.Error(err)
				return
			}
			defer f.Close()
			// Note: make sure we create stream reader
			dec, err := NewReader(f, WithDecoderConcurrency(4))
			if err != nil {
				t.Error(err)
				return
			}
			// Release the decoder's goroutines when done.
			defer dec.Close()
			// Drain the stream so decoding actually runs. The previous
			// io.ReadFull(dec, nil) returned immediately without reading
			// a single byte, so the regression inputs were never decoded.
			if _, err := io.Copy(io.Discard, dec); err != nil {
				t.Error(err)
				return
			}
		})
	}
}
// Test_seqdec_decoder decodes all archived sequence streams and compares
// the decoded sequences and final repeat offsets against golden CSV data
// in seqs-want.zip. Flip writeWant to true to regenerate the golden file
// instead of comparing.
func Test_seqdec_decoder(t *testing.T) {
	const writeWant = false
	var buf bytes.Buffer
	zw := zip.NewWriter(&buf)
	want := map[string][]seqVals{}
	var wantOffsets = map[string][3]int{}
	if !writeWant {
		// Load the golden data: first CSV record holds the expected
		// prevOffsets, the rest are mo/ml/ll triples per sequence.
		zr := testCreateZipReader("testdata/seqs-want.zip", t)
		tb := t
		for _, tt := range zr.File {
			var ref testSequence
			if !ref.parse(tt.Name) {
				tb.Skip("unable to parse:", tt.Name)
			}
			o, err := tt.Open()
			if err != nil {
				t.Fatal(err)
			}
			r := csv.NewReader(o)
			recs, err := r.ReadAll()
			if err != nil {
				t.Fatal(err)
			}
			for i, rec := range recs {
				if i == 0 {
					var o [3]int
					o[0], _ = strconv.Atoi(rec[0])
					o[1], _ = strconv.Atoi(rec[1])
					o[2], _ = strconv.Atoi(rec[2])
					wantOffsets[tt.Name] = o
					continue
				}
				s := seqVals{}
				s.mo, _ = strconv.Atoi(rec[0])
				s.ml, _ = strconv.Atoi(rec[1])
				s.ll, _ = strconv.Atoi(rec[2])
				want[tt.Name] = append(want[tt.Name], s)
			}
			o.Close()
		}
	}
	zr := testCreateZipReader("testdata/seqs.zip", t)
	tb := t
	for _, tt := range zr.File {
		var ref testSequence
		if !ref.parse(tt.Name) {
			tb.Skip("unable to parse:", tt.Name)
		}
		r, err := tt.Open()
		if err != nil {
			tb.Error(err)
			return
		}
		seqData, err := io.ReadAll(r)
		if err != nil {
			tb.Error(err)
			return
		}
		var buf = bytes.NewBuffer(seqData)
		s := readDecoders(tb, buf, ref)
		seqs := make([]seqVals, ref.n)
		t.Run(tt.Name, func(t *testing.T) {
			fatalIf := func(err error) {
				if err != nil {
					t.Fatal(err)
				}
			}
			fatalIf(s.br.init(buf.Bytes()))
			fatalIf(s.litLengths.init(s.br))
			fatalIf(s.offsets.init(s.br))
			fatalIf(s.matchLengths.init(s.br))
			err := s.decode(seqs)
			if err != nil {
				t.Error(err)
			}
			if writeWant {
				// Regenerate golden data: offsets line first, then one
				// CSV row per sequence.
				w, err := zw.Create(tt.Name)
				fatalIf(err)
				c := csv.NewWriter(w)
				w.Write([]byte(fmt.Sprintf("%d,%d,%d\n", s.prevOffset[0], s.prevOffset[1], s.prevOffset[2])))
				for _, seq := range seqs {
					c.Write([]string{strconv.Itoa(seq.mo), strconv.Itoa(seq.ml), strconv.Itoa(seq.ll)})
				}
				c.Flush()
			} else {
				if s.prevOffset != wantOffsets[tt.Name] {
					t.Errorf("want offsets %v, got %v", wantOffsets[tt.Name], s.prevOffset)
				}
				if !reflect.DeepEqual(want[tt.Name], seqs) {
					t.Errorf("got %v\nwant %v", seqs, want[tt.Name])
				}
			}
		})
	}
	if writeWant {
		zw.Close()
		os.WriteFile("testdata/seqs-want.zip", buf.Bytes(), os.ModePerm)
	}
}
// Test_seqdec_execute decodes all archived sequence streams, executes the
// sequences against a zeroed history window, and checks the produced
// output length matches the computed seqSize.
func Test_seqdec_execute(t *testing.T) {
	zr := testCreateZipReader("testdata/seqs.zip", t)
	tb := t
	for _, tt := range zr.File {
		var ref testSequence
		if !ref.parse(tt.Name) {
			tb.Skip("unable to parse:", tt.Name)
		}
		r, err := tt.Open()
		if err != nil {
			tb.Error(err)
			return
		}
		seqData, err := io.ReadAll(r)
		if err != nil {
			tb.Error(err)
			return
		}
		var buf = bytes.NewBuffer(seqData)
		s := readDecoders(tb, buf, ref)
		seqs := make([]seqVals, ref.n)
		fatalIf := func(err error) {
			if err != nil {
				tb.Fatal(err)
			}
		}
		// Decode once outside the subtest; execute consumes s.literals,
		// so the original slice is saved and restored inside.
		fatalIf(s.br.init(buf.Bytes()))
		fatalIf(s.litLengths.init(s.br))
		fatalIf(s.offsets.init(s.br))
		fatalIf(s.matchLengths.init(s.br))
		fatalIf(s.decode(seqs))
		hist := make([]byte, ref.win)
		lits := s.literals
		t.Run(tt.Name, func(t *testing.T) {
			s.literals = lits
			if len(s.out) > 0 {
				s.out = s.out[:0]
			}
			err := s.execute(seqs, hist)
			if err != nil {
				t.Fatal(err)
			}
			if len(s.out) != s.seqSize {
				t.Errorf("want %d != got %d", s.seqSize, len(s.out))
			}
		})
	}
}
// Test_seqdec_decodeSync runs the combined decode+execute path over all
// archived sequence test cases with a zeroed history window.
func Test_seqdec_decodeSync(t *testing.T) {
	zr := testCreateZipReader("testdata/seqs.zip", t)
	for _, file := range zr.File {
		var ref testSequence
		if !ref.parse(file.Name) {
			t.Skip("unable to parse:", file.Name)
		}
		rc, err := file.Open()
		if err != nil {
			t.Error(err)
			return
		}
		seqData, err := io.ReadAll(rc)
		if err != nil {
			t.Error(err)
			return
		}
		buf := bytes.NewBuffer(seqData)
		s := readDecoders(t, buf, ref)
		// decodeSync consumes s.literals; keep the original slice so the
		// subtest can restore it.
		lits := s.literals
		hist := make([]byte, ref.win)
		t.Run(file.Name, func(t *testing.T) {
			requireNoErr := func(err error) {
				if err != nil {
					t.Fatal(err)
				}
			}
			requireNoErr(s.br.init(buf.Bytes()))
			requireNoErr(s.litLengths.init(s.br))
			requireNoErr(s.offsets.init(s.br))
			requireNoErr(s.matchLengths.init(s.br))
			s.literals = lits
			if len(s.out) > 0 {
				s.out = s.out[:0]
			}
			if err := s.decodeSync(hist); err != nil {
				t.Fatal(err)
			}
		})
	}
}
// Benchmark_seqdec_decode measures raw sequence decoding throughput.
func Benchmark_seqdec_decode(b *testing.B) {
	benchmark_seqdec_decode(b)
}
// benchmark_seqdec_decode benchmarks sequence decoding (without execute)
// for each archived test case, reporting sequences/second and the
// compressed bits consumed per sequence.
func benchmark_seqdec_decode(b *testing.B) {
	zr := testCreateZipReader("testdata/seqs.zip", b)
	tb := b
	for _, tt := range zr.File {
		var ref testSequence
		if !ref.parse(tt.Name) {
			tb.Skip("unable to parse:", tt.Name)
		}
		r, err := tt.Open()
		if err != nil {
			tb.Error(err)
			return
		}
		seqData, err := io.ReadAll(r)
		if err != nil {
			tb.Error(err)
			return
		}
		var buf = bytes.NewBuffer(seqData)
		s := readDecoders(tb, buf, ref)
		seqs := make([]seqVals, ref.n)
		b.Run(tt.Name, func(b *testing.B) {
			fatalIf := func(err error) {
				if err != nil {
					b.Fatal(err)
				}
			}
			b.ReportAllocs()
			b.ResetTimer()
			t := time.Now()
			decoded := 0
			remain := uint(0)
			for i := 0; i < b.N; i++ {
				// Re-init the readers each iteration; decode consumes them.
				fatalIf(s.br.init(buf.Bytes()))
				fatalIf(s.litLengths.init(s.br))
				fatalIf(s.offsets.init(s.br))
				fatalIf(s.matchLengths.init(s.br))
				remain = s.br.remain()
				err := s.decode(seqs)
				if err != nil {
					b.Fatal(err)
				}
				decoded += ref.n
			}
			b.ReportMetric(float64(decoded)/time.Since(t).Seconds(), "seq/s")
			b.ReportMetric(float64(remain)/float64(s.nSeqs), "b/seq")
		})
	}
}
// Benchmark_seqdec_execute benchmarks executing pre-decoded sequences
// for each archived test case, reporting sequences/second.
func Benchmark_seqdec_execute(b *testing.B) {
	zr := testCreateZipReader("testdata/seqs.zip", b)
	tb := b
	for _, tt := range zr.File {
		var ref testSequence
		if !ref.parse(tt.Name) {
			tb.Skip("unable to parse:", tt.Name)
		}
		r, err := tt.Open()
		if err != nil {
			tb.Error(err)
			return
		}
		seqData, err := io.ReadAll(r)
		if err != nil {
			tb.Error(err)
			return
		}
		var buf = bytes.NewBuffer(seqData)
		s := readDecoders(tb, buf, ref)
		seqs := make([]seqVals, ref.n)
		fatalIf := func(err error) {
			if err != nil {
				b.Fatal(err)
			}
		}
		// Decode once up front; the loop only measures execute.
		fatalIf(s.br.init(buf.Bytes()))
		fatalIf(s.litLengths.init(s.br))
		fatalIf(s.offsets.init(s.br))
		fatalIf(s.matchLengths.init(s.br))
		fatalIf(s.decode(seqs))
		hist := make([]byte, ref.win)
		lits := s.literals
		b.Run(tt.Name, func(b *testing.B) {
			b.ReportAllocs()
			b.SetBytes(int64(s.seqSize))
			b.ResetTimer()
			t := time.Now()
			decoded := 0
			for i := 0; i < b.N; i++ {
				// Restore consumed state before each execute.
				s.literals = lits
				if len(s.out) > 0 {
					s.out = s.out[:0]
				}
				fatalIf(s.execute(seqs, hist))
				decoded += ref.n
			}
			b.ReportMetric(float64(decoded)/time.Since(t).Seconds(), "seq/s")
		})
	}
}
// Benchmark_seqdec_decodeSync benchmarks the combined decode+execute
// path for each archived test case, reporting sequences/second.
func Benchmark_seqdec_decodeSync(b *testing.B) {
	zr := testCreateZipReader("testdata/seqs.zip", b)
	tb := b
	for _, tt := range zr.File {
		var ref testSequence
		if !ref.parse(tt.Name) {
			tb.Skip("unable to parse:", tt.Name)
		}
		r, err := tt.Open()
		if err != nil {
			tb.Error(err)
			return
		}
		seqData, err := io.ReadAll(r)
		if err != nil {
			tb.Error(err)
			return
		}
		var buf = bytes.NewBuffer(seqData)
		s := readDecoders(tb, buf, ref)
		lits := s.literals
		hist := make([]byte, ref.win)
		b.Run(tt.Name, func(b *testing.B) {
			fatalIf := func(err error) {
				if err != nil {
					b.Fatal(err)
				}
			}
			decoded := 0
			b.ReportAllocs()
			b.ResetTimer()
			t := time.Now()
			for i := 0; i < b.N; i++ {
				// Re-init readers and restore consumed state each iteration.
				fatalIf(s.br.init(buf.Bytes()))
				fatalIf(s.litLengths.init(s.br))
				fatalIf(s.offsets.init(s.br))
				fatalIf(s.matchLengths.init(s.br))
				s.literals = lits
				if len(s.out) > 0 {
					s.out = s.out[:0]
				}
				err := s.decodeSync(hist)
				if err != nil {
					b.Fatal(err)
				}
				b.SetBytes(int64(len(s.out)))
				decoded += ref.n
			}
			b.ReportMetric(float64(decoded)/time.Since(t).Seconds(), "seq/s")
		})
	}
}
func testCreateZipReader(path string, tb testing.TB) *zip.Reader {
failOnError := func(err error) {
if err != nil {
tb.Fatal(err)
}
}
data, err := os.ReadFile(path)
failOnError(err)
zr, err := zip.NewReader(bytes.NewReader(data), int64(len(data)))
failOnError(err)
return zr
}
// Copyright 2019+ Klaus Post. All rights reserved.
// License information can be found in the LICENSE file.
// Based on work by Yann Collet, released under BSD License.
package zstd
import "math/bits"
// seqCoders holds the FSE encoders for the three sequence streams
// (literal lengths, offsets, match lengths), plus the encoders used for
// the previous block so their tables can be reused across blocks.
type seqCoders struct {
	llEnc, ofEnc, mlEnc    *fseEncoder // encoders for the current block
	llPrev, ofPrev, mlPrev *fseEncoder // encoders used for the previous block
}
// swap coders with another (block).
func (s *seqCoders) swap(other *seqCoders) {
	tmp := *s
	*s = *other
	*other = tmp
}
// setPrev will update the previous encoders to the actually used ones
// and make sure a fresh one is in the main slot.
func (s *seqCoders) setPrev(ll, ml, of *fseEncoder) {
	promote := func(used *fseEncoder, current, prev **fseEncoder) {
		switch used {
		case *current:
			// The freshly built encoder was used: move it into the history
			// slot and recycle the old history encoder as the new current.
			*current, *prev = *prev, *current
			(*current).reUsed = false
			(*prev).reUsed = true
		case *prev:
			// History was reused unchanged; nothing to do.
		default:
			// Neither encoder was used: invalidate the history table so it
			// cannot be reused by accident.
			(*prev).symbolLen = 0
		}
	}
	promote(ll, &s.llEnc, &s.llPrev)
	promote(ml, &s.mlEnc, &s.mlPrev)
	promote(of, &s.ofEnc, &s.ofPrev)
}
// highBit returns the index of the highest set bit in val.
// val is expected to be non-zero; for val == 0 the result wraps to 2^32-1.
func highBit(val uint32) (n uint32) {
	return uint32(31 - bits.LeadingZeros32(val))
}
// llCodeTable maps a literal length of 0-63 directly to its literal length
// code. Lengths above 63 are handled by llCode from the highest set bit.
var llCodeTable = [64]byte{0, 1, 2, 3, 4, 5, 6, 7,
	8, 9, 10, 11, 12, 13, 14, 15,
	16, 16, 17, 17, 18, 18, 19, 19,
	20, 20, 20, 20, 21, 21, 21, 21,
	22, 22, 22, 22, 22, 22, 22, 22,
	23, 23, 23, 23, 23, 23, 23, 23,
	24, 24, 24, 24, 24, 24, 24, 24,
	24, 24, 24, 24, 24, 24, 24, 24}

// maxLLCode is the highest literal length code. Up to 6 bits.
const maxLLCode = 35

// llBitsTable translates from ll code to number of bits.
var llBitsTable = [maxLLCode + 1]byte{
	0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
	1, 1, 1, 1, 2, 2, 3, 3,
	4, 6, 7, 8, 9, 10, 11, 12,
	13, 14, 15, 16}
// llCode returns the code that represents the literal length requested.
func llCode(litLength uint32) uint8 {
const llDeltaCode = 19
if litLength <= 63 {
return llCodeTable[litLength&63]
}
return uint8(highBit(litLength)) + llDeltaCode
}
// mlCodeTable maps a match length base (matchLength - MINMATCH) of 0-127
// directly to its match length code. Larger values are handled by mlCode
// from the highest set bit.
var mlCodeTable = [128]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
	16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
	32, 32, 33, 33, 34, 34, 35, 35, 36, 36, 36, 36, 37, 37, 37, 37,
	38, 38, 38, 38, 38, 38, 38, 38, 39, 39, 39, 39, 39, 39, 39, 39,
	40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40,
	41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41,
	42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42,
	42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42}

// maxMLCode is the highest match length code. Up to 6 bits.
const maxMLCode = 52

// mlBitsTable translates from ml code to number of bits.
var mlBitsTable = [maxMLCode + 1]byte{
	0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
	1, 1, 1, 1, 2, 2, 3, 3,
	4, 4, 5, 7, 8, 9, 10, 11,
	12, 13, 14, 15, 16}
// note : mlBase = matchLength - MINMATCH;
// because it's the format it's stored in seqStore->sequences
func mlCode(mlBase uint32) uint8 {
const mlDeltaCode = 36
if mlBase <= 127 {
return mlCodeTable[mlBase&127]
}
return uint8(highBit(mlBase)) + mlDeltaCode
}
// ofCode returns the offset code for the given offset value.
func ofCode(offset uint32) uint8 {
	// A valid offset will always be > 0, so Len32 is at least 1.
	return uint8(bits.Len32(offset)) - 1
}
// Copyright 2019+ Klaus Post. All rights reserved.
// License information can be found in the LICENSE file.
// Based on work by Yann Collet, released under BSD License.
package zstd
import (
"encoding/binary"
"errors"
"hash/crc32"
"io"
"github.com/klauspost/compress/huff0"
snappy "github.com/klauspost/compress/internal/snapref"
)
// Snappy block element tags, stored in the two low bits of the first byte
// of each element of the raw Snappy format.
const (
	snappyTagLiteral = 0x00
	snappyTagCopy1   = 0x01
	snappyTagCopy2   = 0x02
	snappyTagCopy4   = 0x03
)

const (
	// snappyChecksumSize is the size of the CRC prefix of each framed chunk.
	snappyChecksumSize = 4
	// snappyMagicBody is the payload of the stream identifier chunk.
	snappyMagicBody = "sNaPpY"

	// snappyMaxBlockSize is the maximum size of the input to encodeBlock. It is not
	// part of the wire format per se, but some parts of the encoder assume
	// that an offset fits into a uint16.
	//
	// Also, for the framing format (Writer type instead of Encode function),
	// https://github.com/google/snappy/blob/master/framing_format.txt says
	// that "the uncompressed data in a chunk must be no longer than 65536
	// bytes".
	snappyMaxBlockSize = 65536

	// snappyMaxEncodedLenOfMaxBlockSize equals MaxEncodedLen(snappyMaxBlockSize), but is
	// hard coded to be a const instead of a variable, so that obufLen can also
	// be a const. Their equivalence is confirmed by
	// TestMaxEncodedLenOfMaxBlockSize.
	snappyMaxEncodedLenOfMaxBlockSize = 76490
)

// Chunk types of the Snappy framing format, as defined in section 4 of
// https://github.com/google/snappy/blob/master/framing_format.txt
const (
	chunkTypeCompressedData   = 0x00
	chunkTypeUncompressedData = 0x01
	chunkTypePadding          = 0xfe
	chunkTypeStreamIdentifier = 0xff
)

var (
	// ErrSnappyCorrupt reports that the input is invalid.
	ErrSnappyCorrupt = errors.New("snappy: corrupt input")
	// ErrSnappyTooLarge reports that the uncompressed length is too large.
	ErrSnappyTooLarge = errors.New("snappy: decoded block is too large")
	// ErrSnappyUnsupported reports that the input isn't supported.
	ErrSnappyUnsupported = errors.New("snappy: unsupported input")

	// errUnsupportedLiteralLength reports a literal run whose length cannot
	// be represented.
	errUnsupportedLiteralLength = errors.New("snappy: unsupported literal length")
)
// SnappyConverter can read SnappyConverter-compressed streams and convert them to zstd.
// Conversion is done by converting the stream directly from Snappy without intermediate
// full decoding.
// Therefore the compression ratio is much less than what can be done by a full decompression
// and compression, and a faulty Snappy stream may lead to a faulty Zstandard stream without
// any errors being generated.
// No CRC value is being generated and not all CRC values of the Snappy stream are checked.
// However, it provides really fast recompression of Snappy streams.
// The converter can be reused to avoid allocations, even after errors.
type SnappyConverter struct {
	r     io.Reader // input stream currently being converted
	err   error     // sticky error from the last read/convert operation
	buf   []byte    // scratch buffer sized for the largest possible chunk
	block *blockEnc // reusable zstd block encoder
}
// Convert the Snappy stream supplied in 'in' and write the zStandard stream to 'w'.
// If any error is detected on the Snappy stream it is returned.
// The number of bytes written is returned.
func (r *SnappyConverter) Convert(in io.Reader, w io.Writer) (int64, error) {
	initPredefined()
	r.err = nil
	r.r = in
	// Lazily allocate and reuse the block encoder and scratch buffer.
	if r.block == nil {
		r.block = &blockEnc{}
		r.block.init()
	}
	r.block.initNewEncode()
	if len(r.buf) != snappyMaxEncodedLenOfMaxBlockSize+snappyChecksumSize {
		r.buf = make([]byte, snappyMaxEncodedLenOfMaxBlockSize+snappyChecksumSize)
	}
	r.block.litEnc.Reuse = huff0.ReusePolicyNone
	var written int64
	var readHeader bool
	{
		// Emit the zstd frame header before converting any chunks.
		header := frameHeader{WindowSize: snappyMaxBlockSize}.appendTo(r.buf[:0])

		var n int
		n, r.err = w.Write(header)
		if r.err != nil {
			return written, r.err
		}
		written += int64(n)
	}
	for {
		// Each iteration converts one framed Snappy chunk.
		// The 4-byte chunk header holds the type and a 24-bit length.
		if !r.readFull(r.buf[:4], true) {
			// Clean EOF: emit an empty last block to terminate the frame.
			// Add empty last block
			r.block.reset(nil)
			r.block.last = true
			err := r.block.encodeLits(r.block.literals, false)
			if err != nil {
				return written, err
			}
			n, err := w.Write(r.block.output)
			if err != nil {
				return written, err
			}
			written += int64(n)

			return written, r.err
		}
		chunkType := r.buf[0]
		if !readHeader {
			// A valid framed stream must begin with a stream identifier chunk.
			if chunkType != chunkTypeStreamIdentifier {
				println("chunkType != chunkTypeStreamIdentifier", chunkType)
				r.err = ErrSnappyCorrupt
				return written, r.err
			}
			readHeader = true
		}
		chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16
		if chunkLen > len(r.buf) {
			// NOTE(review): this debug line logs chunkType although chunkLen
			// triggered the branch — confirm whether chunkLen was intended.
			println("chunkLen > len(r.buf)", chunkType)
			r.err = ErrSnappyUnsupported
			return written, r.err
		}

		// The chunk types are specified at
		// https://github.com/google/snappy/blob/master/framing_format.txt
		switch chunkType {
		case chunkTypeCompressedData:
			// Section 4.2. Compressed data (chunk type 0x00).
			if chunkLen < snappyChecksumSize {
				println("chunkLen < snappyChecksumSize", chunkLen, snappyChecksumSize)
				r.err = ErrSnappyCorrupt
				return written, r.err
			}
			buf := r.buf[:chunkLen]
			if !r.readFull(buf, false) {
				return written, r.err
			}
			// The CRC of the decoded data is deliberately not verified here;
			// see the SnappyConverter type comment.
			//checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
			buf = buf[snappyChecksumSize:]

			n, hdr, err := snappyDecodedLen(buf)
			if err != nil {
				r.err = err
				return written, r.err
			}
			buf = buf[hdr:]
			if n > snappyMaxBlockSize {
				println("n > snappyMaxBlockSize", n, snappyMaxBlockSize)
				r.err = ErrSnappyCorrupt
				return written, r.err
			}
			r.block.reset(nil)
			r.block.pushOffsets()
			// Translate the Snappy elements directly into zstd sequences.
			if err := decodeSnappy(r.block, buf); err != nil {
				r.err = err
				return written, r.err
			}
			// Sanity check: converted output must match the declared size.
			if r.block.size+r.block.extraLits != n {
				printf("invalid size, want %d, got %d\n", n, r.block.size+r.block.extraLits)
				r.err = ErrSnappyCorrupt
				return written, r.err
			}
			err = r.block.encode(nil, false, false)
			switch err {
			case errIncompressible:
				// The converted block did not compress; fall back to fully
				// decoding the Snappy chunk and emitting it as raw literals.
				r.block.popOffsets()
				r.block.reset(nil)
				r.block.literals, err = snappy.Decode(r.block.literals[:n], r.buf[snappyChecksumSize:chunkLen])
				if err != nil {
					return written, err
				}
				err = r.block.encodeLits(r.block.literals, false)
				if err != nil {
					return written, err
				}
			case nil:
			default:
				return written, err
			}

			n, r.err = w.Write(r.block.output)
			if r.err != nil {
				return written, r.err
			}
			written += int64(n)
			continue
		case chunkTypeUncompressedData:
			if debugEncoder {
				println("Uncompressed, chunklen", chunkLen)
			}
			// Section 4.3. Uncompressed data (chunk type 0x01).
			if chunkLen < snappyChecksumSize {
				println("chunkLen < snappyChecksumSize", chunkLen, snappyChecksumSize)
				r.err = ErrSnappyCorrupt
				return written, r.err
			}
			r.block.reset(nil)
			buf := r.buf[:snappyChecksumSize]
			if !r.readFull(buf, false) {
				return written, r.err
			}
			checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
			// Read directly into r.decoded instead of via r.buf.
			n := chunkLen - snappyChecksumSize
			if n > snappyMaxBlockSize {
				println("n > snappyMaxBlockSize", n, snappyMaxBlockSize)
				r.err = ErrSnappyCorrupt
				return written, r.err
			}
			r.block.literals = r.block.literals[:n]
			if !r.readFull(r.block.literals, false) {
				return written, r.err
			}
			// Uncompressed chunks are cheap to verify, so the CRC is checked.
			if snappyCRC(r.block.literals) != checksum {
				println("literals crc mismatch")
				r.err = ErrSnappyCorrupt
				return written, r.err
			}
			err := r.block.encodeLits(r.block.literals, false)
			if err != nil {
				return written, err
			}
			n, r.err = w.Write(r.block.output)
			if r.err != nil {
				return written, r.err
			}
			written += int64(n)
			continue
		case chunkTypeStreamIdentifier:
			if debugEncoder {
				println("stream id", chunkLen, len(snappyMagicBody))
			}
			// Section 4.1. Stream identifier (chunk type 0xff).
			if chunkLen != len(snappyMagicBody) {
				println("chunkLen != len(snappyMagicBody)", chunkLen, len(snappyMagicBody))
				r.err = ErrSnappyCorrupt
				return written, r.err
			}
			if !r.readFull(r.buf[:len(snappyMagicBody)], false) {
				return written, r.err
			}
			// Verify the magic body byte-for-byte.
			for i := 0; i < len(snappyMagicBody); i++ {
				if r.buf[i] != snappyMagicBody[i] {
					println("r.buf[i] != snappyMagicBody[i]", r.buf[i], snappyMagicBody[i], i)
					r.err = ErrSnappyCorrupt
					return written, r.err
				}
			}
			continue
		}
		if chunkType <= 0x7f {
			// Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f).
			println("chunkType <= 0x7f")
			r.err = ErrSnappyUnsupported
			return written, r.err
		}
		// Section 4.4 Padding (chunk type 0xfe).
		// Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd).
		if !r.readFull(r.buf[:chunkLen], false) {
			return written, r.err
		}
	}
}
// decodeSnappy writes the decoding of src to dst. It assumes that the varint-encoded
// length of the decompressed bytes has already been read.
// Literal runs are appended to blk.literals, while copies become zstd
// sequences; any trailing literals without a following copy are recorded
// in blk.extraLits.
func decodeSnappy(blk *blockEnc, src []byte) error {
	//decodeRef(make([]byte, snappyMaxBlockSize), src)
	var s, length int
	// Literal bytes accumulated since the last emitted sequence.
	lits := blk.extraLits
	var offset uint32
	for s < len(src) {
		switch src[s] & 0x03 {
		case snappyTagLiteral:
			// Literal run: the upper 6 tag bits encode length-1 directly,
			// or values 60-63 select 1-4 following little-endian length bytes.
			x := uint32(src[s] >> 2)
			switch {
			case x < 60:
				s++
			case x == 60:
				s += 2
				if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
					println("uint(s) > uint(len(src)", s, src)
					return ErrSnappyCorrupt
				}
				x = uint32(src[s-1])
			case x == 61:
				s += 3
				if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
					println("uint(s) > uint(len(src)", s, src)
					return ErrSnappyCorrupt
				}
				x = uint32(src[s-2]) | uint32(src[s-1])<<8
			case x == 62:
				s += 4
				if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
					println("uint(s) > uint(len(src)", s, src)
					return ErrSnappyCorrupt
				}
				x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16
			case x == 63:
				s += 5
				if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
					println("uint(s) > uint(len(src)", s, src)
					return ErrSnappyCorrupt
				}
				x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24
			}
			if x > snappyMaxBlockSize {
				println("x > snappyMaxBlockSize", x, snappyMaxBlockSize)
				return ErrSnappyCorrupt
			}
			length = int(x) + 1
			if length <= 0 {
				println("length <= 0 ", length)
				return errUnsupportedLiteralLength
			}
			//if length > snappyMaxBlockSize-d || uint32(length) > len(src)-s {
			//	return ErrSnappyCorrupt
			//}
			// NOTE(review): the bounds check above is disabled, so a corrupt
			// stream where s+length exceeds len(src) would panic on the next
			// line — confirm callers guard against this.
			blk.literals = append(blk.literals, src[s:s+length]...)
			//println(length, "litLen")
			lits += length
			s += length
			continue
		case snappyTagCopy1:
			// Copy with 1-byte tail: 3 length bits (lengths 4-11) and an
			// 11-bit offset split across the tag byte and one extra byte.
			s += 2
			if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
				println("uint(s) > uint(len(src)", s, len(src))
				return ErrSnappyCorrupt
			}
			length = 4 + int(src[s-2])>>2&0x7
			offset = uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])
		case snappyTagCopy2:
			// Copy with a 2-byte little-endian offset.
			s += 3
			if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
				println("uint(s) > uint(len(src)", s, len(src))
				return ErrSnappyCorrupt
			}
			length = 1 + int(src[s-3])>>2
			offset = uint32(src[s-2]) | uint32(src[s-1])<<8
		case snappyTagCopy4:
			// Copy with a 4-byte little-endian offset.
			s += 5
			if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
				println("uint(s) > uint(len(src)", s, len(src))
				return ErrSnappyCorrupt
			}
			length = 1 + int(src[s-5])>>2
			offset = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24
		}
		// A copy must reference data already produced in this block.
		if offset <= 0 || blk.size+lits < int(offset) /*|| length > len(blk)-d */ {
			println("offset <= 0 || blk.size+lits < int(offset)", offset, blk.size+lits, int(offset), blk.size, lits)
			return ErrSnappyCorrupt
		}
		// Check if offset is one of the recent offsets.
		// Adjusts the output offset accordingly.
		// Gives a tiny bit of compression, typically around 1%.
		if false {
			offset = blk.matchOffset(offset, uint32(lits))
		} else {
			// zstd stores raw offsets +3 so values 1-3 can mean repeat offsets.
			offset += 3
		}

		blk.sequences = append(blk.sequences, seq{
			litLen:   uint32(lits),
			offset:   offset,
			matchLen: uint32(length) - zstdMinMatch,
		})
		blk.size += length + lits
		lits = 0
	}
	blk.extraLits = lits
	return nil
}
// readFull fills p from the underlying reader, storing any error in r.err
// and reporting success. A clean EOF before the first byte is tolerated only
// when allowEOF is true; truncated input is converted to ErrSnappyCorrupt.
func (r *SnappyConverter) readFull(p []byte, allowEOF bool) (ok bool) {
	_, err := io.ReadFull(r.r, p)
	if err == io.ErrUnexpectedEOF || (err == io.EOF && !allowEOF) {
		err = ErrSnappyCorrupt
	}
	r.err = err
	return err == nil
}
var crcTable = crc32.MakeTable(crc32.Castagnoli)
// crc implements the checksum specified in section 3 of
// https://github.com/google/snappy/blob/master/framing_format.txt
func snappyCRC(b []byte) uint32 {
c := crc32.Update(0, crcTable, b)
return c>>15 | c<<17 + 0xa282ead8
}
// snappyDecodedLen returns the length of the decoded block and the number of bytes
// that the length header occupied.
func snappyDecodedLen(src []byte) (blockLen, headerLen int, err error) {
	v, n := binary.Uvarint(src)
	switch {
	case n <= 0, v > 0xffffffff:
		// Missing/overlong varint, or a length that cannot fit in 32 bits.
		return 0, 0, ErrSnappyCorrupt
	}
	// wordSize is 32 or 64 depending on the platform's int width.
	const wordSize = 32 << (^uint(0) >> 32 & 1)
	if wordSize == 32 && v > 0x7fffffff {
		return 0, 0, ErrSnappyTooLarge
	}
	blockLen, headerLen = int(v), n
	return blockLen, headerLen, nil
}
// Copyright 2019+ Klaus Post. All rights reserved.
// License information can be found in the LICENSE file.
package zstd
import (
"errors"
"io"
"sync"
)
// ZipMethodWinZip is the method for Zstandard compressed data inside Zip files for WinZip.
// See https://www.winzip.com/win/en/comp_info.html
const ZipMethodWinZip = 93

// ZipMethodPKWare is the original method number used by PKWARE to indicate Zstandard compression.
// Deprecated: This has been deprecated by PKWARE, use ZipMethodWinZip instead for compression.
// See https://pkware.cachefly.net/webdocs/APPNOTE/APPNOTE-6.3.9.TXT
const ZipMethodPKWare = 20

// zipReaderPool is the default reader pool.
// Decoders are created with single-goroutine decoding, low-memory mode and
// a 128MB maximum window — the same defaults ZipDecompressor applies.
var zipReaderPool = sync.Pool{New: func() interface{} {
	z, err := NewReader(nil, WithDecoderLowmem(true), WithDecoderMaxWindow(128<<20), WithDecoderConcurrency(1))
	if err != nil {
		panic(err)
	}
	return z
}}
// newZipReader creates a pooled zip decompressor.
// With no options the shared default pool is used; custom options get a
// dedicated pool, with low-memory mode and a 128MB window as overridable
// defaults and concurrency forced to 1.
func newZipReader(opts ...DOption) func(r io.Reader) io.ReadCloser {
	pool := &zipReaderPool
	if len(opts) > 0 {
		merged := make([]DOption, 0, len(opts)+3)
		merged = append(merged, WithDecoderLowmem(true), WithDecoderMaxWindow(128<<20))
		merged = append(merged, opts...)
		// Force concurrency 1
		merged = append(merged, WithDecoderConcurrency(1))
		opts = merged
		// Create our own pool
		pool = new(sync.Pool)
	}
	return func(r io.Reader) io.ReadCloser {
		var dec *Decoder
		if cached, ok := pool.Get().(*Decoder); ok {
			dec = cached
			dec.Reset(r)
		} else {
			fresh, err := NewReader(r, opts...)
			if err != nil {
				panic(err)
			}
			dec = fresh
		}
		return &pooledZipReader{dec: dec, pool: pool}
	}
}
// pooledZipReader is an io.ReadCloser that returns its Decoder to a pool
// once the stream is exhausted or closed.
type pooledZipReader struct {
	mu   sync.Mutex // guards Close and Read
	pool *sync.Pool // pool the decoder is returned to
	dec  *Decoder   // nil after close or EOF
}
// Read decompresses into p. On EOF the decoder is detached and returned to
// the pool; any further Read reports an error.
func (r *pooledZipReader) Read(p []byte) (n int, err error) {
	r.mu.Lock()
	defer r.mu.Unlock()
	if r.dec == nil {
		return 0, errors.New("read after close or EOF")
	}
	n, err = r.dec.Read(p)
	if err == io.EOF {
		// Stream exhausted: recycle the decoder.
		r.dec.Reset(nil)
		r.pool.Put(r.dec)
		r.dec = nil
	}
	return n, err
}
// Close releases the decoder back to the pool. It is safe to call more than
// once; later calls return nil.
func (r *pooledZipReader) Close() error {
	r.mu.Lock()
	defer r.mu.Unlock()
	if r.dec == nil {
		return nil
	}
	err := r.dec.Reset(nil)
	r.pool.Put(r.dec)
	r.dec = nil
	return err
}
// pooledZipWriter is an io.WriteCloser that returns its Encoder to a pool
// when closed.
type pooledZipWriter struct {
	mu   sync.Mutex // guards Close and Write
	enc  *Encoder   // nil after close
	pool *sync.Pool // pool the encoder is returned to
}
// Write compresses p through the pooled encoder.
// Writing after Close reports an error.
func (w *pooledZipWriter) Write(p []byte) (n int, err error) {
	w.mu.Lock()
	defer w.mu.Unlock()
	if enc := w.enc; enc != nil {
		return enc.Write(p)
	}
	return 0, errors.New("Write after Close")
}
// Close flushes and closes the encoder, then returns it to the pool.
// It is safe to call more than once; later calls return nil.
func (w *pooledZipWriter) Close() error {
	w.mu.Lock()
	defer w.mu.Unlock()
	if w.enc == nil {
		return nil
	}
	err := w.enc.Close()
	w.pool.Put(w.enc)
	w.enc = nil
	return err
}
// ZipCompressor returns a compressor that can be registered with zip libraries.
// The provided encoder options will be used on all encodes.
// Encoders are pooled and reused across archive entries.
func ZipCompressor(opts ...EOption) func(w io.Writer) (io.WriteCloser, error) {
	var pool sync.Pool
	return func(w io.Writer) (io.WriteCloser, error) {
		var enc *Encoder
		if cached, ok := pool.Get().(*Encoder); ok {
			enc = cached
			enc.Reset(w)
		} else {
			fresh, err := NewWriter(w, opts...)
			if err != nil {
				return nil, err
			}
			enc = fresh
		}
		return &pooledZipWriter{enc: enc, pool: &pool}, nil
	}
}
// ZipDecompressor returns a decompressor that can be registered with zip libraries.
// See ZipCompressor for example.
// Options can be specified. WithDecoderConcurrency(1) is forced,
// and by default a 128MB maximum decompression window is specified.
// The window size can be overridden if required.
func ZipDecompressor(opts ...DOption) func(r io.Reader) io.ReadCloser {
	// Pooling of decoders is handled by newZipReader.
	return newZipReader(opts...)
}
// Package zstd provides decompression of zstandard files.
//
// For advanced usage and examples, go to the README: https://github.com/klauspost/compress/tree/master/zstd#zstd
package zstd
import (
"bytes"
"errors"
"log"
"math"
"github.com/klauspost/compress/internal/le"
)
// enable debug printing (controls println/printf in this package)
const debug = false

// enable encoding debug printing
const debugEncoder = debug

// enable decoding debug printing
const debugDecoder = debug

// Enable extra assertions. Can be enabled independently of debug by
// changing "|| false" to "|| true".
const debugAsserts = debug || false

// print sequence details
const debugSequences = false

// print detailed matching information
const debugMatches = false

// force encoder to use predefined tables.
const forcePreDef = false

// zstdMinMatch is the minimum zstd match length.
const zstdMinMatch = 3

// fcsUnknown is used for unknown frame content size.
const fcsUnknown = math.MaxUint64
// Sentinel errors returned by the zstd package. Compare with errors.Is.
var (
	// ErrReservedBlockType is returned when a reserved block type is found.
	// Typically this indicates wrong or corrupted input.
	ErrReservedBlockType = errors.New("invalid input: reserved block type encountered")

	// ErrCompressedSizeTooBig is returned when a block is bigger than allowed.
	// Typically this indicates wrong or corrupted input.
	ErrCompressedSizeTooBig = errors.New("invalid input: compressed size too big")

	// ErrBlockTooSmall is returned when a block is too small to be decoded.
	// Typically returned on invalid input.
	ErrBlockTooSmall = errors.New("block too small")

	// ErrUnexpectedBlockSize is returned when a block has unexpected size.
	// Typically returned on invalid input.
	ErrUnexpectedBlockSize = errors.New("unexpected block size")

	// ErrMagicMismatch is returned when a "magic" number isn't what is expected.
	// Typically this indicates wrong or corrupted input.
	ErrMagicMismatch = errors.New("invalid input: magic number mismatch")

	// ErrWindowSizeExceeded is returned when a reference exceeds the valid window size.
	// Typically this indicates wrong or corrupted input.
	ErrWindowSizeExceeded = errors.New("window size exceeded")

	// ErrWindowSizeTooSmall is returned when no window size is specified.
	// Typically this indicates wrong or corrupted input.
	ErrWindowSizeTooSmall = errors.New("invalid input: window size was too small")

	// ErrDecoderSizeExceeded is returned if decompressed size exceeds the configured limit.
	ErrDecoderSizeExceeded = errors.New("decompressed size exceeds configured limit")

	// ErrUnknownDictionary is returned if the dictionary ID is unknown.
	ErrUnknownDictionary = errors.New("unknown dictionary")

	// ErrFrameSizeExceeded is returned if the stated frame size is exceeded.
	// This is only returned if SingleSegment is specified on the frame.
	ErrFrameSizeExceeded = errors.New("frame size exceeded")

	// ErrFrameSizeMismatch is returned if the stated frame size does not match the expected size.
	// This is only returned if SingleSegment is specified on the frame.
	ErrFrameSizeMismatch = errors.New("frame size does not match size on stream")

	// ErrCRCMismatch is returned if CRC mismatches.
	ErrCRCMismatch = errors.New("CRC check failed")

	// ErrDecoderClosed will be returned if the Decoder was used after
	// Close has been called.
	ErrDecoderClosed = errors.New("decoder used after Close")

	// ErrEncoderClosed will be returned if the Encoder was used after
	// Close has been called.
	ErrEncoderClosed = errors.New("encoder used after Close")

	// ErrDecoderNilInput is returned when a nil Reader was provided
	// and an operation other than Reset/DecodeAll/Close was attempted.
	ErrDecoderNilInput = errors.New("nil input provided as reader")
)
// println logs via the standard logger, but only when a debug flag is set.
func println(a ...interface{}) {
	if !(debug || debugDecoder || debugEncoder) {
		return
	}
	log.Println(a...)
}
// printf logs a formatted message, but only when a debug flag is set.
func printf(format string, a ...interface{}) {
	if !(debug || debugDecoder || debugEncoder) {
		return
	}
	log.Printf(format, a...)
}
// load3232 loads a uint32 from b at offset i via the internal le helpers
// (package name suggests little-endian byte order — confirm in internal/le).
func load3232(b []byte, i int32) uint32 {
	return le.Load32(b, i)
}

// load6432 loads a uint64 from b at offset i via the internal le helpers.
func load6432(b []byte, i int32) uint64 {
	return le.Load64(b, i)
}
// byter is the subset of *bytes.Buffer behavior needed by this package.
type byter interface {
	Bytes() []byte
	Len() int
}

// Compile-time check that *bytes.Buffer satisfies byter.
var _ byter = &bytes.Buffer{}
// Copyright 2019+ Klaus Post. All rights reserved.
// License information can be found in the LICENSE file.
package zstd
import (
"flag"
"fmt"
"io"
"log"
"os"
"runtime"
"runtime/pprof"
"strings"
"testing"
"time"
)
// isRaceTest reports whether tests run under the race detector.
// NOTE(review): it is not set in this file — presumably assigned in a
// build-tagged sibling file; confirm.
var isRaceTest bool

// Fuzzing tweaks: flags that bound the encoder levels and input size
// exercised by the fuzz tests.
var fuzzStartF = flag.Int("fuzz-start", int(SpeedFastest), "Start fuzzing at this level")
var fuzzEndF = flag.Int("fuzz-end", int(SpeedBestCompression), "End fuzzing at this level (inclusive)")
var fuzzMaxF = flag.Int("fuzz-max", 1<<20, "Maximum input size")
// TestMain runs the test suite and then watches for leaked goroutines:
// after a passing run it waits up to 15 seconds for the goroutine count to
// drop back to the baseline of 2, dumping stacks and failing otherwise.
func TestMain(m *testing.M) {
	flag.Parse()
	ec := m.Run()
	if ec == 0 && runtime.NumGoroutine() > 2 {
		for n := 0; n < 15; n++ {
			time.Sleep(time.Second)
			if runtime.NumGoroutine() == 2 {
				os.Exit(0)
			}
		}
		fmt.Println("goroutines:", runtime.NumGoroutine())
		pprof.Lookup("goroutine").WriteTo(os.Stderr, 1)
		os.Exit(1)
	}
	os.Exit(ec)
}
// TestMatchLen verifies matchLen by comparing two identical buffers with a
// single byte flipped at every possible position.
func TestMatchLen(t *testing.T) {
	a := make([]byte, 130)
	for i := range a {
		a[i] = byte(i)
	}
	b := append([]byte{}, a...)
	expect := func(x, y []byte, want int) {
		if got := matchLen(x, y); got != want {
			t.Error("expected", want, "got", got)
		}
	}
	for l := range a {
		// Flip one byte so the first mismatch is exactly at index l,
		// then restore it for the next iteration.
		a[l] = ^a[l]
		expect(a, b, l)
		expect(a[:l], b, l)
		a[l] = ^a[l]
	}
}
// TestWriterMemUsage reports heap retained by encoders at every compression
// level after repeated EncodeAll calls on a 10MB payload.
func TestWriterMemUsage(t *testing.T) {
	// testMem measures heap usage around fn; the GC/ReadMemStats ordering is
	// the measurement itself — do not reorder.
	testMem := func(t *testing.T, fn func()) {
		var before, after runtime.MemStats
		var w io.Writer
		if false {
			// Flip to true to also capture a heap profile per sub-test.
			f, err := os.Create(strings.ReplaceAll(fmt.Sprintf("%s.pprof", t.Name()), "/", "_"))
			if err != nil {
				log.Fatal(err)
			}
			defer f.Close()
			w = f
			t.Logf("opened memory profile %s", t.Name())
		}
		// GC before each snapshot so only retained memory is counted.
		runtime.GC()
		runtime.ReadMemStats(&before)
		fn()
		runtime.GC()
		runtime.ReadMemStats(&after)
		if w != nil {
			pprof.WriteHeapProfile(w)
		}
		t.Log("wrote profile")
		t.Logf("%s: Memory Used: %dMB, %d allocs", t.Name(), (after.HeapInuse-before.HeapInuse)/1024/1024, after.HeapObjects-before.HeapObjects)
	}
	data := make([]byte, 10<<20)
	t.Run("enc-all-lower", func(t *testing.T) {
		for level := SpeedFastest; level <= SpeedBestCompression; level++ {
			t.Run(fmt.Sprint("level-", level), func(t *testing.T) {
				var zr *Encoder
				var err error
				dst := make([]byte, 0, len(data)*2)
				testMem(t, func() {
					zr, err = NewWriter(io.Discard, WithEncoderConcurrency(32), WithEncoderLevel(level), WithLowerEncoderMem(false), WithWindowSize(1<<20))
					if err != nil {
						t.Fatal(err)
					}
					for i := 0; i < 100; i++ {
						_ = zr.EncodeAll(data, dst[:0])
					}
				})
				zr.Close()
			})
		}
	})
}
// data is the tiny payload written repeatedly by BenchmarkMem.
var data = []byte{1, 2, 3}

// newZstdWriter creates an encoder with the configuration exercised by
// BenchmarkMem.
func newZstdWriter() (*Encoder, error) {
	return NewWriter(
		io.Discard,
		WithEncoderLevel(SpeedBetterCompression),
		WithEncoderConcurrency(16), // we implicitly get this concurrency level if we run on 16 core CPU
		WithLowerEncoderMem(false),
		WithWindowSize(1<<20),
	)
}
// BenchmarkMem measures allocations of the Write/Flush/Close cycle,
// with and without the explicit Flush before Close.
func BenchmarkMem(b *testing.B) {
	// run builds a sub-benchmark that creates a fresh writer per iteration
	// and pushes 16 small payloads through it.
	run := func(withFlush bool) func(b *testing.B) {
		return func(b *testing.B) {
			b.ReportAllocs()
			for i := 0; i < b.N; i++ {
				w, err := newZstdWriter()
				if err != nil {
					b.Fatal(err)
				}
				for j := 0; j < 16; j++ {
					w.Reset(io.Discard)
					if _, err := w.Write(data); err != nil {
						b.Fatal(err)
					}
					if withFlush {
						if err := w.Flush(); err != nil {
							b.Fatal(err)
						}
					}
					if err := w.Close(); err != nil {
						b.Fatal(err)
					}
				}
			}
		}
	}
	b.Run("flush", run(true))
	// Will use encodeAll for block.
	b.Run("no-flush", run(false))
}